/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 2001 Benno Rice
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *	$NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <net/netisr.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#include <machine/altivec.h>
#ifndef __powerpc64__
#include <machine/bat.h>
#endif
#include <machine/cpu.h>
#include <machine/elf.h>
#include <machine/fpu.h>
#include <machine/hid.h>
#include <machine/ifunc.h>
#include <machine/kdb.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/mmuvar.h>
#include <machine/pcb.h>
#include <machine/reg.h>
#include <machine/sigframe.h>
#include <machine/spr.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/ofw_machdep.h>

#include <ddb/ddb.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_subr.h>

int cold = 1;
#ifdef __powerpc64__
int cacheline_size = 128;
#else
int cacheline_size = 32;
#endif
int hw_direct_map = 1;

#ifdef BOOKE
extern vm_paddr_t kernload;
#endif

extern void *ap_pcpu;

struct pcpu __pcpu[MAXCPU] __aligned(PAGE_SIZE);
static char init_kenv[2048];

static struct trapframe frame0;

char		machine[] = "powerpc";
SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "");

static void	cpu_startup(void *);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

SYSCTL_INT(_machdep, CPU_CACHELINE, cacheline_size,
	   CTLFLAG_RD, &cacheline_size, 0, "");

uintptr_t	powerpc_init(vm_offset_t, vm_offset_t, vm_offset_t, void *,
		    uint32_t);

static void	fake_preload_metadata(void);

long		Maxmem = 0;
long		realmem = 0;

/* Default MSR values set in the AIM/Book-E early startup code */
register_t	psl_kernset;
register_t	psl_userset;
register_t	psl_userstatic;
#ifdef __powerpc64__
register_t	psl_userset32;
#endif

struct kva_md_info kmi;

static void
cpu_startup(void *dummy)
{

	/*
	 * Initialise the decrementer-based clock.
	 */
	decr_init();

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	cpu_setup(PCPU_GET(cpuid));

#ifdef PERFMON
	perfmon_init();
#endif
	printf("real memory  = %ju (%ju MB)\n", ptoa((uintmax_t)physmem),
	    ptoa((uintmax_t)physmem) / 1048576);
	realmem = physmem;

	if (bootverbose)
		printf("available KVA = %zu (%zu MB)\n",
		    virtual_end - virtual_avail,
		    (virtual_end - virtual_avail) / 1048576);

	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			vm_paddr_t size1 =
			    phys_avail[indx + 1] - phys_avail[indx];

			#ifdef __powerpc64__
			printf("0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
			#else
			printf("0x%09jx - 0x%09jx, %ju bytes (%ju pages)\n",
			#endif
			    (uintmax_t)phys_avail[indx],
			    (uintmax_t)phys_avail[indx + 1] - 1,
			    (uintmax_t)size1, (uintmax_t)size1 / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)vm_free_count()),
	    ptoa((uintmax_t)vm_free_count()) / 1048576);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();
}

extern vm_offset_t	__startkernel, __endkernel;
extern unsigned char	__bss_start[];
extern unsigned char	__sbss_start[];
extern unsigned char	__sbss_end[];
extern unsigned char	_end[];

void aim_early_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry,
    void *mdp, uint32_t mdp_cookie);
void aim_cpu_init(vm_offset_t toc);
void booke_cpu_init(void);

#ifdef DDB
static void	load_external_symtab(void);
#endif

uintptr_t
powerpc_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp,
    uint32_t mdp_cookie)
{
	struct		pcpu *pc;
	struct cpuref	bsp;
	vm_offset_t	startkernel, endkernel;
	char		*env;
	void		*kmdp = NULL;
	bool		ofw_bootargs = false;
	bool		symbols_provided = false;
#ifdef DDB
	vm_offset_t ksym_start;
	vm_offset_t ksym_end;
	vm_offset_t ksym_sz;
#endif

	/* First guess at start/end kernel positions */
	startkernel = __startkernel;
	endkernel = __endkernel;

	/*
	 * If the metadata pointer cookie is not set to the magic value,
	 * the number in mdp should be treated as nonsense.
	 */
	if (mdp_cookie != 0xfb5d104d)
		mdp = NULL;

#if !defined(BOOKE)
	/*
	 * On BOOKE the BSS is already cleared and some variables
	 * initialized.  Do not wipe them out.
	 */
	bzero(__sbss_start, __sbss_end - __sbss_start);
	bzero(__bss_start, _end - __bss_start);
#endif

	cpu_feature_setup();

#ifdef AIM
	aim_early_init(fdt, toc, ofentry, mdp, mdp_cookie);
#endif

	/*
	 * At this point, we are executing in our correct memory space.
	 * Book-E started there, and AIM has done an rfi and restarted
	 * execution from _start.
	 *
	 * We may still be in real mode, however; on 64-bit, running out
	 * of the direct map makes this possible.
	 */

	/*
	 * Parse metadata if present and fetch parameters.  Must be done
	 * before the console is initialized so cninit gets the right value
	 * of boothowto.
	 */
	if (mdp != NULL) {
		/*
		 * Starting up from loader.
		 *
		 * Full metadata has been provided, but we need to figure
		 * out the correct address to relocate it to.
		 */
		char *envp = NULL;
		uintptr_t md_offset = 0;
		vm_paddr_t kernelstartphys, kernelendphys;

#ifdef AIM
		if ((uintptr_t)&powerpc_init > DMAP_BASE_ADDRESS)
			md_offset = DMAP_BASE_ADDRESS;
#else /* BOOKE */
		md_offset = VM_MIN_KERNEL_ADDRESS - kernload;
#endif

		preload_metadata = mdp;
		if (md_offset > 0) {
			/* Translate phys offset into DMAP offset. */
			preload_metadata += md_offset;
			preload_bootstrap_relocate(md_offset);
		}
		kmdp = preload_search_by_type("elf kernel");
		if (kmdp != NULL) {
			boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
			envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
			if (envp != NULL)
				envp += md_offset;
			init_static_kenv(envp, 0);
			if (fdt == 0) {
				fdt = MD_FETCH(kmdp, MODINFOMD_DTBP, uintptr_t);
				if (fdt != 0)
					fdt += md_offset;
			}
			kernelstartphys = MD_FETCH(kmdp, MODINFO_ADDR,
			    vm_offset_t);
			/* kernelstartphys is already relocated. */
			kernelendphys = MD_FETCH(kmdp, MODINFOMD_KERNEND,
			    vm_offset_t);
			if (kernelendphys != 0)
				kernelendphys += md_offset;
			endkernel = ulmax(endkernel, kernelendphys);
#ifdef DDB
			ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
			ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
			ksym_sz = *(Elf_Size*)ksym_start;

			db_fetch_ksymtab(ksym_start, ksym_end, md_offset);
			/* Symbols provided by loader. */
			symbols_provided = true;
#endif
		}
	} else {
		/*
		 * Self-loading kernel: we have to fake up metadata.
		 *
		 * Since we are creating the metadata from the final
		 * memory space, we don't need to call
		 * preload_bootstrap_relocate().
		 */
		fake_preload_metadata();
		kmdp = preload_search_by_type("elf kernel");
		init_static_kenv(init_kenv, sizeof(init_kenv));
		ofw_bootargs = true;
	}

	/* Store boot environment state */
	OF_initial_setup((void *)fdt, NULL, (int (*)(void *))ofentry);

	/*
	 * Init params/tunables that can be overridden by the loader
	 */
	init_param1();

	/*
	 * Start initializing proc0 and thread0.
	 */
	proc_linkup0(&proc0, &thread0);
	thread0.td_frame = &frame0;
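	/*
	 * Point the register that the pcpu code uses for curthread (r13 on
	 * powerpc64, r2 on 32-bit) at thread0 so curthread works this early.
	 */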
#ifdef __powerpc64__
	__asm __volatile("mr 13,%0" :: "r"(&thread0));
#else
	__asm __volatile("mr 2,%0" :: "r"(&thread0));
#endif

	/*
	 * Init mutexes, which we use heavily in PMAP
	 */
	mutex_init();

	/*
	 * Install the OF client interface
	 */
	OF_bootstrap();

#ifdef DDB
	if (!symbols_provided && hw_direct_map)
		load_external_symtab();
#endif

	if (ofw_bootargs)
		ofw_parse_bootargs();

	/*
	 * Initialize the console before printing anything.
	 */
	cninit();

#ifdef AIM
	aim_cpu_init(toc);
#else /* BOOKE */
	booke_cpu_init();

	/* Make sure the kernel icache is valid before we go too much further */
	__syncicache((caddr_t)startkernel, endkernel - startkernel);
#endif

	/*
	 * Choose a platform module so we can get the physical memory map.
	 */

	platform_probe_and_attach();

	/*
	 * Set up per-cpu data for the BSP now that the platform can tell
	 * us which CPU that is.
	 */
	if (platform_smp_get_bsp(&bsp) != 0)
		bsp.cr_cpuid = 0;
	pc = &__pcpu[bsp.cr_cpuid];
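	/* SPRG0 holds the per-CPU data pointer. */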
	__asm __volatile("mtsprg 0, %0" :: "r"(pc));
	pcpu_init(pc, bsp.cr_cpuid, sizeof(struct pcpu));
	pc->pc_curthread = &thread0;
	thread0.td_oncpu = bsp.cr_cpuid;
	pc->pc_cpuid = bsp.cr_cpuid;
	pc->pc_hwref = bsp.cr_hwref;

	/*
	 * Init KDB
	 */
	kdb_init();

	/*
	 * Bring up MMU
	 */
	pmap_mmu_init();
	link_elf_ireloc(kmdp);
	pmap_bootstrap(startkernel, endkernel);
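	/*
	 * Switch to the kernel MSR now that the MMU is bootstrapped, but
	 * keep external interrupts (PSL_EE) masked for now.
	 */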
	mtmsr(psl_kernset & ~PSL_EE);

	/*
	 * Initialize params/tunables that are derived from memsize
	 */
	init_param2(physmem);

	/*
	 * Grab booted kernel's name
	 */
	env = kern_getenv("kernelname");
	if (env != NULL) {
		strlcpy(kernelname, env, sizeof(kernelname));
		freeenv(env);
	}

	/*
	 * Finish setting up thread0.
	 */
	thread0.td_pcb = (struct pcb *)
	    ((thread0.td_kstack + thread0.td_kstack_pages * PAGE_SIZE -
	    sizeof(struct pcb)) & ~15UL);
	bzero((void *)thread0.td_pcb, sizeof(struct pcb));
	pc->pc_curpcb = thread0.td_pcb;

	/* Initialise the message buffer. */
	msgbufinit(msgbufp, msgbufsize);

#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS,
		    "Boot flags requested debugger");
#endif

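	/*
	 * Return the initial stack pointer for the BSP: just below thread0's
	 * PCB, with room left for a call frame, 16-byte aligned.  The early
	 * startup code uses this value as the kernel stack pointer.
	 */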
	return (((uintptr_t)thread0.td_pcb -
	    (sizeof(struct callframe) - 3*sizeof(register_t))) & ~15UL);
}

#ifdef DDB
/*
 * On powernv and some booke systems, we might not have symbols loaded via
 * loader. However, if the user passed the kernel in as the initrd as well,
 * we can manually load it via reinterpreting the initrd copy of the kernel.
 *
 * In the BOOKE case, we don't actually have a DMAP yet, so we have to use
 * temporary maps to inspect the memory, but write DMAP addresses to the
 * configuration variables.
 */
static void
load_external_symtab(void)
{
	phandle_t chosen;
	vm_paddr_t start, end;
	pcell_t cell[2];
	ssize_t size;
	u_char *kernelimg;		/* Temporary map */
	u_char *kernelimg_final;	/* Final location */

	int i;

	Elf_Ehdr *ehdr;
	Elf_Phdr *phdr;
	Elf_Shdr *shdr;

	vm_offset_t ksym_start, ksym_sz, kstr_start, kstr_sz,
	    ksym_start_final, kstr_start_final;

	if (!hw_direct_map)
		return;

	chosen = OF_finddevice("/chosen");
	if (chosen <= 0)
		return;

	if (!OF_hasprop(chosen, "linux,initrd-start") ||
	    !OF_hasprop(chosen, "linux,initrd-end"))
		return;

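	/*
	 * The initrd properties may be one or two cells wide, depending on
	 * the platform's #address-cells.
	 */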
	size = OF_getencprop(chosen, "linux,initrd-start", cell, sizeof(cell));
	if (size == 4)
		start = cell[0];
	else if (size == 8)
		start = (uint64_t)cell[0] << 32 | cell[1];
	else
		return;

	size = OF_getencprop(chosen, "linux,initrd-end", cell, sizeof(cell));
	if (size == 4)
		end = cell[0];
	else if (size == 8)
		end = (uint64_t)cell[0] << 32 | cell[1];
	else
		return;

	if (!(end - start > 0))
		return;

	kernelimg_final = (u_char *) PHYS_TO_DMAP(start);
#ifdef	AIM
	kernelimg = kernelimg_final;
#else	/* BOOKE */
	kernelimg = (u_char *)pmap_early_io_map(start, PAGE_SIZE);
#endif
	ehdr = (Elf_Ehdr *)kernelimg;

	if (!IS_ELF(*ehdr)) {
#ifdef	BOOKE
		pmap_early_io_unmap(start, PAGE_SIZE);
#endif
		return;
	}

#ifdef	BOOKE
	pmap_early_io_unmap(start, PAGE_SIZE);
	kernelimg = (u_char *)pmap_early_io_map(start, (end - start));
#endif

	phdr = (Elf_Phdr *)(kernelimg + ehdr->e_phoff);
	shdr = (Elf_Shdr *)(kernelimg + ehdr->e_shoff);

	ksym_start = 0;
	ksym_sz = 0;
	ksym_start_final = 0;
	kstr_start = 0;
	kstr_sz = 0;
	kstr_start_final = 0;
	for (i = 0; i < ehdr->e_shnum; i++) {
		if (shdr[i].sh_type == SHT_SYMTAB) {
			ksym_start = (vm_offset_t)(kernelimg +
			    shdr[i].sh_offset);
			ksym_start_final = (vm_offset_t)
			    (kernelimg_final + shdr[i].sh_offset);
			ksym_sz = (vm_offset_t)(shdr[i].sh_size);
			kstr_start = (vm_offset_t)(kernelimg +
			    shdr[shdr[i].sh_link].sh_offset);
			kstr_start_final = (vm_offset_t)
			    (kernelimg_final +
			    shdr[shdr[i].sh_link].sh_offset);

			kstr_sz = (vm_offset_t)
			    (shdr[shdr[i].sh_link].sh_size);
		}
	}

	if (ksym_start != 0 && kstr_start != 0 && ksym_sz != 0 &&
	    kstr_sz != 0 && ksym_start < kstr_start) {
		/*
		 * We can't use db_fetch_ksymtab() here, because we need to
		 * feed in DMAP addresses that are not mapped yet on booke.
		 *
		 * Write the variables directly, where db_init() will pick
		 * them up later, after the DMAP is up.
		 */
		ksymtab = ksym_start_final;
		ksymtab_size = ksym_sz;
		kstrtab = kstr_start_final;
		ksymtab_relbase = (__startkernel - KERNBASE);
	}

#ifdef	BOOKE
	pmap_early_io_unmap(start, (end - start));
#endif

}
#endif

/*
 * When not being loaded from loader, we need to create our own metadata
 * so we can interact with the kernel linker.
 */
static void
fake_preload_metadata(void)
{
	/* We depend on dword alignment here. */
	static uint32_t fake_preload[36] __aligned(8);
	int i = 0;

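	/*
	 * Each metadata record is a 32-bit type, a 32-bit length, and the
	 * payload padded to a u_long boundary; a zeroed record ends the list.
	 */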
	fake_preload[i++] = MODINFO_NAME;
	fake_preload[i++] = strlen("kernel") + 1;
	strcpy((char*)&fake_preload[i], "kernel");
	/* ['k' 'e' 'r' 'n'] ['e' 'l' '\0' ..] */
	i += 2;

	fake_preload[i++] = MODINFO_TYPE;
	fake_preload[i++] = strlen("elf kernel") + 1;
	strcpy((char*)&fake_preload[i], "elf kernel");
	/* ['e' 'l' 'f' ' '] ['k' 'e' 'r' 'n'] ['e' 'l' '\0' ..] */
	i += 3;

#ifdef __powerpc64__
	/* Padding -- Fields start on u_long boundaries */
	fake_preload[i++] = 0;
#endif

	fake_preload[i++] = MODINFO_ADDR;
	fake_preload[i++] = sizeof(vm_offset_t);
	*(vm_offset_t *)&fake_preload[i] =
	    (vm_offset_t)(__startkernel);
	i += (sizeof(vm_offset_t) / 4);

	fake_preload[i++] = MODINFO_SIZE;
	fake_preload[i++] = sizeof(vm_offset_t);
	*(vm_offset_t *)&fake_preload[i] =
	    (vm_offset_t)(__endkernel) - (vm_offset_t)(__startkernel);
	i += (sizeof(vm_offset_t) / 4);

	/*
	 * MODINFOMD_SSYM and MODINFOMD_ESYM cannot be provided here,
	 * as the memory comes from outside the loaded ELF sections.
	 *
	 * If the symbols are being provided by other means (MFS), the
	 * tables will be loaded into the debugger directly.
	 */

	/* Null field at end to mark end of data. */
	fake_preload[i++] = 0;
	fake_preload[i] = 0;
	preload_metadata = (void*)fake_preload;
}

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{
	register_t addr, off;

	/*
	 * Align the address to a cacheline and adjust the length
	 * accordingly. Then round the length to a multiple of the
	 * cacheline for easy looping.
	 */
	addr = (uintptr_t)ptr;
	off = addr & (cacheline_size - 1);
	addr -= off;
	len = roundup2(len + off, cacheline_size);

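	/* dcbf flushes one cache line; sync waits for the flush to complete. */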
	while (len > 0) {
		__asm __volatile ("dcbf 0,%0" :: "r"(addr));
		__asm __volatile ("sync");
		addr += cacheline_size;
		len -= cacheline_size;
	}
}

int
ptrace_set_pc(struct thread *td, unsigned long addr)
{
	struct trapframe *tf;

	tf = td->td_frame;
	tf->srr0 = (register_t)addr;

	return (0);
}

void
spinlock_enter(void)
{
	struct thread *td;
	register_t msr;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		nop_prio_mhigh();
		msr = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_msr = msr;
		critical_enter();
	} else
		td->td_md.md_spinlock_count++;
}

void
spinlock_exit(void)
{
	struct thread *td;
	register_t msr;

	td = curthread;
	msr = td->td_md.md_saved_msr;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0) {
		critical_exit();
		intr_restore(msr);
		nop_prio_medium();
	}
}

/*
 * Simple ddb(4) command/hack to view any SPR on the running CPU.
 * Uses a trivial asm function to perform the mfspr, and rewrites the mfspr
 * instruction each time.
 * XXX: Since it uses code modification, it won't work if the kernel code pages
 * are marked RO.
 */
extern register_t get_spr(int);

#ifdef DDB
DB_SHOW_COMMAND(spr, db_show_spr)
{
	register_t spr;
	volatile uint32_t *p;
	int sprno, saved_sprno;

	if (!have_addr)
		return;

	saved_sprno = sprno = (intptr_t) addr;
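	/*
	 * mfspr encodes the SPR number with its two 5-bit halves swapped,
	 * so swap them here before patching the instruction.
	 */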
	sprno = ((sprno & 0x3e0) >> 5) | ((sprno & 0x1f) << 5);
	p = (uint32_t *)(void *)&get_spr;
#ifdef __powerpc64__
#if defined(_CALL_ELF) && _CALL_ELF == 2
	/* Account for ELFv2 function prologue. */
	p += 2;
#else
	p = *(volatile uint32_t * volatile *)p;
#endif
#endif
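	/*
	 * Rewrite the SPR field of the mfspr in get_spr(), then resync the
	 * icache so the patched instruction is fetched.
	 */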
	*p = (*p & ~0x001ff800) | (sprno << 11);
	__syncicache(__DEVOLATILE(uint32_t *, p), cacheline_size);
	spr = get_spr(sprno);

	db_printf("SPR %d(%x): %lx\n", saved_sprno, saved_sprno,
	    (unsigned long)spr);
}

DB_SHOW_COMMAND(frame, db_show_frame)
{
	struct trapframe *tf;
	long reg;
	int i;

	tf = have_addr ? (struct trapframe *)addr : curthread->td_frame;

	/*
	 * Everything casts through long to simplify the printing.
	 * 'long' is native register size anyway.
	 */
	db_printf("trap frame %p\n", tf);
	for (i = 0; i < nitems(tf->fixreg); i++) {
		reg = tf->fixreg[i];
		db_printf("  r%d:\t%#lx (%ld)\n", i, reg, reg);
	}
	reg = tf->lr;
	db_printf("  lr:\t%#lx\n", reg);
	reg = tf->cr;
	db_printf("  cr:\t%#lx\n", reg);
	reg = tf->xer;
	db_printf("  xer:\t%#lx\n", reg);
	reg = tf->ctr;
	db_printf("  ctr:\t%#lx (%ld)\n", reg, reg);
	reg = tf->srr0;
	db_printf("  srr0:\t%#lx\n", reg);
	reg = tf->srr1;
	db_printf("  srr1:\t%#lx\n", reg);
	reg = tf->exc;
	db_printf("  exc:\t%#lx\n", reg);
	reg = tf->dar;
	db_printf("  dar:\t%#lx\n", reg);
#ifdef AIM
	reg = tf->cpu.aim.dsisr;
	db_printf("  dsisr:\t%#lx\n", reg);
#else
	reg = tf->cpu.booke.esr;
	db_printf("  esr:\t%#lx\n", reg);
	reg = tf->cpu.booke.dbcr0;
	db_printf("  dbcr0:\t%#lx\n", reg);
#endif
}
#endif

#undef bzero
void
bzero(void *buf, size_t len)
{
	caddr_t	p;

	p = buf;

	while (((vm_offset_t) p & (sizeof(u_long) - 1)) && len) {
		*p++ = 0;
		len--;
	}

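	/* Zero eight words per iteration while enough bytes remain. */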
	while (len >= sizeof(u_long) * 8) {
		*(u_long*) p = 0;
		*((u_long*) p + 1) = 0;
		*((u_long*) p + 2) = 0;
		*((u_long*) p + 3) = 0;
		len -= sizeof(u_long) * 8;
		*((u_long*) p + 4) = 0;
		*((u_long*) p + 5) = 0;
		*((u_long*) p + 6) = 0;
		*((u_long*) p + 7) = 0;
		p += sizeof(u_long) * 8;
	}

	while (len >= sizeof(u_long)) {
		*(u_long*) p = 0;
		len -= sizeof(u_long);
		p += sizeof(u_long);
	}

	while (len) {
		*p++ = 0;
		len--;
	}
}

/* __stack_chk_fail_local() is called in secure-plt (32-bit). */
#if !defined(__powerpc64__)
extern void __stack_chk_fail(void);
void __stack_chk_fail_local(void);

void
__stack_chk_fail_local(void)
{

	__stack_chk_fail();
}
#endif
