/*	$NetBSD: uvm_glue.c,v 1.179 2020/05/22 19:46:29 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 * from: Id: uvm_glue.c,v 1.1.2.8 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.179 2020/05/22 19:46:29 ad Exp $");

#include "opt_kgdb.h"
#include "opt_kstack.h"
#include "opt_uvmhist.h"

/*
 * uvm_glue.c: glue functions
 */

#include <sys/param.h>
#include <sys/kernel.h>

#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/syncobj.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/lwp.h>
#include <sys/asan.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_pgflcache.h>

/*
 * uvm_kernacc: test if kernel can access a memory region.
 *
 * => Currently used only by /dev/kmem driver (dev/mm.c).
 */
bool
uvm_kernacc(void *addr, size_t len, vm_prot_t prot)
{
	vaddr_t saddr = trunc_page((vaddr_t)addr);
	vaddr_t eaddr = round_page(saddr + len);
	bool rv;

	vm_map_lock_read(kernel_map);
	rv = uvm_map_checkprot(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);

	return rv;
}
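
/*
 * Example (sketch, not compiled): how a /dev/kmem style read routine
 * might use uvm_kernacc() before copying.  "kmem_read_sketch" and its
 * arguments are hypothetical; see dev/mm.c for the real consumer.
 */
#if 0
static int
kmem_read_sketch(void *kva, size_t len, struct uio *uio)
{

	if (!uvm_kernacc(kva, len, VM_PROT_READ))
		return EFAULT;		/* region not readable by kernel */
	return uiomove(kva, len, uio);	/* safe to copy out */
}
#endif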

#ifdef KGDB
/*
 * Change protections on kernel pages from addr to addr+len
 * (presumably so a debugger can plant a breakpoint).
 *
 * We force the protection change at the pmap level.  If we were
 * to use vm_map_protect, a change to allow writing would be lazily
 * applied, meaning we would still take a protection fault, something
 * we really don't want to do.  It would also fragment the kernel
 * map unnecessarily.  We cannot use pmap_protect since it also won't
 * enforce a write-enable request.  Using pmap_enter is the only way
 * we can ensure the change takes place properly.
 */
void
uvm_chgkprot(void *addr, size_t len, int rw)
{
	vm_prot_t prot;
	paddr_t pa;
	vaddr_t sva, eva;

	prot = rw == B_READ ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE;
	eva = round_page((vaddr_t)addr + len);
	for (sva = trunc_page((vaddr_t)addr); sva < eva; sva += PAGE_SIZE) {
		/*
		 * Extract physical address for the page.
		 */
		if (pmap_extract(pmap_kernel(), sva, &pa) == false)
			panic("%s: invalid page", __func__);
		pmap_enter(pmap_kernel(), sva, pa, prot, PMAP_WIRED);
	}
	pmap_update(pmap_kernel());
}
#endif
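
/*
 * Example (sketch, not compiled): how a KGDB stub might use
 * uvm_chgkprot() to plant a software breakpoint in read-only text.
 * BKPT_INSN is a hypothetical MD breakpoint opcode; the real stub
 * lives in MD code.
 */
#if 0
static void
kgdb_plant_breakpoint_sketch(uint32_t *insn)
{

	uvm_chgkprot(insn, sizeof(*insn), B_WRITE);	/* make writable */
	*insn = BKPT_INSN;				/* patch the text */
	uvm_chgkprot(insn, sizeof(*insn), B_READ);	/* restore read-only */
}
#endif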

/*
 * uvm_vslock: wire user memory for I/O
 *
 * - called from physio and sys___sysctl
 * - XXXCDC: consider nuking this (or making it a macro?)
 */

int
uvm_vslock(struct vmspace *vs, void *addr, size_t len, vm_prot_t access_type)
{
	struct vm_map *map;
	vaddr_t start, end;
	int error;

	map = &vs->vm_map;
	start = trunc_page((vaddr_t)addr);
	end = round_page((vaddr_t)addr + len);
	error = uvm_fault_wire(map, start, end, access_type, 0);
	return error;
}

/*
 * uvm_vsunlock: unwire user memory wired by uvm_vslock()
 *
 * - called from physio and sys___sysctl
 * - XXXCDC: consider nuking this (or making it a macro?)
 */

void
uvm_vsunlock(struct vmspace *vs, void *addr, size_t len)
{
	uvm_fault_unwire(&vs->vm_map, trunc_page((vaddr_t)addr),
		round_page((vaddr_t)addr + len));
}
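
/*
 * Example (sketch, not compiled): the physio-style pattern for using
 * the pair above.  The buffer must stay wired for the whole transfer;
 * "do_transfer" is a hypothetical stand-in for the actual device I/O.
 */
#if 0
static int
wired_io_sketch(struct vmspace *vs, void *uaddr, size_t len)
{
	int error;

	error = uvm_vslock(vs, uaddr, len, VM_PROT_READ | VM_PROT_WRITE);
	if (error)
		return error;			/* could not wire the pages */
	error = do_transfer(uaddr, len);	/* pages stay resident here */
	uvm_vsunlock(vs, uaddr, len);		/* always unwire, even on error */
	return error;
}
#endif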

/*
 * uvm_proc_fork: fork a virtual address space
 *
 * - the address space is copied as per parent map's inherit values
 */
void
uvm_proc_fork(struct proc *p1, struct proc *p2, bool shared)
{

	if (shared == true) {
		p2->p_vmspace = NULL;
		uvmspace_share(p1, p2);
	} else {
		p2->p_vmspace = uvmspace_fork(p1->p_vmspace);
	}

	cpu_proc_fork(p1, p2);
}

/*
 * uvm_lwp_fork: fork a thread
 *
 * - a new PCB structure is allocated for the child process,
 *	and filled in by the MD layer
 * - if specified, the child gets a new user stack described by
 *	stack and stacksize
 * - NOTE: the kernel stack may be at a different location in the child
 *	process, and thus addresses of automatic variables may be invalid
 *	after cpu_lwp_fork returns in the child process.  We do nothing here
 *	after cpu_lwp_fork returns.
 */
void
uvm_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{

	/* Fill stack with magic number. */
	kstack_setup_magic(l2);

	/*
	 * cpu_lwp_fork() copies and updates the PCB, and makes the child
	 * ready to run.  If this is a normal user fork, the child will
	 * exit directly to user mode via child_return() on its first time
	 * slice and will not return here.  If this is a kernel thread,
	 * the specified entry point will be executed.
	 */
	cpu_lwp_fork(l1, l2, stack, stacksize, func, arg);
}
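
/*
 * Example (sketch, not compiled): the shape of a kernel-thread entry
 * point passed as "func" above.  Kernel threads are normally created
 * via kthread_create(); "my_thread_sketch" is hypothetical.
 */
#if 0
static void
my_thread_sketch(void *arg)
{

	for (;;) {
		/* ... do periodic work on "arg" ... */
		kpause("sketch", false, hz, NULL);
	}
	/* a kernel thread exits via kthread_exit(), never by returning */
}
#endif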
228
229#ifndef USPACE_ALIGN
230#define	USPACE_ALIGN	0
231#endif
232
233static pool_cache_t uvm_uarea_cache;
234#if defined(__HAVE_CPU_UAREA_ROUTINES)
235static pool_cache_t uvm_uarea_system_cache;
236#else
237#define uvm_uarea_system_cache uvm_uarea_cache
238#endif
239
240static void *
241uarea_poolpage_alloc(struct pool *pp, int flags)
242{
243
244	KASSERT((flags & PR_WAITOK) != 0);
245
246#if defined(PMAP_MAP_POOLPAGE)
247	while (USPACE == PAGE_SIZE &&
248	    (USPACE_ALIGN == 0 || USPACE_ALIGN == PAGE_SIZE)) {
249		struct vm_page *pg;
250		vaddr_t va;
251#if defined(PMAP_ALLOC_POOLPAGE)
252		pg = PMAP_ALLOC_POOLPAGE(0);
253#else
254		pg = uvm_pagealloc(NULL, 0, NULL, 0);
255#endif
256		if (pg == NULL) {
257			uvm_wait("uarea");
258			continue;
259		}
260		va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
261		KASSERT(va != 0);
262		return (void *)va;
263	}
264#endif
265#if defined(__HAVE_CPU_UAREA_ROUTINES)
266	void *va = cpu_uarea_alloc(false);
267	if (va)
268		return (void *)va;
269#endif
270	return (void *)uvm_km_alloc(kernel_map, pp->pr_alloc->pa_pagesz,
271	    USPACE_ALIGN, UVM_KMF_WIRED | UVM_KMF_WAITVA);
272}
273
274static void
275uarea_poolpage_free(struct pool *pp, void *addr)
276{
277#if defined(PMAP_MAP_POOLPAGE)
278	if (USPACE == PAGE_SIZE &&
279	    (USPACE_ALIGN == 0 || USPACE_ALIGN == PAGE_SIZE)) {
280		paddr_t pa;
281
282		pa = PMAP_UNMAP_POOLPAGE((vaddr_t) addr);
283		KASSERT(pa != 0);
284		uvm_pagefree(PHYS_TO_VM_PAGE(pa));
285		return;
286	}
287#endif
288#if defined(__HAVE_CPU_UAREA_ROUTINES)
289	if (cpu_uarea_free(addr))
290		return;
291#endif
292	uvm_km_free(kernel_map, (vaddr_t)addr, pp->pr_alloc->pa_pagesz,
293	    UVM_KMF_WIRED);
294}
295
296static struct pool_allocator uvm_uarea_allocator = {
297	.pa_alloc = uarea_poolpage_alloc,
298	.pa_free = uarea_poolpage_free,
299	.pa_pagesz = USPACE,
300};
301
302#if defined(__HAVE_CPU_UAREA_ROUTINES)
303static void *
304uarea_system_poolpage_alloc(struct pool *pp, int flags)
305{
306	void * const va = cpu_uarea_alloc(true);
307	if (va != NULL)
308		return va;
309
310	return (void *)uvm_km_alloc(kernel_map, pp->pr_alloc->pa_pagesz,
311	    USPACE_ALIGN, UVM_KMF_WIRED |
312	    ((flags & PR_WAITOK) ? UVM_KMF_WAITVA :
313	    (UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)));
314}
315
316static void
317uarea_system_poolpage_free(struct pool *pp, void *addr)
318{
319	if (cpu_uarea_free(addr))
320		return;
321
322	uvm_km_free(kernel_map, (vaddr_t)addr, pp->pr_alloc->pa_pagesz,
323	    UVM_KMF_WIRED);
324}
325
326static struct pool_allocator uvm_uarea_system_allocator = {
327	.pa_alloc = uarea_system_poolpage_alloc,
328	.pa_free = uarea_system_poolpage_free,
329	.pa_pagesz = USPACE,
330};
331#endif /* __HAVE_CPU_UAREA_ROUTINES */

void
uvm_uarea_init(void)
{
	int flags = PR_NOTOUCH;

	/*
	 * Specify PR_NOALIGN unless the alignment provided by
	 * the backend (USPACE_ALIGN) is sufficient to provide
	 * pool page size (USPACE) alignment.  (For example, a
	 * multi-page USPACE with USPACE_ALIGN of 0 only gets
	 * page alignment from the backend, so PR_NOALIGN is
	 * required.)
	 */

	if ((USPACE_ALIGN == 0 && USPACE != PAGE_SIZE) ||
	    (USPACE_ALIGN % USPACE) != 0) {
		flags |= PR_NOALIGN;
	}

	uvm_uarea_cache = pool_cache_init(USPACE, USPACE_ALIGN, 0, flags,
	    "uarea", &uvm_uarea_allocator, IPL_NONE, NULL, NULL, NULL);
#if defined(__HAVE_CPU_UAREA_ROUTINES)
	uvm_uarea_system_cache = pool_cache_init(USPACE, USPACE_ALIGN,
	    0, flags, "uareasys", &uvm_uarea_system_allocator,
	    IPL_NONE, NULL, NULL, NULL);
#endif
}

/*
 * uvm_uarea_alloc: allocate a u-area
 */

vaddr_t
uvm_uarea_alloc(void)
{

	return (vaddr_t)pool_cache_get(uvm_uarea_cache, PR_WAITOK);
}

vaddr_t
uvm_uarea_system_alloc(struct cpu_info *ci)
{
#ifdef __HAVE_CPU_UAREA_ALLOC_IDLELWP
	if (__predict_false(ci != NULL))
		return cpu_uarea_alloc_idlelwp(ci);
#endif

	return (vaddr_t)pool_cache_get(uvm_uarea_system_cache, PR_WAITOK);
}

/*
 * uvm_uarea_free: free a u-area
 */

void
uvm_uarea_free(vaddr_t uaddr)
{

	kasan_mark((void *)uaddr, USPACE, USPACE, 0);
	pool_cache_put(uvm_uarea_cache, (void *)uaddr);
}

void
uvm_uarea_system_free(vaddr_t uaddr)
{

	kasan_mark((void *)uaddr, USPACE, USPACE, 0);
	pool_cache_put(uvm_uarea_system_cache, (void *)uaddr);
}

vaddr_t
uvm_lwp_getuarea(lwp_t *l)
{

	return (vaddr_t)l->l_addr - UAREA_PCB_OFFSET;
}

void
uvm_lwp_setuarea(lwp_t *l, vaddr_t addr)
{

	l->l_addr = (void *)(addr + UAREA_PCB_OFFSET);
}
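
/*
 * Example (sketch, not compiled): how LWP creation code might pair the
 * u-area routines with uvm_lwp_setuarea(); compare lwp_create() for
 * the real sequence.  "lwp_uarea_sketch" is hypothetical.
 */
#if 0
static void
lwp_uarea_sketch(struct lwp *l2)
{
	vaddr_t uaddr;

	uaddr = uvm_uarea_alloc();	/* may sleep (PR_WAITOK) */
	uvm_lwp_setuarea(l2, uaddr);	/* l_addr now points at the PCB */
	/* ... the LWP lives its life ... */
	uvm_lwp_exit(l2);		/* returns the u-area to the cache */
}
#endif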

/*
 * uvm_proc_exit: exit a virtual address space
 *
 * - borrow proc0's address space because freeing the vmspace
 *   of the dead process may block.
 */

void
uvm_proc_exit(struct proc *p)
{
	struct lwp *l = curlwp; /* XXX */
	struct vmspace *ovm;

	KASSERT(p == l->l_proc);
	ovm = p->p_vmspace;
	KASSERT(ovm != NULL);

	if (__predict_false(ovm == proc0.p_vmspace))
		return;

	/*
	 * borrow proc0's address space.
	 */
	kpreempt_disable();
	pmap_deactivate(l);
	p->p_vmspace = proc0.p_vmspace;
	pmap_activate(l);
	kpreempt_enable();

	uvmspace_free(ovm);
}

void
uvm_lwp_exit(struct lwp *l)
{
	vaddr_t va = uvm_lwp_getuarea(l);
	bool system = (l->l_flag & LW_SYSTEM) != 0;

	if (system)
		uvm_uarea_system_free(va);
	else
		uvm_uarea_free(va);
#ifdef DIAGNOSTIC
	uvm_lwp_setuarea(l, (vaddr_t)NULL);
#endif
}

/*
 * uvm_init_limits: init per-process VM limits
 *
 * - called for process 0 and then inherited by all others.
 */

void
uvm_init_limits(struct proc *p)
{

	/*
	 * Set up the initial limits on process VM.  Set the maximum
	 * resident set size to be all of (reasonably) available memory.
	 * This causes any single, large process to start random page
	 * replacement once it fills memory.
	 */

	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = maxsmap;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdmap;
	p->p_rlimit[RLIMIT_AS].rlim_cur = RLIM_INFINITY;
	p->p_rlimit[RLIMIT_AS].rlim_max = RLIM_INFINITY;
	p->p_rlimit[RLIMIT_RSS].rlim_cur = MIN(VM_MAXUSER_ADDRESS,
	    ctob((rlim_t)uvm_availmem()));
}

/*
 * uvm_scheduler: process zero main loop.
 */

extern struct loadavg averunnable;

void
uvm_scheduler(void)
{
	lwp_t *l = curlwp;

	lwp_lock(l);
	l->l_class = SCHED_FIFO;
	lwp_changepri(l, PRI_VM);
	lwp_unlock(l);

	/* Start the freelist cache. */
	uvm_pgflcache_start();

	for (;;) {
		/* Update legacy stats for post-mortem debugging. */
		uvm_update_uvmexp();

		/* See if the pagedaemon needs to generate some free pages. */
		uvm_kick_pdaemon();

		/* Calculate process statistics. */
		sched_pstats();
		(void)kpause("uvm", false, hz, NULL);
	}
}

/*
 * uvm_idle: called from the idle loop.
 */

void
uvm_idle(void)
{
	struct cpu_info *ci = curcpu();
	struct uvm_cpu *ucpu = ci->ci_data.cpu_uvm;

	KASSERT(kpreempt_disabled());

	if (!ci->ci_want_resched)
		uvmpdpol_idle(ucpu);
	if (!ci->ci_want_resched)
		uvm_pageidlezero();
}