17c478bd9Sstevel@tonic-gate /*
27c478bd9Sstevel@tonic-gate * CDDL HEADER START
37c478bd9Sstevel@tonic-gate *
47c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the
507b65a64Saguzovsk * Common Development and Distribution License (the "License").
607b65a64Saguzovsk * You may not use this file except in compliance with the License.
77c478bd9Sstevel@tonic-gate *
87c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
97c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing.
107c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions
117c478bd9Sstevel@tonic-gate * and limitations under the License.
127c478bd9Sstevel@tonic-gate *
137c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each
147c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
157c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the
167c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying
177c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner]
187c478bd9Sstevel@tonic-gate *
197c478bd9Sstevel@tonic-gate * CDDL HEADER END
207c478bd9Sstevel@tonic-gate */
21783f4f5eSRoger A. Faulkner
22284ce987SPatrick Mooney /*
23284ce987SPatrick Mooney * Copyright 2013 OmniTI Computer Consulting, Inc. All rights reserved.
24284ce987SPatrick Mooney * Copyright 2017 Joyent, Inc.
25284ce987SPatrick Mooney */
261b3b16f3STheo Schlossnagle
277c478bd9Sstevel@tonic-gate /*
28783f4f5eSRoger A. Faulkner * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
297c478bd9Sstevel@tonic-gate * Use is subject to license terms.
307c478bd9Sstevel@tonic-gate */
317c478bd9Sstevel@tonic-gate
327c478bd9Sstevel@tonic-gate /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
33*01355ae8SRichard Lowe /* All Rights Reserved */
347c478bd9Sstevel@tonic-gate
357c478bd9Sstevel@tonic-gate #include <sys/types.h>
367c478bd9Sstevel@tonic-gate #include <sys/inttypes.h>
377c478bd9Sstevel@tonic-gate #include <sys/param.h>
387c478bd9Sstevel@tonic-gate #include <sys/sysmacros.h>
397c478bd9Sstevel@tonic-gate #include <sys/systm.h>
407c478bd9Sstevel@tonic-gate #include <sys/signal.h>
417c478bd9Sstevel@tonic-gate #include <sys/user.h>
427c478bd9Sstevel@tonic-gate #include <sys/errno.h>
437c478bd9Sstevel@tonic-gate #include <sys/var.h>
447c478bd9Sstevel@tonic-gate #include <sys/proc.h>
457c478bd9Sstevel@tonic-gate #include <sys/tuneable.h>
467c478bd9Sstevel@tonic-gate #include <sys/debug.h>
477c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h>
487c478bd9Sstevel@tonic-gate #include <sys/cred.h>
497c478bd9Sstevel@tonic-gate #include <sys/vnode.h>
507c478bd9Sstevel@tonic-gate #include <sys/vfs.h>
517c478bd9Sstevel@tonic-gate #include <sys/vm.h>
527c478bd9Sstevel@tonic-gate #include <sys/file.h>
537c478bd9Sstevel@tonic-gate #include <sys/mman.h>
547c478bd9Sstevel@tonic-gate #include <sys/vmparam.h>
557c478bd9Sstevel@tonic-gate #include <sys/fcntl.h>
567c478bd9Sstevel@tonic-gate #include <sys/lwpchan_impl.h>
57da6c28aaSamw #include <sys/nbmlock.h>
587c478bd9Sstevel@tonic-gate
597c478bd9Sstevel@tonic-gate #include <vm/hat.h>
607c478bd9Sstevel@tonic-gate #include <vm/as.h>
617c478bd9Sstevel@tonic-gate #include <vm/seg.h>
627c478bd9Sstevel@tonic-gate #include <vm/seg_dev.h>
637c478bd9Sstevel@tonic-gate #include <vm/seg_vn.h>
647c478bd9Sstevel@tonic-gate
/*
 * Tunables: when nonzero (and the process has SAUTOLPG set), brk() and
 * grow() attempt automatic large-page selection for the heap and stack
 * respectively via brk_lpg()/grow_lpg().
 */
int use_brk_lpg = 1;
int use_stk_lpg = 1;

/*
 * If set, we will not randomize mappings where the 'addr' argument is
 * non-NULL and not an alignment.
 */
int aslr_respect_mmap_hint = 1;

/* Large-page-aware heap/stack growth helpers, defined below. */
static int brk_lpg(caddr_t nva);
static int grow_lpg(caddr_t sp);
767c478bd9Sstevel@tonic-gate
77d2a70789SRichard Lowe intptr_t
brk(caddr_t nva)787c478bd9Sstevel@tonic-gate brk(caddr_t nva)
797c478bd9Sstevel@tonic-gate {
807c478bd9Sstevel@tonic-gate int error;
817c478bd9Sstevel@tonic-gate proc_t *p = curproc;
827c478bd9Sstevel@tonic-gate
837c478bd9Sstevel@tonic-gate /*
847c478bd9Sstevel@tonic-gate * Serialize brk operations on an address space.
857c478bd9Sstevel@tonic-gate * This also serves as the lock protecting p_brksize
867c478bd9Sstevel@tonic-gate * and p_brkpageszc.
877c478bd9Sstevel@tonic-gate */
887c478bd9Sstevel@tonic-gate as_rangelock(p->p_as);
89d2a70789SRichard Lowe
90d2a70789SRichard Lowe /*
91d2a70789SRichard Lowe * As a special case to aid the implementation of sbrk(3C), if given a
92d2a70789SRichard Lowe * new brk of 0, return the current brk. We'll hide this in brk(3C).
93d2a70789SRichard Lowe */
94d2a70789SRichard Lowe if (nva == 0) {
95d2a70789SRichard Lowe intptr_t base = (intptr_t)(p->p_brkbase + p->p_brksize);
96d2a70789SRichard Lowe as_rangeunlock(p->p_as);
97d2a70789SRichard Lowe return (base);
98d2a70789SRichard Lowe }
99d2a70789SRichard Lowe
1007c478bd9Sstevel@tonic-gate if (use_brk_lpg && (p->p_flag & SAUTOLPG) != 0) {
1017c478bd9Sstevel@tonic-gate error = brk_lpg(nva);
1027c478bd9Sstevel@tonic-gate } else {
1037c478bd9Sstevel@tonic-gate error = brk_internal(nva, p->p_brkpageszc);
1047c478bd9Sstevel@tonic-gate }
1057c478bd9Sstevel@tonic-gate as_rangeunlock(p->p_as);
1067c478bd9Sstevel@tonic-gate return ((error != 0 ? set_errno(error) : 0));
1077c478bd9Sstevel@tonic-gate }
1087c478bd9Sstevel@tonic-gate
1097c478bd9Sstevel@tonic-gate /*
1107c478bd9Sstevel@tonic-gate * Algorithm: call arch-specific map_pgsz to get best page size to use,
1117c478bd9Sstevel@tonic-gate * then call brk_internal().
1127c478bd9Sstevel@tonic-gate * Returns 0 on success.
1137c478bd9Sstevel@tonic-gate */
1147c478bd9Sstevel@tonic-gate static int
brk_lpg(caddr_t nva)1157c478bd9Sstevel@tonic-gate brk_lpg(caddr_t nva)
1167c478bd9Sstevel@tonic-gate {
1177c478bd9Sstevel@tonic-gate struct proc *p = curproc;
1187c478bd9Sstevel@tonic-gate size_t pgsz, len;
119ec25b48fSsusans caddr_t addr, brkend;
1207c478bd9Sstevel@tonic-gate caddr_t bssbase = p->p_bssbase;
1217c478bd9Sstevel@tonic-gate caddr_t brkbase = p->p_brkbase;
1227c478bd9Sstevel@tonic-gate int oszc, szc;
1237c478bd9Sstevel@tonic-gate int err;
1247c478bd9Sstevel@tonic-gate
1257c478bd9Sstevel@tonic-gate oszc = p->p_brkpageszc;
1267c478bd9Sstevel@tonic-gate
1277c478bd9Sstevel@tonic-gate /*
1287c478bd9Sstevel@tonic-gate * If p_brkbase has not yet been set, the first call
1297c478bd9Sstevel@tonic-gate * to brk_internal() will initialize it.
1307c478bd9Sstevel@tonic-gate */
1317c478bd9Sstevel@tonic-gate if (brkbase == 0) {
1327c478bd9Sstevel@tonic-gate return (brk_internal(nva, oszc));
1337c478bd9Sstevel@tonic-gate }
1347c478bd9Sstevel@tonic-gate
1357c478bd9Sstevel@tonic-gate len = nva - bssbase;
1367c478bd9Sstevel@tonic-gate
137ec25b48fSsusans pgsz = map_pgsz(MAPPGSZ_HEAP, p, bssbase, len, 0);
1387c478bd9Sstevel@tonic-gate szc = page_szc(pgsz);
1397c478bd9Sstevel@tonic-gate
1407c478bd9Sstevel@tonic-gate /*
1417c478bd9Sstevel@tonic-gate * Covers two cases:
1427c478bd9Sstevel@tonic-gate * 1. page_szc() returns -1 for invalid page size, so we want to
1437c478bd9Sstevel@tonic-gate * ignore it in that case.
1447c478bd9Sstevel@tonic-gate * 2. By design we never decrease page size, as it is more stable.
1457c478bd9Sstevel@tonic-gate */
1467c478bd9Sstevel@tonic-gate if (szc <= oszc) {
1477c478bd9Sstevel@tonic-gate err = brk_internal(nva, oszc);
1487c478bd9Sstevel@tonic-gate /* If failed, back off to base page size. */
1497c478bd9Sstevel@tonic-gate if (err != 0 && oszc != 0) {
1507c478bd9Sstevel@tonic-gate err = brk_internal(nva, 0);
1517c478bd9Sstevel@tonic-gate }
1527c478bd9Sstevel@tonic-gate return (err);
1537c478bd9Sstevel@tonic-gate }
1547c478bd9Sstevel@tonic-gate
1557c478bd9Sstevel@tonic-gate err = brk_internal(nva, szc);
1567c478bd9Sstevel@tonic-gate /* If using szc failed, map with base page size and return. */
1577c478bd9Sstevel@tonic-gate if (err != 0) {
1587c478bd9Sstevel@tonic-gate if (szc != 0) {
1597c478bd9Sstevel@tonic-gate err = brk_internal(nva, 0);
1607c478bd9Sstevel@tonic-gate }
1617c478bd9Sstevel@tonic-gate return (err);
1627c478bd9Sstevel@tonic-gate }
1637c478bd9Sstevel@tonic-gate
164ec25b48fSsusans /*
165ec25b48fSsusans * Round up brk base to a large page boundary and remap
166ec25b48fSsusans * anything in the segment already faulted in beyond that
167ec25b48fSsusans * point.
168ec25b48fSsusans */
169ec25b48fSsusans addr = (caddr_t)P2ROUNDUP((uintptr_t)p->p_bssbase, pgsz);
170ec25b48fSsusans brkend = brkbase + p->p_brksize;
171ec25b48fSsusans len = brkend - addr;
172ec25b48fSsusans /* Check that len is not negative. Update page size code for heap. */
173ec25b48fSsusans if (addr >= p->p_bssbase && brkend > addr && IS_P2ALIGNED(len, pgsz)) {
1747c478bd9Sstevel@tonic-gate (void) as_setpagesize(p->p_as, addr, len, szc, B_FALSE);
175ec25b48fSsusans p->p_brkpageszc = szc;
1767c478bd9Sstevel@tonic-gate }
1777c478bd9Sstevel@tonic-gate
1787c478bd9Sstevel@tonic-gate ASSERT(err == 0);
1797c478bd9Sstevel@tonic-gate return (err); /* should always be 0 */
1807c478bd9Sstevel@tonic-gate }
1817c478bd9Sstevel@tonic-gate
/*
 * Resize the heap so that it ends at nva, mapping or unmapping pages as
 * needed.  brkszc is the preferred page size code for any new heap
 * extension.  The caller must hold the address-space range lock (see
 * brk()), which protects p_brksize and p_brkpageszc.
 * Returns 0 on success, or an errno value (ENOMEM when the RLIMIT_DATA
 * rctl is exceeded, or whatever as_map() returns).
 */
int
brk_internal(caddr_t nva, uint_t brkszc)
{
	caddr_t ova;			/* current break address */
	size_t size;
	int error;
	struct proc *p = curproc;
	struct as *as = p->p_as;
	size_t pgsz;
	uint_t szc;
	rctl_qty_t as_rctl;

	/*
	 * extend heap to brkszc alignment but use current p->p_brkpageszc
	 * for the newly created segment. This allows the new extension
	 * segment to be concatenated successfully with the existing brk
	 * segment.
	 */
	if ((szc = brkszc) != 0) {
		pgsz = page_get_pagesize(szc);
		ASSERT(pgsz > PAGESIZE);
	} else {
		pgsz = PAGESIZE;
	}

	/* Snapshot the enforced RLIMIT_DATA value under p_lock. */
	mutex_enter(&p->p_lock);
	as_rctl = rctl_enforced_value(rctlproc_legacy[RLIMIT_DATA],
	    p->p_rctls, p);
	mutex_exit(&p->p_lock);

	/*
	 * If p_brkbase has not yet been set, the first call
	 * to brk() will initialize it.
	 */
	if (p->p_brkbase == 0)
		p->p_brkbase = nva;

	/*
	 * Before multiple page size support existed p_brksize was the value
	 * not rounded to the pagesize (i.e. it stored the exact user request
	 * for heap size). If pgsz is greater than PAGESIZE calculate the
	 * heap size as the real new heap size by rounding it up to pgsz.
	 * This is useful since we may want to know where the heap ends
	 * without knowing heap pagesize (e.g. some old code) and also if
	 * heap pagesize changes we can update p_brkpageszc but delay adding
	 * new mapping yet still know from p_brksize where the heap really
	 * ends. The user requested heap end is stored in libc variable.
	 */
	if (pgsz > PAGESIZE) {
		caddr_t tnva = (caddr_t)P2ROUNDUP((uintptr_t)nva, pgsz);
		size = tnva - p->p_brkbase;
		if (tnva < p->p_brkbase || (size > p->p_brksize &&
		    size > (size_t)as_rctl)) {
			/*
			 * Rounding wrapped, or the large-page-rounded
			 * size would exceed the data limit; fall back
			 * to base pages for this request.
			 */
			szc = 0;
			pgsz = PAGESIZE;
			size = nva - p->p_brkbase;
		}
	} else {
		size = nva - p->p_brkbase;
	}

	/*
	 * use PAGESIZE to roundup ova because we want to know the real value
	 * of the current heap end in case p_brkpageszc changes since the last
	 * p_brksize was computed.
	 */
	nva = (caddr_t)P2ROUNDUP((uintptr_t)nva, pgsz);
	ova = (caddr_t)P2ROUNDUP((uintptr_t)(p->p_brkbase + p->p_brksize),
	    PAGESIZE);

	if ((nva < p->p_brkbase) || (size > p->p_brksize &&
	    size > as_rctl)) {
		/* Over the data limit (or wrapped): fire the rctl action. */
		mutex_enter(&p->p_lock);
		(void) rctl_action(rctlproc_legacy[RLIMIT_DATA], p->p_rctls, p,
		    RCA_SAFE);
		mutex_exit(&p->p_lock);
		return (ENOMEM);
	}

	if (nva > ova) {
		struct segvn_crargs crargs =
		    SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);

		/* Honor a non-executable data segment. */
		if (!(p->p_datprot & PROT_EXEC)) {
			crargs.prot &= ~PROT_EXEC;
		}

		/*
		 * Add new zfod mapping to extend UNIX data segment
		 * AS_MAP_NO_LPOOB means use 0, and don't reapply OOB policies
		 * via map_pgszcvec(). Use AS_MAP_HEAP to get intermediate
		 * page sizes if ova is not aligned to szc's pgsz.
		 */
		if (szc > 0) {
			caddr_t rbss;

			rbss = (caddr_t)P2ROUNDUP((uintptr_t)p->p_bssbase,
			    pgsz);
			if (IS_P2ALIGNED(p->p_bssbase, pgsz) || ova > rbss) {
				crargs.szc = p->p_brkpageszc ? p->p_brkpageszc :
				    AS_MAP_NO_LPOOB;
			} else if (ova == rbss) {
				crargs.szc = szc;
			} else {
				crargs.szc = AS_MAP_HEAP;
			}
		} else {
			crargs.szc = AS_MAP_NO_LPOOB;
		}
		crargs.lgrp_mem_policy_flags = LGRP_MP_FLAG_EXTEND_UP;
		error = as_map(as, ova, (size_t)(nva - ova), segvn_create,
		    &crargs);
		if (error) {
			return (error);
		}

	} else if (nva < ova) {
		/*
		 * Release mapping to shrink UNIX data segment.
		 */
		(void) as_unmap(as, nva, (size_t)(ova - nva));
	}
	p->p_brksize = size;
	return (0);
}
3107c478bd9Sstevel@tonic-gate
3117c478bd9Sstevel@tonic-gate /*
3127c478bd9Sstevel@tonic-gate * Grow the stack to include sp. Return 1 if successful, 0 otherwise.
3137c478bd9Sstevel@tonic-gate * This routine assumes that the stack grows downward.
3147c478bd9Sstevel@tonic-gate */
3157c478bd9Sstevel@tonic-gate int
grow(caddr_t sp)3167c478bd9Sstevel@tonic-gate grow(caddr_t sp)
3177c478bd9Sstevel@tonic-gate {
3187c478bd9Sstevel@tonic-gate struct proc *p = curproc;
319ec25b48fSsusans struct as *as = p->p_as;
320ec25b48fSsusans size_t oldsize = p->p_stksize;
321ec25b48fSsusans size_t newsize;
3227c478bd9Sstevel@tonic-gate int err;
3237c478bd9Sstevel@tonic-gate
3247c478bd9Sstevel@tonic-gate /*
3257c478bd9Sstevel@tonic-gate * Serialize grow operations on an address space.
3267c478bd9Sstevel@tonic-gate * This also serves as the lock protecting p_stksize
3277c478bd9Sstevel@tonic-gate * and p_stkpageszc.
3287c478bd9Sstevel@tonic-gate */
329ec25b48fSsusans as_rangelock(as);
3307c478bd9Sstevel@tonic-gate if (use_stk_lpg && (p->p_flag & SAUTOLPG) != 0) {
3317c478bd9Sstevel@tonic-gate err = grow_lpg(sp);
3327c478bd9Sstevel@tonic-gate } else {
3337c478bd9Sstevel@tonic-gate err = grow_internal(sp, p->p_stkpageszc);
3347c478bd9Sstevel@tonic-gate }
335284ce987SPatrick Mooney newsize = p->p_stksize;
336ec25b48fSsusans as_rangeunlock(as);
337ec25b48fSsusans
338284ce987SPatrick Mooney if (err == 0 && newsize > oldsize) {
339ec25b48fSsusans ASSERT(IS_P2ALIGNED(oldsize, PAGESIZE));
340ec25b48fSsusans ASSERT(IS_P2ALIGNED(newsize, PAGESIZE));
341ec25b48fSsusans /*
342ec25b48fSsusans * Set up translations so the process doesn't have to fault in
343ec25b48fSsusans * the stack pages we just gave it.
344ec25b48fSsusans */
345ec25b48fSsusans (void) as_fault(as->a_hat, as, p->p_usrstack - newsize,
346ec25b48fSsusans newsize - oldsize, F_INVAL, S_WRITE);
347ec25b48fSsusans }
3487c478bd9Sstevel@tonic-gate return ((err == 0 ? 1 : 0));
3497c478bd9Sstevel@tonic-gate }
3507c478bd9Sstevel@tonic-gate
3517c478bd9Sstevel@tonic-gate /*
3527c478bd9Sstevel@tonic-gate * Algorithm: call arch-specific map_pgsz to get best page size to use,
3537c478bd9Sstevel@tonic-gate * then call grow_internal().
3547c478bd9Sstevel@tonic-gate * Returns 0 on success.
3557c478bd9Sstevel@tonic-gate */
3567c478bd9Sstevel@tonic-gate static int
grow_lpg(caddr_t sp)3577c478bd9Sstevel@tonic-gate grow_lpg(caddr_t sp)
3587c478bd9Sstevel@tonic-gate {
3597c478bd9Sstevel@tonic-gate struct proc *p = curproc;
3607c478bd9Sstevel@tonic-gate size_t pgsz;
3617c478bd9Sstevel@tonic-gate size_t len, newsize;
362ec25b48fSsusans caddr_t addr, saddr;
363ec25b48fSsusans caddr_t growend;
3647c478bd9Sstevel@tonic-gate int oszc, szc;
3657c478bd9Sstevel@tonic-gate int err;
3667c478bd9Sstevel@tonic-gate
3677c478bd9Sstevel@tonic-gate newsize = p->p_usrstack - sp;
3687c478bd9Sstevel@tonic-gate
3697c478bd9Sstevel@tonic-gate oszc = p->p_stkpageszc;
370ec25b48fSsusans pgsz = map_pgsz(MAPPGSZ_STK, p, sp, newsize, 0);
3717c478bd9Sstevel@tonic-gate szc = page_szc(pgsz);
3727c478bd9Sstevel@tonic-gate
3737c478bd9Sstevel@tonic-gate /*
3747c478bd9Sstevel@tonic-gate * Covers two cases:
3757c478bd9Sstevel@tonic-gate * 1. page_szc() returns -1 for invalid page size, so we want to
3767c478bd9Sstevel@tonic-gate * ignore it in that case.
3777c478bd9Sstevel@tonic-gate * 2. By design we never decrease page size, as it is more stable.
3787c478bd9Sstevel@tonic-gate * This shouldn't happen as the stack never shrinks.
3797c478bd9Sstevel@tonic-gate */
3807c478bd9Sstevel@tonic-gate if (szc <= oszc) {
3817c478bd9Sstevel@tonic-gate err = grow_internal(sp, oszc);
3827c478bd9Sstevel@tonic-gate /* failed, fall back to base page size */
3837c478bd9Sstevel@tonic-gate if (err != 0 && oszc != 0) {
3847c478bd9Sstevel@tonic-gate err = grow_internal(sp, 0);
3857c478bd9Sstevel@tonic-gate }
3867c478bd9Sstevel@tonic-gate return (err);
3877c478bd9Sstevel@tonic-gate }
3887c478bd9Sstevel@tonic-gate
3897c478bd9Sstevel@tonic-gate /*
3907c478bd9Sstevel@tonic-gate * We've grown sufficiently to switch to a new page size.
391ec25b48fSsusans * So we are going to remap the whole segment with the new page size.
3927c478bd9Sstevel@tonic-gate */
3937c478bd9Sstevel@tonic-gate err = grow_internal(sp, szc);
3947c478bd9Sstevel@tonic-gate /* The grow with szc failed, so fall back to base page size. */
3957c478bd9Sstevel@tonic-gate if (err != 0) {
3967c478bd9Sstevel@tonic-gate if (szc != 0) {
3977c478bd9Sstevel@tonic-gate err = grow_internal(sp, 0);
3987c478bd9Sstevel@tonic-gate }
3997c478bd9Sstevel@tonic-gate return (err);
4007c478bd9Sstevel@tonic-gate }
4017c478bd9Sstevel@tonic-gate
402ec25b48fSsusans /*
403ec25b48fSsusans * Round up stack pointer to a large page boundary and remap
404ec25b48fSsusans * any pgsz pages in the segment already faulted in beyond that
405ec25b48fSsusans * point.
406ec25b48fSsusans */
407ec25b48fSsusans saddr = p->p_usrstack - p->p_stksize;
408ec25b48fSsusans addr = (caddr_t)P2ROUNDUP((uintptr_t)saddr, pgsz);
409ec25b48fSsusans growend = (caddr_t)P2ALIGN((uintptr_t)p->p_usrstack, pgsz);
410ec25b48fSsusans len = growend - addr;
411ec25b48fSsusans /* Check that len is not negative. Update page size code for stack. */
412ec25b48fSsusans if (addr >= saddr && growend > addr && IS_P2ALIGNED(len, pgsz)) {
4137c478bd9Sstevel@tonic-gate (void) as_setpagesize(p->p_as, addr, len, szc, B_FALSE);
414ec25b48fSsusans p->p_stkpageszc = szc;
4157c478bd9Sstevel@tonic-gate }
4167c478bd9Sstevel@tonic-gate
4177c478bd9Sstevel@tonic-gate ASSERT(err == 0);
4187c478bd9Sstevel@tonic-gate return (err); /* should always be 0 */
4197c478bd9Sstevel@tonic-gate }
4207c478bd9Sstevel@tonic-gate
/*
 * Extend the stack mapping downward so that it covers sp, using page
 * size code growszc for the new extension.  If the new extension would
 * run into the stack guard segment, the guard is shrunk to make room,
 * provided it does not drop below stack_guard_min_sz.
 * This routine assumes that the stack grows downward.
 * Returns 0 on success, errno on failure.
 */
int
grow_internal(caddr_t sp, uint_t growszc)
{
	struct proc *p = curproc;
	size_t newsize;
	size_t oldsize;
	uintptr_t new_start;
	int error;
	size_t pgsz;
	uint_t szc;
	struct segvn_crargs crargs = SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);

	ASSERT(sp < p->p_usrstack);
	sp = (caddr_t)P2ALIGN((uintptr_t)sp, PAGESIZE);

	/*
	 * grow to growszc alignment but use current p->p_stkpageszc for
	 * the segvn_crargs szc passed to segvn_create. For memcntl to
	 * increase the szc, this allows the new extension segment to be
	 * concatenated successfully with the existing stack segment.
	 */
	if ((szc = growszc) != 0) {
		pgsz = page_get_pagesize(szc);
		ASSERT(pgsz > PAGESIZE);
		newsize = p->p_usrstack - (caddr_t)P2ALIGN((uintptr_t)sp, pgsz);
		if (newsize > (size_t)p->p_stk_ctl) {
			/*
			 * The large-page-rounded size would exceed the
			 * stack limit; fall back to base pages.
			 */
			szc = 0;
			pgsz = PAGESIZE;
			newsize = p->p_usrstack - sp;
		}
	} else {
		pgsz = PAGESIZE;
		newsize = p->p_usrstack - sp;
	}

	if (newsize > (size_t)p->p_stk_ctl) {
		/* Stack limit exceeded: fire the rctl action and fail. */
		(void) rctl_action(rctlproc_legacy[RLIMIT_STACK], p->p_rctls, p,
		    RCA_UNSAFE_ALL);

		return (ENOMEM);
	}

	oldsize = p->p_stksize;
	ASSERT(P2PHASE(oldsize, PAGESIZE) == 0);

	if (newsize <= oldsize) {	/* prevent the stack from shrinking */
		return (0);
	}

	/* Honor a non-executable stack. */
	if (!(p->p_stkprot & PROT_EXEC)) {
		crargs.prot &= ~PROT_EXEC;
	}
	/*
	 * extend stack with the proposed new growszc, which is different
	 * than p_stkpageszc only on a memcntl to increase the stack pagesize.
	 * AS_MAP_NO_LPOOB means use 0, and don't reapply OOB policies via
	 * map_pgszcvec(). Use AS_MAP_STACK to get intermediate page sizes
	 * if not aligned to szc's pgsz.
	 */
	if (szc > 0) {
		caddr_t oldsp = p->p_usrstack - oldsize;
		caddr_t austk = (caddr_t)P2ALIGN((uintptr_t)p->p_usrstack,
		    pgsz);

		if (IS_P2ALIGNED(p->p_usrstack, pgsz) || oldsp < austk) {
			crargs.szc = p->p_stkpageszc ? p->p_stkpageszc :
			    AS_MAP_NO_LPOOB;
		} else if (oldsp == austk) {
			crargs.szc = szc;
		} else {
			crargs.szc = AS_MAP_STACK;
		}
	} else {
		crargs.szc = AS_MAP_NO_LPOOB;
	}
	crargs.lgrp_mem_policy_flags = LGRP_MP_FLAG_EXTEND_DOWN;

	/*
	 * The stack is about to grow into its guard. This can be acceptable
	 * if the size restriction on the stack has been expanded since its
	 * initialization during exec(). In such cases, the guard segment will
	 * be shrunk, provided the new size is reasonable.
	 */
	new_start = (uintptr_t)p->p_usrstack - newsize;
	if (p->p_stkg_start != 0 && new_start > p->p_stkg_start &&
	    new_start < p->p_stkg_end) {
		const size_t unmap_sz = p->p_stkg_end - new_start;
		const size_t remain_sz = new_start - p->p_stkg_start;
		extern size_t stack_guard_min_sz;

		/* Do not allow the guard to shrink below minimum size */
		if (remain_sz < stack_guard_min_sz) {
			return (ENOMEM);
		}

		error = as_unmap(p->p_as, (caddr_t)new_start, unmap_sz);
		if (error != 0) {
			return (error);
		}
		p->p_stkg_end -= unmap_sz;
	}

	if ((error = as_map(p->p_as, (caddr_t)new_start, newsize - oldsize,
	    segvn_create, &crargs)) != 0) {
		if (error == EAGAIN) {
			/* No swap space to back the new stack pages. */
			cmn_err(CE_WARN, "Sorry, no swap space to grow stack "
			    "for pid %d (%s)", p->p_pid, PTOU(p)->u_comm);
		}
		return (error);
	}
	p->p_stksize = newsize;
	return (0);
}
5387c478bd9Sstevel@tonic-gate
53960946fe0Smec /*
540d2a70789SRichard Lowe * Find address for user to map. If MAP_FIXED is not specified, we can pick
541d2a70789SRichard Lowe * any address we want, but we will first try the value in *addrp if it is
542d2a70789SRichard Lowe * non-NULL and _MAP_RANDOMIZE is not set. Thus this is implementing a way to
543d2a70789SRichard Lowe * try and get a preferred address.
54460946fe0Smec */
54560946fe0Smec int
choose_addr(struct as * as,caddr_t * addrp,size_t len,offset_t off,int vacalign,uint_t flags)54660946fe0Smec choose_addr(struct as *as, caddr_t *addrp, size_t len, offset_t off,
54760946fe0Smec int vacalign, uint_t flags)
54860946fe0Smec {
54960946fe0Smec caddr_t basep = (caddr_t)(uintptr_t)((uintptr_t)*addrp & PAGEMASK);
55060946fe0Smec size_t lenp = len;
55160946fe0Smec
55260946fe0Smec ASSERT(AS_ISCLAIMGAP(as)); /* searches should be serialized */
55360946fe0Smec if (flags & MAP_FIXED) {
55460946fe0Smec (void) as_unmap(as, *addrp, len);
55560946fe0Smec return (0);
556d2a70789SRichard Lowe } else if (basep != NULL &&
557d2a70789SRichard Lowe ((flags & (MAP_ALIGN | _MAP_RANDOMIZE)) == 0) &&
55860946fe0Smec !as_gap(as, len, &basep, &lenp, 0, *addrp)) {
55960946fe0Smec /* User supplied address was available */
56060946fe0Smec *addrp = basep;
56160946fe0Smec } else {
56260946fe0Smec /*
56360946fe0Smec * No user supplied address or the address supplied was not
56460946fe0Smec * available.
56560946fe0Smec */
56660946fe0Smec map_addr(addrp, len, off, vacalign, flags);
56760946fe0Smec }
56860946fe0Smec if (*addrp == NULL)
56960946fe0Smec return (ENOMEM);
57060946fe0Smec return (0);
57160946fe0Smec }
57260946fe0Smec
57360946fe0Smec
5747c478bd9Sstevel@tonic-gate /*
5757c478bd9Sstevel@tonic-gate * Used for MAP_ANON - fast way to get anonymous pages
5767c478bd9Sstevel@tonic-gate */
static int
zmap(struct as *as, caddr_t *addrp, size_t len, uint_t uprot, int flags,
    offset_t pos)
{
	struct segvn_crargs vn_a;
	int error;

	/* The requested protections must be a subset of PROT_ALL. */
	if (((PROT_ALL & uprot) != uprot))
		return (EACCES);

	if ((flags & MAP_FIXED) != 0) {
		caddr_t userlimit;

		/*
		 * Use the user address.  First verify that
		 * the address to be used is page aligned.
		 * Then make some simple bounds checks.
		 */
		if (((uintptr_t)*addrp & PAGEOFFSET) != 0)
			return (EINVAL);

		/* _MAP_LOW32 restricts the mapping to the low 32-bit range. */
		userlimit = flags & _MAP_LOW32 ?
		    (caddr_t)USERLIMIT32 : as->a_userlimit;
		switch (valid_usr_range(*addrp, len, uprot, as, userlimit)) {
		case RANGE_OKAY:
			break;
		case RANGE_BADPROT:
			return (ENOTSUP);
		case RANGE_BADADDR:
		default:
			return (ENOMEM);
		}
	}
	/*
	 * No need to worry about vac alignment for anonymous
	 * pages since this is a "clone" object that doesn't
	 * yet exist.
	 */
	error = choose_addr(as, addrp, len, pos, ADDR_NOVACALIGN, flags);
	if (error != 0) {
		return (error);
	}

	/*
	 * Use the seg_vn segment driver; passing in the NULL amp
	 * gives the desired "cloning" effect.
	 */
	vn_a.vp = NULL;
	vn_a.offset = 0;
	vn_a.type = flags & MAP_TYPE;
	vn_a.prot = uprot;
	vn_a.maxprot = PROT_ALL;
	vn_a.flags = flags & ~MAP_TYPE;
	vn_a.cred = CRED();
	vn_a.amp = NULL;	/* NULL amp selects anonymous pages */
	vn_a.szc = 0;
	vn_a.lgrp_mem_policy_flags = 0;

	return (as_map(as, *addrp, len, segvn_create, &vn_a));
}
6377c478bd9Sstevel@tonic-gate
/*
 * A mapping is eligible for ASLR randomization when it is not MAP_FIXED
 * and, unless MAP_ALIGN is set, the caller did not pass a non-zero
 * address hint while aslr_respect_mmap_hint says hints must be honored.
 * The parameters are parenthesized in the expansion so that expression
 * arguments (e.g. a bitwise-or of flags) expand with correct precedence.
 */
#define	RANDOMIZABLE_MAPPING(addr, flags)			\
	((((flags) & MAP_FIXED) == 0) &&			\
	!((((flags) & MAP_ALIGN) == 0) && ((addr) != 0) &&	\
	aslr_respect_mmap_hint))
640d2a70789SRichard Lowe
/*
 * Common guts of the mmap(2) family: validate the request, then hand it
 * off to zmap() for anonymous mappings (fp == NULL) or VOP_MAP() for
 * file-backed ones.  Returns 0 or an errno value; on success *addrp
 * holds the chosen mapping address.
 */
static int
smmap_common(caddr_t *addrp, size_t len,
    int prot, int flags, struct file *fp, offset_t pos)
{
	struct vnode *vp;
	struct as *as = curproc->p_as;
	uint_t uprot, maxprot, type;
	int error;
	int in_crit = 0;	/* nonzero once we hold the nbl critical region */

	/*
	 * Reject any flag bits outside the supported set.
	 * NOTE(review): _MAP_RANDOMIZE is not in this mask, so a caller
	 * passing it directly gets EINVAL here; the MAP_FIXED|_MAP_RANDOMIZE
	 * check below appears to guard the internally-set case — confirm.
	 */
	if ((flags & ~(MAP_SHARED | MAP_PRIVATE | MAP_FIXED | _MAP_NEW |
	    _MAP_LOW32 | MAP_NORESERVE | MAP_ANON | MAP_ALIGN |
	    MAP_TEXT | MAP_INITDATA)) != 0) {
		/* | MAP_RENAME */	/* not implemented, let user know */
		return (EINVAL);
	}

	/* MAP_TEXT only makes sense for executable mappings. */
	if ((flags & MAP_TEXT) && !(prot & PROT_EXEC)) {
		return (EINVAL);
	}

	/* MAP_TEXT and MAP_INITDATA are mutually exclusive. */
	if ((flags & (MAP_TEXT | MAP_INITDATA)) == (MAP_TEXT | MAP_INITDATA)) {
		return (EINVAL);
	}

	/* A fixed-address mapping cannot also be randomized. */
	if ((flags & (MAP_FIXED | _MAP_RANDOMIZE)) ==
	    (MAP_FIXED | _MAP_RANDOMIZE)) {
		return (EINVAL);
	}

	/*
	 * If it's not a fixed allocation and mmap ASLR is enabled, randomize
	 * it.
	 */
	if (RANDOMIZABLE_MAPPING(*addrp, flags) &&
	    secflag_enabled(curproc, PROC_SEC_ASLR))
		flags |= _MAP_RANDOMIZE;

#if defined(__sparc)
	/*
	 * See if this is an "old mmap call".  If so, remember this
	 * fact and convert the flags value given to mmap to indicate
	 * the specified address in the system call must be used.
	 * _MAP_NEW is turned set by all new uses of mmap.
	 */
	if ((flags & _MAP_NEW) == 0)
		flags |= MAP_FIXED;
#endif
	/* _MAP_NEW is an ABI marker only; strip it before further use. */
	flags &= ~_MAP_NEW;

	/* Exactly one of MAP_PRIVATE / MAP_SHARED must be specified. */
	type = flags & MAP_TYPE;
	if (type != MAP_PRIVATE && type != MAP_SHARED)
		return (EINVAL);


	if (flags & MAP_ALIGN) {
		/* With MAP_ALIGN, *addrp carries the alignment, not a hint. */
		if (flags & MAP_FIXED)
			return (EINVAL);

		/* alignment needs to be a power of 2 >= page size */
		if (((uintptr_t)*addrp < PAGESIZE && (uintptr_t)*addrp != 0) ||
		    !ISP2((uintptr_t)*addrp))
			return (EINVAL);
	}
	/*
	 * Check for bad lengths and file position.
	 * We let the VOP_MAP routine check for negative lengths
	 * since on some vnode types this might be appropriate.
	 */
	if (len == 0 || (pos & (u_offset_t)PAGEOFFSET) != 0)
		return (EINVAL);

	maxprot = PROT_ALL;		/* start out allowing all accesses */
	uprot = prot | PROT_USER;

	if (fp == NULL) {
		/* Anonymous mapping: no vnode involved, handled by zmap(). */
		ASSERT(flags & MAP_ANON);
		/* discard lwpchan mappings, like munmap() */
		if ((flags & MAP_FIXED) && curproc->p_lcp != NULL)
			lwpchan_delete_mapping(curproc, *addrp, *addrp + len);
		as_rangelock(as);
		error = zmap(as, addrp, len, uprot, flags, pos);
		as_rangeunlock(as);
		/*
		 * Tell machine specific code that lwp has mapped shared memory
		 */
		if (error == 0 && (flags & MAP_SHARED)) {
			/* EMPTY */
			LWP_MMODEL_SHARED_AS(*addrp, len);
		}
		return (error);
	} else if ((flags & MAP_ANON) != 0)
		return (EINVAL);

	vp = fp->f_vnode;

	/* Can't execute code from "noexec" mounted filesystem. */
	if ((vp->v_vfsp->vfs_flag & VFS_NOEXEC) != 0)
		maxprot &= ~PROT_EXEC;

	/*
	 * These checks were added as part of large files.
	 *
	 * Return ENXIO if the initial position is negative; return EOVERFLOW
	 * if (offset + len) would overflow the maximum allowed offset for the
	 * type of file descriptor being used.
	 */
	if (vp->v_type == VREG) {
		if (pos < 0)
			return (ENXIO);
		if ((offset_t)len > (OFFSET_MAX(fp) - pos))
			return (EOVERFLOW);
	}

	if (type == MAP_SHARED && (fp->f_flag & FWRITE) == 0) {
		/* no write access allowed */
		maxprot &= ~PROT_WRITE;
	}

	/*
	 * Verify that the specified protections are not greater than the
	 * maximum allowable protections.  Also test to make sure that the
	 * file descriptor allows for read access since "write only" mappings
	 * are hard to do since normally we do the read from the file before
	 * the page can be written.
	 */
	if (((maxprot & uprot) != uprot) || (fp->f_flag & FREAD) == 0)
		return (EACCES);

	/*
	 * If the user specified an address, do some simple checks here
	 */
	if ((flags & MAP_FIXED) != 0) {
		caddr_t userlimit;

		/*
		 * Use the user address.  First verify that
		 * the address to be used is page aligned.
		 * Then make some simple bounds checks.
		 */
		if (((uintptr_t)*addrp & PAGEOFFSET) != 0)
			return (EINVAL);

		userlimit = flags & _MAP_LOW32 ?
		    (caddr_t)USERLIMIT32 : as->a_userlimit;
		switch (valid_usr_range(*addrp, len, uprot, as, userlimit)) {
		case RANGE_OKAY:
			break;
		case RANGE_BADPROT:
			return (ENOTSUP);
		case RANGE_BADADDR:
		default:
			return (ENOMEM);
		}
	}

	/*
	 * Presumably NBMAND (mandatory locking) conflict detection: enter
	 * the vnode's critical region and fail with EACCES if an existing
	 * lock conflicts with the access implied by this mapping — verify
	 * against the nbl_* interface documentation.
	 */
	if ((prot & (PROT_READ | PROT_WRITE | PROT_EXEC)) &&
	    nbl_need_check(vp)) {
		int svmand;
		nbl_op_t nop;

		nbl_start_crit(vp, RW_READER);
		in_crit = 1;
		error = nbl_svmand(vp, fp->f_cred, &svmand);
		if (error != 0)
			goto done;
		if ((prot & PROT_WRITE) && (type == MAP_SHARED)) {
			if (prot & (PROT_READ | PROT_EXEC)) {
				nop = NBL_READWRITE;
			} else {
				nop = NBL_WRITE;
			}
		} else {
			nop = NBL_READ;
		}
		if (nbl_conflict(vp, nop, 0, LONG_MAX, svmand, NULL)) {
			error = EACCES;
			goto done;
		}
	}

	/* discard lwpchan mappings, like munmap() */
	if ((flags & MAP_FIXED) && curproc->p_lcp != NULL)
		lwpchan_delete_mapping(curproc, *addrp, *addrp + len);

	/*
	 * Ok, now let the vnode map routine do its thing to set things up.
	 */
	error = VOP_MAP(vp, pos, as,
	    addrp, len, uprot, maxprot, flags, fp->f_cred, NULL);

	if (error == 0) {
		/*
		 * Tell machine specific code that lwp has mapped shared memory
		 */
		if (flags & MAP_SHARED) {
			/* EMPTY */
			LWP_MMODEL_SHARED_AS(*addrp, len);
		}
		if (vp->v_type == VREG &&
		    (flags & (MAP_TEXT | MAP_INITDATA)) != 0) {
			/*
			 * Mark this as an executable vnode
			 */
			mutex_enter(&vp->v_lock);
			vp->v_flag |= VVMEXEC;
			mutex_exit(&vp->v_lock);
		}
	}

done:
	/* Leave the nbl critical region if we entered it above. */
	if (in_crit)
		nbl_end_crit(vp);
	return (error);
}
8567c478bd9Sstevel@tonic-gate
8577c478bd9Sstevel@tonic-gate #ifdef _LP64
8587c478bd9Sstevel@tonic-gate /*
8597c478bd9Sstevel@tonic-gate * LP64 mmap(2) system call: 64-bit offset, 64-bit address.
8607c478bd9Sstevel@tonic-gate *
8617c478bd9Sstevel@tonic-gate * The "large file" mmap routine mmap64(2) is also mapped to this routine
8627c478bd9Sstevel@tonic-gate * by the 64-bit version of libc.
8637c478bd9Sstevel@tonic-gate *
8647c478bd9Sstevel@tonic-gate * Eventually, this should be the only version, and have smmap_common()
8657c478bd9Sstevel@tonic-gate * folded back into it again. Some day.
8667c478bd9Sstevel@tonic-gate */
8677c478bd9Sstevel@tonic-gate caddr_t
smmap64(caddr_t addr,size_t len,int prot,int flags,int fd,off_t pos)8687c478bd9Sstevel@tonic-gate smmap64(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos)
8697c478bd9Sstevel@tonic-gate {
8707c478bd9Sstevel@tonic-gate struct file *fp;
8717c478bd9Sstevel@tonic-gate int error;
8727c478bd9Sstevel@tonic-gate
8731b3b16f3STheo Schlossnagle if (fd == -1 && (flags & MAP_ANON) != 0)
8747c478bd9Sstevel@tonic-gate error = smmap_common(&addr, len, prot, flags,
8757c478bd9Sstevel@tonic-gate NULL, (offset_t)pos);
8767c478bd9Sstevel@tonic-gate else if ((fp = getf(fd)) != NULL) {
8777c478bd9Sstevel@tonic-gate error = smmap_common(&addr, len, prot, flags,
8787c478bd9Sstevel@tonic-gate fp, (offset_t)pos);
8797c478bd9Sstevel@tonic-gate releasef(fd);
8807c478bd9Sstevel@tonic-gate } else
8817c478bd9Sstevel@tonic-gate error = EBADF;
8827c478bd9Sstevel@tonic-gate
8837c478bd9Sstevel@tonic-gate return (error ? (caddr_t)(uintptr_t)set_errno(error) : addr);
8847c478bd9Sstevel@tonic-gate }
8857c478bd9Sstevel@tonic-gate #endif /* _LP64 */
8867c478bd9Sstevel@tonic-gate
8877c478bd9Sstevel@tonic-gate #if defined(_SYSCALL32_IMPL) || defined(_ILP32)
8887c478bd9Sstevel@tonic-gate
8897c478bd9Sstevel@tonic-gate /*
8907c478bd9Sstevel@tonic-gate * ILP32 mmap(2) system call: 32-bit offset, 32-bit address.
8917c478bd9Sstevel@tonic-gate */
8927c478bd9Sstevel@tonic-gate caddr_t
smmap32(caddr32_t addr,size32_t len,int prot,int flags,int fd,off32_t pos)8937c478bd9Sstevel@tonic-gate smmap32(caddr32_t addr, size32_t len, int prot, int flags, int fd, off32_t pos)
8947c478bd9Sstevel@tonic-gate {
8957c478bd9Sstevel@tonic-gate struct file *fp;
8967c478bd9Sstevel@tonic-gate int error;
8977c478bd9Sstevel@tonic-gate caddr_t a = (caddr_t)(uintptr_t)addr;
8987c478bd9Sstevel@tonic-gate
8997c478bd9Sstevel@tonic-gate if (flags & _MAP_LOW32)
9007c478bd9Sstevel@tonic-gate error = EINVAL;
9017c478bd9Sstevel@tonic-gate else if (fd == -1 && (flags & MAP_ANON) != 0)
9027c478bd9Sstevel@tonic-gate error = smmap_common(&a, (size_t)len, prot,
9037c478bd9Sstevel@tonic-gate flags | _MAP_LOW32, NULL, (offset_t)pos);
9047c478bd9Sstevel@tonic-gate else if ((fp = getf(fd)) != NULL) {
9057c478bd9Sstevel@tonic-gate error = smmap_common(&a, (size_t)len, prot,
9067c478bd9Sstevel@tonic-gate flags | _MAP_LOW32, fp, (offset_t)pos);
9077c478bd9Sstevel@tonic-gate releasef(fd);
9087c478bd9Sstevel@tonic-gate } else
9097c478bd9Sstevel@tonic-gate error = EBADF;
9107c478bd9Sstevel@tonic-gate
9117c478bd9Sstevel@tonic-gate ASSERT(error != 0 || (uintptr_t)(a + len) < (uintptr_t)UINT32_MAX);
9127c478bd9Sstevel@tonic-gate
9137c478bd9Sstevel@tonic-gate return (error ? (caddr_t)(uintptr_t)set_errno(error) : a);
9147c478bd9Sstevel@tonic-gate }
9157c478bd9Sstevel@tonic-gate
9167c478bd9Sstevel@tonic-gate /*
9177c478bd9Sstevel@tonic-gate * ILP32 mmap64(2) system call: 64-bit offset, 32-bit address.
9187c478bd9Sstevel@tonic-gate *
9197c478bd9Sstevel@tonic-gate * Now things really get ugly because we can't use the C-style
9207c478bd9Sstevel@tonic-gate * calling convention for more than 6 args, and 64-bit parameter
9217c478bd9Sstevel@tonic-gate * passing on 32-bit systems is less than clean.
9227c478bd9Sstevel@tonic-gate */
9237c478bd9Sstevel@tonic-gate
/*
 * Argument bundle for smmaplf32().  The cell width follows the kernel's
 * data model while the contents are always the 32-bit caller's values.
 */
struct mmaplf32a {
	caddr_t addr;
	size_t len;
#ifdef _LP64
	/*
	 * 32-bit contents, 64-bit cells
	 */
	uint64_t prot;
	uint64_t flags;
	uint64_t fd;
	uint64_t offhi;	/* one half of the 64-bit offset (endian-dependent) */
	uint64_t offlo;	/* the other half; see smmaplf32() */
#else
	/*
	 * 32-bit contents, 32-bit cells
	 */
	uint32_t prot;
	uint32_t flags;
	uint32_t fd;
	uint32_t offhi;	/* one half of the 64-bit offset (endian-dependent) */
	uint32_t offlo;	/* the other half; see smmaplf32() */
#endif
};
9477c478bd9Sstevel@tonic-gate
9487c478bd9Sstevel@tonic-gate int
smmaplf32(struct mmaplf32a * uap,rval_t * rvp)9497c478bd9Sstevel@tonic-gate smmaplf32(struct mmaplf32a *uap, rval_t *rvp)
9507c478bd9Sstevel@tonic-gate {
9517c478bd9Sstevel@tonic-gate struct file *fp;
9527c478bd9Sstevel@tonic-gate int error;
9537c478bd9Sstevel@tonic-gate caddr_t a = uap->addr;
9547c478bd9Sstevel@tonic-gate int flags = (int)uap->flags;
9557c478bd9Sstevel@tonic-gate int fd = (int)uap->fd;
9567c478bd9Sstevel@tonic-gate #ifdef _BIG_ENDIAN
9577c478bd9Sstevel@tonic-gate offset_t off = ((u_offset_t)uap->offhi << 32) | (u_offset_t)uap->offlo;
9587c478bd9Sstevel@tonic-gate #else
9597c478bd9Sstevel@tonic-gate offset_t off = ((u_offset_t)uap->offlo << 32) | (u_offset_t)uap->offhi;
9607c478bd9Sstevel@tonic-gate #endif
9617c478bd9Sstevel@tonic-gate
9627c478bd9Sstevel@tonic-gate if (flags & _MAP_LOW32)
9637c478bd9Sstevel@tonic-gate error = EINVAL;
9647c478bd9Sstevel@tonic-gate else if (fd == -1 && (flags & MAP_ANON) != 0)
9657c478bd9Sstevel@tonic-gate error = smmap_common(&a, uap->len, (int)uap->prot,
9667c478bd9Sstevel@tonic-gate flags | _MAP_LOW32, NULL, off);
9677c478bd9Sstevel@tonic-gate else if ((fp = getf(fd)) != NULL) {
9687c478bd9Sstevel@tonic-gate error = smmap_common(&a, uap->len, (int)uap->prot,
9697c478bd9Sstevel@tonic-gate flags | _MAP_LOW32, fp, off);
9707c478bd9Sstevel@tonic-gate releasef(fd);
9717c478bd9Sstevel@tonic-gate } else
9727c478bd9Sstevel@tonic-gate error = EBADF;
9737c478bd9Sstevel@tonic-gate
9747c478bd9Sstevel@tonic-gate if (error == 0)
9757c478bd9Sstevel@tonic-gate rvp->r_val1 = (uintptr_t)a;
9767c478bd9Sstevel@tonic-gate return (error);
9777c478bd9Sstevel@tonic-gate }
9787c478bd9Sstevel@tonic-gate
9797c478bd9Sstevel@tonic-gate #endif /* _SYSCALL32_IMPL || _ILP32 */
9807c478bd9Sstevel@tonic-gate
9817c478bd9Sstevel@tonic-gate int
munmap(caddr_t addr,size_t len)9827c478bd9Sstevel@tonic-gate munmap(caddr_t addr, size_t len)
9837c478bd9Sstevel@tonic-gate {
9847c478bd9Sstevel@tonic-gate struct proc *p = curproc;
9857c478bd9Sstevel@tonic-gate struct as *as = p->p_as;
9867c478bd9Sstevel@tonic-gate
9877c478bd9Sstevel@tonic-gate if (((uintptr_t)addr & PAGEOFFSET) != 0 || len == 0)
9887c478bd9Sstevel@tonic-gate return (set_errno(EINVAL));
9897c478bd9Sstevel@tonic-gate
9907c478bd9Sstevel@tonic-gate if (valid_usr_range(addr, len, 0, as, as->a_userlimit) != RANGE_OKAY)
9917c478bd9Sstevel@tonic-gate return (set_errno(EINVAL));
9927c478bd9Sstevel@tonic-gate
9937c478bd9Sstevel@tonic-gate /*
9947c478bd9Sstevel@tonic-gate * Discard lwpchan mappings.
9957c478bd9Sstevel@tonic-gate */
9967c478bd9Sstevel@tonic-gate if (p->p_lcp != NULL)
9977c478bd9Sstevel@tonic-gate lwpchan_delete_mapping(p, addr, addr + len);
9987c478bd9Sstevel@tonic-gate if (as_unmap(as, addr, len) != 0)
9997c478bd9Sstevel@tonic-gate return (set_errno(EINVAL));
10007c478bd9Sstevel@tonic-gate
10017c478bd9Sstevel@tonic-gate return (0);
10027c478bd9Sstevel@tonic-gate }
10037c478bd9Sstevel@tonic-gate
10047c478bd9Sstevel@tonic-gate int
mprotect(caddr_t addr,size_t len,int prot)10057c478bd9Sstevel@tonic-gate mprotect(caddr_t addr, size_t len, int prot)
10067c478bd9Sstevel@tonic-gate {
10077c478bd9Sstevel@tonic-gate struct as *as = curproc->p_as;
10087c478bd9Sstevel@tonic-gate uint_t uprot = prot | PROT_USER;
10097c478bd9Sstevel@tonic-gate int error;
10107c478bd9Sstevel@tonic-gate
10117c478bd9Sstevel@tonic-gate if (((uintptr_t)addr & PAGEOFFSET) != 0 || len == 0)
10127c478bd9Sstevel@tonic-gate return (set_errno(EINVAL));
10137c478bd9Sstevel@tonic-gate
10147c478bd9Sstevel@tonic-gate switch (valid_usr_range(addr, len, prot, as, as->a_userlimit)) {
10157c478bd9Sstevel@tonic-gate case RANGE_OKAY:
10167c478bd9Sstevel@tonic-gate break;
10177c478bd9Sstevel@tonic-gate case RANGE_BADPROT:
10187c478bd9Sstevel@tonic-gate return (set_errno(ENOTSUP));
10197c478bd9Sstevel@tonic-gate case RANGE_BADADDR:
10207c478bd9Sstevel@tonic-gate default:
10217c478bd9Sstevel@tonic-gate return (set_errno(ENOMEM));
10227c478bd9Sstevel@tonic-gate }
10237c478bd9Sstevel@tonic-gate
10247c478bd9Sstevel@tonic-gate error = as_setprot(as, addr, len, uprot);
10257c478bd9Sstevel@tonic-gate if (error)
10267c478bd9Sstevel@tonic-gate return (set_errno(error));
10277c478bd9Sstevel@tonic-gate return (0);
10287c478bd9Sstevel@tonic-gate }
10297c478bd9Sstevel@tonic-gate
10307c478bd9Sstevel@tonic-gate #define MC_CACHE 128 /* internal result buffer */
10317c478bd9Sstevel@tonic-gate #define MC_QUANTUM (MC_CACHE * PAGESIZE) /* addresses covered in loop */
10327c478bd9Sstevel@tonic-gate
/*
 * mincore(2): fill vecp with one byte per page describing the residency
 * of each page in [addr, addr + len).  Results are gathered MC_CACHE
 * pages (MC_QUANTUM bytes of address space) at a time into a local
 * buffer and copied out incrementally.
 */
int
mincore(caddr_t addr, size_t len, char *vecp)
{
	struct as *as = curproc->p_as;
	caddr_t ea;			/* end address of loop */
	size_t rl;			/* inner result length */
	char vec[MC_CACHE];		/* local vector cache */
	int error;
	model_t model;
	long llen;

	model = get_udatamodel();
	/*
	 * Validate form of address parameters.
	 */
	if (model == DATAMODEL_NATIVE) {
		llen = (long)len;
	} else {
		/* 32-bit caller: reinterpret len so a negative value shows. */
		llen = (int32_t)(size32_t)len;
	}
	if (((uintptr_t)addr & PAGEOFFSET) != 0 || llen <= 0)
		return (set_errno(EINVAL));

	if (valid_usr_range(addr, len, 0, as, as->a_userlimit) != RANGE_OKAY)
		return (set_errno(ENOMEM));

	/*
	 * Loop over subranges of interval [addr : addr + len), recovering
	 * results internally and then copying them out to caller.  Subrange
	 * is based on the size of MC_CACHE, defined above.
	 */
	for (ea = addr + len; addr < ea; addr += MC_QUANTUM) {
		error = as_incore(as, addr,
		    (size_t)MIN(MC_QUANTUM, ea - addr), vec, &rl);
		if (rl != 0) {
			/*
			 * rl comes back as bytes of address space covered;
			 * round up to a page count, which is the number of
			 * vector entries to copy out.  Partial results are
			 * copied out even when as_incore() also reported an
			 * error for this subrange.
			 */
			rl = (rl + PAGESIZE - 1) / PAGESIZE;
			if (copyout(vec, vecp, rl) != 0)
				return (set_errno(EFAULT));
			vecp += rl;
		}
		if (error != 0)
			return (set_errno(ENOMEM));
	}
	return (0);
}
1078