Lines Matching refs:cp

Cross-reference of every line referring to the kmem_cache_t pointer cp in the kernel slab allocator (kmem.c): each entry gives the source line number, the matching code, and the enclosing function; declarations are flagged "local" or "argument".

1192 kmem_cache_t *cp; in kmem_cache_applyall() local
1195 for (cp = list_head(&kmem_caches); cp != NULL; in kmem_cache_applyall()
1196 cp = list_next(&kmem_caches, cp)) in kmem_cache_applyall()
1198 (void) taskq_dispatch(tq, (task_func_t *)func, cp, in kmem_cache_applyall()
1201 func(cp); in kmem_cache_applyall()
1208 kmem_cache_t *cp; in kmem_cache_applyall_id() local
1211 for (cp = list_head(&kmem_caches); cp != NULL; in kmem_cache_applyall_id()
1212 cp = list_next(&kmem_caches, cp)) { in kmem_cache_applyall_id()
1213 if (!(cp->cache_cflags & KMC_IDENTIFIER)) in kmem_cache_applyall_id()
1216 (void) taskq_dispatch(tq, (task_func_t *)func, cp, in kmem_cache_applyall_id()
1219 func(cp); in kmem_cache_applyall_id()
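
The two appliers above walk the global kmem_caches list and invoke a callback on every cache: asynchronously via taskq_dispatch() when a task queue is supplied, synchronously otherwise, with kmem_cache_applyall_id() additionally skipping caches that lack KMC_IDENTIFIER. A minimal user-space sketch of that dispatch pattern (the cache, list, and taskq types here are hypothetical stand-ins, not the kernel's):

struct cache { struct cache *next; int identifier; };
typedef void (*apply_func_t)(struct cache *);
struct taskq;                        /* opaque; NULL means "call inline" */

/* stand-in for taskq_dispatch(): a real queue would run func(cp) later */
static void
dispatch_model(struct taskq *tq, apply_func_t func, struct cache *cp)
{
    (void)tq;
    func(cp);
}

static void
cache_applyall_model(struct cache *head, apply_func_t func, struct taskq *tq,
    int id_only)
{
    for (struct cache *cp = head; cp != NULL; cp = cp->next) {
        if (id_only && !cp->identifier)
            continue;           /* the _id variant's KMC_IDENTIFIER filter */
        if (tq != NULL)
            dispatch_model(tq, func, cp);
        else
            func(cp);
    }
}
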
1228 kmem_findslab(kmem_cache_t *cp, void *buf) in kmem_findslab() argument
1232 mutex_enter(&cp->cache_lock); in kmem_findslab()
1233 for (sp = list_head(&cp->cache_complete_slabs); sp != NULL; in kmem_findslab()
1234 sp = list_next(&cp->cache_complete_slabs, sp)) { in kmem_findslab()
1236 mutex_exit(&cp->cache_lock); in kmem_findslab()
1240 for (sp = avl_first(&cp->cache_partial_slabs); sp != NULL; in kmem_findslab()
1241 sp = AVL_NEXT(&cp->cache_partial_slabs, sp)) { in kmem_findslab()
1243 mutex_exit(&cp->cache_lock); in kmem_findslab()
1247 mutex_exit(&cp->cache_lock); in kmem_findslab()
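
kmem_findslab() is debugging support for kmem_error(): under cache_lock it scans the complete-slab list and then the partial-slab AVL tree for the slab whose address range contains buf, so a linear search is acceptable. The containment test reduces to range arithmetic; a sketch with a hypothetical slab descriptor:

#include <stddef.h>

struct slab_model { char *base; size_t size; };

/* does buf fall inside this slab's [base, base + size) range? */
static int
slab_contains(const struct slab_model *sp, const void *buf)
{
    const char *p = buf;
    return (p >= sp->base && p < sp->base + sp->size);
}
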
1257 kmem_cache_t *cp = cparg; in kmem_error() local
1266 sp = kmem_findslab(cp, buf); in kmem_error()
1268 for (cp = list_tail(&kmem_caches); cp != NULL; in kmem_error()
1269 cp = list_prev(&kmem_caches, cp)) { in kmem_error()
1270 if ((sp = kmem_findslab(cp, buf)) != NULL) in kmem_error()
1276 cp = NULL; in kmem_error()
1279 if (cp != cparg) in kmem_error()
1283 (uintptr_t)sp->slab_base) % cp->cache_chunksize; in kmem_error()
1286 if (cp->cache_flags & KMF_BUFTAG) in kmem_error()
1287 btp = KMEM_BUFTAG(cp, buf); in kmem_error()
1288 if (cp->cache_flags & KMF_HASH) { in kmem_error()
1289 mutex_enter(&cp->cache_lock); in kmem_error()
1290 for (bcp = *KMEM_HASH(cp, buf); bcp; bcp = bcp->bc_next) in kmem_error()
1293 mutex_exit(&cp->cache_lock); in kmem_error()
1296 if (kmem_findslab(cp->cache_bufctl_cache, bcp) == in kmem_error()
1309 kmem_panic_info.kmp_realcache = cp; in kmem_error()
1319 off = verify_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify); in kmem_error()
1353 printf("buffer was allocated from %s,\n", cp->cache_name); in kmem_error()
1372 if (bcp != NULL && (cp->cache_flags & KMF_AUDIT) && in kmem_error()
1382 (void *)sp, cp->cache_name); in kmem_error()
1472 #define KMEM_AUDIT(lp, cp, bcp) \ argument
1482 kmem_log_event(kmem_log_header_t *lp, kmem_cache_t *cp, in kmem_log_event() argument
1490 bca.bc_cache = cp; in kmem_log_event()
1491 KMEM_AUDIT(lp, cp, &bca); in kmem_log_event()
1498 kmem_slab_create(kmem_cache_t *cp, int kmflag) in kmem_slab_create() argument
1500 size_t slabsize = cp->cache_slabsize; in kmem_slab_create()
1501 size_t chunksize = cp->cache_chunksize; in kmem_slab_create()
1502 int cache_flags = cp->cache_flags; in kmem_slab_create()
1507 vmem_t *vmp = cp->cache_arena; in kmem_slab_create()
1509 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock)); in kmem_slab_create()
1511 color = cp->cache_color + cp->cache_align; in kmem_slab_create()
1512 if (color > cp->cache_maxcolor) in kmem_slab_create()
1513 color = cp->cache_mincolor; in kmem_slab_create()
1514 cp->cache_color = color; in kmem_slab_create()
1529 ASSERT((cp->cache_move == NULL) || !(cp->cache_cflags & KMC_NOTOUCH)); in kmem_slab_create()
1530 if (!(cp->cache_cflags & KMC_NOTOUCH)) in kmem_slab_create()
1538 sp = KMEM_SLAB(cp, slab); in kmem_slab_create()
1542 sp->slab_cache = cp; in kmem_slab_create()
1554 bcp = kmem_cache_alloc(cp->cache_bufctl_cache, kmflag); in kmem_slab_create()
1561 bcap->bc_cache = cp; in kmem_slab_create()
1566 bcp = KMEM_BUFCTL(cp, buf); in kmem_slab_create()
1569 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); in kmem_slab_create()
1575 cp->cache_verify); in kmem_slab_create()
1583 kmem_log_event(kmem_slab_log, cp, sp, slab); in kmem_slab_create()
1591 kmem_cache_free(cp->cache_bufctl_cache, bcp); in kmem_slab_create()
1601 kmem_log_event(kmem_failure_log, cp, NULL, NULL); in kmem_slab_create()
1602 atomic_inc_64(&cp->cache_alloc_fail); in kmem_slab_create()
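
Slab coloring (lines 1511-1514 above) staggers where the first buffer of each new slab starts: every slab begins cache_align bytes further in than the last, wrapping from cache_maxcolor back to cache_mincolor, so identical buffers in different slabs do not all land on the same hardware cache lines. A self-contained model of the color cycle, with field names mirroring the cache's:

#include <stddef.h>

struct color_state { size_t color, align, mincolor, maxcolor; };

/* pick the starting offset for the next slab and advance the cycle */
static size_t
next_slab_color(struct color_state *cs)
{
    size_t color = cs->color + cs->align;
    if (color > cs->maxcolor)
        color = cs->mincolor;       /* wrap: reuse the smallest offset */
    cs->color = color;
    return (color);
}
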
1611 kmem_slab_destroy(kmem_cache_t *cp, kmem_slab_t *sp) in kmem_slab_destroy() argument
1613 vmem_t *vmp = cp->cache_arena; in kmem_slab_destroy()
1616 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock)); in kmem_slab_destroy()
1619 if (cp->cache_flags & KMF_HASH) { in kmem_slab_destroy()
1623 kmem_cache_free(cp->cache_bufctl_cache, bcp); in kmem_slab_destroy()
1627 vmem_free(vmp, slab, cp->cache_slabsize); in kmem_slab_destroy()
1631 kmem_slab_alloc_impl(kmem_cache_t *cp, kmem_slab_t *sp, boolean_t prefill) in kmem_slab_alloc_impl() argument
1637 ASSERT(MUTEX_HELD(&cp->cache_lock)); in kmem_slab_alloc_impl()
1644 (sp == avl_first(&cp->cache_partial_slabs)))); in kmem_slab_alloc_impl()
1645 ASSERT(sp->slab_cache == cp); in kmem_slab_alloc_impl()
1647 cp->cache_slab_alloc++; in kmem_slab_alloc_impl()
1648 cp->cache_bufslab--; in kmem_slab_alloc_impl()
1654 if (cp->cache_flags & KMF_HASH) { in kmem_slab_alloc_impl()
1659 hash_bucket = KMEM_HASH(cp, buf); in kmem_slab_alloc_impl()
1662 if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) { in kmem_slab_alloc_impl()
1663 KMEM_AUDIT(kmem_transaction_log, cp, bcp); in kmem_slab_alloc_impl()
1666 buf = KMEM_BUF(cp, bcp); in kmem_slab_alloc_impl()
1677 avl_remove(&cp->cache_partial_slabs, sp); in kmem_slab_alloc_impl()
1682 list_insert_head(&cp->cache_complete_slabs, sp); in kmem_slab_alloc_impl()
1683 cp->cache_complete_slab_count++; in kmem_slab_alloc_impl()
1693 if (new_slab && prefill && (cp->cache_flags & KMF_PREFILL) && in kmem_slab_alloc_impl()
1694 (KMEM_CPU_CACHE(cp)->cc_magsize != 0)) { in kmem_slab_alloc_impl()
1695 kmem_slab_prefill(cp, sp); in kmem_slab_alloc_impl()
1700 avl_add(&cp->cache_partial_slabs, sp); in kmem_slab_alloc_impl()
1708 ASSERT(!avl_update(&cp->cache_partial_slabs, sp)); in kmem_slab_alloc_impl()
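
kmem_slab_alloc_impl() hands out one chunk and then reclassifies the slab: a slab with no free chunks left moves from the cache_partial_slabs AVL tree to the head of cache_complete_slabs, a brand-new slab with room to spare is added to the partial tree, and an existing partial slab keeps its (avl_update-verified) position. The state transition reduces to a refcount-versus-capacity check; a toy model:

struct slab_m { int refcnt, chunks; };  /* chunks handed out vs. capacity */

/* account for one allocation; name the container the slab now belongs in */
static const char *
slab_container_after_alloc(struct slab_m *sp)
{
    sp->refcnt++;
    return (sp->refcnt == sp->chunks ? "complete list" : "partial tree");
}
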
1716 kmem_slab_alloc(kmem_cache_t *cp, int kmflag) in kmem_slab_alloc() argument
1722 mutex_enter(&cp->cache_lock); in kmem_slab_alloc()
1723 test_destructor = (cp->cache_slab_alloc == 0); in kmem_slab_alloc()
1724 sp = avl_first(&cp->cache_partial_slabs); in kmem_slab_alloc()
1726 ASSERT(cp->cache_bufslab == 0); in kmem_slab_alloc()
1731 mutex_exit(&cp->cache_lock); in kmem_slab_alloc()
1732 if ((sp = kmem_slab_create(cp, kmflag)) == NULL) { in kmem_slab_alloc()
1735 mutex_enter(&cp->cache_lock); in kmem_slab_alloc()
1736 cp->cache_slab_create++; in kmem_slab_alloc()
1737 if ((cp->cache_buftotal += sp->slab_chunks) > cp->cache_bufmax) in kmem_slab_alloc()
1738 cp->cache_bufmax = cp->cache_buftotal; in kmem_slab_alloc()
1739 cp->cache_bufslab += sp->slab_chunks; in kmem_slab_alloc()
1742 buf = kmem_slab_alloc_impl(cp, sp, B_TRUE); in kmem_slab_alloc()
1743 ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) == in kmem_slab_alloc()
1744 (cp->cache_complete_slab_count + in kmem_slab_alloc()
1745 avl_numnodes(&cp->cache_partial_slabs) + in kmem_slab_alloc()
1746 (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount))); in kmem_slab_alloc()
1747 mutex_exit(&cp->cache_lock); in kmem_slab_alloc()
1749 if (test_destructor && cp->cache_destructor != NULL) { in kmem_slab_alloc()
1755 if ((cp->cache_constructor == NULL) || in kmem_slab_alloc()
1756 cp->cache_constructor(buf, cp->cache_private, in kmem_slab_alloc()
1758 cp->cache_destructor(buf, cp->cache_private); in kmem_slab_alloc()
1761 cp->cache_bufsize); in kmem_slab_alloc()
1762 if (cp->cache_flags & KMF_DEADBEEF) { in kmem_slab_alloc()
1763 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify); in kmem_slab_alloc()
1776 kmem_slab_free(kmem_cache_t *cp, void *buf) in kmem_slab_free() argument
1783 mutex_enter(&cp->cache_lock); in kmem_slab_free()
1784 cp->cache_slab_free++; in kmem_slab_free()
1786 if (cp->cache_flags & KMF_HASH) { in kmem_slab_free()
1790 prev_bcpp = KMEM_HASH(cp, buf); in kmem_slab_free()
1797 cp->cache_lookup_depth++; in kmem_slab_free()
1801 bcp = KMEM_BUFCTL(cp, buf); in kmem_slab_free()
1802 sp = KMEM_SLAB(cp, buf); in kmem_slab_free()
1805 if (bcp == NULL || sp->slab_cache != cp || !KMEM_SLAB_MEMBER(sp, buf)) { in kmem_slab_free()
1806 mutex_exit(&cp->cache_lock); in kmem_slab_free()
1807 kmem_error(KMERR_BADADDR, cp, buf); in kmem_slab_free()
1819 kmem_slab_move_yes(cp, sp, buf); in kmem_slab_free()
1822 if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) { in kmem_slab_free()
1823 if (cp->cache_flags & KMF_CONTENTS) in kmem_slab_free()
1826 cp->cache_contents); in kmem_slab_free()
1827 KMEM_AUDIT(kmem_transaction_log, cp, bcp); in kmem_slab_free()
1833 cp->cache_bufslab++; in kmem_slab_free()
1842 list_remove(&cp->cache_complete_slabs, sp); in kmem_slab_free()
1843 cp->cache_complete_slab_count--; in kmem_slab_free()
1845 avl_remove(&cp->cache_partial_slabs, sp); in kmem_slab_free()
1848 cp->cache_buftotal -= sp->slab_chunks; in kmem_slab_free()
1849 cp->cache_bufslab -= sp->slab_chunks; in kmem_slab_free()
1861 if (cp->cache_defrag == NULL || in kmem_slab_free()
1862 (avl_is_empty(&cp->cache_defrag->kmd_moves_pending) && in kmem_slab_free()
1864 cp->cache_slab_destroy++; in kmem_slab_free()
1865 mutex_exit(&cp->cache_lock); in kmem_slab_free()
1866 kmem_slab_destroy(cp, sp); in kmem_slab_free()
1868 list_t *deadlist = &cp->cache_defrag->kmd_deadlist; in kmem_slab_free()
1882 cp->cache_defrag->kmd_deadcount++; in kmem_slab_free()
1883 mutex_exit(&cp->cache_lock); in kmem_slab_free()
1892 list_remove(&cp->cache_complete_slabs, sp); in kmem_slab_free()
1893 cp->cache_complete_slab_count--; in kmem_slab_free()
1894 avl_add(&cp->cache_partial_slabs, sp); in kmem_slab_free()
1896 (void) avl_update_gt(&cp->cache_partial_slabs, sp); in kmem_slab_free()
1899 ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) == in kmem_slab_free()
1900 (cp->cache_complete_slab_count + in kmem_slab_free()
1901 avl_numnodes(&cp->cache_partial_slabs) + in kmem_slab_free()
1902 (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount))); in kmem_slab_free()
1903 mutex_exit(&cp->cache_lock); in kmem_slab_free()
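
The ASSERT at lines 1899-1902 (also checked after every slab allocation, lines 1743-1746) is the cache's central accounting invariant: every slab ever created has either been destroyed or is findable on exactly one of the complete list, the partial tree, or the defrag deadlist. Stated as a standalone check:

#include <assert.h>

struct cache_counts {
    unsigned long slab_create, slab_destroy;  /* lifetime counters */
    unsigned long complete, partial, dead;    /* current list populations */
};

static void
check_slab_invariant(const struct cache_counts *c)
{
    assert(c->slab_create - c->slab_destroy ==
        c->complete + c->partial + c->dead);
}
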
1910 kmem_cache_alloc_debug(kmem_cache_t *cp, void *buf, int kmflag, int construct, in kmem_cache_alloc_debug() argument
1913 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); in kmem_cache_alloc_debug()
1918 kmem_error(KMERR_BADBUFTAG, cp, buf); in kmem_cache_alloc_debug()
1924 if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) { in kmem_cache_alloc_debug()
1925 kmem_error(KMERR_BADBUFCTL, cp, buf); in kmem_cache_alloc_debug()
1929 if (cp->cache_flags & KMF_DEADBEEF) { in kmem_cache_alloc_debug()
1930 if (!construct && (cp->cache_flags & KMF_LITE)) { in kmem_cache_alloc_debug()
1932 kmem_error(KMERR_MODIFIED, cp, buf); in kmem_cache_alloc_debug()
1935 if (cp->cache_constructor != NULL) in kmem_cache_alloc_debug()
1943 cp->cache_verify)) { in kmem_cache_alloc_debug()
1944 kmem_error(KMERR_MODIFIED, cp, buf); in kmem_cache_alloc_debug()
1951 if ((mtbf = kmem_mtbf | cp->cache_mtbf) != 0 && in kmem_cache_alloc_debug()
1954 kmem_log_event(kmem_failure_log, cp, NULL, NULL); in kmem_cache_alloc_debug()
1955 if (!construct && cp->cache_destructor != NULL) in kmem_cache_alloc_debug()
1956 cp->cache_destructor(buf, cp->cache_private); in kmem_cache_alloc_debug()
1961 if (mtbf || (construct && cp->cache_constructor != NULL && in kmem_cache_alloc_debug()
1962 cp->cache_constructor(buf, cp->cache_private, kmflag) != 0)) { in kmem_cache_alloc_debug()
1963 atomic_inc_64(&cp->cache_alloc_fail); in kmem_cache_alloc_debug()
1965 if (cp->cache_flags & KMF_DEADBEEF) in kmem_cache_alloc_debug()
1966 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify); in kmem_cache_alloc_debug()
1967 kmem_slab_free(cp, buf); in kmem_cache_alloc_debug()
1971 if (cp->cache_flags & KMF_AUDIT) { in kmem_cache_alloc_debug()
1972 KMEM_AUDIT(kmem_transaction_log, cp, bcp); in kmem_cache_alloc_debug()
1975 if ((cp->cache_flags & KMF_LITE) && in kmem_cache_alloc_debug()
1976 !(cp->cache_cflags & KMC_KMEM_ALLOC)) { in kmem_cache_alloc_debug()
1984 kmem_cache_free_debug(kmem_cache_t *cp, void *buf, caddr_t caller) in kmem_cache_free_debug() argument
1986 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); in kmem_cache_free_debug()
1992 kmem_error(KMERR_DUPFREE, cp, buf); in kmem_cache_free_debug()
1995 sp = kmem_findslab(cp, buf); in kmem_cache_free_debug()
1996 if (sp == NULL || sp->slab_cache != cp) in kmem_cache_free_debug()
1997 kmem_error(KMERR_BADADDR, cp, buf); in kmem_cache_free_debug()
1999 kmem_error(KMERR_REDZONE, cp, buf); in kmem_cache_free_debug()
2005 if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) { in kmem_cache_free_debug()
2006 kmem_error(KMERR_BADBUFCTL, cp, buf); in kmem_cache_free_debug()
2011 kmem_error(KMERR_REDZONE, cp, buf); in kmem_cache_free_debug()
2015 if (cp->cache_flags & KMF_AUDIT) { in kmem_cache_free_debug()
2016 if (cp->cache_flags & KMF_CONTENTS) in kmem_cache_free_debug()
2018 buf, cp->cache_contents); in kmem_cache_free_debug()
2019 KMEM_AUDIT(kmem_transaction_log, cp, bcp); in kmem_cache_free_debug()
2022 if ((cp->cache_flags & KMF_LITE) && in kmem_cache_free_debug()
2023 !(cp->cache_cflags & KMC_KMEM_ALLOC)) { in kmem_cache_free_debug()
2027 if (cp->cache_flags & KMF_DEADBEEF) { in kmem_cache_free_debug()
2028 if (cp->cache_flags & KMF_LITE) in kmem_cache_free_debug()
2030 else if (cp->cache_destructor != NULL) in kmem_cache_free_debug()
2031 cp->cache_destructor(buf, cp->cache_private); in kmem_cache_free_debug()
2033 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify); in kmem_cache_free_debug()
2043 kmem_magazine_destroy(kmem_cache_t *cp, kmem_magazine_t *mp, int nrounds) in kmem_magazine_destroy() argument
2047 ASSERT(!list_link_active(&cp->cache_link) || in kmem_magazine_destroy()
2053 if (cp->cache_flags & KMF_DEADBEEF) { in kmem_magazine_destroy()
2055 cp->cache_verify) != NULL) { in kmem_magazine_destroy()
2056 kmem_error(KMERR_MODIFIED, cp, buf); in kmem_magazine_destroy()
2059 if ((cp->cache_flags & KMF_LITE) && in kmem_magazine_destroy()
2060 cp->cache_destructor != NULL) { in kmem_magazine_destroy()
2061 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); in kmem_magazine_destroy()
2063 cp->cache_destructor(buf, cp->cache_private); in kmem_magazine_destroy()
2066 } else if (cp->cache_destructor != NULL) { in kmem_magazine_destroy()
2067 cp->cache_destructor(buf, cp->cache_private); in kmem_magazine_destroy()
2070 kmem_slab_free(cp, buf); in kmem_magazine_destroy()
2072 ASSERT(KMEM_MAGAZINE_VALID(cp, mp)); in kmem_magazine_destroy()
2073 kmem_cache_free(cp->cache_magtype->mt_cache, mp); in kmem_magazine_destroy()
2080 kmem_depot_alloc(kmem_cache_t *cp, kmem_maglist_t *mlp) in kmem_depot_alloc() argument
2090 if (!mutex_tryenter(&cp->cache_depot_lock)) { in kmem_depot_alloc()
2091 mutex_enter(&cp->cache_depot_lock); in kmem_depot_alloc()
2092 cp->cache_depot_contention++; in kmem_depot_alloc()
2096 ASSERT(KMEM_MAGAZINE_VALID(cp, mp)); in kmem_depot_alloc()
2103 mutex_exit(&cp->cache_depot_lock); in kmem_depot_alloc()
2112 kmem_depot_free(kmem_cache_t *cp, kmem_maglist_t *mlp, kmem_magazine_t *mp) in kmem_depot_free() argument
2114 mutex_enter(&cp->cache_depot_lock); in kmem_depot_free()
2115 ASSERT(KMEM_MAGAZINE_VALID(cp, mp)); in kmem_depot_free()
2119 mutex_exit(&cp->cache_depot_lock); in kmem_depot_free()
2126 kmem_depot_ws_update(kmem_cache_t *cp) in kmem_depot_ws_update() argument
2128 mutex_enter(&cp->cache_depot_lock); in kmem_depot_ws_update()
2129 cp->cache_full.ml_reaplimit = cp->cache_full.ml_min; in kmem_depot_ws_update()
2130 cp->cache_full.ml_min = cp->cache_full.ml_total; in kmem_depot_ws_update()
2131 cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_min; in kmem_depot_ws_update()
2132 cp->cache_empty.ml_min = cp->cache_empty.ml_total; in kmem_depot_ws_update()
2133 mutex_exit(&cp->cache_depot_lock); in kmem_depot_ws_update()
2141 kmem_depot_ws_zero(kmem_cache_t *cp) in kmem_depot_ws_zero() argument
2143 mutex_enter(&cp->cache_depot_lock); in kmem_depot_ws_zero()
2144 cp->cache_full.ml_reaplimit = cp->cache_full.ml_total; in kmem_depot_ws_zero()
2145 cp->cache_full.ml_min = cp->cache_full.ml_total; in kmem_depot_ws_zero()
2146 cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_total; in kmem_depot_ws_zero()
2147 cp->cache_empty.ml_min = cp->cache_empty.ml_total; in kmem_depot_ws_zero()
2148 mutex_exit(&cp->cache_depot_lock); in kmem_depot_ws_zero()
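
kmem_depot_ws_update() is the working-set estimator run periodically over both magazine lists: whatever sat on a list untouched for a whole interval (the old floor ml_min) becomes reapable, and the floor is re-armed at the current total, to be driven back down by depot allocations. kmem_depot_ws_zero() simply marks everything reapable for an immediate purge. A compact model of the update step:

struct maglist_m { long total, min, reaplimit; };

/* one working-set interval: the previous floor is provably surplus */
static void
depot_ws_update_model(struct maglist_m *ml)
{
    ml->reaplimit = ml->min;    /* untouched for a full interval: reapable */
    ml->min = ml->total;        /* new floor; depot allocations lower it */
}
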
2162 kmem_depot_ws_reap(kmem_cache_t *cp) in kmem_depot_ws_reap() argument
2168 ASSERT(!list_link_active(&cp->cache_link) || in kmem_depot_ws_reap()
2171 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min); in kmem_depot_ws_reap()
2173 (mp = kmem_depot_alloc(cp, &cp->cache_full)) != NULL) { in kmem_depot_ws_reap()
2174 kmem_magazine_destroy(cp, mp, cp->cache_magtype->mt_magsize); in kmem_depot_ws_reap()
2175 bytes += cp->cache_magtype->mt_magsize * cp->cache_bufsize; in kmem_depot_ws_reap()
2182 reap = MIN(cp->cache_empty.ml_reaplimit, cp->cache_empty.ml_min); in kmem_depot_ws_reap()
2184 (mp = kmem_depot_alloc(cp, &cp->cache_empty)) != NULL) { in kmem_depot_ws_reap()
2185 kmem_magazine_destroy(cp, mp, 0); in kmem_depot_ws_reap()
2186 bytes += cp->cache_magtype->mt_magsize * cp->cache_bufsize; in kmem_depot_ws_reap()
2225 #define KMEM_DUMPCTL(cp, buf) \ argument
2226 ((kmem_dumpctl_t *)P2ROUNDUP((uintptr_t)(buf) + (cp)->cache_bufsize, \
2279 kmem_cache_t *cp; in kmem_dump_begin() local
2283 for (cp = list_head(&kmem_caches); cp != NULL; in kmem_dump_begin()
2284 cp = list_next(&kmem_caches, cp)) { in kmem_dump_begin()
2285 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp); in kmem_dump_begin()
2287 if (cp->cache_arena->vm_cflags & VMC_DUMPSAFE) { in kmem_dump_begin()
2288 cp->cache_flags |= KMF_DUMPDIVERT; in kmem_dump_begin()
2294 cp->cache_flags |= KMF_DUMPUNSAFE; in kmem_dump_begin()
2343 kmem_cache_alloc_dump(kmem_cache_t *cp, int kmflag) in kmem_cache_alloc_dump() argument
2350 if ((buf = cp->cache_dump.kd_freelist) != NULL) { in kmem_cache_alloc_dump()
2351 cp->cache_dump.kd_freelist = KMEM_DUMPCTL(cp, buf)->kdc_next; in kmem_cache_alloc_dump()
2357 buf = (void *)P2ROUNDUP((uintptr_t)curr, cp->cache_align); in kmem_cache_alloc_dump()
2358 bufend = (char *)KMEM_DUMPCTL(cp, buf) + sizeof (kmem_dumpctl_t); in kmem_cache_alloc_dump()
2361 if (cp->cache_align < PAGESIZE) { in kmem_cache_alloc_dump()
2372 cp->cache_dump.kd_alloc_fails++; in kmem_cache_alloc_dump()
2383 if (cp->cache_constructor != NULL && in kmem_cache_alloc_dump()
2384 cp->cache_constructor(buf, cp->cache_private, kmflag) in kmem_cache_alloc_dump()
2388 cp->cache_name, (void *)cp); in kmem_cache_alloc_dump()
2394 cp->cache_dump.kd_alloc_fails++; in kmem_cache_alloc_dump()
2406 kmem_cache_free_dump(kmem_cache_t *cp, void *buf) in kmem_cache_free_dump() argument
2411 KMEM_DUMPCTL(cp, buf)->kdc_next = cp->cache_dump.kd_freelist; in kmem_cache_free_dump()
2412 cp->cache_dump.kd_freelist = buf; in kmem_cache_free_dump()
2428 kmem_cache_alloc(kmem_cache_t *cp, int kmflag) in kmem_cache_alloc() argument
2430 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp); in kmem_cache_alloc()
2448 cp->cache_dump.kd_unsafe++; in kmem_cache_alloc()
2451 kmem_cache_alloc_debug(cp, buf, kmflag, 0, in kmem_cache_alloc()
2479 cp->cache_dump.kd_unsafe++; in kmem_cache_alloc()
2481 if ((buf = kmem_cache_alloc_dump(cp, kmflag)) != in kmem_cache_alloc()
2499 fmp = kmem_depot_alloc(cp, &cp->cache_full); in kmem_cache_alloc()
2502 kmem_depot_free(cp, &cp->cache_empty, in kmem_cache_alloc()
2520 buf = kmem_slab_alloc(cp, kmflag); in kmem_cache_alloc()
2525 if (cp->cache_flags & KMF_BUFTAG) { in kmem_cache_alloc()
2529 int rc = kmem_cache_alloc_debug(cp, buf, kmflag, 1, caller()); in kmem_cache_alloc()
2540 return (kmem_cache_alloc(cp, kmflag)); in kmem_cache_alloc()
2545 if (cp->cache_constructor != NULL && in kmem_cache_alloc()
2546 cp->cache_constructor(buf, cp->cache_private, kmflag) != 0) { in kmem_cache_alloc()
2547 atomic_inc_64(&cp->cache_alloc_fail); in kmem_cache_alloc()
2548 kmem_slab_free(cp, buf); in kmem_cache_alloc()
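
kmem_cache_alloc() is the layered fast path: take a round from the CPU's loaded magazine; failing that, swap in the previous magazine if it has rounds; failing that, exchange an empty magazine for a full one at the depot (lines 2499-2502); and only then construct a raw buffer from kmem_slab_alloc(). A user-space model of the per-CPU part, with the depot exchange and per-CPU locking elided and hypothetical type names:

struct magazine_m { void **rounds; };
struct cpu_cache_m {
    struct magazine_m *loaded, *prev;
    int rounds, prounds;            /* rounds remaining in each magazine */
};

/* magazine-layer allocation; NULL means "fall back to the slab layer" */
static void *
cpu_cache_alloc_model(struct cpu_cache_m *ccp)
{
    for (;;) {
        if (ccp->rounds > 0)
            return (ccp->loaded->rounds[--ccp->rounds]);  /* fast path */
        if (ccp->prounds > 0) {
            struct magazine_m *m = ccp->loaded;           /* swap magazines */
            int r = ccp->rounds;
            ccp->loaded = ccp->prev;  ccp->rounds = ccp->prounds;
            ccp->prev = m;            ccp->prounds = r;
            continue;
        }
        return (NULL);  /* depot exchange / kmem_slab_alloc() not modeled */
    }
}
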
2562 kmem_slab_free_constructed(kmem_cache_t *cp, void *buf, boolean_t freed) in kmem_slab_free_constructed() argument
2564 if (!freed && (cp->cache_flags & KMF_BUFTAG)) in kmem_slab_free_constructed()
2565 if (kmem_cache_free_debug(cp, buf, caller()) == -1) in kmem_slab_free_constructed()
2572 if ((cp->cache_flags & (KMF_DEADBEEF | KMF_LITE)) != KMF_DEADBEEF && in kmem_slab_free_constructed()
2573 cp->cache_destructor != NULL) { in kmem_slab_free_constructed()
2574 if (cp->cache_flags & KMF_DEADBEEF) { /* KMF_LITE implied */ in kmem_slab_free_constructed()
2575 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); in kmem_slab_free_constructed()
2577 cp->cache_destructor(buf, cp->cache_private); in kmem_slab_free_constructed()
2580 cp->cache_destructor(buf, cp->cache_private); in kmem_slab_free_constructed()
2584 kmem_slab_free(cp, buf); in kmem_slab_free_constructed()
2595 kmem_cpucache_magazine_alloc(kmem_cpu_cache_t *ccp, kmem_cache_t *cp) in kmem_cpucache_magazine_alloc() argument
2606 emp = kmem_depot_alloc(cp, &cp->cache_empty); in kmem_cpucache_magazine_alloc()
2609 kmem_depot_free(cp, &cp->cache_full, in kmem_cpucache_magazine_alloc()
2620 mtp = cp->cache_magtype; in kmem_cpucache_magazine_alloc()
2643 kmem_depot_free(cp, &cp->cache_empty, emp); in kmem_cpucache_magazine_alloc()
2658 kmem_cache_free(kmem_cache_t *cp, void *buf) in kmem_cache_free() argument
2660 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp); in kmem_cache_free()
2666 ASSERT(cp->cache_defrag == NULL || in kmem_cache_free()
2667 cp->cache_defrag->kmd_thread != curthread || in kmem_cache_free()
2668 (buf != cp->cache_defrag->kmd_from_buf && in kmem_cache_free()
2669 buf != cp->cache_defrag->kmd_to_buf)); in kmem_cache_free()
2675 cp->cache_dump.kd_unsafe++; in kmem_cache_free()
2676 } else if (KMEM_DUMPCC(ccp) && !kmem_cache_free_dump(cp, buf)) { in kmem_cache_free()
2680 if (kmem_cache_free_debug(cp, buf, caller()) == -1) in kmem_cache_free()
2716 if (!kmem_cpucache_magazine_alloc(ccp, cp)) { in kmem_cache_free()
2726 kmem_slab_free_constructed(cp, buf, B_TRUE); in kmem_cache_free()
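
kmem_cache_free() mirrors the allocation path: put the round back into the loaded magazine if it has room, swap in the previous magazine if that one is empty, otherwise ask kmem_cpucache_magazine_alloc() above for a fresh empty magazine, and as a last resort hand the constructed buffer back to the slab layer. The per-CPU mirror of the model sketched after kmem_cache_alloc(), reusing its types:

/* magazine-layer free; 0 means "caller must free to the slab layer" */
static int
cpu_cache_free_model(struct cpu_cache_m *ccp, void *buf, int magsize)
{
    for (;;) {
        if (ccp->rounds < magsize) {
            ccp->loaded->rounds[ccp->rounds++] = buf;     /* fast path */
            return (1);
        }
        if (ccp->prounds == 0) {    /* previous magazine is empty: swap */
            struct magazine_m *m = ccp->loaded;
            int r = ccp->rounds;
            ccp->loaded = ccp->prev;  ccp->rounds = ccp->prounds;
            ccp->prev = m;            ccp->prounds = r;
            continue;
        }
        return (0);     /* no empty magazine at hand; not modeled further */
    }
}
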
2730 kmem_slab_prefill(kmem_cache_t *cp, kmem_slab_t *sp) in kmem_slab_prefill() argument
2732 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp); in kmem_slab_prefill()
2733 int cache_flags = cp->cache_flags; in kmem_slab_prefill()
2743 ASSERT(MUTEX_HELD(&cp->cache_lock)); in kmem_slab_prefill()
2745 ASSERT(cp->cache_constructor == NULL); in kmem_slab_prefill()
2746 ASSERT(sp->slab_cache == cp); in kmem_slab_prefill()
2749 ASSERT(avl_find(&cp->cache_partial_slabs, sp, NULL) == NULL); in kmem_slab_prefill()
2755 cp->cache_bufslab -= nbufs; in kmem_slab_prefill()
2756 cp->cache_slab_alloc += nbufs; in kmem_slab_prefill()
2757 list_insert_head(&cp->cache_complete_slabs, sp); in kmem_slab_prefill()
2758 cp->cache_complete_slab_count++; in kmem_slab_prefill()
2759 mutex_exit(&cp->cache_lock); in kmem_slab_prefill()
2763 void *buf = KMEM_BUF(cp, head); in kmem_slab_prefill()
2797 if (!kmem_cpucache_magazine_alloc(ccp, cp)) in kmem_slab_prefill()
2812 kmem_slab_free(cp, KMEM_BUF(cp, head)); in kmem_slab_prefill()
2819 mutex_enter(&cp->cache_lock); in kmem_slab_prefill()
2856 kmem_cache_t *cp = kmem_alloc_table[index]; in kmem_zalloc() local
2857 buf = kmem_cache_alloc(cp, kmflag); in kmem_zalloc()
2859 if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) { in kmem_zalloc()
2860 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); in kmem_zalloc()
2864 if (cp->cache_flags & KMF_LITE) { in kmem_zalloc()
2883 kmem_cache_t *cp; in kmem_alloc() local
2887 cp = kmem_alloc_table[index]; in kmem_alloc()
2892 cp = kmem_big_alloc_table[index]; in kmem_alloc()
2938 buf = kmem_cache_alloc(cp, kmflag); in kmem_alloc()
2939 if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp) && buf != NULL) { in kmem_alloc()
2940 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); in kmem_alloc()
2944 if (cp->cache_flags & KMF_LITE) { in kmem_alloc()
2955 kmem_cache_t *cp; in kmem_free() local
2958 cp = kmem_alloc_table[index]; in kmem_free()
2963 cp = kmem_big_alloc_table[index]; in kmem_free()
2974 if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) { in kmem_free()
2975 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf); in kmem_free()
2979 kmem_error(KMERR_DUPFREE, cp, buf); in kmem_free()
2984 kmem_error(KMERR_BADSIZE, cp, buf); in kmem_free()
2986 kmem_error(KMERR_REDZONE, cp, buf); in kmem_free()
2991 kmem_error(KMERR_REDZONE, cp, buf); in kmem_free()
2995 if (cp->cache_flags & KMF_LITE) { in kmem_free()
3000 kmem_cache_free(cp, buf); in kmem_free()
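
kmem_alloc(), kmem_zalloc(), and kmem_free() avoid any size search: the request size indexes kmem_alloc_table (and kmem_big_alloc_table for larger requests) as ((size - 1) >> shift), so every size in a bucket shares one fixed cache and kmem_free() can recompute the same cache from the size alone. A model of the bucket math, with an illustrative 8-byte granularity rather than the kernel's actual tables:

#include <stddef.h>

#define GRAN_SHIFT 3    /* illustrative: 8-byte buckets */

/* sizes 1..8 -> bucket 0, 9..16 -> bucket 1, and so on */
static size_t
size_to_bucket(size_t size)
{
    return ((size - 1) >> GRAN_SHIFT);
}
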
3073 kmem_cache_reap(kmem_cache_t *cp) in kmem_cache_reap() argument
3076 cp->cache_reap++; in kmem_cache_reap()
3085 if (cp->cache_reclaim != NULL) { in kmem_cache_reap()
3092 delta = cp->cache_full.ml_total; in kmem_cache_reap()
3093 cp->cache_reclaim(cp->cache_private); in kmem_cache_reap()
3094 delta = cp->cache_full.ml_total - delta; in kmem_cache_reap()
3096 mutex_enter(&cp->cache_depot_lock); in kmem_cache_reap()
3097 cp->cache_full.ml_reaplimit += delta; in kmem_cache_reap()
3098 cp->cache_full.ml_min += delta; in kmem_cache_reap()
3099 mutex_exit(&cp->cache_depot_lock); in kmem_cache_reap()
3103 kmem_depot_ws_reap(cp); in kmem_cache_reap()
3105 if (cp->cache_defrag != NULL && !kmem_move_noreap) { in kmem_cache_reap()
3106 kmem_cache_defrag(cp); in kmem_cache_reap()
3205 kmem_cache_magazine_purge(kmem_cache_t *cp) in kmem_cache_magazine_purge() argument
3211 ASSERT(!list_link_active(&cp->cache_link) || in kmem_cache_magazine_purge()
3213 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock)); in kmem_cache_magazine_purge()
3216 ccp = &cp->cache_cpu[cpu_seqid]; in kmem_cache_magazine_purge()
3231 kmem_magazine_destroy(cp, mp, rounds); in kmem_cache_magazine_purge()
3233 kmem_magazine_destroy(cp, pmp, prounds); in kmem_cache_magazine_purge()
3236 kmem_depot_ws_zero(cp); in kmem_cache_magazine_purge()
3237 kmem_depot_ws_reap(cp); in kmem_cache_magazine_purge()
3244 kmem_cache_magazine_enable(kmem_cache_t *cp) in kmem_cache_magazine_enable() argument
3248 if (cp->cache_flags & KMF_NOMAGAZINE) in kmem_cache_magazine_enable()
3252 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid]; in kmem_cache_magazine_enable()
3254 ccp->cc_magsize = cp->cache_magtype->mt_magsize; in kmem_cache_magazine_enable()
3281 kmem_cache_reap_soon(kmem_cache_t *cp) in kmem_cache_reap_soon() argument
3283 ASSERT(list_link_active(&cp->cache_link)); in kmem_cache_reap_soon()
3285 kmem_depot_ws_zero(cp); in kmem_cache_reap_soon()
3288 (task_func_t *)kmem_depot_ws_reap, cp, TQ_SLEEP); in kmem_cache_reap_soon()
3303 kmem_cache_magazine_resize(kmem_cache_t *cp) in kmem_cache_magazine_resize() argument
3305 kmem_magtype_t *mtp = cp->cache_magtype; in kmem_cache_magazine_resize()
3309 if (cp->cache_chunksize < mtp->mt_maxbuf) { in kmem_cache_magazine_resize()
3310 kmem_cache_magazine_purge(cp); in kmem_cache_magazine_resize()
3311 mutex_enter(&cp->cache_depot_lock); in kmem_cache_magazine_resize()
3312 cp->cache_magtype = ++mtp; in kmem_cache_magazine_resize()
3313 cp->cache_depot_contention_prev = in kmem_cache_magazine_resize()
3314 cp->cache_depot_contention + INT_MAX; in kmem_cache_magazine_resize()
3315 mutex_exit(&cp->cache_depot_lock); in kmem_cache_magazine_resize()
3316 kmem_cache_magazine_enable(cp); in kmem_cache_magazine_resize()
3325 kmem_hash_rescale(kmem_cache_t *cp) in kmem_hash_rescale() argument
3333 1 << (highbit(3 * cp->cache_buftotal + 4) - 2)); in kmem_hash_rescale()
3334 old_size = cp->cache_hash_mask + 1; in kmem_hash_rescale()
3345 mutex_enter(&cp->cache_lock); in kmem_hash_rescale()
3347 old_size = cp->cache_hash_mask + 1; in kmem_hash_rescale()
3348 old_table = cp->cache_hash_table; in kmem_hash_rescale()
3350 cp->cache_hash_mask = new_size - 1; in kmem_hash_rescale()
3351 cp->cache_hash_table = new_table; in kmem_hash_rescale()
3352 cp->cache_rescale++; in kmem_hash_rescale()
3359 kmem_bufctl_t **hash_bucket = KMEM_HASH(cp, addr); in kmem_hash_rescale()
3366 mutex_exit(&cp->cache_lock); in kmem_hash_rescale()
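
kmem_hash_rescale() keeps the bufctl hash table sized to the buffer population: it computes a new power-of-two size from cache_buftotal, allocates the new table without holding cache_lock, then takes the lock and rehashes every chain, recomputing each bucket from the buffer address just as KMEM_HASH() does. A sketch of the rehash loop over a chained power-of-two table (hypothetical bufctl type; new_tab must start zeroed):

#include <stddef.h>
#include <stdint.h>

struct bufctl_m { struct bufctl_m *next; void *addr; };

static size_t
hash_model(void *addr, size_t mask, int shift)
{
    return (((uintptr_t)addr >> shift) & mask);
}

/* move every chained entry from the old table into the new one */
static void
rehash_model(struct bufctl_m **old_tab, size_t old_size,
    struct bufctl_m **new_tab, size_t new_mask, int shift)
{
    for (size_t i = 0; i < old_size; i++) {
        struct bufctl_m *bcp = old_tab[i];
        while (bcp != NULL) {
            struct bufctl_m *next = bcp->next;
            struct bufctl_m **bucket =
                &new_tab[hash_model(bcp->addr, new_mask, shift)];
            bcp->next = *bucket;            /* push onto the new chain */
            *bucket = bcp;
            bcp = next;
        }
    }
}
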
3376 kmem_cache_update(kmem_cache_t *cp) in kmem_cache_update() argument
3387 mutex_enter(&cp->cache_lock); in kmem_cache_update()
3389 if ((cp->cache_flags & KMF_HASH) && in kmem_cache_update()
3390 (cp->cache_buftotal > (cp->cache_hash_mask << 1) || in kmem_cache_update()
3391 (cp->cache_buftotal < (cp->cache_hash_mask >> 1) && in kmem_cache_update()
3392 cp->cache_hash_mask > KMEM_HASH_INITIAL))) in kmem_cache_update()
3395 mutex_exit(&cp->cache_lock); in kmem_cache_update()
3400 kmem_depot_ws_update(cp); in kmem_cache_update()
3406 mutex_enter(&cp->cache_depot_lock); in kmem_cache_update()
3408 if (cp->cache_chunksize < cp->cache_magtype->mt_maxbuf && in kmem_cache_update()
3409 (int)(cp->cache_depot_contention - in kmem_cache_update()
3410 cp->cache_depot_contention_prev) > kmem_depot_contention) in kmem_cache_update()
3413 cp->cache_depot_contention_prev = cp->cache_depot_contention; in kmem_cache_update()
3415 mutex_exit(&cp->cache_depot_lock); in kmem_cache_update()
3419 (task_func_t *)kmem_hash_rescale, cp, TQ_NOSLEEP); in kmem_cache_update()
3423 (task_func_t *)kmem_cache_magazine_resize, cp, TQ_NOSLEEP); in kmem_cache_update()
3425 if (cp->cache_defrag != NULL) in kmem_cache_update()
3427 (task_func_t *)kmem_cache_scan, cp, TQ_NOSLEEP); in kmem_cache_update()
3457 kmem_cache_t *cp = ksp->ks_private; in kmem_cache_kstat_update() local
3468 mutex_enter(&cp->cache_lock); in kmem_cache_kstat_update()
3470 kmcp->kmc_alloc_fail.value.ui64 = cp->cache_alloc_fail; in kmem_cache_kstat_update()
3471 kmcp->kmc_alloc.value.ui64 = cp->cache_slab_alloc; in kmem_cache_kstat_update()
3472 kmcp->kmc_free.value.ui64 = cp->cache_slab_free; in kmem_cache_kstat_update()
3473 kmcp->kmc_slab_alloc.value.ui64 = cp->cache_slab_alloc; in kmem_cache_kstat_update()
3474 kmcp->kmc_slab_free.value.ui64 = cp->cache_slab_free; in kmem_cache_kstat_update()
3477 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid]; in kmem_cache_kstat_update()
3494 mutex_enter(&cp->cache_depot_lock); in kmem_cache_kstat_update()
3496 kmcp->kmc_depot_alloc.value.ui64 = cp->cache_full.ml_alloc; in kmem_cache_kstat_update()
3497 kmcp->kmc_depot_free.value.ui64 = cp->cache_empty.ml_alloc; in kmem_cache_kstat_update()
3498 kmcp->kmc_depot_contention.value.ui64 = cp->cache_depot_contention; in kmem_cache_kstat_update()
3499 kmcp->kmc_full_magazines.value.ui64 = cp->cache_full.ml_total; in kmem_cache_kstat_update()
3500 kmcp->kmc_empty_magazines.value.ui64 = cp->cache_empty.ml_total; in kmem_cache_kstat_update()
3502 (cp->cache_flags & KMF_NOMAGAZINE) ? in kmem_cache_kstat_update()
3503 0 : cp->cache_magtype->mt_magsize; in kmem_cache_kstat_update()
3505 kmcp->kmc_alloc.value.ui64 += cp->cache_full.ml_alloc; in kmem_cache_kstat_update()
3506 kmcp->kmc_free.value.ui64 += cp->cache_empty.ml_alloc; in kmem_cache_kstat_update()
3507 buf_avail += cp->cache_full.ml_total * cp->cache_magtype->mt_magsize; in kmem_cache_kstat_update()
3509 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min); in kmem_cache_kstat_update()
3510 reap = MIN(reap, cp->cache_full.ml_total); in kmem_cache_kstat_update()
3512 mutex_exit(&cp->cache_depot_lock); in kmem_cache_kstat_update()
3514 kmcp->kmc_buf_size.value.ui64 = cp->cache_bufsize; in kmem_cache_kstat_update()
3515 kmcp->kmc_align.value.ui64 = cp->cache_align; in kmem_cache_kstat_update()
3516 kmcp->kmc_chunk_size.value.ui64 = cp->cache_chunksize; in kmem_cache_kstat_update()
3517 kmcp->kmc_slab_size.value.ui64 = cp->cache_slabsize; in kmem_cache_kstat_update()
3519 buf_avail += cp->cache_bufslab; in kmem_cache_kstat_update()
3521 kmcp->kmc_buf_inuse.value.ui64 = cp->cache_buftotal - buf_avail; in kmem_cache_kstat_update()
3522 kmcp->kmc_buf_total.value.ui64 = cp->cache_buftotal; in kmem_cache_kstat_update()
3523 kmcp->kmc_buf_max.value.ui64 = cp->cache_bufmax; in kmem_cache_kstat_update()
3524 kmcp->kmc_slab_create.value.ui64 = cp->cache_slab_create; in kmem_cache_kstat_update()
3525 kmcp->kmc_slab_destroy.value.ui64 = cp->cache_slab_destroy; in kmem_cache_kstat_update()
3526 kmcp->kmc_hash_size.value.ui64 = (cp->cache_flags & KMF_HASH) ? in kmem_cache_kstat_update()
3527 cp->cache_hash_mask + 1 : 0; in kmem_cache_kstat_update()
3528 kmcp->kmc_hash_lookup_depth.value.ui64 = cp->cache_lookup_depth; in kmem_cache_kstat_update()
3529 kmcp->kmc_hash_rescale.value.ui64 = cp->cache_rescale; in kmem_cache_kstat_update()
3530 kmcp->kmc_vmem_source.value.ui64 = cp->cache_arena->vm_id; in kmem_cache_kstat_update()
3531 kmcp->kmc_reap.value.ui64 = cp->cache_reap; in kmem_cache_kstat_update()
3533 if (cp->cache_defrag == NULL) { in kmem_cache_kstat_update()
3548 kmem_defrag_t *kd = cp->cache_defrag; in kmem_cache_kstat_update()
3560 reclaimable = cp->cache_bufslab - (cp->cache_maxchunks - 1); in kmem_cache_kstat_update()
3562 reclaimable += ((uint64_t)reap * cp->cache_magtype->mt_magsize); in kmem_cache_kstat_update()
3566 mutex_exit(&cp->cache_lock); in kmem_cache_kstat_update()
3576 kmem_cache_stat(kmem_cache_t *cp, char *name) in kmem_cache_stat() argument
3579 kstat_t *ksp = cp->cache_kstat; in kmem_cache_stat()
3666 const kmem_cache_t *cp; in kmem_partial_slab_cmp() local
3675 cp = s1->slab_cache; in kmem_partial_slab_cmp()
3676 ASSERT(MUTEX_HELD(&cp->cache_lock)); in kmem_partial_slab_cmp()
3677 binshift = cp->cache_partial_binshift; in kmem_partial_slab_cmp()
3682 w0 -= cp->cache_maxchunks; in kmem_partial_slab_cmp()
3688 w1 -= cp->cache_maxchunks; in kmem_partial_slab_cmp()
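
kmem_partial_slab_cmp() orders the partial-slab tree so allocation prefers the most-allocated slabs: each slab's weight derives from its free-chunk count, coarsened by cache_partial_binshift so nearly-equal slabs compare equal and the tree stays cheap to keep sorted (the real comparator breaks remaining ties, ultimately by slab address). A model of the binned comparison with a simplified weight:

struct slab_w { int chunks, refcnt; };

/* coarsened weight: free chunks, bucketed so near-ties compare equal */
static long
slab_weight(const struct slab_w *sp, int binshift)
{
    return ((long)(sp->chunks - sp->refcnt) >> binshift);
}

/* AVL comparator contract: strictly -1, 0, or 1 */
static int
partial_slab_cmp_model(const struct slab_w *a, const struct slab_w *b,
    int binshift)
{
    long w0 = slab_weight(a, binshift);
    long w1 = slab_weight(b, binshift);

    if (w0 < w1)
        return (-1);
    if (w0 > w1)
        return (1);
    return (0);
}
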
3724 kmem_cache_t *cp; in kmem_cache_create() local
3756 cp = vmem_xalloc(kmem_cache_arena, csize, KMEM_CPU_CACHE_SIZE, in kmem_cache_create()
3758 bzero(cp, csize); in kmem_cache_create()
3759 list_link_init(&cp->cache_link); in kmem_cache_create()
3779 cp->cache_flags = (kmem_flags | cflags) & KMF_DEBUG; in kmem_cache_create()
3787 if (cp->cache_flags & KMF_LITE) { in kmem_cache_create()
3791 cp->cache_flags |= KMF_BUFTAG; in kmem_cache_create()
3792 cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL); in kmem_cache_create()
3794 cp->cache_flags &= ~KMF_DEBUG; in kmem_cache_create()
3798 if (cp->cache_flags & KMF_DEADBEEF) in kmem_cache_create()
3799 cp->cache_flags |= KMF_REDZONE; in kmem_cache_create()
3801 if ((cflags & KMC_QCACHE) && (cp->cache_flags & KMF_AUDIT)) in kmem_cache_create()
3802 cp->cache_flags |= KMF_NOMAGAZINE; in kmem_cache_create()
3805 cp->cache_flags &= ~KMF_DEBUG; in kmem_cache_create()
3808 cp->cache_flags &= ~KMF_TOUCH; in kmem_cache_create()
3811 cp->cache_flags |= KMF_PREFILL; in kmem_cache_create()
3814 cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL); in kmem_cache_create()
3817 cp->cache_flags |= KMF_NOMAGAZINE; in kmem_cache_create()
3819 if ((cp->cache_flags & KMF_AUDIT) && !(cflags & KMC_NOTOUCH)) in kmem_cache_create()
3820 cp->cache_flags |= KMF_REDZONE; in kmem_cache_create()
3822 if (!(cp->cache_flags & KMF_AUDIT)) in kmem_cache_create()
3823 cp->cache_flags &= ~KMF_CONTENTS; in kmem_cache_create()
3825 if ((cp->cache_flags & KMF_BUFTAG) && bufsize >= kmem_minfirewall && in kmem_cache_create()
3826 !(cp->cache_flags & KMF_LITE) && !(cflags & KMC_NOHASH)) in kmem_cache_create()
3827 cp->cache_flags |= KMF_FIREWALL; in kmem_cache_create()
3830 cp->cache_flags &= ~KMF_FIREWALL; in kmem_cache_create()
3832 if (cp->cache_flags & KMF_FIREWALL) { in kmem_cache_create()
3833 cp->cache_flags &= ~KMF_BUFTAG; in kmem_cache_create()
3834 cp->cache_flags |= KMF_NOMAGAZINE; in kmem_cache_create()
3842 (void) strncpy(cp->cache_name, name, KMEM_CACHE_NAMELEN); in kmem_cache_create()
3843 strident_canon(cp->cache_name, KMEM_CACHE_NAMELEN + 1); in kmem_cache_create()
3844 cp->cache_bufsize = bufsize; in kmem_cache_create()
3845 cp->cache_align = align; in kmem_cache_create()
3846 cp->cache_constructor = constructor; in kmem_cache_create()
3847 cp->cache_destructor = destructor; in kmem_cache_create()
3848 cp->cache_reclaim = reclaim; in kmem_cache_create()
3849 cp->cache_private = private; in kmem_cache_create()
3850 cp->cache_arena = vmp; in kmem_cache_create()
3851 cp->cache_cflags = cflags; in kmem_cache_create()
3860 cp->cache_bufctl = chunksize - KMEM_ALIGN; in kmem_cache_create()
3863 if (cp->cache_flags & KMF_BUFTAG) { in kmem_cache_create()
3864 cp->cache_bufctl = chunksize; in kmem_cache_create()
3865 cp->cache_buftag = chunksize; in kmem_cache_create()
3866 if (cp->cache_flags & KMF_LITE) in kmem_cache_create()
3872 if (cp->cache_flags & KMF_DEADBEEF) { in kmem_cache_create()
3873 cp->cache_verify = MIN(cp->cache_buftag, kmem_maxverify); in kmem_cache_create()
3874 if (cp->cache_flags & KMF_LITE) in kmem_cache_create()
3875 cp->cache_verify = sizeof (uint64_t); in kmem_cache_create()
3878 cp->cache_contents = MIN(cp->cache_bufctl, kmem_content_maxsave); in kmem_cache_create()
3880 cp->cache_chunksize = chunksize = P2ROUNDUP(chunksize, align); in kmem_cache_create()
3886 cp->cache_slabsize = P2ROUNDUP(chunksize, vmp->vm_quantum); in kmem_cache_create()
3887 cp->cache_mincolor = cp->cache_slabsize - chunksize; in kmem_cache_create()
3888 cp->cache_maxcolor = cp->cache_mincolor; in kmem_cache_create()
3889 cp->cache_flags |= KMF_HASH; in kmem_cache_create()
3890 ASSERT(!(cp->cache_flags & KMF_BUFTAG)); in kmem_cache_create()
3892 !(cp->cache_flags & KMF_AUDIT) && in kmem_cache_create()
3894 cp->cache_slabsize = vmp->vm_quantum; in kmem_cache_create()
3895 cp->cache_mincolor = 0; in kmem_cache_create()
3896 cp->cache_maxcolor = in kmem_cache_create()
3897 (cp->cache_slabsize - sizeof (kmem_slab_t)) % chunksize; in kmem_cache_create()
3898 ASSERT(chunksize + sizeof (kmem_slab_t) <= cp->cache_slabsize); in kmem_cache_create()
3899 ASSERT(!(cp->cache_flags & KMF_AUDIT)); in kmem_cache_create()
3917 cp->cache_slabsize = bestfit; in kmem_cache_create()
3918 cp->cache_mincolor = 0; in kmem_cache_create()
3919 cp->cache_maxcolor = bestfit % chunksize; in kmem_cache_create()
3920 cp->cache_flags |= KMF_HASH; in kmem_cache_create()
3923 cp->cache_maxchunks = (cp->cache_slabsize / cp->cache_chunksize); in kmem_cache_create()
3924 cp->cache_partial_binshift = highbit(cp->cache_maxchunks / 16) + 1; in kmem_cache_create()
3933 cp->cache_flags & (KMF_HASH | KMF_BUFTAG) || in kmem_cache_create()
3934 cp->cache_constructor != NULL) in kmem_cache_create()
3935 cp->cache_flags &= ~KMF_PREFILL; in kmem_cache_create()
3937 if (cp->cache_flags & KMF_HASH) { in kmem_cache_create()
3939 cp->cache_bufctl_cache = (cp->cache_flags & KMF_AUDIT) ? in kmem_cache_create()
3943 if (cp->cache_maxcolor >= vmp->vm_quantum) in kmem_cache_create()
3944 cp->cache_maxcolor = vmp->vm_quantum - 1; in kmem_cache_create()
3946 cp->cache_color = cp->cache_mincolor; in kmem_cache_create()
3951 mutex_init(&cp->cache_lock, NULL, MUTEX_DEFAULT, NULL); in kmem_cache_create()
3953 avl_create(&cp->cache_partial_slabs, kmem_partial_slab_cmp, in kmem_cache_create()
3958 list_create(&cp->cache_complete_slabs, in kmem_cache_create()
3961 if (cp->cache_flags & KMF_HASH) { in kmem_cache_create()
3962 cp->cache_hash_table = vmem_alloc(kmem_hash_arena, in kmem_cache_create()
3964 bzero(cp->cache_hash_table, in kmem_cache_create()
3966 cp->cache_hash_mask = KMEM_HASH_INITIAL - 1; in kmem_cache_create()
3967 cp->cache_hash_shift = highbit((ulong_t)chunksize) - 1; in kmem_cache_create()
3973 mutex_init(&cp->cache_depot_lock, NULL, MUTEX_DEFAULT, NULL); in kmem_cache_create()
3978 cp->cache_magtype = mtp; in kmem_cache_create()
3984 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid]; in kmem_cache_create()
3986 ccp->cc_flags = cp->cache_flags; in kmem_cache_create()
3994 if ((cp->cache_kstat = kstat_create("unix", 0, cp->cache_name, in kmem_cache_create()
3998 cp->cache_kstat->ks_data = &kmem_cache_kstat; in kmem_cache_create()
3999 cp->cache_kstat->ks_update = kmem_cache_kstat_update; in kmem_cache_create()
4000 cp->cache_kstat->ks_private = cp; in kmem_cache_create()
4001 cp->cache_kstat->ks_lock = &kmem_cache_kstat_lock; in kmem_cache_create()
4002 kstat_install(cp->cache_kstat); in kmem_cache_create()
4010 list_insert_tail(&kmem_caches, cp); in kmem_cache_create()
4014 kmem_cache_magazine_enable(cp); in kmem_cache_create()
4016 return (cp); in kmem_cache_create()
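
Everything kmem_cache_create() wires up above is driven by a small client surface: name, buffer size, alignment, optional constructor/destructor/reclaim callbacks, a client-private pointer, a source arena, and cflags. A sketch of conventional client usage with a hypothetical foo_t object (a NULL arena selects the default):

#include <sys/kmem.h>

typedef struct foo { int f_refcnt; } foo_t;

static int
foo_construct(void *buf, void *private, int kmflag)
{
    foo_t *fp = buf;
    fp->f_refcnt = 0;       /* establish the constructed state */
    return (0);
}

static void
foo_destruct(void *buf, void *private)
{
    /* undo foo_construct(); nothing to release for this object */
}

static kmem_cache_t *foo_cache;

void
foo_init(void)
{
    foo_cache = kmem_cache_create("foo_cache", sizeof (foo_t),
        0,                          /* default alignment */
        foo_construct, foo_destruct,
        NULL,                       /* no reclaim callback */
        NULL,                       /* private */
        NULL,                       /* arena: default */
        0);                         /* cflags */
}
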
4059 kmem_cache_set_move(kmem_cache_t *cp, in kmem_cache_set_move() argument
4071 ASSERT(!(cp->cache_cflags & KMC_NOTOUCH)); in kmem_cache_set_move()
4072 ASSERT(!(cp->cache_cflags & KMC_IDENTIFIER)); in kmem_cache_set_move()
4081 mutex_enter(&cp->cache_lock); in kmem_cache_set_move()
4083 if (KMEM_IS_MOVABLE(cp)) { in kmem_cache_set_move()
4084 if (cp->cache_move == NULL) { in kmem_cache_set_move()
4085 ASSERT(cp->cache_slab_alloc == 0); in kmem_cache_set_move()
4087 cp->cache_defrag = defrag; in kmem_cache_set_move()
4089 bzero(cp->cache_defrag, sizeof (kmem_defrag_t)); in kmem_cache_set_move()
4090 avl_create(&cp->cache_defrag->kmd_moves_pending, in kmem_cache_set_move()
4096 list_create(&cp->cache_defrag->kmd_deadlist, in kmem_cache_set_move()
4099 kmem_reset_reclaim_threshold(cp->cache_defrag); in kmem_cache_set_move()
4101 cp->cache_move = move; in kmem_cache_set_move()
4104 mutex_exit(&cp->cache_lock); in kmem_cache_set_move()
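
The callback registered by kmem_cache_set_move() is what kmem_move_buffer() invokes below: given the old buffer, a freshly allocated (and, if a constructor exists, constructed) destination, the buffer size, and the client-private pointer, it either relocates the object and answers KMEM_CBRC_YES or declines with NO, LATER, DONT_NEED, or DONT_KNOW. A sketch of a conforming callback for the hypothetical foo_t above:

static kmem_cbrc_t
foo_move(void *old, void *new, size_t bufsize, void *private)
{
    foo_t *ofp = old;
    foo_t *nfp = new;

    if (ofp->f_refcnt != 0)
        return (KMEM_CBRC_LATER);   /* object busy; kmem may retry */

    *nfp = *ofp;                    /* relocate the object's state */
    return (KMEM_CBRC_YES);         /* kmem frees the old buffer */
}

/* registered once the cache exists: kmem_cache_set_move(foo_cache, foo_move); */
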
4112 kmem_cache_destroy(kmem_cache_t *cp) in kmem_cache_destroy() argument
4122 list_remove(&kmem_caches, cp); in kmem_cache_destroy()
4128 if (kmem_move_taskq != NULL && cp->cache_defrag != NULL) in kmem_cache_destroy()
4131 kmem_cache_magazine_purge(cp); in kmem_cache_destroy()
4133 mutex_enter(&cp->cache_lock); in kmem_cache_destroy()
4134 if (cp->cache_buftotal != 0) in kmem_cache_destroy()
4136 cp->cache_name, (void *)cp); in kmem_cache_destroy()
4137 if (cp->cache_defrag != NULL) { in kmem_cache_destroy()
4138 avl_destroy(&cp->cache_defrag->kmd_moves_pending); in kmem_cache_destroy()
4139 list_destroy(&cp->cache_defrag->kmd_deadlist); in kmem_cache_destroy()
4140 kmem_cache_free(kmem_defrag_cache, cp->cache_defrag); in kmem_cache_destroy()
4141 cp->cache_defrag = NULL; in kmem_cache_destroy()
4149 cp->cache_constructor = (int (*)(void *, void *, int))1; in kmem_cache_destroy()
4150 cp->cache_destructor = (void (*)(void *, void *))2; in kmem_cache_destroy()
4151 cp->cache_reclaim = (void (*)(void *))3; in kmem_cache_destroy()
4152 cp->cache_move = (kmem_cbrc_t (*)(void *, void *, size_t, void *))4; in kmem_cache_destroy()
4153 mutex_exit(&cp->cache_lock); in kmem_cache_destroy()
4155 kstat_delete(cp->cache_kstat); in kmem_cache_destroy()
4157 if (cp->cache_hash_table != NULL) in kmem_cache_destroy()
4158 vmem_free(kmem_hash_arena, cp->cache_hash_table, in kmem_cache_destroy()
4159 (cp->cache_hash_mask + 1) * sizeof (void *)); in kmem_cache_destroy()
4162 mutex_destroy(&cp->cache_cpu[cpu_seqid].cc_lock); in kmem_cache_destroy()
4164 mutex_destroy(&cp->cache_depot_lock); in kmem_cache_destroy()
4165 mutex_destroy(&cp->cache_lock); in kmem_cache_destroy()
4167 vmem_free(kmem_cache_arena, cp, KMEM_CACHE_SIZE(max_ncpus)); in kmem_cache_destroy()
4196 kmem_cache_t *cp; in kmem_alloc_caches_create() local
4215 cp = kmem_cache_create(name, cache_size, align, in kmem_alloc_caches_create()
4219 alloc_table[(size - 1) >> shift] = cp; in kmem_alloc_caches_create()
4328 kmem_cache_t *cp; in kmem_init() local
4397 while ((cp = list_tail(&kmem_caches)) != NULL) in kmem_init()
4398 kmem_cache_destroy(cp); in kmem_init()
4594 kmem_slab_allocated(kmem_cache_t *cp, kmem_slab_t *sp, void *buf) in kmem_slab_allocated() argument
4598 ASSERT(MUTEX_HELD(&cp->cache_lock)); in kmem_slab_allocated()
4601 if (cp->cache_flags & KMF_HASH) { in kmem_slab_allocated()
4602 for (bcp = *KMEM_HASH(cp, buf); in kmem_slab_allocated()
4612 sp = KMEM_SLAB(cp, buf); in kmem_slab_allocated()
4614 bufbcp = KMEM_BUFCTL(cp, buf); in kmem_slab_allocated()
4624 kmem_slab_is_reclaimable(kmem_cache_t *cp, kmem_slab_t *sp, int flags) in kmem_slab_is_reclaimable() argument
4628 ASSERT(cp->cache_defrag != NULL); in kmem_slab_is_reclaimable()
4663 (sp->slab_chunks * cp->cache_defrag->kmd_reclaim_numer)); in kmem_slab_is_reclaimable()
4671 kmem_slab_move_yes(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf) in kmem_slab_move_yes() argument
4673 ASSERT(MUTEX_HELD(&cp->cache_lock)); in kmem_slab_move_yes()
4682 avl_remove(&cp->cache_partial_slabs, sp); in kmem_slab_move_yes()
4685 avl_add(&cp->cache_partial_slabs, sp); in kmem_slab_move_yes()
4694 kmem_slab_move_no(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf) in kmem_slab_move_no() argument
4697 ASSERT(MUTEX_HELD(&cp->cache_lock)); in kmem_slab_move_no()
4704 avl_remove(&cp->cache_partial_slabs, sp); in kmem_slab_move_no()
4708 avl_add(&cp->cache_partial_slabs, sp); in kmem_slab_move_no()
4749 kmem_cache_t *cp = sp->slab_cache; in kmem_move_buffer() local
4753 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock)); in kmem_move_buffer()
4762 if (!kmem_slab_is_reclaimable(cp, sp, callback->kmm_flags)) { in kmem_move_buffer()
4763 kmem_slab_free(cp, callback->kmm_to_buf); in kmem_move_buffer()
4764 kmem_move_end(cp, callback); in kmem_move_buffer()
4772 mutex_enter(&cp->cache_lock); in kmem_move_buffer()
4773 free_on_slab = (kmem_slab_allocated(cp, sp, in kmem_move_buffer()
4775 mutex_exit(&cp->cache_lock); in kmem_move_buffer()
4778 kmem_slab_free(cp, callback->kmm_to_buf); in kmem_move_buffer()
4779 kmem_move_end(cp, callback); in kmem_move_buffer()
4783 if (cp->cache_flags & KMF_BUFTAG) { in kmem_move_buffer()
4787 if (kmem_cache_alloc_debug(cp, callback->kmm_to_buf, in kmem_move_buffer()
4789 kmem_move_end(cp, callback); in kmem_move_buffer()
4792 } else if (cp->cache_constructor != NULL && in kmem_move_buffer()
4793 cp->cache_constructor(callback->kmm_to_buf, cp->cache_private, in kmem_move_buffer()
4795 atomic_inc_64(&cp->cache_alloc_fail); in kmem_move_buffer()
4796 kmem_slab_free(cp, callback->kmm_to_buf); in kmem_move_buffer()
4797 kmem_move_end(cp, callback); in kmem_move_buffer()
4801 cp->cache_defrag->kmd_callbacks++; in kmem_move_buffer()
4802 cp->cache_defrag->kmd_thread = curthread; in kmem_move_buffer()
4803 cp->cache_defrag->kmd_from_buf = callback->kmm_from_buf; in kmem_move_buffer()
4804 cp->cache_defrag->kmd_to_buf = callback->kmm_to_buf; in kmem_move_buffer()
4805 DTRACE_PROBE2(kmem__move__start, kmem_cache_t *, cp, kmem_move_t *, in kmem_move_buffer()
4808 response = cp->cache_move(callback->kmm_from_buf, in kmem_move_buffer()
4809 callback->kmm_to_buf, cp->cache_bufsize, cp->cache_private); in kmem_move_buffer()
4811 DTRACE_PROBE3(kmem__move__end, kmem_cache_t *, cp, kmem_move_t *, in kmem_move_buffer()
4813 cp->cache_defrag->kmd_thread = NULL; in kmem_move_buffer()
4814 cp->cache_defrag->kmd_from_buf = NULL; in kmem_move_buffer()
4815 cp->cache_defrag->kmd_to_buf = NULL; in kmem_move_buffer()
4818 cp->cache_defrag->kmd_yes++; in kmem_move_buffer()
4819 kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE); in kmem_move_buffer()
4822 cp->cache_defrag->kmd_slabs_freed++; in kmem_move_buffer()
4823 mutex_enter(&cp->cache_lock); in kmem_move_buffer()
4824 kmem_slab_move_yes(cp, sp, callback->kmm_from_buf); in kmem_move_buffer()
4825 mutex_exit(&cp->cache_lock); in kmem_move_buffer()
4826 kmem_move_end(cp, callback); in kmem_move_buffer()
4832 cp->cache_defrag->kmd_no++; in kmem_move_buffer()
4833 mutex_enter(&cp->cache_lock); in kmem_move_buffer()
4834 kmem_slab_move_no(cp, sp, callback->kmm_from_buf); in kmem_move_buffer()
4835 mutex_exit(&cp->cache_lock); in kmem_move_buffer()
4838 cp->cache_defrag->kmd_later++; in kmem_move_buffer()
4839 mutex_enter(&cp->cache_lock); in kmem_move_buffer()
4841 mutex_exit(&cp->cache_lock); in kmem_move_buffer()
4846 kmem_slab_move_no(cp, sp, callback->kmm_from_buf); in kmem_move_buffer()
4851 mutex_exit(&cp->cache_lock); in kmem_move_buffer()
4854 cp->cache_defrag->kmd_dont_need++; in kmem_move_buffer()
4855 kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE); in kmem_move_buffer()
4857 cp->cache_defrag->kmd_slabs_freed++; in kmem_move_buffer()
4858 mutex_enter(&cp->cache_lock); in kmem_move_buffer()
4859 kmem_slab_move_yes(cp, sp, callback->kmm_from_buf); in kmem_move_buffer()
4860 mutex_exit(&cp->cache_lock); in kmem_move_buffer()
4877 cp->cache_defrag->kmd_dont_know++; in kmem_move_buffer()
4881 cp->cache_name, (void *)cp, response); in kmem_move_buffer()
4884 kmem_slab_free_constructed(cp, callback->kmm_to_buf, B_FALSE); in kmem_move_buffer()
4885 kmem_move_end(cp, callback); in kmem_move_buffer()
4890 kmem_move_begin(kmem_cache_t *cp, kmem_slab_t *sp, void *buf, int flags) in kmem_move_begin() argument
4898 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock)); in kmem_move_begin()
4910 mutex_enter(&cp->cache_lock); in kmem_move_begin()
4912 n = avl_numnodes(&cp->cache_partial_slabs); in kmem_move_begin()
4914 mutex_exit(&cp->cache_lock); in kmem_move_begin()
4919 pending = avl_find(&cp->cache_defrag->kmd_moves_pending, buf, &index); in kmem_move_begin()
4928 mutex_exit(&cp->cache_lock); in kmem_move_begin()
4933 to_buf = kmem_slab_alloc_impl(cp, avl_first(&cp->cache_partial_slabs), in kmem_move_begin()
4936 avl_insert(&cp->cache_defrag->kmd_moves_pending, callback, index); in kmem_move_begin()
4938 mutex_exit(&cp->cache_lock); in kmem_move_begin()
4942 mutex_enter(&cp->cache_lock); in kmem_move_begin()
4943 avl_remove(&cp->cache_defrag->kmd_moves_pending, callback); in kmem_move_begin()
4944 mutex_exit(&cp->cache_lock); in kmem_move_begin()
4945 kmem_slab_free(cp, to_buf); in kmem_move_begin()
4954 kmem_move_end(kmem_cache_t *cp, kmem_move_t *callback) in kmem_move_end() argument
4958 ASSERT(cp->cache_defrag != NULL); in kmem_move_end()
4960 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock)); in kmem_move_end()
4962 mutex_enter(&cp->cache_lock); in kmem_move_end()
4963 VERIFY(avl_find(&cp->cache_defrag->kmd_moves_pending, in kmem_move_end()
4965 avl_remove(&cp->cache_defrag->kmd_moves_pending, callback); in kmem_move_end()
4966 if (avl_is_empty(&cp->cache_defrag->kmd_moves_pending)) { in kmem_move_end()
4967 list_t *deadlist = &cp->cache_defrag->kmd_deadlist; in kmem_move_end()
4983 cp->cache_defrag->kmd_deadcount--; in kmem_move_end()
4984 cp->cache_slab_destroy++; in kmem_move_end()
4985 mutex_exit(&cp->cache_lock); in kmem_move_end()
4986 kmem_slab_destroy(cp, sp); in kmem_move_end()
4987 mutex_enter(&cp->cache_lock); in kmem_move_end()
4990 mutex_exit(&cp->cache_lock); in kmem_move_end()
5009 kmem_move_buffers(kmem_cache_t *cp, size_t max_scan, size_t max_slabs, in kmem_move_buffers() argument
5022 ASSERT(MUTEX_HELD(&cp->cache_lock)); in kmem_move_buffers()
5024 ASSERT(cp->cache_move != NULL && cp->cache_defrag != NULL); in kmem_move_buffers()
5025 ASSERT((flags & KMM_DEBUG) ? !avl_is_empty(&cp->cache_partial_slabs) : in kmem_move_buffers()
5026 avl_numnodes(&cp->cache_partial_slabs) > 1); in kmem_move_buffers()
5049 sp = avl_last(&cp->cache_partial_slabs); in kmem_move_buffers()
5052 ((sp != avl_first(&cp->cache_partial_slabs)) || in kmem_move_buffers()
5054 sp = AVL_PREV(&cp->cache_partial_slabs, sp), i++) { in kmem_move_buffers()
5056 if (!kmem_slab_is_reclaimable(cp, sp, flags)) { in kmem_move_buffers()
5064 buf = (((char *)buf) + cp->cache_chunksize), j++) { in kmem_move_buffers()
5066 if (kmem_slab_allocated(cp, sp, buf) == NULL) { in kmem_move_buffers()
5090 mutex_exit(&cp->cache_lock); in kmem_move_buffers()
5092 success = kmem_move_begin(cp, sp, buf, flags); in kmem_move_buffers()
5106 mutex_enter(&cp->cache_lock); in kmem_move_buffers()
5112 &cp->cache_defrag->kmd_deadlist; in kmem_move_buffers()
5116 &cp->cache_defrag->kmd_moves_pending)) { in kmem_move_buffers()
5141 cp->cache_defrag->kmd_deadcount--; in kmem_move_buffers()
5142 cp->cache_slab_destroy++; in kmem_move_buffers()
5143 mutex_exit(&cp->cache_lock); in kmem_move_buffers()
5144 kmem_slab_destroy(cp, sp); in kmem_move_buffers()
5145 mutex_enter(&cp->cache_lock); in kmem_move_buffers()
5193 ASSERT(!avl_is_empty(&cp->cache_partial_slabs)); in kmem_move_buffers()
5194 if (sp == avl_first(&cp->cache_partial_slabs)) { in kmem_move_buffers()
5217 kmem_cache_t *cp = args->kmna_cache; in kmem_cache_move_notify_task() local
5222 ASSERT(list_link_active(&cp->cache_link)); in kmem_cache_move_notify_task()
5225 mutex_enter(&cp->cache_lock); in kmem_cache_move_notify_task()
5226 sp = kmem_slab_allocated(cp, NULL, buf); in kmem_cache_move_notify_task()
5230 mutex_exit(&cp->cache_lock); in kmem_cache_move_notify_task()
5235 if (avl_numnodes(&cp->cache_partial_slabs) > 1) { in kmem_cache_move_notify_task()
5243 mutex_exit(&cp->cache_lock); in kmem_cache_move_notify_task()
5247 kmem_slab_move_yes(cp, sp, buf); in kmem_cache_move_notify_task()
5250 mutex_exit(&cp->cache_lock); in kmem_cache_move_notify_task()
5252 (void) kmem_move_begin(cp, sp, buf, KMM_NOTIFY); in kmem_cache_move_notify_task()
5253 mutex_enter(&cp->cache_lock); in kmem_cache_move_notify_task()
5257 list_t *deadlist = &cp->cache_defrag->kmd_deadlist; in kmem_cache_move_notify_task()
5261 &cp->cache_defrag->kmd_moves_pending)) { in kmem_cache_move_notify_task()
5263 mutex_exit(&cp->cache_lock); in kmem_cache_move_notify_task()
5267 cp->cache_defrag->kmd_deadcount--; in kmem_cache_move_notify_task()
5268 cp->cache_slab_destroy++; in kmem_cache_move_notify_task()
5269 mutex_exit(&cp->cache_lock); in kmem_cache_move_notify_task()
5270 kmem_slab_destroy(cp, sp); in kmem_cache_move_notify_task()
5274 kmem_slab_move_yes(cp, sp, buf); in kmem_cache_move_notify_task()
5276 mutex_exit(&cp->cache_lock); in kmem_cache_move_notify_task()
5280 kmem_cache_move_notify(kmem_cache_t *cp, void *buf) in kmem_cache_move_notify() argument
5286 args->kmna_cache = cp; in kmem_cache_move_notify()
5296 kmem_cache_defrag(kmem_cache_t *cp) in kmem_cache_defrag() argument
5300 ASSERT(cp->cache_defrag != NULL); in kmem_cache_defrag()
5302 mutex_enter(&cp->cache_lock); in kmem_cache_defrag()
5303 n = avl_numnodes(&cp->cache_partial_slabs); in kmem_cache_defrag()
5306 cp->cache_defrag->kmd_defrags++; in kmem_cache_defrag()
5307 (void) kmem_move_buffers(cp, n, 0, KMM_DESPERATE); in kmem_cache_defrag()
5309 mutex_exit(&cp->cache_lock); in kmem_cache_defrag()
5314 kmem_cache_frag_threshold(kmem_cache_t *cp, uint64_t nfree) in kmem_cache_frag_threshold() argument
5322 (cp->cache_buftotal * kmem_frag_numer)); in kmem_cache_frag_threshold()
5326 kmem_cache_is_fragmented(kmem_cache_t *cp, boolean_t *doreap) in kmem_cache_is_fragmented() argument
5331 ASSERT(MUTEX_HELD(&cp->cache_lock)); in kmem_cache_is_fragmented()
5335 if (avl_numnodes(&cp->cache_partial_slabs) > 1) { in kmem_cache_is_fragmented()
5339 if ((cp->cache_complete_slab_count + avl_numnodes( in kmem_cache_is_fragmented()
5340 &cp->cache_partial_slabs)) < kmem_frag_minslabs) { in kmem_cache_is_fragmented()
5345 nfree = cp->cache_bufslab; in kmem_cache_is_fragmented()
5346 fragmented = ((avl_numnodes(&cp->cache_partial_slabs) > 1) && in kmem_cache_is_fragmented()
5347 kmem_cache_frag_threshold(cp, nfree)); in kmem_cache_is_fragmented()
5358 mutex_enter(&cp->cache_depot_lock); in kmem_cache_is_fragmented()
5359 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min); in kmem_cache_is_fragmented()
5360 reap = MIN(reap, cp->cache_full.ml_total); in kmem_cache_is_fragmented()
5361 mutex_exit(&cp->cache_depot_lock); in kmem_cache_is_fragmented()
5363 nfree += ((uint64_t)reap * cp->cache_magtype->mt_magsize); in kmem_cache_is_fragmented()
5364 if (kmem_cache_frag_threshold(cp, nfree)) { in kmem_cache_is_fragmented()
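
kmem_cache_frag_threshold() declares the cache fragmented when the free fraction of all buffers, counting both slab-resident free buffers and those recoverable by reaping full magazines, crosses kmem_frag_numer/kmem_frag_denom; the comparison cross-multiplies to stay in integer arithmetic. A model of the test with illustrative threshold values, not the kernel's tunables:

#include <stdint.h>

#define FRAG_NUMER 1    /* illustrative: fragmented past 1/8 free */
#define FRAG_DENOM 8

/* true when nfree / buftotal > NUMER / DENOM, without division */
static int
frag_threshold_model(uint64_t nfree, uint64_t buftotal)
{
    return (nfree * FRAG_DENOM > buftotal * FRAG_NUMER);
}
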
5374 kmem_cache_scan(kmem_cache_t *cp) in kmem_cache_scan() argument
5381 mutex_enter(&cp->cache_lock); in kmem_cache_scan()
5383 kmd = cp->cache_defrag; in kmem_cache_scan()
5386 mutex_exit(&cp->cache_lock); in kmem_cache_scan()
5387 kmem_cache_reap(cp); in kmem_cache_scan()
5391 if (kmem_cache_is_fragmented(cp, &reap)) { in kmem_cache_scan()
5405 slabs_found = kmem_move_buffers(cp, kmem_reclaim_scan_range, in kmem_cache_scan()
5430 kmem_reset_reclaim_threshold(cp->cache_defrag); in kmem_cache_scan()
5432 if (!avl_is_empty(&cp->cache_partial_slabs)) { in kmem_cache_scan()
5443 mutex_exit(&cp->cache_lock); in kmem_cache_scan()
5444 kmem_cache_reap(cp); in kmem_cache_scan()
5448 (void) kmem_move_buffers(cp, in kmem_cache_scan()
5455 mutex_exit(&cp->cache_lock); in kmem_cache_scan()
5458 kmem_depot_ws_reap(cp); in kmem_cache_scan()