1/*-
2 * Copyright (c) 2006-2007 Pawel Jakub Dawidek <pjd@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD$");
29
30#include <sys/param.h>
31#include <sys/kernel.h>
32#include <sys/systm.h>
33#include <sys/malloc.h>
34#include <sys/kmem.h>
35#include <sys/debug.h>
36#include <sys/mutex.h>
37#include <sys/vmmeter.h>
38
39#include <vm/vm_page.h>
40#include <vm/vm_object.h>
41#include <vm/vm_kern.h>
42#include <vm/vm_map.h>
43
44#ifdef KMEM_DEBUG
45#include <sys/queue.h>
46#include <sys/stack.h>
47#endif
48
#ifdef _KERNEL
/* Dedicated malloc(9) type so Solaris-compat allocations show up in vmstat -m. */
MALLOC_DEFINE(M_SOLARIS, "solaris", "Solaris");
#else
/* In userland, map the kernel malloc/free signatures onto libc malloc(3). */
#define	malloc(size, type, flags)	malloc(size)
#define	free(addr, type)		free(addr)
#endif

#ifdef KMEM_DEBUG
/*
 * Leak-tracking record.  zfs_kmem_alloc() prepends one of these to every
 * allocation; kmem_show() reports any that remain at shutdown, together
 * with the stack captured at allocation time.
 */
struct kmem_item {
	struct stack	stack;		/* Call stack at allocation time. */
	LIST_ENTRY(kmem_item) next;	/* Linkage on the global item list. */
};
/* All live tracked allocations. */
static LIST_HEAD(, kmem_item) kmem_items;
/* Protects kmem_items. */
static struct mtx kmem_items_mtx;
MTX_SYSINIT(kmem_items_mtx, &kmem_items_mtx, "kmem_items", MTX_DEF);
#endif	/* KMEM_DEBUG */
65
66#include <sys/vmem.h>
67
/*
 * Solaris-compatible kmem_alloc().  Allocates "size" bytes from the
 * M_SOLARIS malloc type with the given KM_* flags.  With KMEM_DEBUG the
 * allocation is enlarged by a hidden kmem_item header used for leak
 * tracking; the pointer handed back to the caller points just past it.
 */
void *
zfs_kmem_alloc(size_t size, int kmflags)
{
	void *p;
#ifdef KMEM_DEBUG
	struct kmem_item *i;

	/* Reserve room for the tracking header in front of the buffer. */
	size += sizeof(struct kmem_item);
#endif
	p = malloc(size, M_SOLARIS, kmflags);
#ifndef _KERNEL
	/*
	 * Userland malloc(3) can fail even for KM_SLEEP, which callers
	 * assume never fails; catch that violation immediately.
	 */
	if (kmflags & KM_SLEEP)
		assert(p != NULL);
#endif
#ifdef KMEM_DEBUG
	if (p != NULL) {
		/* Fill in the header, then hide it from the caller. */
		i = p;
		p = (u_char *)p + sizeof(struct kmem_item);
		stack_save(&i->stack);
		mtx_lock(&kmem_items_mtx);
		LIST_INSERT_HEAD(&kmem_items, i, next);
		mtx_unlock(&kmem_items_mtx);
	}
#endif
	return (p);
}
94
/*
 * Solaris-compatible kmem_free().  "size" is part of the Solaris API but
 * is not needed by FreeBSD's free(9).  With KMEM_DEBUG, step back to the
 * hidden kmem_item header prepended by zfs_kmem_alloc(), unlink it from
 * the leak-tracking list, and free the whole original allocation.
 */
void
zfs_kmem_free(void *buf, size_t size __unused)
{
#ifdef KMEM_DEBUG
	if (buf == NULL) {
		printf("%s: attempt to free NULL\n", __func__);
		return;
	}
	struct kmem_item *i;

	/* Recover the tracking header in front of the caller's buffer. */
	buf = (u_char *)buf - sizeof(struct kmem_item);
	mtx_lock(&kmem_items_mtx);
	LIST_FOREACH(i, &kmem_items, next) {
		if (i == buf)
			break;
	}
	/* Not on the list means a stray or double free — treat as fatal. */
	ASSERT(i != NULL);
	LIST_REMOVE(i, next);
	mtx_unlock(&kmem_items_mtx);
#endif
	free(buf, M_SOLARIS);
}
117
/* Value reported by kmem_size(); computed once at boot and never updated. */
static uint64_t kmem_size_val;

/*
 * SYSINIT hook: cache the smaller of total physical memory
 * (v_page_count * PAGE_SIZE) and the kernel's kmem arena size, as the
 * amount of memory ZFS should consider available.
 */
static void
kmem_size_init(void *unused __unused)
{

	kmem_size_val = (uint64_t)vm_cnt.v_page_count * PAGE_SIZE;
	if (kmem_size_val > vm_kmem_size)
		kmem_size_val = vm_kmem_size;
}
SYSINIT(kmem_size_init, SI_SUB_KMEM, SI_ORDER_ANY, kmem_size_init, NULL);
129
/*
 * Return the memory size computed at boot by kmem_size_init().
 */
uint64_t
kmem_size(void)
{

	return (kmem_size_val);
}
136
137static int
138kmem_std_constructor(void *mem, int size __unused, void *private, int flags)
139{
140	struct kmem_cache *cache = private;
141
142	return (cache->kc_constructor(mem, cache->kc_private, flags));
143}
144
145static void
146kmem_std_destructor(void *mem, int size __unused, void *private)
147{
148	struct kmem_cache *cache = private;
149
150	cache->kc_destructor(mem, cache->kc_private);
151}
152
/*
 * Create a Solaris-style object cache.
 *
 * name		- cache name (copied into the cache structure)
 * bufsize	- size of each cached object
 * align	- required alignment of objects, or 0 for none
 * constructor	- optional per-object initializer
 * destructor	- optional per-object finalizer
 * reclaim	- ignored; UMA handles memory pressure itself
 * private	- opaque argument handed back to the callbacks
 * vmp		- must be NULL (custom Solaris vmem sources unsupported)
 * cflags	- creation flags, forwarded to UMA
 *
 * In a regular kernel build the cache is backed by a UMA zone, with the
 * std shims translating the callback signatures.  With KMEM_DEBUG, or in
 * userland, objects come from plain kmem_alloc() instead, so only the
 * buffer size needs to be remembered.
 */
kmem_cache_t *
kmem_cache_create(char *name, size_t bufsize, size_t align,
    int (*constructor)(void *, void *, int), void (*destructor)(void *, void *),
    void (*reclaim)(void *) __unused, void *private, vmem_t *vmp, int cflags)
{
	kmem_cache_t *cache;

	ASSERT(vmp == NULL);

	/* KM_SLEEP is expected not to fail, so no NULL check here. */
	cache = kmem_alloc(sizeof(*cache), KM_SLEEP);
	strlcpy(cache->kc_name, name, sizeof(cache->kc_name));
	cache->kc_constructor = constructor;
	cache->kc_destructor = destructor;
	cache->kc_private = private;
#if defined(_KERNEL) && !defined(KMEM_DEBUG)
	/* uma_zcreate() takes an alignment *mask*, hence "align - 1". */
	cache->kc_zone = uma_zcreate(cache->kc_name, bufsize,
	    constructor != NULL ? kmem_std_constructor : NULL,
	    destructor != NULL ? kmem_std_destructor : NULL,
	    NULL, NULL, align > 0 ? align - 1 : 0, cflags);
#else
	cache->kc_size = bufsize;
#endif

	return (cache);
}
178
/*
 * Destroy a cache created by kmem_cache_create(): tear down the backing
 * UMA zone (when one exists) and free the cache structure itself.
 * The caller must have returned all outstanding objects first.
 */
void
kmem_cache_destroy(kmem_cache_t *cache)
{
#if defined(_KERNEL) && !defined(KMEM_DEBUG)
	uma_zdestroy(cache->kc_zone);
#endif
	kmem_free(cache, sizeof(*cache));
}
187
/*
 * Allocate one object from the cache.  With UMA backing, the cache
 * pointer rides along as the allocation argument so kmem_std_constructor()
 * can locate the callbacks; in the fallback path the constructor is
 * invoked by hand on a plain kmem_alloc() buffer.
 */
void *
kmem_cache_alloc(kmem_cache_t *cache, int flags)
{
#if defined(_KERNEL) && !defined(KMEM_DEBUG)
	return (uma_zalloc_arg(cache->kc_zone, cache, flags));
#else
	void *p;

	p = kmem_alloc(cache->kc_size, flags);
	if (p != NULL && cache->kc_constructor != NULL)
		kmem_std_constructor(p, cache->kc_size, cache, flags);
	return (p);
#endif
}
202
/*
 * Return one object to the cache.  Mirrors kmem_cache_alloc(): UMA runs
 * kmem_std_destructor() via the zone argument, while the fallback path
 * calls the destructor by hand before freeing the raw buffer.
 */
void
kmem_cache_free(kmem_cache_t *cache, void *buf)
{
#if defined(_KERNEL) && !defined(KMEM_DEBUG)
	uma_zfree_arg(cache->kc_zone, buf, cache);
#else
	if (cache->kc_destructor != NULL)
		kmem_std_destructor(buf, cache->kc_size, cache);
	kmem_free(buf, cache->kc_size);
#endif
}
214
215/*
216 * Allow our caller to determine if there are running reaps.
217 *
218 * This call is very conservative and may return B_TRUE even when
219 * reaping activity isn't active. If it returns B_FALSE, then reaping
220 * activity is definitely inactive.
221 */
222boolean_t
223kmem_cache_reap_active(void)
224{
225
226	return (B_FALSE);
227}
228
229/*
230 * Reap (almost) everything soon.
231 *
232 * Note: this does not wait for the reap-tasks to complete. Caller
233 * should use kmem_cache_reap_active() (above) and/or moderation to
234 * avoid scheduling too many reap-tasks.
235 */
236#ifdef _KERNEL
237void
238kmem_cache_reap_soon(kmem_cache_t *cache)
239{
240#ifndef KMEM_DEBUG
241	uma_zone_reclaim(cache->kc_zone, UMA_RECLAIM_DRAIN);
242#endif
243}
244
245void
246kmem_reap(void)
247{
248	uma_reclaim(UMA_RECLAIM_TRIM);
249}
250#else
251void
252kmem_cache_reap_soon(kmem_cache_t *cache __unused)
253{
254}
255
256void
257kmem_reap(void)
258{
259}
260#endif
261
/*
 * Solaris kmem_debugging() compatibility stub.  This port exposes none
 * of the Solaris kmem debug flags to ZFS, so always report "off" (0).
 */
int
kmem_debugging(void)
{

	return (0);
}
267
268void *
269calloc(size_t n, size_t s)
270{
271	return (kmem_zalloc(n * s, KM_NOSLEEP));
272}
273
#ifdef KMEM_DEBUG
void kmem_show(void *);
/*
 * Report every tracked allocation still on the kmem_items list.  Run at
 * module teardown via SYSUNINIT; anything still listed is a leak, printed
 * with the stack captured when it was allocated.
 */
void
kmem_show(void *dummy __unused)
{
	struct kmem_item *i;

	mtx_lock(&kmem_items_mtx);
	if (LIST_EMPTY(&kmem_items))
		printf("KMEM_DEBUG: No leaked elements.\n");
	else {
		printf("KMEM_DEBUG: Leaked elements:\n\n");
		LIST_FOREACH(i, &kmem_items, next) {
			printf("address=%p\n", i);
			stack_print_ddb(&i->stack);
			printf("\n");
		}
	}
	mtx_unlock(&kmem_items_mtx);
}

SYSUNINIT(sol_kmem, SI_SUB_CPU, SI_ORDER_FIRST, kmem_show, NULL);
#endif	/* KMEM_DEBUG */
297