/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2017 by Delphix. All rights reserved.
 * Copyright 2018, Joyent, Inc.
 */

/*
 * Kernel task queues: general-purpose asynchronous task scheduling.
 *
 * A common problem in kernel programming is the need to schedule tasks
 * to be performed later, by another thread. There are several reasons
 * you may want or need to do this:
 *
 * (1) The task isn't time-critical, but your current code path is.
 *
 * (2) The task may require grabbing locks that you already hold.
 *
 * (3) The task may need to block (e.g. to wait for memory), but you
 *     cannot block in your current context.
 *
 * (4) Your code path can't complete because of some condition, but you can't
 *     sleep or fail, so you queue the task for later execution when the
 *     condition clears.
 *
 * (5) You just want a simple way to launch multiple tasks in parallel.
 *
 * Task queues provide such a facility. In its simplest form (used when
 * performance is not a critical consideration) a task queue consists of a
 * single list of tasks, together with one or more threads to service the
 * list. There are some cases when this simple queue is not sufficient:
 *
 * (1) The task queues are very hot and there is a need to avoid data and lock
 *	contention over global resources.
 *
 * (2) Some tasks may depend on other tasks to complete, so they can't be put
 *	in the same list managed by the same thread.
 *
 * (3) Some tasks may block for a long time, and this should not block other
 *	tasks in the queue.
 *
 * To provide useful service in such cases we define a "dynamic task queue"
 * which has an individual thread for each of the tasks. These threads are
 * dynamically created as they are needed and destroyed when they are not in
 * use. The API for managing task pools is the same as for managing task queues
 * with the exception of the taskq creation flag TASKQ_DYNAMIC, which indicates
 * that dynamic task pool behavior is desired.
 *
 * Dynamic task queues may also place tasks in the normal queue (called the
 * "backing queue") when the task pool runs out of resources. Users of task
 * queues may disallow such queued scheduling by specifying TQ_NOQUEUE in the
 * dispatch flags.
 *
 * The backing task queue is also used for scheduling internal tasks needed for
 * dynamic task queue maintenance.
 *
 * INTERFACES ==================================================================
 *
 * taskq_t *taskq_create(name, nthreads, pri, minalloc, maxalloc, flags);
 *
 *	Create a taskq with the specified properties.
 *	Possible 'flags':
 *
 *	  TASKQ_DYNAMIC: Create a task pool for task management. If this flag
 *		is specified, 'nthreads' specifies the maximum number of threads
 *		in the task queue. Task execution order for dynamic task queues
 *		is not predictable.
 *
 *		If this flag is not specified (the default case) a
 *		single-list task queue is created with 'nthreads' threads
 *		servicing it. Entries in this queue are managed by
 *		taskq_ent_alloc() and taskq_ent_free() which try to keep the
 *		task population between 'minalloc' and 'maxalloc', but the
 *		latter limit is only advisory for TQ_SLEEP dispatches and the
 *		former limit is only advisory for TQ_NOALLOC dispatches. If
 *		TASKQ_PREPOPULATE is set in 'flags', the taskq will be
 *		prepopulated with 'minalloc' task structures.
 *
 *		Since non-DYNAMIC taskqs are queues, tasks are guaranteed to be
 *		executed in the order they are scheduled if nthreads == 1.
 *		If nthreads > 1, task execution order is not predictable.
 *
 *	  TASKQ_PREPOPULATE: Prepopulate task queue with threads.
 *		Also prepopulate the task queue with 'minalloc' task structures.
 *
 *	  TASKQ_THREADS_CPU_PCT: This flag specifies that 'nthreads' should be
 *		interpreted as a percentage of the # of online CPUs on the
 *		system.  The taskq subsystem will automatically adjust the
 *		number of threads in the taskq in response to CPU online
 *		and offline events, to keep the ratio.  nthreads must be in
 *		the range [0,100].
 *
 *		The calculation used is:
 *
 *			MAX((ncpus_online * percentage)/100, 1)
 *
 *		For example, nthreads = 50 on an 8-CPU system yields a target
 *		of MAX((8 * 50)/100, 1) = 4 threads.
 *
 *		This flag is not supported for DYNAMIC task queues.
 *		This flag is not compatible with TASKQ_CPR_SAFE.
 *
 *	  TASKQ_CPR_SAFE: This flag specifies that users of the task queue will
 *		use their own protocol for handling CPR issues. This flag is not
 *		supported for DYNAMIC task queues.  This flag is not compatible
 *		with TASKQ_THREADS_CPU_PCT.
 *
 *	The 'pri' field specifies the default priority for the threads that
 *	service all scheduled tasks.
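 *
 *	As an illustrative sketch (my_func and my_arg below are hypothetical,
 *	not part of this file), a typical non-dynamic taskq lifecycle is:
 *
 *		taskq_t *tq = taskq_create("my_taskq", 4, minclsyspri,
 *		    2, 8, TASKQ_PREPOPULATE);
 *		(void) taskq_dispatch(tq, my_func, my_arg, TQ_SLEEP);
 *		taskq_wait(tq);
 *		taskq_destroy(tq);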
 *
 * taskq_t *taskq_create_instance(name, instance, nthreads, pri, minalloc,
 *    maxalloc, flags);
 *
 *	Like taskq_create(), but takes an instance number (or -1 to indicate
 *	no instance).
 *
 * taskq_t *taskq_create_proc(name, nthreads, pri, minalloc, maxalloc, proc,
 *    flags);
 *
 *	Like taskq_create(), but creates the taskq threads in the specified
 *	system process.  If proc != &p0, this must be called from a thread
 *	in that process.
 *
 * taskq_t *taskq_create_sysdc(name, nthreads, minalloc, maxalloc, proc,
 *    dc, flags);
 *
 *	Like taskq_create_proc(), but the taskq threads will use the
 *	System Duty Cycle (SDC) scheduling class with a duty cycle of dc.
 *
 * void taskq_destroy(tq):
 *
 *	Waits for any scheduled tasks to complete, then destroys the taskq.
 *	The caller should guarantee that no new tasks are scheduled in the
 *	closing taskq.
 *
 * taskqid_t taskq_dispatch(tq, func, arg, flags):
 *
 *	Dispatches the task "func(arg)" to the taskq. The 'flags' indicates
 *	whether the caller is willing to block for memory.  The function returns
 *	an opaque value which is zero iff dispatch fails.  If flags is
 *	TQ_NOSLEEP or TQ_NOALLOC and the task can't be dispatched,
 *	taskq_dispatch() fails and returns TASKQID_INVALID.
 *
 *	ASSUMES: func != NULL.
 *
 *	Possible flags:
 *	  TQ_NOSLEEP: Do not wait for resources; may fail.
 *
 *	  TQ_NOALLOC: Do not allocate memory; may fail.  May only be used with
 *		non-dynamic task queues.
 *
 *	  TQ_NOQUEUE: Do not enqueue the task if it can't be dispatched due to
 *		lack of available resources; fail instead. If this flag is not
 *		set, and the task pool is exhausted, the task may be scheduled
 *		in the backing queue. This flag may ONLY be used with dynamic
 *		task queues.
 *
 *		NOTE: This flag should always be used when a task queue is used
 *		for tasks that may depend on each other for completion.
 *		Enqueueing dependent tasks may create deadlocks.
 *
 *	  TQ_SLEEP:   May block waiting for resources. May still fail for
 *		dynamic task queues if TQ_NOQUEUE is also specified; otherwise
 *		it always succeeds.
 *
 *	  TQ_FRONT:   Puts the new task at the front of the queue.  Be careful.
 *
 *	NOTE: Dynamic task queues are much more likely to fail in
 *		taskq_dispatch() (especially if TQ_NOQUEUE was specified), so it
 *		is important to have backup strategies handling such failures.
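 *
 *		For example (a sketch; my_func and my_arg are hypothetical), a
 *		caller may fall back to running the task in its own context:
 *
 *			if (taskq_dispatch(tq, my_func, my_arg,
 *			    TQ_NOSLEEP | TQ_NOQUEUE) == TASKQID_INVALID)
 *				my_func(my_arg);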
 *
 * void taskq_dispatch_ent(tq, func, arg, flags, tqent)
 *
 *	This is a lightweight form of taskq_dispatch() that uses a
 *	preallocated taskq_ent_t structure for scheduling.  As a
 *	result, it does not perform allocations and cannot ever fail.
 *	Note especially that it cannot be used with TASKQ_DYNAMIC
 *	taskqs.  The memory for the tqent must not be modified or used
 *	until the function (func) is called.  (However, func itself
 *	may safely modify or free this memory, once it is called.)
 *	Note that the taskq framework will NOT free this memory.
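 *
 *	A minimal sketch (the embedding structure my_job_t is hypothetical):
 *	the taskq_ent_t is typically embedded in the caller's own structure,
 *	which must stay alive until the task function runs:
 *
 *		typedef struct my_job {
 *			taskq_ent_t	mj_tqent;
 *			void		*mj_data;
 *		} my_job_t;
 *
 *		taskq_dispatch_ent(tq, my_job_func, job, 0, &job->mj_tqent);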
 *
 * boolean_t taskq_empty(tq)
 *
 *	Returns B_TRUE if the taskq has no pending tasks and no threads
 *	actively executing tasks, and B_FALSE otherwise.
 *
 * void taskq_wait(tq):
 *
 *	Waits for all previously scheduled tasks to complete.
 *
 *	NOTE: It does not stop any new task dispatches.
 *	      Do NOT call taskq_wait() from a task: it will cause deadlock.
 *
 * void taskq_suspend(tq)
 *
 *	Suspend all task execution. Tasks already scheduled for a dynamic task
 *	queue will still be executed, but all newly scheduled tasks will be
 *	suspended until taskq_resume() is called.
 *
 * int  taskq_suspended(tq)
 *
 *	Returns 1 if the taskq is suspended and 0 otherwise. It is intended to
 *	ASSERT that the task queue is suspended.
 *
 * void taskq_resume(tq)
 *
 *	Resume task queue execution.
 *
 * int  taskq_member(tq, thread)
 *
 *	Returns 1 if 'thread' belongs to taskq 'tq' and 0 otherwise. The
 *	intended use is to ASSERT that a given function is called in taskq
 *	context only.
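 *
 *	For example (my_tq is a hypothetical taskq pointer), a task function
 *	can verify its execution context with:
 *
 *		ASSERT(taskq_member(my_tq, curthread));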
 *
 * system_taskq
 *
 *	Global system-wide dynamic task queue for common use. It may be used by
 *	any subsystem that needs to schedule tasks and does not need to manage
 *	its own task queues. It is initialized quite early during system boot.
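 *
 *	e.g. (a sketch; my_async_func and its argument are hypothetical):
 *
 *		(void) taskq_dispatch(system_taskq, my_async_func, arg,
 *		    TQ_SLEEP);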
 *
 * IMPLEMENTATION ==============================================================
 *
 * This is a schematic representation of the task queue structures.
 *
 *   taskq:
 *   +-------------+
 *   | tq_lock     | +---< taskq_ent_free()
 *   +-------------+ |
 *   |...          | | tqent:                  tqent:
 *   +-------------+ | +------------+          +------------+
 *   | tq_freelist |-->| tqent_next |--> ... ->| tqent_next |
 *   +-------------+   +------------+          +------------+
 *   |...          |   | ...        |          | ...        |
 *   +-------------+   +------------+          +------------+
 *   | tq_task     |    |
 *   |             |    +-------------->taskq_ent_alloc()
 * +--------------------------------------------------------------------------+
 * | |                     |            tqent                   tqent         |
 * | +---------------------+     +--> +------------+     +--> +------------+  |
 * | | ...		   |     |    | func, arg  |     |    | func, arg  |  |
 * +>+---------------------+ <---|-+  +------------+ <---|-+  +------------+  |
 *   | tq_taskq.tqent_next | ----+ |  | tqent_next | --->+ |  | tqent_next |--+
 *   +---------------------+	   |  +------------+     ^ |  +------------+
 * +-| tq_task.tqent_prev  |	   +--| tqent_prev |     | +--| tqent_prev |  ^
 * | +---------------------+	      +------------+     |    +------------+  |
 * | |...		   |	      | ...        |     |    | ...        |  |
 * | +---------------------+	      +------------+     |    +------------+  |
 * |                                      ^              |                    |
 * |                                      |              |                    |
 * +--------------------------------------+--------------+       TQ_APPEND() -+
 *   |             |                      |
 *   |...          |   taskq_thread()-----+
 *   +-------------+
 *   | tq_buckets  |--+-------> [ NULL ] (for regular task queues)
 *   +-------------+  |
 *                    |   DYNAMIC TASK QUEUES:
 *                    |
 *                    +-> taskq_bucket[nCPU]		taskq_bucket_dispatch()
 *                        +-------------------+                    ^
 *                   +--->| tqbucket_lock     |                    |
 *                   |    +-------------------+   +--------+      +--------+
 *                   |    | tqbucket_freelist |-->| tqent  |-->...| tqent  | ^
 *                   |    +-------------------+<--+--------+<--...+--------+ |
 *                   |    | ...               |   | thread |      | thread | |
 *                   |    +-------------------+   +--------+      +--------+ |
 *                   |    +-------------------+                              |
 * taskq_dispatch()--+--->| tqbucket_lock     |             TQ_APPEND()------+
 *      TQ_HASH()    |    +-------------------+   +--------+      +--------+
 *                   |    | tqbucket_freelist |-->| tqent  |-->...| tqent  |
 *                   |    +-------------------+<--+--------+<--...+--------+
 *                   |    | ...               |   | thread |      | thread |
 *                   |    +-------------------+   +--------+      +--------+
 *		     +--->	...
 *
 *
 * Task queues use the tq_task field to link new entries into the queue. The
 * queue is a circular doubly-linked list. Entries are put at the end of the
 * list with TQ_APPEND() and processed from the front of the list by
 * taskq_thread() in FIFO order. Task queue entries are cached in the free list
 * managed by the taskq_ent_alloc() and taskq_ent_free() functions.
 *
 *	All threads used by task queues set the thread's t_taskq field to
 *	point to the task queue.
 *
 * Taskq Thread Management -----------------------------------------------------
 *
 * A taskq's non-dynamic threads are managed with several variables and flags:
 *
 *	* tq_nthreads	- The number of threads in taskq_thread() for the
 *			  taskq.
 *
 *	* tq_active	- The number of threads not waiting on a CV in
 *			  taskq_thread(); includes newly created threads
 *			  not yet counted in tq_nthreads.
 *
 *	* tq_nthreads_target
 *			- The number of threads desired for the taskq.
 *
 *	* tq_flags & TASKQ_CHANGING
 *			- Indicates that tq_nthreads != tq_nthreads_target.
 *
 *	* tq_flags & TASKQ_THREAD_CREATED
 *			- Indicates that a thread is being created in the taskq.
 *
 * During creation, tq_nthreads and tq_active are set to 0, and
 * tq_nthreads_target is set to the number of threads desired.  The
 * TASKQ_CHANGING flag is set, and taskq_thread_create() is called to
 * create the first thread. taskq_thread_create() increments tq_active,
 * sets TASKQ_THREAD_CREATED, and creates the new thread.
 *
 * Each thread starts in taskq_thread(), clears the TASKQ_THREAD_CREATED
 * flag, and increments tq_nthreads.  It stores the new value of
 * tq_nthreads as its "thread_id", and stores its thread pointer in the
 * tq_threadlist at index (thread_id - 1).  We keep the thread_id space
 * densely packed by requiring that only the largest thread_id can exit during
 * normal adjustment.  The exception is during the destruction of the taskq;
 * once tq_nthreads_target is set to zero, no new threads will be created
 * for the taskq, so every thread can exit without any ordering being
 * necessary.
 *
 * Threads will only process work if their thread id is <= tq_nthreads_target.
 *
 * When TASKQ_CHANGING is set, threads will check the current thread target
 * whenever they wake up, and do whatever they can to apply its effects.
 *
 * TASKQ_THREADS_CPU_PCT -------------------------------------------------------
 *
 * When a taskq is created with TASKQ_THREADS_CPU_PCT, we store the requested
 * percentage in tq_threads_ncpus_pct, start it off with the correct thread
 * target, and add it to the taskq_cpupct_list for later adjustment.
 *
 * We register taskq_cpu_setup() to be called whenever a CPU changes state.  It
 * walks the list of TASKQ_THREADS_CPU_PCT taskqs, adjusts their
 * tq_nthreads_target if need be, and wakes up all of the threads to process
 * the change.
 *
 * Dynamic Task Queues Implementation ------------------------------------------
 *
 * For dynamic task queues there is a 1-to-1 mapping between a thread and a
 * taskq_ent_t structure. Each entry is serviced by its own thread and each
 * thread is controlled by a single entry.
 *
 * Entries are distributed over a set of buckets. To avoid using modulo
 * arithmetic the number of buckets is 2^n, computed by rounding the number
 * of CPUs in the system down to the nearest power of two. The tunable
 * variable 'taskq_maxbuckets' limits the maximum number of buckets. Each entry
 * is attached to a bucket for its lifetime and can't migrate to other buckets.
 *
 * Entries that have scheduled tasks are not placed in any list. The dispatch
 * function sets their "func" and "arg" fields and signals the corresponding
 * thread to execute the task. Once the thread executes the task it clears the
 * "func" field and places the entry on the bucket cache of free entries
 * pointed to by the "tqbucket_freelist" field. ALL entries on the free list
 * should have the "func" field equal to NULL. The free list is a circular
 * doubly-linked list identical in structure to the tq_task list above, but
 * entries are taken from it in LIFO order - the last freed entry is the first
 * to be allocated. The taskq_bucket_dispatch() function gets the most recently
 * used entry from the free list, sets its "func" and "arg" fields and signals
 * a worker thread.
 *
 * After executing each task a per-entry thread taskq_d_thread() places its
 * entry on the bucket free list and goes to a timed sleep. If it wakes up
 * without getting a new task it removes the entry from the free list and
 * destroys itself. The thread sleep time is controlled by a tunable variable
 * `taskq_thread_timeout'.
 *
 * There are various statistics kept in the bucket which allow for later
 * analysis of taskq usage patterns. Also, a global copy of taskq creation and
 * death statistics is kept in the global taskq data structure. Since thread
 * creation and death happen rarely, updating such global data does not present
 * a performance problem.
 *
 * NOTE: Threads are not bound to any CPU and there is absolutely no association
 *       between the bucket and actual thread CPU, so buckets are used only to
 *	 split resources and reduce resource contention. Having threads attached
 *	 to the CPU denoted by a bucket may reduce the number of times the job
 *	 switches between CPUs.
 *
 *	 The current algorithm creates a thread whenever a bucket has no free
 *	 entries. It would be nice to know how many threads are in the running
 *	 state and not create threads if all CPUs are busy with existing
 *	 tasks, but it is unclear how such a strategy can be implemented.
 *
 *	 Currently buckets are created statically as an array attached to the
 *	 task queue. On systems with nCPUs < max_ncpus this may waste system
 *	 memory. One solution may be allocation of buckets when they are first
 *	 touched, but it is not clear how useful it is.
 *
 * SUSPEND/RESUME implementation -----------------------------------------------
 *
 *	Before executing a task, taskq_thread() (which executes non-dynamic
 *	task queues) obtains the taskq's thread lock as a reader. The
 *	taskq_suspend() function takes the same lock as a writer, blocking all
 *	non-dynamic task execution. The taskq_resume() function releases the
 *	lock, allowing taskq_thread() to continue execution.
 *
 *	For dynamic task queues, each bucket is marked as TQBUCKET_SUSPEND by
 *	the taskq_suspend() function. After that taskq_bucket_dispatch() always
 *	fails, so that taskq_dispatch() will either enqueue tasks for a
 *	suspended backing queue or fail if TQ_NOQUEUE is specified in the
 *	dispatch flags.
 *
 *	NOTE: taskq_suspend() does not immediately block any tasks already
 *	      scheduled for dynamic task queues. It only suspends new tasks
 *	      scheduled after taskq_suspend() was called.
 *
 *	The taskq_member() function works by comparing a thread's t_taskq
 *	pointer with the passed taskq pointer.
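 *
 *	A suspend/resume pair brackets a critical region (sketch):
 *
 *		taskq_suspend(tq);
 *		ASSERT(taskq_suspended(tq));
 *		(... reconfigure state that the tasks depend on ...)
 *		taskq_resume(tq);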
 *
 * LOCKS and LOCK Hierarchy ----------------------------------------------------
 *
 *   There are three locks used in task queues:
 *
 *   1) The taskq_t's tq_lock, protecting global task queue state.
 *
 *   2) Each per-CPU bucket has a lock for bucket management.
 *
 *   3) The global taskq_cpupct_lock, which protects the list of
 *      TASKQ_THREADS_CPU_PCT taskqs.
 *
 *   If both (1) and (2) are needed, tq_lock should be taken *after* the bucket
 *   lock.
 *
 *   If both (1) and (3) are needed, tq_lock should be taken *after*
 *   taskq_cpupct_lock.
 *
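 *   E.g. code that needs both a bucket lock and tq_lock takes them in this
 *   order (sketch):
 *
 *	mutex_enter(&bucket->tqbucket_lock);
 *	mutex_enter(&tq->tq_lock);
 *	(... manipulate bucket and global taskq state ...)
 *	mutex_exit(&tq->tq_lock);
 *	mutex_exit(&bucket->tqbucket_lock);
 *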
 * DEBUG FACILITIES ------------------------------------------------------------
 *
 * For DEBUG kernels it is possible to induce random failures in the
 * taskq_dispatch() function when it is given the TQ_NOSLEEP argument. The
 * values of the taskq_dmtbf and taskq_smtbf tunables control the mean time
 * between induced failures for dynamic and static task queues respectively.
 *
 * Setting TASKQ_STATISTIC to 0 will disable per-bucket statistics.
 *
 * TUNABLES --------------------------------------------------------------------
 *
 *	system_taskq_size	- Size of the global system_taskq.
 *				  This value is multiplied by nCPUs to determine
 *				  actual size.
 *				  Default value: 64
 *
 *	taskq_minimum_nthreads_max
 *				- Minimum size of the thread list for a taskq.
 *				  Useful for testing different thread pool
 *				  sizes by overwriting tq_nthreads_target.
 *
 *	taskq_thread_timeout	- Maximum idle time for taskq_d_thread()
 *				  Default value: 5 minutes
 *
 *	taskq_maxbuckets	- Maximum number of buckets in any task queue
 *				  Default value: 128
 *
 *	taskq_search_depth	- Maximum # of buckets searched for a free entry
 *				  Default value: 4
 *
 *	taskq_dmtbf		- Mean time between induced dispatch failures
 *				  for dynamic task queues.
 *				  Default value: UINT_MAX (no induced failures)
 *
 *	taskq_smtbf		- Mean time between induced dispatch failures
 *				  for static task queues.
 *				  Default value: UINT_MAX (no induced failures)
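 *
 *	Like other global kernel integer tunables, these can be set at boot
 *	time via /etc/system, e.g. (illustrative):
 *
 *		set taskq_thread_timeout = 600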
 *
 * CONDITIONAL compilation -----------------------------------------------------
 *
 *    TASKQ_STATISTIC	- If set, enables per-bucket statistics (the default).
 *
 */

#include <sys/taskq_impl.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/callb.h>
#include <sys/class.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>	/* For throttlefree */
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/cpupart.h>
#include <sys/sdt.h>
#include <sys/sysdc.h>
#include <sys/note.h>

static kmem_cache_t *taskq_ent_cache, *taskq_cache;

/*
 * Pseudo instance numbers for taskqs without an explicitly provided instance.
 */
static vmem_t *taskq_id_arena;

/* Global system task queue for common use */
taskq_t	*system_taskq;

/*
 * Maximum number of entries in the global system taskq is
 *	system_taskq_size * max_ncpus
 */
#define	SYSTEM_TASKQ_SIZE 64
int system_taskq_size = SYSTEM_TASKQ_SIZE;

/*
 * Minimum size for tq_nthreads_max; useful for those who want to play around
 * with increasing a taskq's tq_nthreads_target.
 */
int taskq_minimum_nthreads_max = 1;

/*
 * We want to ensure that when taskq_create() returns, there is at least
 * one thread ready to handle requests.  To guarantee this, we have to wait
 * for the second thread, since the first one cannot process requests until
 * the second thread has been created.
 */
#define	TASKQ_CREATE_ACTIVE_THREADS	2

/* Maximum percentage allowed for TASKQ_THREADS_CPU_PCT */
#define	TASKQ_CPUPCT_MAX_PERCENT	1000
int taskq_cpupct_max_percent = TASKQ_CPUPCT_MAX_PERCENT;

/*
 * Dynamic task queue threads that don't get any work within
 * taskq_thread_timeout destroy themselves.
 */
#define	TASKQ_THREAD_TIMEOUT (60 * 5)
int taskq_thread_timeout = TASKQ_THREAD_TIMEOUT;

#define	TASKQ_MAXBUCKETS 128
int taskq_maxbuckets = TASKQ_MAXBUCKETS;

/*
 * When a bucket has no available entries other buckets are tried.
 * The taskq_search_depth parameter limits the number of buckets that we
 * search before failing. This is mostly useful in systems with many CPUs
 * where we may spend too much time scanning busy buckets.
 */
#define	TASKQ_SEARCH_DEPTH 4
int taskq_search_depth = TASKQ_SEARCH_DEPTH;

/*
 * Hashing function: mix various bits of x. May be pretty much anything.
 */
#define	TQ_HASH(x) ((x) ^ ((x) >> 11) ^ ((x) >> 17) ^ ((x) ^ 27))

/*
 * We do not create any new threads when the system is low on memory and
 * starts throttling memory allocations. The following macro tries to estimate
 * such a condition.
 */
#define	ENOUGH_MEMORY() (freemem > throttlefree)

/*
 * Static functions.
 */
static taskq_t	*taskq_create_common(const char *, int, int, pri_t, int,
    int, proc_t *, uint_t, uint_t);
static void taskq_thread(void *);
static void taskq_d_thread(taskq_ent_t *);
static void taskq_bucket_extend(void *);
static int  taskq_constructor(void *, void *, int);
static void taskq_destructor(void *, void *);
static int  taskq_ent_constructor(void *, void *, int);
static void taskq_ent_destructor(void *, void *);
static taskq_ent_t *taskq_ent_alloc(taskq_t *, int);
static void taskq_ent_free(taskq_t *, taskq_ent_t *);
static int taskq_ent_exists(taskq_t *, task_func_t, void *);
static taskq_ent_t *taskq_bucket_dispatch(taskq_bucket_t *, task_func_t,
    void *);

/*
 * Task queues kstats.
 */
struct taskq_kstat {
	kstat_named_t	tq_pid;
	kstat_named_t	tq_tasks;
	kstat_named_t	tq_executed;
	kstat_named_t	tq_maxtasks;
	kstat_named_t	tq_totaltime;
	kstat_named_t	tq_nalloc;
	kstat_named_t	tq_nactive;
	kstat_named_t	tq_pri;
	kstat_named_t	tq_nthreads;
	kstat_named_t	tq_nomem;
} taskq_kstat = {
	{ "pid",		KSTAT_DATA_UINT64 },
	{ "tasks",		KSTAT_DATA_UINT64 },
	{ "executed",		KSTAT_DATA_UINT64 },
	{ "maxtasks",		KSTAT_DATA_UINT64 },
	{ "totaltime",		KSTAT_DATA_UINT64 },
	{ "nalloc",		KSTAT_DATA_UINT64 },
	{ "nactive",		KSTAT_DATA_UINT64 },
	{ "priority",		KSTAT_DATA_UINT64 },
	{ "threads",		KSTAT_DATA_UINT64 },
	{ "nomem",		KSTAT_DATA_UINT64 },
};

struct taskq_d_kstat {
	kstat_named_t	tqd_pri;
	kstat_named_t	tqd_btasks;
	kstat_named_t	tqd_bexecuted;
	kstat_named_t	tqd_bmaxtasks;
	kstat_named_t	tqd_bnalloc;
	kstat_named_t	tqd_bnactive;
	kstat_named_t	tqd_btotaltime;
	kstat_named_t	tqd_hits;
	kstat_named_t	tqd_misses;
	kstat_named_t	tqd_overflows;
	kstat_named_t	tqd_tcreates;
	kstat_named_t	tqd_tdeaths;
	kstat_named_t	tqd_maxthreads;
	kstat_named_t	tqd_nomem;
	kstat_named_t	tqd_disptcreates;
	kstat_named_t	tqd_totaltime;
	kstat_named_t	tqd_nalloc;
	kstat_named_t	tqd_nfree;
} taskq_d_kstat = {
	{ "priority",		KSTAT_DATA_UINT64 },
	{ "btasks",		KSTAT_DATA_UINT64 },
	{ "bexecuted",		KSTAT_DATA_UINT64 },
	{ "bmaxtasks",		KSTAT_DATA_UINT64 },
	{ "bnalloc",		KSTAT_DATA_UINT64 },
	{ "bnactive",		KSTAT_DATA_UINT64 },
	{ "btotaltime",		KSTAT_DATA_UINT64 },
	{ "hits",		KSTAT_DATA_UINT64 },
	{ "misses",		KSTAT_DATA_UINT64 },
	{ "overflows",		KSTAT_DATA_UINT64 },
	{ "tcreates",		KSTAT_DATA_UINT64 },
	{ "tdeaths",		KSTAT_DATA_UINT64 },
	{ "maxthreads",		KSTAT_DATA_UINT64 },
	{ "nomem",		KSTAT_DATA_UINT64 },
	{ "disptcreates",	KSTAT_DATA_UINT64 },
	{ "totaltime",		KSTAT_DATA_UINT64 },
	{ "nalloc",		KSTAT_DATA_UINT64 },
	{ "nfree",		KSTAT_DATA_UINT64 },
};

static kmutex_t taskq_kstat_lock;
static kmutex_t taskq_d_kstat_lock;
static int taskq_kstat_update(kstat_t *, int);
static int taskq_d_kstat_update(kstat_t *, int);

/*
 * List of all TASKQ_THREADS_CPU_PCT taskqs.
 */
static list_t taskq_cpupct_list;	/* protected by cpu_lock */

/*
 * Collect per-bucket statistics when TASKQ_STATISTIC is defined.
 */
#define	TASKQ_STATISTIC 1

#if TASKQ_STATISTIC
#define	TQ_STAT(b, x)	b->tqbucket_stat.x++
#else
#define	TQ_STAT(b, x)
#endif

/*
 * Random fault injection.
 */
uint_t taskq_random;
uint_t taskq_dmtbf = UINT_MAX;    /* mean time between injected failures */
uint_t taskq_smtbf = UINT_MAX;    /* mean time between injected failures */

/*
 * TQ_NOSLEEP dispatches on dynamic task queues are always allowed to fail.
 *
 * TQ_NOSLEEP dispatches on static task queues can't arbitrarily fail because
 * they could prepopulate the cache and make sure that they do not use more
 * than minalloc entries.  So, fault injection in this case ensures that
 * either TASKQ_PREPOPULATE is not set or there are more entries allocated
 * than is specified by minalloc.  TQ_NOALLOC dispatches are always allowed
 * to fail, but for simplicity we treat them identically to TQ_NOSLEEP
 * dispatches.
 */
#ifdef DEBUG
#define	TASKQ_D_RANDOM_DISPATCH_FAILURE(tq, flag)		\
	taskq_random = (taskq_random * 2416 + 374441) % 1771875;\
	if ((flag & TQ_NOSLEEP) &&				\
	    taskq_random < 1771875 / taskq_dmtbf) {		\
		return (TASKQID_INVALID);			\
	}

#define	TASKQ_S_RANDOM_DISPATCH_FAILURE(tq, flag)		\
	taskq_random = (taskq_random * 2416 + 374441) % 1771875;\
	if ((flag & (TQ_NOSLEEP | TQ_NOALLOC)) &&		\
	    (!(tq->tq_flags & TASKQ_PREPOPULATE) ||		\
	    (tq->tq_nalloc > tq->tq_minalloc)) &&		\
	    (taskq_random < (1771875 / taskq_smtbf))) {		\
		mutex_exit(&tq->tq_lock);			\
		return (TASKQID_INVALID);			\
	}
#else
#define	TASKQ_S_RANDOM_DISPATCH_FAILURE(tq, flag)
#define	TASKQ_D_RANDOM_DISPATCH_FAILURE(tq, flag)
#endif

#define	IS_EMPTY(l) (((l).tqent_prev == (l).tqent_next) &&	\
	((l).tqent_prev == &(l)))

/*
 * Append `tqe' at the end of the doubly-linked list denoted by l.
 */
#define	TQ_APPEND(l, tqe) {					\
	tqe->tqent_next = &l;					\
	tqe->tqent_prev = l.tqent_prev;				\
	tqe->tqent_next->tqent_prev = tqe;			\
	tqe->tqent_prev->tqent_next = tqe;			\
}
/*
 * Prepend 'tqe' to the beginning of l.
 */
#define	TQ_PREPEND(l, tqe) {					\
	tqe->tqent_next = l.tqent_next;				\
	tqe->tqent_prev = &l;					\
	tqe->tqent_next->tqent_prev = tqe;			\
	tqe->tqent_prev->tqent_next = tqe;			\
}

/*
 * Schedule a task specified by func and arg into the task queue entry tqe.
 */
#define	TQ_DO_ENQUEUE(tq, tqe, func, arg, front) {			\
	ASSERT(MUTEX_HELD(&tq->tq_lock));				\
	_NOTE(CONSTCOND)						\
	if (front) {							\
		TQ_PREPEND(tq->tq_task, tqe);				\
	} else {							\
		TQ_APPEND(tq->tq_task, tqe);				\
	}								\
	tqe->tqent_func = (func);					\
	tqe->tqent_arg = (arg);						\
	tq->tq_tasks++;							\
	if (tq->tq_tasks - tq->tq_executed > tq->tq_maxtasks)		\
		tq->tq_maxtasks = tq->tq_tasks - tq->tq_executed;	\
	cv_signal(&tq->tq_dispatch_cv);					\
	DTRACE_PROBE2(taskq__enqueue, taskq_t *, tq, taskq_ent_t *, tqe); \
}

#define	TQ_ENQUEUE(tq, tqe, func, arg)					\
	TQ_DO_ENQUEUE(tq, tqe, func, arg, 0)

#define	TQ_ENQUEUE_FRONT(tq, tqe, func, arg)				\
	TQ_DO_ENQUEUE(tq, tqe, func, arg, 1)

/*
 * Do-nothing task which may be used to prepopulate thread caches.
 */
/*ARGSUSED*/
void
nulltask(void *unused)
{
}

/*ARGSUSED*/
static int
taskq_constructor(void *buf, void *cdrarg, int kmflags)
{
	taskq_t *tq = buf;

	bzero(tq, sizeof (taskq_t));

	mutex_init(&tq->tq_lock, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&tq->tq_threadlock, NULL, RW_DEFAULT, NULL);
	cv_init(&tq->tq_dispatch_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tq->tq_exit_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tq->tq_wait_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tq->tq_maxalloc_cv, NULL, CV_DEFAULT, NULL);

	tq->tq_task.tqent_next = &tq->tq_task;
	tq->tq_task.tqent_prev = &tq->tq_task;

	return (0);
}

/*ARGSUSED*/
static void
taskq_destructor(void *buf, void *cdrarg)
{
	taskq_t *tq = buf;

	ASSERT(tq->tq_nthreads == 0);
	ASSERT(tq->tq_buckets == NULL);
	ASSERT(tq->tq_tcreates == 0);
	ASSERT(tq->tq_tdeaths == 0);

	mutex_destroy(&tq->tq_lock);
	rw_destroy(&tq->tq_threadlock);
	cv_destroy(&tq->tq_dispatch_cv);
	cv_destroy(&tq->tq_exit_cv);
	cv_destroy(&tq->tq_wait_cv);
	cv_destroy(&tq->tq_maxalloc_cv);
}

/*ARGSUSED*/
static int
taskq_ent_constructor(void *buf, void *cdrarg, int kmflags)
{
	taskq_ent_t *tqe = buf;

	tqe->tqent_thread = NULL;
	cv_init(&tqe->tqent_cv, NULL, CV_DEFAULT, NULL);

	return (0);
}

/*ARGSUSED*/
static void
taskq_ent_destructor(void *buf, void *cdrarg)
{
	taskq_ent_t *tqe = buf;

	ASSERT(tqe->tqent_thread == NULL);
	cv_destroy(&tqe->tqent_cv);
}

void
taskq_init(void)
{
	taskq_ent_cache = kmem_cache_create("taskq_ent_cache",
	    sizeof (taskq_ent_t), 0, taskq_ent_constructor,
	    taskq_ent_destructor, NULL, NULL, NULL, 0);
	taskq_cache = kmem_cache_create("taskq_cache", sizeof (taskq_t),
	    0, taskq_constructor, taskq_destructor, NULL, NULL, NULL, 0);
	taskq_id_arena = vmem_create("taskq_id_arena",
	    (void *)1, INT32_MAX, 1, NULL, NULL, NULL, 0,
	    VM_SLEEP | VMC_IDENTIFIER);

	list_create(&taskq_cpupct_list, sizeof (taskq_t),
	    offsetof(taskq_t, tq_cpupct_link));
}

static void
taskq_update_nthreads(taskq_t *tq, uint_t ncpus)
{
	uint_t newtarget = TASKQ_THREADS_PCT(ncpus, tq->tq_threads_ncpus_pct);

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(MUTEX_HELD(&tq->tq_lock));

	/* We must be going from non-zero to non-zero; no exiting. */
	ASSERT3U(tq->tq_nthreads_target, !=, 0);
	ASSERT3U(newtarget, !=, 0);

	ASSERT3U(newtarget, <=, tq->tq_nthreads_max);
	if (newtarget != tq->tq_nthreads_target) {
		tq->tq_flags |= TASKQ_CHANGING;
		tq->tq_nthreads_target = newtarget;
		cv_broadcast(&tq->tq_dispatch_cv);
		cv_broadcast(&tq->tq_exit_cv);
	}
}

/* called during task queue creation */
static void
taskq_cpupct_install(taskq_t *tq, cpupart_t *cpup)
{
	ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);

	mutex_enter(&cpu_lock);
	mutex_enter(&tq->tq_lock);
	tq->tq_cpupart = cpup->cp_id;
	taskq_update_nthreads(tq, cpup->cp_ncpus);
	mutex_exit(&tq->tq_lock);

	list_insert_tail(&taskq_cpupct_list, tq);
	mutex_exit(&cpu_lock);
}

static void
taskq_cpupct_remove(taskq_t *tq)
{
	ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);

	mutex_enter(&cpu_lock);
	list_remove(&taskq_cpupct_list, tq);
	mutex_exit(&cpu_lock);
}

/*ARGSUSED*/
static int
taskq_cpu_setup(cpu_setup_t what, int id, void *arg)
{
	taskq_t *tq;
	cpupart_t *cp = cpu[id]->cpu_part;
	uint_t ncpus = cp->cp_ncpus;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(ncpus > 0);

	switch (what) {
	case CPU_OFF:
	case CPU_CPUPART_OUT:
		/* offlines are called *before* the cpu is offlined. */
		if (ncpus > 1)
			ncpus--;
		break;

	case CPU_ON:
	case CPU_CPUPART_IN:
		break;

	default:
		return (0);		/* doesn't affect cpu count */
	}

	for (tq = list_head(&taskq_cpupct_list); tq != NULL;
	    tq = list_next(&taskq_cpupct_list, tq)) {

		mutex_enter(&tq->tq_lock);
		/*
		 * If the taskq is part of the cpuset which is changing,
		 * update its nthreads_target.
		 */
		if (tq->tq_cpupart == cp->cp_id) {
			taskq_update_nthreads(tq, ncpus);
		}
		mutex_exit(&tq->tq_lock);
	}
	return (0);
}

void
taskq_mp_init(void)
{
	mutex_enter(&cpu_lock);
	register_cpu_setup_func(taskq_cpu_setup, NULL);
	/*
	 * Make sure we're up to date.  At this point in boot, there is only
	 * one processor set, so we only have to update the current CPU.
	 */
	(void) taskq_cpu_setup(CPU_ON, CPU->cpu_id, NULL);
	mutex_exit(&cpu_lock);
}

/*
 * Create global system dynamic task queue.
 */
void
system_taskq_init(void)
{
	system_taskq = taskq_create_common("system_taskq", 0,
	    system_taskq_size * max_ncpus, minclsyspri, 4, 512, &p0, 0,
	    TASKQ_DYNAMIC | TASKQ_PREPOPULATE);
}

/*
 * taskq_ent_alloc()
 *
 * Allocates a new taskq_ent_t structure either from the free list or from the
 * cache. Returns NULL if it can't be allocated.
 *
 * Assumes: tq->tq_lock is held.
 */
static taskq_ent_t *
taskq_ent_alloc(taskq_t *tq, int flags)
{
	int kmflags = (flags & TQ_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP;
	taskq_ent_t *tqe;
	clock_t wait_time;
	clock_t	wait_rv;

	ASSERT(MUTEX_HELD(&tq->tq_lock));

	/*
	 * TQ_NOALLOC allocations are allowed to use the freelist, even if
	 * we are below tq_minalloc.
	 */
again:	if ((tqe = tq->tq_freelist) != NULL &&
	    ((flags & TQ_NOALLOC) || tq->tq_nalloc >= tq->tq_minalloc)) {
		tq->tq_freelist = tqe->tqent_next;
	} else {
		if (flags & TQ_NOALLOC)
			return (NULL);

		if (tq->tq_nalloc >= tq->tq_maxalloc) {
			if (kmflags & KM_NOSLEEP)
				return (NULL);

			/*
			 * We don't want to exceed tq_maxalloc, but we can't
			 * wait for other tasks to complete (and thus free up
			 * task structures) without risking deadlock with
			 * the caller.  So, we just delay for one second
			 * to throttle the allocation rate. If tasks complete
			 * before the one-second timeout expires,
			 * taskq_ent_free() will signal us and we will
			 * immediately retry the allocation (reap the free
			 * list).
			 */
			wait_time = ddi_get_lbolt() + hz;
			while (tq->tq_freelist == NULL) {
				tq->tq_maxalloc_wait++;
				wait_rv = cv_timedwait(&tq->tq_maxalloc_cv,
				    &tq->tq_lock, wait_time);
				tq->tq_maxalloc_wait--;
				if (wait_rv == -1)
					break;
			}
			if (tq->tq_freelist)
				goto again;		/* reap freelist */
		}
		mutex_exit(&tq->tq_lock);

		tqe = kmem_cache_alloc(taskq_ent_cache, kmflags);

		mutex_enter(&tq->tq_lock);
		if (tqe != NULL)
			tq->tq_nalloc++;
	}
	return (tqe);
}

/*
 * taskq_ent_free()
 *
 * Free a taskq_ent_t structure by either putting it on the free list or
 * freeing it to the cache.
 *
 * Assumes: tq->tq_lock is held.
 */
static void
taskq_ent_free(taskq_t *tq, taskq_ent_t *tqe)
{
	ASSERT(MUTEX_HELD(&tq->tq_lock));

	if (tq->tq_nalloc <= tq->tq_minalloc) {
		tqe->tqent_next = tq->tq_freelist;
		tq->tq_freelist = tqe;
	} else {
		tq->tq_nalloc--;
		mutex_exit(&tq->tq_lock);
		kmem_cache_free(taskq_ent_cache, tqe);
		mutex_enter(&tq->tq_lock);
	}

	if (tq->tq_maxalloc_wait)
		cv_signal(&tq->tq_maxalloc_cv);
}

/*
 * taskq_ent_exists()
 *
 * Return 1 if the taskq already has an entry for calling 'func(arg)'.
 *
 * Assumes: tq->tq_lock is held.
 */
static int
taskq_ent_exists(taskq_t *tq, task_func_t func, void *arg)
{
	taskq_ent_t	*tqe;

	ASSERT(MUTEX_HELD(&tq->tq_lock));

	for (tqe = tq->tq_task.tqent_next; tqe != &tq->tq_task;
	    tqe = tqe->tqent_next)
		if ((tqe->tqent_func == func) && (tqe->tqent_arg == arg))
			return (1);
	return (0);
}

/*
 * Dispatch a task "func(arg)" to a free entry of bucket b.
 *
 * Assumes: no bucket lock is held.
 *
 * Returns: a pointer to an entry if dispatch was successful.
 *	    NULL if there are no free entries or if the bucket is suspended.
 */
static taskq_ent_t *
taskq_bucket_dispatch(taskq_bucket_t *b, task_func_t func, void *arg)
{
	taskq_ent_t *tqe;

	ASSERT(MUTEX_NOT_HELD(&b->tqbucket_lock));
	ASSERT(func != NULL);

	mutex_enter(&b->tqbucket_lock);

	ASSERT(b->tqbucket_nfree != 0 || IS_EMPTY(b->tqbucket_freelist));
	ASSERT(b->tqbucket_nfree == 0 || !IS_EMPTY(b->tqbucket_freelist));

	/*
	 * Get an entry from the freelist if there is one.
	 * Schedule the task into the entry.
	 */
	if ((b->tqbucket_nfree != 0) &&
	    !(b->tqbucket_flags & TQBUCKET_SUSPEND)) {
		tqe = b->tqbucket_freelist.tqent_prev;

		ASSERT(tqe != &b->tqbucket_freelist);
		ASSERT(tqe->tqent_thread != NULL);

		tqe->tqent_prev->tqent_next = tqe->tqent_next;
		tqe->tqent_next->tqent_prev = tqe->tqent_prev;
		b->tqbucket_nalloc++;
		b->tqbucket_nfree--;
		tqe->tqent_func = func;
		tqe->tqent_arg = arg;
		TQ_STAT(b, tqs_hits);
		cv_signal(&tqe->tqent_cv);
		DTRACE_PROBE2(taskq__d__enqueue, taskq_bucket_t *, b,
		    taskq_ent_t *, tqe);
	} else {
		tqe = NULL;
		TQ_STAT(b, tqs_misses);
	}
	mutex_exit(&b->tqbucket_lock);
	return (tqe);
}

/*
 * Dispatch a task.
 *
 * Assumes: func != NULL
 *
 * Returns: TASKQID_INVALID if dispatch failed.
 *	    a non-zero value if the task was dispatched successfully.
 *	    The actual return value is the pointer to the taskq entry that was
 *	    used to dispatch the task. This is useful for debugging.
 */
taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
	taskq_bucket_t *bucket = NULL;	/* Which bucket needs extension */
	taskq_ent_t *tqe = NULL;
	taskq_ent_t *tqe1;
	uint_t bsize;

	ASSERT(tq != NULL);
	ASSERT(func != NULL);

	if (!(tq->tq_flags & TASKQ_DYNAMIC)) {
		/*
		 * The TQ_NOQUEUE flag can't be used with non-dynamic task
		 * queues.
		 */
		ASSERT(!(flags & TQ_NOQUEUE));
		/*
		 * Enqueue the task to the underlying queue.
		 */
		mutex_enter(&tq->tq_lock);

		TASKQ_S_RANDOM_DISPATCH_FAILURE(tq, flags);

		if ((tqe = taskq_ent_alloc(tq, flags)) == NULL) {
			tq->tq_nomem++;
			mutex_exit(&tq->tq_lock);
			return ((taskqid_t)tqe);
		}
		/* Make sure we start without any flags */
		tqe->tqent_un.tqent_flags = 0;

		if (flags & TQ_FRONT) {
			TQ_ENQUEUE_FRONT(tq, tqe, func, arg);
		} else {
			TQ_ENQUEUE(tq, tqe, func, arg);
		}
		mutex_exit(&tq->tq_lock);
		return ((taskqid_t)tqe);
	}

	/*
	 * Dynamic taskq dispatching.
	 */
	ASSERT(!(flags & (TQ_NOALLOC | TQ_FRONT)));
	TASKQ_D_RANDOM_DISPATCH_FAILURE(tq, flags);

	bsize = tq->tq_nbuckets;

	if (bsize == 1) {
		/*
		 * In the single-CPU case there is only one bucket, so get
		 * the entry directly from there.
		 */
		if ((tqe = taskq_bucket_dispatch(tq->tq_buckets, func, arg))
		    != NULL)
			return ((taskqid_t)tqe);	/* Fastpath */
		bucket = tq->tq_buckets;
	} else {
		int loopcount;
		taskq_bucket_t *b;
		uintptr_t h = ((uintptr_t)CPU + (uintptr_t)arg) >> 3;

		h = TQ_HASH(h);

		/*
		 * The 'bucket' points to the original bucket that we hit. If we
		 * can't allocate from it, we search other buckets, but only
		 * extend this one.
		 */
		b = &tq->tq_buckets[h & (bsize - 1)];
		ASSERT(b->tqbucket_taskq == tq);	/* Sanity check */

		/*
		 * Do a quick check before grabbing the lock. If the bucket does
		 * not have free entries now, chances are very small that it
		 * will after we take the lock, so we just skip it.
		 */
		if (b->tqbucket_nfree != 0) {
			if ((tqe = taskq_bucket_dispatch(b, func, arg)) != NULL)
				return ((taskqid_t)tqe);	/* Fastpath */
		} else {
			TQ_STAT(b, tqs_misses);
		}

		bucket = b;
		loopcount = MIN(taskq_search_depth, bsize);
		/*
		 * If bucket dispatch failed, search loopcount number of buckets
		 * before we give up and fail.
		 */
		do {
			b = &tq->tq_buckets[++h & (bsize - 1)];
			ASSERT(b->tqbucket_taskq == tq);  /* Sanity check */
			loopcount--;

			if (b->tqbucket_nfree != 0) {
				tqe = taskq_bucket_dispatch(b, func, arg);
			} else {
				TQ_STAT(b, tqs_misses);
			}
		} while ((tqe == NULL) && (loopcount > 0));
	}

	/*
	 * At this point we either scheduled a task (tqe != NULL) or failed
	 * (tqe == NULL). Try to recover from failures.
	 */

	/*
	 * For TQ_SLEEP dispatches, try to extend the bucket and retry dispatch.
	 */
	if ((tqe == NULL) && !(flags & TQ_NOSLEEP)) {
		/*
		 * taskq_bucket_extend() may fail to do anything, but this is
		 * fine - we deal with it later. If the bucket was successfully
		 * extended, there is a good chance that taskq_bucket_dispatch()
		 * will get this new entry, unless someone is racing with us and
		 * stealing the new entry from under our nose.
		 * taskq_bucket_extend() may sleep.
		 */
		taskq_bucket_extend(bucket);
		TQ_STAT(bucket, tqs_disptcreates);
		if ((tqe = taskq_bucket_dispatch(bucket, func, arg)) != NULL)
			return ((taskqid_t)tqe);
	}

	ASSERT(bucket != NULL);

	/*
	 * Since there are not enough free entries in the bucket, add a
	 * taskq entry to extend it in the background using the backing queue
	 * (unless we already have a taskq entry to perform that extension).
	 */
	mutex_enter(&tq->tq_lock);
	if (!taskq_ent_exists(tq, taskq_bucket_extend, bucket)) {
		if ((tqe1 = taskq_ent_alloc(tq, TQ_NOSLEEP)) != NULL) {
			TQ_ENQUEUE_FRONT(tq, tqe1, taskq_bucket_extend, bucket);
		} else {
			tq->tq_nomem++;
		}
	}

	/*
	 * Dispatch failed and we can't find an entry to schedule a task.
	 * Revert to the backing queue unless TQ_NOQUEUE was specified.
	 */
	if ((tqe == NULL) && !(flags & TQ_NOQUEUE)) {
		if ((tqe = taskq_ent_alloc(tq, flags)) != NULL) {
			TQ_ENQUEUE(tq, tqe, func, arg);
		} else {
			tq->tq_nomem++;
		}
	}
	mutex_exit(&tq->tq_lock);

	return ((taskqid_t)tqe);
}

void
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
    taskq_ent_t *tqe)
{
	ASSERT(func != NULL);
	ASSERT(!(tq->tq_flags & TASKQ_DYNAMIC));

	/*
	 * Mark it as a prealloc'd task.  This is important
	 * to ensure that we don't free it later.
	 */
	tqe->tqent_un.tqent_flags |= TQENT_FLAG_PREALLOC;
	/*
	 * Enqueue the task to the underlying queue.
	 */
	mutex_enter(&tq->tq_lock);

	if (flags & TQ_FRONT) {
		TQ_ENQUEUE_FRONT(tq, tqe, func, arg);
	} else {
		TQ_ENQUEUE(tq, tqe, func, arg);
	}
	mutex_exit(&tq->tq_lock);
}

/*
 * Allow our caller to ask if there are tasks pending on the queue.
 */
boolean_t
taskq_empty(taskq_t *tq)
{
	boolean_t rv;

	ASSERT3P(tq, !=, curthread->t_taskq);
	mutex_enter(&tq->tq_lock);
	rv = (tq->tq_task.tqent_next == &tq->tq_task) && (tq->tq_active == 0);
	mutex_exit(&tq->tq_lock);

	return (rv);
}

/*
 * Wait for all pending tasks to complete.
 * Calling taskq_wait from a task will cause deadlock.
 */
void
taskq_wait(taskq_t *tq)
{
	ASSERT(tq != curthread->t_taskq);

	mutex_enter(&tq->tq_lock);
	while (tq->tq_task.tqent_next != &tq->tq_task || tq->tq_active != 0)
		cv_wait(&tq->tq_wait_cv, &tq->tq_lock);
	mutex_exit(&tq->tq_lock);

	if (tq->tq_flags & TASKQ_DYNAMIC) {
		taskq_bucket_t *b = tq->tq_buckets;
		int bid = 0;
		for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
			mutex_enter(&b->tqbucket_lock);
			while (b->tqbucket_nalloc > 0)
				cv_wait(&b->tqbucket_cv, &b->tqbucket_lock);
			mutex_exit(&b->tqbucket_lock);
		}
	}
}

void
taskq_wait_id(taskq_t *tq, taskqid_t id __unused)
{
	taskq_wait(tq);
}

/*
 * Suspend execution of tasks.
 *
 * Tasks in the queue part will be suspended immediately upon return from this
 * function. Pending tasks in the dynamic part will continue to execute, but
 * all new tasks will be suspended.
 */
void
taskq_suspend(taskq_t *tq)
{
	rw_enter(&tq->tq_threadlock, RW_WRITER);

	if (tq->tq_flags & TASKQ_DYNAMIC) {
		taskq_bucket_t *b = tq->tq_buckets;
		int bid = 0;
		for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
			mutex_enter(&b->tqbucket_lock);
			b->tqbucket_flags |= TQBUCKET_SUSPEND;
			mutex_exit(&b->tqbucket_lock);
		}
	}
	/*
	 * Mark task queue as being suspended. Needed for taskq_suspended().
	 */
	mutex_enter(&tq->tq_lock);
	ASSERT(!(tq->tq_flags & TASKQ_SUSPENDED));
	tq->tq_flags |= TASKQ_SUSPENDED;
	mutex_exit(&tq->tq_lock);
}

/*
 * returns: 1 if tq is suspended, 0 otherwise.
 */
int
taskq_suspended(taskq_t *tq)
{
	return ((tq->tq_flags & TASKQ_SUSPENDED) != 0);
}

/*
 * Resume taskq execution.
 */
void
taskq_resume(taskq_t *tq)
{
	ASSERT(RW_WRITE_HELD(&tq->tq_threadlock));

	if (tq->tq_flags & TASKQ_DYNAMIC) {
		taskq_bucket_t *b = tq->tq_buckets;
		int bid = 0;
		for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
			mutex_enter(&b->tqbucket_lock);
			b->tqbucket_flags &= ~TQBUCKET_SUSPEND;
			mutex_exit(&b->tqbucket_lock);
		}
	}
	mutex_enter(&tq->tq_lock);
	ASSERT(tq->tq_flags & TASKQ_SUSPENDED);
	tq->tq_flags &= ~TASKQ_SUSPENDED;
	mutex_exit(&tq->tq_lock);

	rw_exit(&tq->tq_threadlock);
}

int
taskq_member(taskq_t *tq, kthread_t *thread)
{
	return (thread->t_taskq == tq);
}

/*
 * Creates a thread in the taskq.  We only allow one outstanding create at
 * a time.  We drop and reacquire the tq_lock in order to avoid blocking other
 * taskq activity while thread_create() or lwp_kernel_create() run.
 *
 * The first time we're called, we do some additional setup, and do not
 * return until there are enough threads to start servicing requests.
 */
static void
taskq_thread_create(taskq_t *tq)
{
	kthread_t	*t;
	const boolean_t	first = (tq->tq_nthreads == 0);

	ASSERT(MUTEX_HELD(&tq->tq_lock));
	ASSERT(tq->tq_flags & TASKQ_CHANGING);
	ASSERT(tq->tq_nthreads < tq->tq_nthreads_target);
	ASSERT(!(tq->tq_flags & TASKQ_THREAD_CREATED));

	tq->tq_flags |= TASKQ_THREAD_CREATED;
	tq->tq_active++;
	mutex_exit(&tq->tq_lock);

	/*
	 * With TASKQ_DUTY_CYCLE the new thread must have an LWP
	 * as explained in ../disp/sysdc.c (for the msacct data).
	 * Otherwise simple kthreads are preferred.
	 */
	if ((tq->tq_flags & TASKQ_DUTY_CYCLE) != 0) {
		/* Enforced in taskq_create_common */
		ASSERT3P(tq->tq_proc, !=, &p0);
		t = lwp_kernel_create(tq->tq_proc, taskq_thread, tq, TS_RUN,
		    tq->tq_pri);
	} else {
		t = thread_create(NULL, 0, taskq_thread, tq, 0, tq->tq_proc,
		    TS_RUN, tq->tq_pri);
	}

	if (!first) {
		mutex_enter(&tq->tq_lock);
		return;
	}

	/*
	 * We know the thread cannot go away, since tq cannot be
	 * destroyed until creation has completed.  We can therefore
	 * safely dereference t.
	 */
	if (tq->tq_flags & TASKQ_THREADS_CPU_PCT) {
		taskq_cpupct_install(tq, t->t_cpupart);
	}
	mutex_enter(&tq->tq_lock);

	/* Wait until we can service requests. */
	while (tq->tq_nthreads != tq->tq_nthreads_target &&
	    tq->tq_nthreads < TASKQ_CREATE_ACTIVE_THREADS) {
		cv_wait(&tq->tq_wait_cv, &tq->tq_lock);
	}
}

/*
 * Common "sleep taskq thread" function, which handles CPR stuff, as well
 * as giving a nice common point for debuggers to find inactive threads.
 */
static clock_t
taskq_thread_wait(taskq_t *tq, kmutex_t *mx, kcondvar_t *cv,
    callb_cpr_t *cprinfo, clock_t timeout)
{
	clock_t ret = 0;

	if (!(tq->tq_flags & TASKQ_CPR_SAFE)) {
		CALLB_CPR_SAFE_BEGIN(cprinfo);
	}
	if (timeout < 0)
		cv_wait(cv, mx);
	else
		ret = cv_reltimedwait(cv, mx, timeout, TR_CLOCK_TICK);

	if (!(tq->tq_flags & TASKQ_CPR_SAFE)) {
		CALLB_CPR_SAFE_END(cprinfo, mx);
	}

	return (ret);
}

/*
 * Worker thread for processing task queue.
 */
static void
taskq_thread(void *arg)
{
	int thread_id;

	taskq_t *tq = arg;
	taskq_ent_t *tqe;
	callb_cpr_t cprinfo;
	hrtime_t start, end;
	boolean_t freeit;

	curthread->t_taskq = tq;	/* mark ourselves for taskq_member() */

	if (curproc != &p0 && (tq->tq_flags & TASKQ_DUTY_CYCLE)) {
		sysdc_thread_enter(curthread, tq->tq_DC,
		    (tq->tq_flags & TASKQ_DC_BATCH) ? SYSDC_THREAD_BATCH : 0);
	}

	if (tq->tq_flags & TASKQ_CPR_SAFE) {
		CALLB_CPR_INIT_SAFE(curthread, tq->tq_name);
	} else {
		CALLB_CPR_INIT(&cprinfo, &tq->tq_lock, callb_generic_cpr,
		    tq->tq_name);
	}
	mutex_enter(&tq->tq_lock);
	thread_id = ++tq->tq_nthreads;
	ASSERT(tq->tq_flags & TASKQ_THREAD_CREATED);
	ASSERT(tq->tq_flags & TASKQ_CHANGING);
	tq->tq_flags &= ~TASKQ_THREAD_CREATED;

	VERIFY3S(thread_id, <=, tq->tq_nthreads_max);

	if (tq->tq_nthreads_max == 1)
		tq->tq_thread = curthread;
	else
		tq->tq_threadlist[thread_id - 1] = curthread;

	/* Allow taskq_create_common()'s taskq_thread_create() to return. */
	if (tq->tq_nthreads == TASKQ_CREATE_ACTIVE_THREADS)
		cv_broadcast(&tq->tq_wait_cv);

	for (;;) {
		if (tq->tq_flags & TASKQ_CHANGING) {
			/* See if we're no longer needed */
			if (thread_id > tq->tq_nthreads_target) {
				/*
				 * To preserve the one-to-one mapping between
				 * thread_id and thread, we must exit from
				 * highest thread ID to least.
				 *
				 * However, if everyone is exiting, the order
				 * doesn't matter, so just exit immediately.
				 * (this is safe, since you must wait for
				 * nthreads to reach 0 after setting
				 * tq_nthreads_target to 0)
				 */
				if (thread_id == tq->tq_nthreads ||
				    tq->tq_nthreads_target == 0)
					break;

				/* Wait for higher thread_ids to exit */
				(void) taskq_thread_wait(tq, &tq->tq_lock,
				    &tq->tq_exit_cv, &cprinfo, -1);
				continue;
			}

			/*
			 * If no thread is starting taskq_thread(), we can
			 * do some bookkeeping.
			 */
			if (!(tq->tq_flags & TASKQ_THREAD_CREATED)) {
				/* Check if we've reached our target */
				if (tq->tq_nthreads == tq->tq_nthreads_target) {
					tq->tq_flags &= ~TASKQ_CHANGING;
					cv_broadcast(&tq->tq_wait_cv);
				}
				/* Check if we need to create a thread */
				if (tq->tq_nthreads < tq->tq_nthreads_target) {
					taskq_thread_create(tq);
					continue; /* tq_lock was dropped */
				}
			}
		}
		if ((tqe = tq->tq_task.tqent_next) == &tq->tq_task) {
			if (--tq->tq_active == 0)
				cv_broadcast(&tq->tq_wait_cv);
			(void) taskq_thread_wait(tq, &tq->tq_lock,
			    &tq->tq_dispatch_cv, &cprinfo, -1);
			tq->tq_active++;
			continue;
		}

		tqe->tqent_prev->tqent_next = tqe->tqent_next;
		tqe->tqent_next->tqent_prev = tqe->tqent_prev;
		mutex_exit(&tq->tq_lock);

		/*
		 * For prealloc'd tasks, we don't free anything.  We
		 * have to check this now, because once we call the
		 * function for a prealloc'd taskq, we can't touch the
		 * tqent any longer (calling the function returns the
		 * ownership of the tqent back to the caller of
		 * taskq_dispatch.)
		 */
		if ((!(tq->tq_flags & TASKQ_DYNAMIC)) &&
		    (tqe->tqent_un.tqent_flags & TQENT_FLAG_PREALLOC)) {
			/* clear pointers to assist assertion checks */
			tqe->tqent_next = tqe->tqent_prev = NULL;
			freeit = B_FALSE;
		} else {
			freeit = B_TRUE;
		}

		rw_enter(&tq->tq_threadlock, RW_READER);
		start = gethrtime();
		DTRACE_PROBE2(taskq__exec__start, taskq_t *, tq,
		    taskq_ent_t *, tqe);
		tqe->tqent_func(tqe->tqent_arg);
		DTRACE_PROBE2(taskq__exec__end, taskq_t *, tq,
		    taskq_ent_t *, tqe);
		end = gethrtime();
		rw_exit(&tq->tq_threadlock);

		mutex_enter(&tq->tq_lock);
		tq->tq_totaltime += end - start;
		tq->tq_executed++;

		if (freeit)
			taskq_ent_free(tq, tqe);
	}

	if (tq->tq_nthreads_max == 1)
		tq->tq_thread = NULL;
	else
		tq->tq_threadlist[thread_id - 1] = NULL;

	/* We're exiting, and therefore no longer active */
	ASSERT(tq->tq_active > 0);
	tq->tq_active--;

	ASSERT(tq->tq_nthreads > 0);
	tq->tq_nthreads--;

	/* Wake up anyone waiting for us to exit */
	cv_broadcast(&tq->tq_exit_cv);
	if (tq->tq_nthreads == tq->tq_nthreads_target) {
		if (!(tq->tq_flags & TASKQ_THREAD_CREATED))
			tq->tq_flags &= ~TASKQ_CHANGING;

		cv_broadcast(&tq->tq_wait_cv);
	}

	ASSERT(!(tq->tq_flags & TASKQ_CPR_SAFE));
	CALLB_CPR_EXIT(&cprinfo);		/* drops tq->tq_lock */
	if (curthread->t_lwp != NULL) {
		mutex_enter(&curproc->p_lock);
		lwp_exit();
	} else {
		thread_exit();
	}
}

/*
 * Worker per-entry thread for dynamic dispatches.
 */
static void
taskq_d_thread(taskq_ent_t *tqe)
{
	taskq_bucket_t	*bucket = tqe->tqent_un.tqent_bucket;
	taskq_t		*tq = bucket->tqbucket_taskq;
	kmutex_t	*lock = &bucket->tqbucket_lock;
	kcondvar_t	*cv = &tqe->tqent_cv;
	callb_cpr_t	cprinfo;
	clock_t		w = 0;

	CALLB_CPR_INIT(&cprinfo, lock, callb_generic_cpr, tq->tq_name);

	mutex_enter(lock);

	for (;;) {
		/*
		 * If a task is scheduled (func != NULL), execute it, otherwise
		 * sleep, waiting for a job.
		 */
		if (tqe->tqent_func != NULL) {
			hrtime_t	start;
			hrtime_t	end;

			ASSERT(bucket->tqbucket_nalloc > 0);

			/*
			 * It is possible to free the entry right away before
			 * actually executing the task so that subsequent
			 * dispatches may immediately reuse it. But this,
			 * effectively, creates a queue of length two within
			 * the entry and may lead to a deadlock if the
			 * execution of the current task depends on the
			 * execution of the next scheduled task. So, we keep
			 * the entry busy until the task is processed.
			 */

			mutex_exit(lock);
			start = gethrtime();
			DTRACE_PROBE3(taskq__d__exec__start, taskq_t *, tq,
			    taskq_bucket_t *, bucket, taskq_ent_t *, tqe);
			tqe->tqent_func(tqe->tqent_arg);
			DTRACE_PROBE3(taskq__d__exec__end, taskq_t *, tq,
			    taskq_bucket_t *, bucket, taskq_ent_t *, tqe);
			end = gethrtime();
			mutex_enter(lock);
			bucket->tqbucket_totaltime += end - start;

			/*
			 * Return the entry to the bucket free list.
			 */
			tqe->tqent_func = NULL;
			TQ_APPEND(bucket->tqbucket_freelist, tqe);
			bucket->tqbucket_nalloc--;
			bucket->tqbucket_nfree++;
			ASSERT(!IS_EMPTY(bucket->tqbucket_freelist));
			/*
			 * taskq_wait() waits for nalloc to drop to zero on
			 * tqbucket_cv.
			 */
			cv_signal(&bucket->tqbucket_cv);
		}

		/*
		 * At this point the entry must be in the bucket free list -
		 * either because it was there initially or because it just
		 * finished executing a task and put itself on the free list.
		 */
		ASSERT(bucket->tqbucket_nfree > 0);
		/*
		 * Go to sleep unless we are closing.
		 * If a thread is sleeping too long, it dies.
		 */
		if (! (bucket->tqbucket_flags & TQBUCKET_CLOSE)) {
			w = taskq_thread_wait(tq, lock, cv,
			    &cprinfo, taskq_thread_timeout * hz);
		}

		/*
		 * At this point we may be in two different states:
		 *
		 * (1) tqent_func is set, which means that a new task was
		 *	dispatched and we need to execute it.
		 *
		 * (2) The thread slept for too long, or we are closing. In
		 *	both cases destroy the thread and the entry.
		 */

		/* If func is NULL we should be on the freelist. */
		ASSERT((tqe->tqent_func != NULL) ||
		    (bucket->tqbucket_nfree > 0));
		/* If func is non-NULL we should be allocated */
		ASSERT((tqe->tqent_func == NULL) ||
		    (bucket->tqbucket_nalloc > 0));

		/* Check freelist consistency */
		ASSERT((bucket->tqbucket_nfree > 0) ||
		    IS_EMPTY(bucket->tqbucket_freelist));
		ASSERT((bucket->tqbucket_nfree == 0) ||
		    !IS_EMPTY(bucket->tqbucket_freelist));

		if ((tqe->tqent_func == NULL) &&
		    ((w == -1) || (bucket->tqbucket_flags & TQBUCKET_CLOSE))) {
			/*
			 * This thread is sleeping for too long or we are
			 * closing - time to die.
			 * Thread creation/destruction happens rarely,
			 * so grabbing the lock is not a big performance issue.
			 * The bucket lock is dropped by CALLB_CPR_EXIT().
			 */

			/* Remove the entry from the free list. */
			tqe->tqent_prev->tqent_next = tqe->tqent_next;
			tqe->tqent_next->tqent_prev = tqe->tqent_prev;
			ASSERT(bucket->tqbucket_nfree > 0);
			bucket->tqbucket_nfree--;

			TQ_STAT(bucket, tqs_tdeaths);
			cv_signal(&bucket->tqbucket_cv);
			tqe->tqent_thread = NULL;
			mutex_enter(&tq->tq_lock);
			tq->tq_tdeaths++;
			mutex_exit(&tq->tq_lock);
			CALLB_CPR_EXIT(&cprinfo);
			kmem_cache_free(taskq_ent_cache, tqe);
			thread_exit();
		}
	}
}


/*
 * Taskq creation. May sleep for memory.
 * Always use automatically generated instances to avoid kstat name space
 * collisions.
 */

taskq_t *
taskq_create(const char *name, int nthreads, pri_t pri, int minalloc,
    int maxalloc, uint_t flags)
{
	ASSERT((flags & ~TASKQ_INTERFACE_FLAGS) == 0);

	return (taskq_create_common(name, 0, nthreads, pri, minalloc,
	    maxalloc, &p0, 0, flags | TASKQ_NOINSTANCE));
}
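
/*
 * Example (a sketch; my_func and my_arg are hypothetical): create a queue
 * of four threads at minclsyspri, prepopulated with four entries, and
 * dispatch work to it.  taskq_dispatch() returns TASKQID_INVALID when a
 * TQ_NOSLEEP dispatch cannot allocate an entry:
 *
 *	taskq_t *tq = taskq_create("my_taskq", 4, minclsyspri,
 *	    4, INT_MAX, TASKQ_PREPOPULATE);
 *
 *	if (taskq_dispatch(tq, my_func, my_arg, TQ_NOSLEEP) ==
 *	    TASKQID_INVALID) {
 *		(handle the transient failure: retry later, or do the
 *		work inline)
 *	}
 */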

/*
 * Create an instance of a task queue. It is legal to create task queues with
 * the same name and different instances.
 *
 * taskq_create_instance is used by ddi_taskq_create() where it gets the
 * instance from ddi_get_instance(). In some cases the instance is not
 * initialized and is set to -1. This case is handled as if no instance was
 * passed at all.
 */
taskq_t *
taskq_create_instance(const char *name, int instance, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
{
	ASSERT((flags & ~TASKQ_INTERFACE_FLAGS) == 0);
	ASSERT((instance >= 0) || (instance == -1));

	if (instance < 0) {
		flags |= TASKQ_NOINSTANCE;
	}

	return (taskq_create_common(name, instance, nthreads,
	    pri, minalloc, maxalloc, &p0, 0, flags));
}
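
/*
 * Example (a sketch mirroring what ddi_taskq_create() does; "dip" is an
 * assumed driver dev_info pointer): pass the DDI instance through so that
 * kstats for multiple instances of the same driver do not collide:
 *
 *	int inst = ddi_get_instance(dip);
 *	taskq_t *tq = taskq_create_instance("mydrv_taskq", inst, 1,
 *	    minclsyspri, 1, INT_MAX, 0);
 */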

taskq_t *
taskq_create_proc(const char *name, int nthreads, pri_t pri, int minalloc,
    int maxalloc, proc_t *proc, uint_t flags)
{
	ASSERT((flags & ~TASKQ_INTERFACE_FLAGS) == 0);
	ASSERT(proc->p_flag & SSYS);

	return (taskq_create_common(name, 0, nthreads, pri, minalloc,
	    maxalloc, proc, 0, flags | TASKQ_NOINSTANCE));
}

taskq_t *
taskq_create_sysdc(const char *name, int nthreads, int minalloc,
    int maxalloc, proc_t *proc, uint_t dc, uint_t flags)
{
	ASSERT((flags & ~TASKQ_INTERFACE_FLAGS) == 0);
	ASSERT(proc->p_flag & SSYS);

	return (taskq_create_common(name, 0, nthreads, minclsyspri, minalloc,
	    maxalloc, proc, dc, flags | TASKQ_NOINSTANCE | TASKQ_DUTY_CYCLE));
}

static taskq_t *
taskq_create_common(const char *name, int instance, int nthreads, pri_t pri,
    int minalloc, int maxalloc, proc_t *proc, uint_t dc, uint_t flags)
{
	taskq_t *tq = kmem_cache_alloc(taskq_cache, KM_SLEEP);
	uint_t ncpus = ((boot_max_ncpus == -1) ? max_ncpus : boot_max_ncpus);
	uint_t bsize;	/* # of buckets - always power of 2 */
	int max_nthreads;

	/*
	 * TASKQ_DYNAMIC, TASKQ_CPR_SAFE and TASKQ_THREADS_CPU_PCT are all
	 * mutually incompatible.
	 */
	IMPLY((flags & TASKQ_DYNAMIC), !(flags & TASKQ_CPR_SAFE));
	IMPLY((flags & TASKQ_DYNAMIC), !(flags & TASKQ_THREADS_CPU_PCT));
	IMPLY((flags & TASKQ_CPR_SAFE), !(flags & TASKQ_THREADS_CPU_PCT));

	/* Cannot have DYNAMIC with DUTY_CYCLE */
	IMPLY((flags & TASKQ_DYNAMIC), !(flags & TASKQ_DUTY_CYCLE));

	/* Cannot have DUTY_CYCLE with a p0 kernel process */
	IMPLY((flags & TASKQ_DUTY_CYCLE), proc != &p0);

	/* Cannot have DC_BATCH without DUTY_CYCLE */
	ASSERT((flags & (TASKQ_DUTY_CYCLE|TASKQ_DC_BATCH)) != TASKQ_DC_BATCH);

	ASSERT(proc != NULL);

	bsize = 1 << (highbit(ncpus) - 1);
	ASSERT(bsize >= 1);
	bsize = MIN(bsize, taskq_maxbuckets);

	if (flags & TASKQ_DYNAMIC) {
		ASSERT3S(nthreads, >=, 1);
		tq->tq_maxsize = nthreads;

		/* For dynamic task queues use just one backup thread */
		nthreads = max_nthreads = 1;

	} else if (flags & TASKQ_THREADS_CPU_PCT) {
		uint_t pct;
		ASSERT3S(nthreads, >=, 0);
		pct = nthreads;

		if (pct > taskq_cpupct_max_percent)
			pct = taskq_cpupct_max_percent;

		/*
		 * If you're using THREADS_CPU_PCT, the process for the
		 * taskq threads must be curproc.  This allows any pset
		 * binding to be inherited correctly.  If proc is &p0,
		 * we won't be creating LWPs, so new threads will be assigned
		 * to the default processor set.
		 */
		ASSERT(curproc == proc || proc == &p0);
		tq->tq_threads_ncpus_pct = pct;
		nthreads = 1;		/* corrected in taskq_thread_create() */
		max_nthreads = TASKQ_THREADS_PCT(max_ncpus, pct);

	} else {
		ASSERT3S(nthreads, >=, 1);
		max_nthreads = nthreads;
	}

	if (max_nthreads < taskq_minimum_nthreads_max)
		max_nthreads = taskq_minimum_nthreads_max;

	/*
	 * Make sure the name is 0-terminated, and conforms to the rules for
	 * C identifiers
	 */
	(void) strncpy(tq->tq_name, name, TASKQ_NAMELEN + 1);
	strident_canon(tq->tq_name, TASKQ_NAMELEN + 1);

	tq->tq_flags = flags | TASKQ_CHANGING;
	tq->tq_active = 0;
	tq->tq_instance = instance;
	tq->tq_nthreads_target = nthreads;
	tq->tq_nthreads_max = max_nthreads;
	tq->tq_minalloc = minalloc;
	tq->tq_maxalloc = maxalloc;
	tq->tq_nbuckets = bsize;
	tq->tq_proc = proc;
	tq->tq_pri = pri;
	tq->tq_DC = dc;
	list_link_init(&tq->tq_cpupct_link);

	if (max_nthreads > 1)
		tq->tq_threadlist = kmem_alloc(
		    sizeof (kthread_t *) * max_nthreads, KM_SLEEP);

	mutex_enter(&tq->tq_lock);
	if (flags & TASKQ_PREPOPULATE) {
		while (minalloc-- > 0)
			taskq_ent_free(tq, taskq_ent_alloc(tq, TQ_SLEEP));
	}

	/*
	 * Before we start creating threads for this taskq, take a
	 * zone hold so the zone can't go away before taskq_destroy
	 * makes sure all the taskq threads are gone.  This hold is
	 * similar in purpose to those taken by zthread_create().
	 */
	zone_hold(tq->tq_proc->p_zone);

	/*
	 * Create the first thread, which will create any other threads
	 * necessary.  taskq_thread_create will not return until we have
	 * enough threads to be able to process requests.
	 */
	taskq_thread_create(tq);
	mutex_exit(&tq->tq_lock);

	if (flags & TASKQ_DYNAMIC) {
		taskq_bucket_t *bucket = kmem_zalloc(sizeof (taskq_bucket_t) *
		    bsize, KM_SLEEP);
		int b_id;

		tq->tq_buckets = bucket;

		/* Initialize each bucket */
		for (b_id = 0; b_id < bsize; b_id++, bucket++) {
			mutex_init(&bucket->tqbucket_lock, NULL, MUTEX_DEFAULT,
			    NULL);
			cv_init(&bucket->tqbucket_cv, NULL, CV_DEFAULT, NULL);
			bucket->tqbucket_taskq = tq;
			bucket->tqbucket_freelist.tqent_next =
			    bucket->tqbucket_freelist.tqent_prev =
			    &bucket->tqbucket_freelist;
			if (flags & TASKQ_PREPOPULATE)
				taskq_bucket_extend(bucket);
		}
	}

	/*
	 * Install kstats.
	 * We have two cases:
	 *   1) Instance is provided to taskq_create_instance(). In this case
	 *	it should be >= 0 and we use it.
	 *
	 *   2) Instance is not provided and is automatically generated.
	 */
	if (flags & TASKQ_NOINSTANCE) {
		instance = tq->tq_instance =
		    (int)(uintptr_t)vmem_alloc(taskq_id_arena, 1, VM_SLEEP);
	}

	if (flags & TASKQ_DYNAMIC) {
		if ((tq->tq_kstat = kstat_create("unix", instance,
		    tq->tq_name, "taskq_d", KSTAT_TYPE_NAMED,
		    sizeof (taskq_d_kstat) / sizeof (kstat_named_t),
		    KSTAT_FLAG_VIRTUAL)) != NULL) {
			tq->tq_kstat->ks_lock = &taskq_d_kstat_lock;
			tq->tq_kstat->ks_data = &taskq_d_kstat;
			tq->tq_kstat->ks_update = taskq_d_kstat_update;
			tq->tq_kstat->ks_private = tq;
			kstat_install(tq->tq_kstat);
		}
	} else {
		if ((tq->tq_kstat = kstat_create("unix", instance, tq->tq_name,
		    "taskq", KSTAT_TYPE_NAMED,
		    sizeof (taskq_kstat) / sizeof (kstat_named_t),
		    KSTAT_FLAG_VIRTUAL)) != NULL) {
			tq->tq_kstat->ks_lock = &taskq_kstat_lock;
			tq->tq_kstat->ks_data = &taskq_kstat;
			tq->tq_kstat->ks_update = taskq_kstat_update;
			tq->tq_kstat->ks_private = tq;
			kstat_install(tq->tq_kstat);
		}
	}

	return (tq);
}

/*
 * taskq_destroy().
 *
 * Assumes: by the time taskq_destroy is called no one will use this task queue
 * in any way and no one will try to dispatch entries in it.
 */
void
taskq_destroy(taskq_t *tq)
{
	taskq_bucket_t *b = tq->tq_buckets;
	int bid = 0;

	ASSERT(! (tq->tq_flags & TASKQ_CPR_SAFE));

	/*
	 * Destroy kstats.
	 */
	if (tq->tq_kstat != NULL) {
		kstat_delete(tq->tq_kstat);
		tq->tq_kstat = NULL;
	}

	/*
	 * Destroy instance if needed.
	 */
	if (tq->tq_flags & TASKQ_NOINSTANCE) {
		vmem_free(taskq_id_arena, (void *)(uintptr_t)(tq->tq_instance),
		    1);
		tq->tq_instance = 0;
	}

	/*
	 * Unregister from the cpupct list.
	 */
	if (tq->tq_flags & TASKQ_THREADS_CPU_PCT) {
		taskq_cpupct_remove(tq);
	}

	/*
	 * Wait for any pending entries to complete.
	 */
	taskq_wait(tq);

	mutex_enter(&tq->tq_lock);
	ASSERT((tq->tq_task.tqent_next == &tq->tq_task) &&
	    (tq->tq_active == 0));

	/* notify all the threads that they need to exit */
	tq->tq_nthreads_target = 0;

	tq->tq_flags |= TASKQ_CHANGING;
	cv_broadcast(&tq->tq_dispatch_cv);
	cv_broadcast(&tq->tq_exit_cv);

	while (tq->tq_nthreads != 0)
		cv_wait(&tq->tq_wait_cv, &tq->tq_lock);

	if (tq->tq_nthreads_max != 1)
		kmem_free(tq->tq_threadlist, sizeof (kthread_t *) *
		    tq->tq_nthreads_max);

	tq->tq_minalloc = 0;
	while (tq->tq_nalloc != 0)
		taskq_ent_free(tq, taskq_ent_alloc(tq, TQ_SLEEP));

	mutex_exit(&tq->tq_lock);

	/*
	 * Mark each bucket as closing and wakeup all sleeping threads.
	 */
	for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
		taskq_ent_t *tqe;

		mutex_enter(&b->tqbucket_lock);

		b->tqbucket_flags |= TQBUCKET_CLOSE;
		/* Wakeup all sleeping threads */

		for (tqe = b->tqbucket_freelist.tqent_next;
		    tqe != &b->tqbucket_freelist; tqe = tqe->tqent_next)
			cv_signal(&tqe->tqent_cv);

		ASSERT(b->tqbucket_nalloc == 0);

		/*
		 * At this point we have waited for all pending jobs to
		 * complete (in both the task queue and the bucket) and no
		 * new jobs should arrive. Wait for all threads to die.
		 */
		while (b->tqbucket_nfree > 0)
			cv_wait(&b->tqbucket_cv, &b->tqbucket_lock);
		mutex_exit(&b->tqbucket_lock);
		mutex_destroy(&b->tqbucket_lock);
		cv_destroy(&b->tqbucket_cv);
	}

	if (tq->tq_buckets != NULL) {
		ASSERT(tq->tq_flags & TASKQ_DYNAMIC);
		kmem_free(tq->tq_buckets,
		    sizeof (taskq_bucket_t) * tq->tq_nbuckets);

		/* Cleanup fields before returning tq to the cache */
		tq->tq_buckets = NULL;
		tq->tq_tcreates = 0;
		tq->tq_tdeaths = 0;
	} else {
		ASSERT(!(tq->tq_flags & TASKQ_DYNAMIC));
	}

	/*
	 * Now that all the taskq threads are gone, we can
	 * drop the zone hold taken in taskq_create_common
	 */
	zone_rele(tq->tq_proc->p_zone);

	tq->tq_threads_ncpus_pct = 0;
	tq->tq_totaltime = 0;
	tq->tq_tasks = 0;
	tq->tq_maxtasks = 0;
	tq->tq_executed = 0;
	kmem_cache_free(taskq_cache, tq);
}
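
/*
 * Typical shutdown sequence (a sketch): the caller must first guarantee
 * that nothing will dispatch to the queue again; taskq_destroy() itself
 * then waits out pending tasks and reaps the worker threads:
 *
 *	(quiesce all producers of tq first)
 *	taskq_destroy(tq);
 *	tq = NULL;
 */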

/*
 * Extend a bucket with a new entry on the free list and attach a worker thread
 * to it.
 *
 * Argument: pointer to the bucket.
 *
 * This function may quietly fail. It is only used by taskq_dispatch() which
 * handles such failures properly.
 */
static void
taskq_bucket_extend(void *arg)
{
	taskq_ent_t *tqe;
	taskq_bucket_t *b = (taskq_bucket_t *)arg;
	taskq_t *tq = b->tqbucket_taskq;
	int nthreads;

	mutex_enter(&tq->tq_lock);

	if (! ENOUGH_MEMORY()) {
		tq->tq_nomem++;
		mutex_exit(&tq->tq_lock);
		return;
	}

	/*
	 * Observe global taskq limits on the number of threads.
	 */
	if (tq->tq_tcreates++ - tq->tq_tdeaths > tq->tq_maxsize) {
		tq->tq_tcreates--;
		mutex_exit(&tq->tq_lock);
		return;
	}
	mutex_exit(&tq->tq_lock);

	tqe = kmem_cache_alloc(taskq_ent_cache, KM_NOSLEEP);

	if (tqe == NULL) {
		mutex_enter(&tq->tq_lock);
		tq->tq_nomem++;
		tq->tq_tcreates--;
		mutex_exit(&tq->tq_lock);
		return;
	}

	ASSERT(tqe->tqent_thread == NULL);

	tqe->tqent_un.tqent_bucket = b;

	/*
	 * Create a thread in a TS_STOPPED state first. If it is successfully
	 * created, place the entry on the free list and start the thread.
	 */
	tqe->tqent_thread = thread_create(NULL, 0, taskq_d_thread, tqe,
	    0, tq->tq_proc, TS_STOPPED, tq->tq_pri);

	/*
	 * Once the entry is ready, link it to the bucket free list.
	 */
	mutex_enter(&b->tqbucket_lock);
	tqe->tqent_func = NULL;
	TQ_APPEND(b->tqbucket_freelist, tqe);
	b->tqbucket_nfree++;
	TQ_STAT(b, tqs_tcreates);

#if TASKQ_STATISTIC
	nthreads = b->tqbucket_stat.tqs_tcreates -
	    b->tqbucket_stat.tqs_tdeaths;
	b->tqbucket_stat.tqs_maxthreads = MAX(nthreads,
	    b->tqbucket_stat.tqs_maxthreads);
#endif

	mutex_exit(&b->tqbucket_lock);
	/*
	 * Start the stopped thread.
	 */
	thread_lock(tqe->tqent_thread);
	tqe->tqent_thread->t_taskq = tq;
	tqe->tqent_thread->t_schedflag |= TS_ALLSTART;
	setrun_locked(tqe->tqent_thread);
	thread_unlock(tqe->tqent_thread);
}
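
/*
 * A caller-side sketch of how such a quiet failure is absorbed (the real
 * logic lives in taskq_dispatch(); bucket selection and locking are elided
 * here): if no free entry appears even after extending the bucket, the
 * dispatcher either fails the dispatch (TQ_NOQUEUE) or falls back to the
 * backing queue:
 *
 *	taskq_bucket_extend(bucket);
 *	if ((tqe = taskq_bucket_dispatch(bucket, func, arg)) == NULL) {
 *		if (flags & TQ_NOQUEUE)
 *			return (TASKQID_INVALID);
 *		(enqueue the task on the backing queue instead)
 *	}
 */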

static int
taskq_kstat_update(kstat_t *ksp, int rw)
{
	struct taskq_kstat *tqsp = &taskq_kstat;
	taskq_t *tq = ksp->ks_private;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	tqsp->tq_pid.value.ui64 = tq->tq_proc->p_pid;
	tqsp->tq_tasks.value.ui64 = tq->tq_tasks;
	tqsp->tq_executed.value.ui64 = tq->tq_executed;
	tqsp->tq_maxtasks.value.ui64 = tq->tq_maxtasks;
	tqsp->tq_totaltime.value.ui64 = tq->tq_totaltime;
	tqsp->tq_nactive.value.ui64 = tq->tq_active;
	tqsp->tq_nalloc.value.ui64 = tq->tq_nalloc;
	tqsp->tq_pri.value.ui64 = tq->tq_pri;
	tqsp->tq_nthreads.value.ui64 = tq->tq_nthreads;
	tqsp->tq_nomem.value.ui64 = tq->tq_nomem;
	return (0);
}

static int
taskq_d_kstat_update(kstat_t *ksp, int rw)
{
	struct taskq_d_kstat *tqsp = &taskq_d_kstat;
	taskq_t *tq = ksp->ks_private;
	taskq_bucket_t *b = tq->tq_buckets;
	int bid = 0;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	ASSERT(tq->tq_flags & TASKQ_DYNAMIC);

	tqsp->tqd_btasks.value.ui64 = tq->tq_tasks;
	tqsp->tqd_bexecuted.value.ui64 = tq->tq_executed;
	tqsp->tqd_bmaxtasks.value.ui64 = tq->tq_maxtasks;
	tqsp->tqd_bnalloc.value.ui64 = tq->tq_nalloc;
	tqsp->tqd_bnactive.value.ui64 = tq->tq_active;
	tqsp->tqd_btotaltime.value.ui64 = tq->tq_totaltime;
	tqsp->tqd_pri.value.ui64 = tq->tq_pri;
	tqsp->tqd_nomem.value.ui64 = tq->tq_nomem;

	/*
	 * Zero the per-bucket aggregates before summing them below.
	 * tqd_nomem is taken from tq_nomem above and must not be zeroed
	 * here, since it is never accumulated in the bucket loop.
	 */
	tqsp->tqd_hits.value.ui64 = 0;
	tqsp->tqd_misses.value.ui64 = 0;
	tqsp->tqd_overflows.value.ui64 = 0;
	tqsp->tqd_tcreates.value.ui64 = 0;
	tqsp->tqd_tdeaths.value.ui64 = 0;
	tqsp->tqd_maxthreads.value.ui64 = 0;
	tqsp->tqd_disptcreates.value.ui64 = 0;
	tqsp->tqd_totaltime.value.ui64 = 0;
	tqsp->tqd_nalloc.value.ui64 = 0;
	tqsp->tqd_nfree.value.ui64 = 0;

	for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
		tqsp->tqd_hits.value.ui64 += b->tqbucket_stat.tqs_hits;
		tqsp->tqd_misses.value.ui64 += b->tqbucket_stat.tqs_misses;
		tqsp->tqd_overflows.value.ui64 += b->tqbucket_stat.tqs_overflow;
		tqsp->tqd_tcreates.value.ui64 += b->tqbucket_stat.tqs_tcreates;
		tqsp->tqd_tdeaths.value.ui64 += b->tqbucket_stat.tqs_tdeaths;
		tqsp->tqd_maxthreads.value.ui64 +=
		    b->tqbucket_stat.tqs_maxthreads;
		tqsp->tqd_disptcreates.value.ui64 +=
		    b->tqbucket_stat.tqs_disptcreates;
		tqsp->tqd_totaltime.value.ui64 += b->tqbucket_totaltime;
		tqsp->tqd_nalloc.value.ui64 += b->tqbucket_nalloc;
		tqsp->tqd_nfree.value.ui64 += b->tqbucket_nfree;
	}
	return (0);
}