/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Copyright 2017 Joyent, Inc.
 */

/*
 * Squeues: General purpose serialization mechanism
 * ------------------------------------------------
 *
 * Background:
 * -----------
 *
 * This is a general purpose high-performance serialization mechanism
 * currently used by TCP/IP. It is implemented by means of a per-CPU
 * queue, a worker thread and a polling thread which are bound to the
 * CPU associated with the squeue. The squeue is strictly FIFO for both
 * read and write side and only one thread can process it at any given
 * time. The design goal of the squeue was to offer a very high degree
 * of parallelization (on a per H/W execution pipeline basis) with at
 * most one queueing.
 *
 * A module needing protection typically calls the SQUEUE_ENTER_ONE()
 * or SQUEUE_ENTER() macro as soon as a thread enters the module
 * from either direction. For each packet, the processing function
 * and argument are stored in the mblk itself. When the packet is ready
 * to be processed, the squeue retrieves the stored function and calls
 * it with the supplied argument and the pointer to the packet itself.
 * The called function can assume that no other thread is processing
 * the squeue when it is executing.
 *
 * Squeue/connection binding:
 * --------------------------
 *
 * TCP/IP uses an IP classifier in conjunction with squeues, where specific
 * connections are assigned to specific squeues (based on various policies)
 * at connection creation time. Once assigned, the connection-to-squeue
 * mapping is never changed and all future packets for that
 * connection are processed on that squeue. The connection ("conn") to
 * squeue mapping is stored in the "conn_t" member "conn_sqp".
 *
 * Since the processing of a connection cuts across multiple layers
 * but still allows packets for different connections to be processed on
 * other CPUs/squeues, squeues are also termed a "Vertical Perimeter" or
 * "Per Connection Vertical Perimeter".
 *
 * Processing Model:
 * -----------------
 *
 * An squeue doesn't necessarily process packets with its own worker
 * thread. Callers can choose to just queue the packet, to process
 * their packet if nothing is queued, or to drain and process. The first
 * two modes are typically employed when the packet was generated while
 * already doing the processing behind the squeue, and the last mode
 * (drain and process) is typically employed when the thread is entering
 * the squeue for the first time. The squeue still imposes a finite time
 * limit for which an external thread can do processing, after which it
 * switches processing to its own worker thread.
 *
 * Once created, squeues are never deleted. Hence squeue pointers are
 * always valid. This means that functions outside the squeue can still
 * refer safely to conn_sqp and there is no need for ref counts.
 *
 * Only a thread executing in the squeue can change the squeue of the
 * connection. It does so by calling a squeue framework function.
 * After changing the squeue, the thread must leave the squeue. It must not
 * continue to execute any code that needs squeue protection.
 *
 * The squeue framework, after entering the squeue, checks if the current
 * squeue matches the conn_sqp. If the check fails, the packet is delivered
 * to the right squeue.
 *
 * Polling Model:
 * --------------
 *
 * An squeue can control the rate of packet arrival into itself from the
 * NIC or a specific Rx ring within a NIC. As part of capability negotiation
 * between IP and the MAC layer, squeues are created for each TCP soft ring
 * (or TCP Rx ring - to be implemented in the future). As part of this
 * negotiation, squeues get a cookie for the underlying soft ring or Rx
 * ring, a function to turn off incoming packets and a function to call
 * to poll for packets. This helps schedule the receive side packet
 * processing so that the queue backlog doesn't build up and packet
 * processing doesn't keep getting disturbed by high priority interrupts.
 * In this mode, as soon as a backlog starts building, the squeue turns
 * off interrupts and switches to poll mode. In poll mode, when the poll
 * thread goes down to retrieve packets, it retrieves them in the form of
 * a chain, which improves performance even more. As the squeue/softring
 * system gets more packets, it gets more efficient by switching to
 * polling more often and dealing with larger packet chains.
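 *
 * As an illustrative summary (field names as used later in this file),
 * the negotiated state is cached from the ill_rx_ring_t in sq_rx_ring:
 *
 *	rr_rx_handle			cookie for the soft ring / Rx ring
 *	rr_rx				poll function, returning an mblk
 *					chain of up to a byte limit
 *	rr_intr_disable/rr_intr_enable	enter/leave poll mode
 *	rr_ip_accept			IP's check that a polled chain is
 *					really meant for this squeue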
 *
 */

#include <sys/types.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/kmem.h>
#include <sys/cpuvar.h>
#include <sys/condvar_impl.h>
#include <sys/systm.h>
#include <sys/callb.h>
#include <sys/sdt.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/stack.h>
#include <sys/archsystm.h>

#include <inet/ipclassifier.h>
#include <inet/udp_impl.h>

#include <sys/squeue_impl.h>

static void squeue_drain(squeue_t *, uint_t, hrtime_t);
static void squeue_worker(squeue_t *sqp);
static void squeue_polling_thread(squeue_t *sqp);
static void squeue_worker_wakeup(squeue_t *sqp);
static void squeue_try_drain_one(squeue_t *, conn_t *);

kmem_cache_t *squeue_cache;

#define	SQUEUE_MSEC_TO_NSEC 1000000

int squeue_drain_ms = 20;

/* The value above, converted to nanoseconds */
static uint_t squeue_drain_ns = 0;

uintptr_t squeue_drain_stack_needed = 10240;
uint_t squeue_drain_stack_toodeep;

#define	MAX_BYTES_TO_PICKUP	150000

#define	ENQUEUE_CHAIN(sqp, mp, tail, cnt) {			\
	/*							\
	 * Enqueue our mblk chain.				\
	 */							\
	ASSERT(MUTEX_HELD(&(sqp)->sq_lock));			\
								\
	if ((sqp)->sq_last != NULL)				\
		(sqp)->sq_last->b_next = (mp);			\
	else							\
		(sqp)->sq_first = (mp);				\
	(sqp)->sq_last = (tail);				\
	(sqp)->sq_count += (cnt);				\
	ASSERT((sqp)->sq_count > 0);				\
	DTRACE_PROBE4(squeue__enqueuechain, squeue_t *, sqp,	\
		mblk_t *, mp, mblk_t *, tail, int, cnt);	\
								\
}
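
/*
 * Illustrative use of ENQUEUE_CHAIN() (hypothetical locals): mp..tail
 * must already be linked through b_next and cnt must match, e.g. for a
 * two-packet chain:
 *
 *	mp1->b_next = mp2;
 *	mutex_enter(&sqp->sq_lock);
 *	ENQUEUE_CHAIN(sqp, mp1, mp2, 2);
 *	mutex_exit(&sqp->sq_lock);
 */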

/*
 * Blank the receive ring (in this case it is the soft ring). When
 * blanked, the soft ring will not send any more packets up.
 * Blanking may not succeed when there is a CPU already in the soft
 * ring sending packets up. In that case, SQS_POLLING will not be
 * set.
 */
#define	SQS_POLLING_ON(sqp, sq_poll_capable, rx_ring) {		\
	ASSERT(MUTEX_HELD(&(sqp)->sq_lock));			\
	if (sq_poll_capable) {					\
		ASSERT(rx_ring != NULL);			\
		ASSERT(sqp->sq_state & SQS_POLL_CAPAB);		\
		if (!(sqp->sq_state & SQS_POLLING)) {		\
			if (rx_ring->rr_intr_disable(rx_ring->rr_intr_handle)) \
				sqp->sq_state |= SQS_POLLING;	\
		}						\
	}							\
}

#define	SQS_POLLING_OFF(sqp, sq_poll_capable, rx_ring) {	\
	ASSERT(MUTEX_HELD(&(sqp)->sq_lock));			\
	if (sq_poll_capable) {					\
		ASSERT(rx_ring != NULL);			\
		ASSERT(sqp->sq_state & SQS_POLL_CAPAB);		\
		if (sqp->sq_state & SQS_POLLING) {		\
			sqp->sq_state &= ~SQS_POLLING;		\
			rx_ring->rr_intr_enable(rx_ring->rr_intr_handle); \
		}						\
	}							\
}

/* Wake up the poll thread only if SQS_POLLING is set */
#define	SQS_POLL_RING(sqp) {			\
	ASSERT(MUTEX_HELD(&(sqp)->sq_lock));			\
	if (sqp->sq_state & SQS_POLLING) {			\
		ASSERT(sqp->sq_state & SQS_POLL_CAPAB);		\
		if (!(sqp->sq_state & SQS_GET_PKTS)) {		\
			sqp->sq_state |= SQS_GET_PKTS;		\
			cv_signal(&sqp->sq_poll_cv);		\
		}						\
	}							\
}

#ifdef DEBUG
#define	SQUEUE_DBG_SET(sqp, mp, proc, connp, tag) {		\
	(sqp)->sq_curmp = (mp);					\
	(sqp)->sq_curproc = (proc);				\
	(sqp)->sq_connp = (connp);				\
	(mp)->b_tag = (sqp)->sq_tag = (tag);			\
}

#define	SQUEUE_DBG_CLEAR(sqp)	{				\
	(sqp)->sq_curmp = NULL;					\
	(sqp)->sq_curproc = NULL;				\
	(sqp)->sq_connp = NULL;					\
}
#else
#define	SQUEUE_DBG_SET(sqp, mp, proc, connp, tag)
#define	SQUEUE_DBG_CLEAR(sqp)
#endif

void
squeue_init(void)
{
	squeue_cache = kmem_cache_create("squeue_cache",
	    sizeof (squeue_t), 64, NULL, NULL, NULL, NULL, NULL, 0);

	squeue_drain_ns = squeue_drain_ms * SQUEUE_MSEC_TO_NSEC;
}

squeue_t *
squeue_create(pri_t pri)
{
	squeue_t *sqp = kmem_cache_alloc(squeue_cache, KM_SLEEP);

	bzero(sqp, sizeof (squeue_t));
	sqp->sq_bind = PBIND_NONE;
	sqp->sq_priority = pri;
	sqp->sq_worker = thread_create(NULL, 0, squeue_worker,
	    sqp, 0, &p0, TS_RUN, pri);

	sqp->sq_poll_thr = thread_create(NULL, 0, squeue_polling_thread,
	    sqp, 0, &p0, TS_RUN, pri);

	sqp->sq_enter = squeue_enter;
	sqp->sq_drain = squeue_drain;

	return (sqp);
}
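
/*
 * Illustrative sketch (hypothetical caller, not taken from this file):
 * creating an squeue and binding its worker thread; squeue_bind()
 * asserts that cpu_lock is held.
 *
 *	squeue_t *sqp = squeue_create(maxclsyspri);
 *	mutex_enter(&cpu_lock);
 *	squeue_bind(sqp, cpu_id);
 *	mutex_exit(&cpu_lock);
 */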

/*
 * Bind the squeue worker thread to the specified CPU, given by CPU id.
 * If the CPU id value is PBIND_NONE, bind the worker thread to the
 * CPU specified in the sq_bind field. If a thread is already bound to
 * a different CPU, unbind it from the old CPU and bind it to the new
 * one.
 */

void
squeue_bind(squeue_t *sqp, processorid_t bind)
{
	mutex_enter(&sqp->sq_lock);
	ASSERT(sqp->sq_bind != PBIND_NONE || bind != PBIND_NONE);
	ASSERT(MUTEX_HELD(&cpu_lock));

	if (sqp->sq_state & SQS_BOUND) {
		if (sqp->sq_bind == bind) {
			mutex_exit(&sqp->sq_lock);
			return;
		}
		thread_affinity_clear(sqp->sq_worker);
	} else {
		sqp->sq_state |= SQS_BOUND;
	}

	if (bind != PBIND_NONE)
		sqp->sq_bind = bind;

	thread_affinity_set(sqp->sq_worker, sqp->sq_bind);
	mutex_exit(&sqp->sq_lock);
}

void
squeue_unbind(squeue_t *sqp)
{
	mutex_enter(&sqp->sq_lock);
	if (!(sqp->sq_state & SQS_BOUND)) {
		mutex_exit(&sqp->sq_lock);
		return;
	}

	sqp->sq_state &= ~SQS_BOUND;
	thread_affinity_clear(sqp->sq_worker);
	mutex_exit(&sqp->sq_lock);
}

/*
 * squeue_enter() - enter squeue sqp with mblk mp (which can be
 * a chain), while tail points to the end and cnt is the number of
 * mblks in the chain.
 *
 * For a chain of a single packet (i.e. mp == tail), go through the
 * fast path if no one is processing the squeue and nothing is queued.
 *
 * The proc and arg for each mblk are already stored in the mblk in
 * the appropriate places.
 *
 * The process_flag specifies if we are allowed to process the mblk
 * and drain in the entering thread context. If process_flag is
 * SQ_FILL, then we just queue the mblk and return (after signaling
 * the worker thread if no one else is processing the squeue).
 *
 * The ira argument can be used when the count is one.
 * For a chain the caller needs to prepend any needed mblks from
 * ip_recv_attr_to_mblk().
 */
/* ARGSUSED */
void
squeue_enter(squeue_t *sqp, mblk_t *mp, mblk_t *tail, uint32_t cnt,
    ip_recv_attr_t *ira, int process_flag, uint8_t tag)
{
	conn_t		*connp;
	sqproc_t	proc;
	hrtime_t	now;

	ASSERT(sqp != NULL);
	ASSERT(mp != NULL);
	ASSERT(tail != NULL);
	ASSERT(cnt > 0);
	ASSERT(MUTEX_NOT_HELD(&sqp->sq_lock));
	ASSERT(ira == NULL || cnt == 1);

	mutex_enter(&sqp->sq_lock);

	/*
	 * Try to process the packet if the SQ_FILL flag is not set and
	 * we are allowed to process the squeue. SQ_NODRAIN is
	 * ignored if the packet chain consists of more than 1 packet.
	 */
	if (!(sqp->sq_state & SQS_PROC) && ((process_flag == SQ_PROCESS) ||
	    (process_flag == SQ_NODRAIN && sqp->sq_first == NULL))) {
		/*
		 * See if anything is already queued. If we are the
		 * first packet, do inline processing, else queue the
		 * packet and do the drain.
		 */
		if (sqp->sq_first == NULL && cnt == 1) {
			/*
			 * Fast-path, ok to process and nothing queued.
			 */
			sqp->sq_state |= (SQS_PROC|SQS_FAST);
			sqp->sq_run = curthread;
			mutex_exit(&sqp->sq_lock);

			/*
			 * We have a chain of one packet, so
			 * go through this fast path.
			 */
			ASSERT(mp->b_prev != NULL);
			ASSERT(mp->b_queue != NULL);
			connp = (conn_t *)mp->b_prev;
			mp->b_prev = NULL;
			proc = (sqproc_t)mp->b_queue;
			mp->b_queue = NULL;
			ASSERT(proc != NULL && connp != NULL);
			ASSERT(mp->b_next == NULL);

			/*
			 * Handle squeue switching. More details in the
			 * block comment at the top of the file.
			 */
			if (connp->conn_sqp == sqp) {
				SQUEUE_DBG_SET(sqp, mp, proc, connp,
				    tag);
				connp->conn_on_sqp = B_TRUE;
				DTRACE_PROBE3(squeue__proc__start, squeue_t *,
				    sqp, mblk_t *, mp, conn_t *, connp);
				(*proc)(connp, mp, sqp, ira);
				DTRACE_PROBE2(squeue__proc__end, squeue_t *,
				    sqp, conn_t *, connp);
				connp->conn_on_sqp = B_FALSE;
				SQUEUE_DBG_CLEAR(sqp);
				CONN_DEC_REF(connp);
			} else {
				SQUEUE_ENTER_ONE(connp->conn_sqp, mp, proc,
				    connp, ira, SQ_FILL, SQTAG_SQUEUE_CHANGE);
			}
			ASSERT(MUTEX_NOT_HELD(&sqp->sq_lock));
			mutex_enter(&sqp->sq_lock);
			sqp->sq_state &= ~(SQS_PROC|SQS_FAST);
			sqp->sq_run = NULL;
			if (sqp->sq_first == NULL ||
			    process_flag == SQ_NODRAIN) {
				/*
				 * Even if SQ_NODRAIN was specified, it may
				 * still be best to process a single queued
				 * item if it matches the active connection.
				 */
				if (sqp->sq_first != NULL) {
					squeue_try_drain_one(sqp, connp);
				}

				/*
				 * If work or control actions are pending, wake
				 * up the worker thread.
				 */
				if (sqp->sq_first != NULL ||
				    sqp->sq_state & SQS_WORKER_THR_CONTROL) {
					squeue_worker_wakeup(sqp);
				}
				mutex_exit(&sqp->sq_lock);
				return;
			}
		} else {
			if (ira != NULL) {
				mblk_t	*attrmp;

				ASSERT(cnt == 1);
				attrmp = ip_recv_attr_to_mblk(ira);
				if (attrmp == NULL) {
					mutex_exit(&sqp->sq_lock);
					ip_drop_input("squeue: "
					    "ip_recv_attr_to_mblk",
					    mp, NULL);
					/* Caller already set b_prev/b_next */
					mp->b_prev = mp->b_next = NULL;
					freemsg(mp);
					return;
				}
				ASSERT(attrmp->b_cont == NULL);
				attrmp->b_cont = mp;
				/* Move connp and func to new */
				attrmp->b_queue = mp->b_queue;
				mp->b_queue = NULL;
				attrmp->b_prev = mp->b_prev;
				mp->b_prev = NULL;

				ASSERT(mp == tail);
				tail = mp = attrmp;
			}

			ENQUEUE_CHAIN(sqp, mp, tail, cnt);
#ifdef DEBUG
			mp->b_tag = tag;
#endif
		}
		/*
		 * We are here because either we couldn't do inline
		 * processing (because something was already queued),
		 * or we had a chain of more than one packet,
		 * or something else arrived after we were done with
		 * inline processing.
		 */
		ASSERT(MUTEX_HELD(&sqp->sq_lock));
		ASSERT(sqp->sq_first != NULL);
		now = gethrtime();
		sqp->sq_run = curthread;
		sqp->sq_drain(sqp, SQS_ENTER, now + squeue_drain_ns);

		/*
		 * If we didn't do a complete drain, the worker
		 * thread was already signalled by squeue_drain.
		 * In case any control actions are pending, wake
		 * up the worker.
		 */
		sqp->sq_run = NULL;
		if (sqp->sq_state & SQS_WORKER_THR_CONTROL) {
			squeue_worker_wakeup(sqp);
		}
	} else {
		/*
		 * We let a thread processing an squeue reenter only
		 * once. This helps the case of an incoming connection,
		 * where a SYN-ACK-ACK that triggers the conn_ind
		 * doesn't have to queue the packet if the listener and
		 * eager are on the same squeue. It also helps the
		 * loopback connection, where the two ends are bound
		 * to the same squeue (which is typical on single
		 * CPU machines).
		 *
		 * We let the thread reenter only once for fear of the
		 * stack getting blown with multiple traversals.
		 */
		connp = (conn_t *)mp->b_prev;
		if (!(sqp->sq_state & SQS_REENTER) &&
		    (process_flag != SQ_FILL) && (sqp->sq_first == NULL) &&
		    (sqp->sq_run == curthread) && (cnt == 1) &&
		    (connp->conn_on_sqp == B_FALSE)) {
			sqp->sq_state |= SQS_REENTER;
			mutex_exit(&sqp->sq_lock);

			ASSERT(mp->b_prev != NULL);
			ASSERT(mp->b_queue != NULL);

			mp->b_prev = NULL;
			proc = (sqproc_t)mp->b_queue;
			mp->b_queue = NULL;

			/*
			 * Handle squeue switching. More details in the
			 * block comment at the top of the file.
			 */
			if (connp->conn_sqp == sqp) {
				connp->conn_on_sqp = B_TRUE;
				DTRACE_PROBE3(squeue__proc__start, squeue_t *,
				    sqp, mblk_t *, mp, conn_t *, connp);
				(*proc)(connp, mp, sqp, ira);
				DTRACE_PROBE2(squeue__proc__end, squeue_t *,
				    sqp, conn_t *, connp);
				connp->conn_on_sqp = B_FALSE;
				CONN_DEC_REF(connp);
			} else {
				SQUEUE_ENTER_ONE(connp->conn_sqp, mp, proc,
				    connp, ira, SQ_FILL, SQTAG_SQUEUE_CHANGE);
			}

			mutex_enter(&sqp->sq_lock);
			sqp->sq_state &= ~SQS_REENTER;
			mutex_exit(&sqp->sq_lock);
			return;
		}

		/*
		 * The queue is already being processed or there are
		 * already one or more packets on the queue. Enqueue
		 * the packet and wake up the squeue worker thread if
		 * the squeue is not being processed.
		 */
#ifdef DEBUG
		mp->b_tag = tag;
#endif
		if (ira != NULL) {
			mblk_t	*attrmp;

			ASSERT(cnt == 1);
			attrmp = ip_recv_attr_to_mblk(ira);
			if (attrmp == NULL) {
				mutex_exit(&sqp->sq_lock);
				ip_drop_input("squeue: ip_recv_attr_to_mblk",
				    mp, NULL);
				/* Caller already set b_prev/b_next */
				mp->b_prev = mp->b_next = NULL;
				freemsg(mp);
				return;
			}
			ASSERT(attrmp->b_cont == NULL);
			attrmp->b_cont = mp;
			/* Move connp and func to new */
			attrmp->b_queue = mp->b_queue;
			mp->b_queue = NULL;
			attrmp->b_prev = mp->b_prev;
			mp->b_prev = NULL;

			ASSERT(mp == tail);
			tail = mp = attrmp;
		}
		ENQUEUE_CHAIN(sqp, mp, tail, cnt);
		/*
		 * If the worker isn't running or control actions are
		 * pending, wake it up now.
		 */
		if ((sqp->sq_state & SQS_PROC) == 0 ||
		    (sqp->sq_state & SQS_WORKER_THR_CONTROL) != 0) {
			squeue_worker_wakeup(sqp);
		}
	}
	mutex_exit(&sqp->sq_lock);
}

/*
 * PRIVATE FUNCTIONS
 */

/*
 * Wake up the worker thread for the squeue to process queued work.
 */
static void
squeue_worker_wakeup(squeue_t *sqp)
{
	ASSERT(MUTEX_HELD(&(sqp)->sq_lock));

	cv_signal(&sqp->sq_worker_cv);
	sqp->sq_awoken = gethrtime();
}

static void
squeue_drain(squeue_t *sqp, uint_t proc_type, hrtime_t expire)
{
	mblk_t		*mp;
	mblk_t		*head;
	sqproc_t	proc;
	conn_t		*connp;
	ill_rx_ring_t	*sq_rx_ring = sqp->sq_rx_ring;
	hrtime_t	now;
	boolean_t	sq_poll_capable;
	ip_recv_attr_t	*ira, iras;

	/*
	 * Before doing any work, check our stack depth; if we're not a
	 * worker thread for this squeue and we're beginning to get tight
	 * on stack, kick the worker, bump a counter and return.
	 */
	if (proc_type != SQS_WORKER && STACK_BIAS + (uintptr_t)getfp() -
	    (uintptr_t)curthread->t_stkbase < squeue_drain_stack_needed) {
		ASSERT(mutex_owned(&sqp->sq_lock));
		squeue_worker_wakeup(sqp);
		squeue_drain_stack_toodeep++;
		return;
	}

	sq_poll_capable = (sqp->sq_state & SQS_POLL_CAPAB) != 0;
again:
	ASSERT(mutex_owned(&sqp->sq_lock));
	ASSERT(!(sqp->sq_state & (SQS_POLL_THR_QUIESCED |
	    SQS_POLL_QUIESCE_DONE)));

	head = sqp->sq_first;
	sqp->sq_first = NULL;
	sqp->sq_last = NULL;
	sqp->sq_count = 0;

	sqp->sq_state |= SQS_PROC | proc_type;

	/*
	 * We have a backlog built up. Switch to polling mode if the
	 * device underneath allows it. We need to do this so that
	 * more packets don't come in and disturb us (by contending
	 * for sq_lock or by a higher priority thread preempting us).
	 *
	 * The worker thread is allowed to do active polling, while we
	 * just disable the interrupts for a drain by non-worker (kernel
	 * or userland) threads, so they can peacefully process the
	 * packets during the time allocated to them.
	 */
	SQS_POLLING_ON(sqp, sq_poll_capable, sq_rx_ring);
	mutex_exit(&sqp->sq_lock);

	while ((mp = head) != NULL) {
		head = mp->b_next;
		mp->b_next = NULL;

		proc = (sqproc_t)mp->b_queue;
		mp->b_queue = NULL;
		connp = (conn_t *)mp->b_prev;
		mp->b_prev = NULL;

		/* Is there an ip_recv_attr_t to handle? */
		if (ip_recv_attr_is_mblk(mp)) {
			mblk_t	*attrmp = mp;

			ASSERT(attrmp->b_cont != NULL);

			mp = attrmp->b_cont;
			attrmp->b_cont = NULL;
			ASSERT(mp->b_queue == NULL);
			ASSERT(mp->b_prev == NULL);

			if (!ip_recv_attr_from_mblk(attrmp, &iras)) {
				/* The ill or ip_stack_t disappeared on us */
				ip_drop_input("ip_recv_attr_from_mblk",
				    mp, NULL);
				ira_cleanup(&iras, B_TRUE);
				CONN_DEC_REF(connp);
				continue;
			}
			ira = &iras;
		} else {
			ira = NULL;
		}

		/*
		 * Handle squeue switching. More details in the
		 * block comment at the top of the file.
		 */
		if (connp->conn_sqp == sqp) {
			SQUEUE_DBG_SET(sqp, mp, proc, connp,
			    mp->b_tag);
			connp->conn_on_sqp = B_TRUE;
			DTRACE_PROBE3(squeue__proc__start, squeue_t *,
			    sqp, mblk_t *, mp, conn_t *, connp);
			(*proc)(connp, mp, sqp, ira);
			DTRACE_PROBE2(squeue__proc__end, squeue_t *,
			    sqp, conn_t *, connp);
			connp->conn_on_sqp = B_FALSE;
			CONN_DEC_REF(connp);
		} else {
			SQUEUE_ENTER_ONE(connp->conn_sqp, mp, proc, connp, ira,
			    SQ_FILL, SQTAG_SQUEUE_CHANGE);
		}
		if (ira != NULL)
			ira_cleanup(ira, B_TRUE);
	}

	SQUEUE_DBG_CLEAR(sqp);

	mutex_enter(&sqp->sq_lock);

	/*
	 * Check if there is still work to do (either more arrived or the
	 * timer expired). If we are the worker thread and we are polling
	 * capable, continue doing the work since no one else is around to
	 * do the work anyway (but signal the poll thread to retrieve some
	 * packets in the meanwhile). If we are not the worker thread, just
	 * signal the worker thread to take up the work if processing time
	 * has expired.
	 */
	if (sqp->sq_first != NULL) {
		/*
		 * Still more to process. If the time quantum has not
		 * expired, we should let the drain go on. The worker
		 * thread is allowed to drain as long as there is anything
		 * left.
		 */
		now = gethrtime();
		if ((now < expire) || (proc_type == SQS_WORKER)) {
			/*
			 * If the time has not expired, or if we are the
			 * worker thread and this squeue is polling capable,
			 * continue to do the drain.
			 *
			 * We turn off interrupts for all userland threads
			 * doing drain but we do active polling only for
			 * the worker thread.
			 *
			 * Calling SQS_POLL_RING() even in the case of
			 * SQS_POLLING_ON() not succeeding is ok as
			 * SQS_POLL_RING() will not wake up the poll thread
			 * if the SQS_POLLING bit is not set.
			 */
			if (proc_type == SQS_WORKER)
				SQS_POLL_RING(sqp);
			goto again;
		}

		squeue_worker_wakeup(sqp);
	}

	/*
	 * If the poll thread is already running, just return. The
	 * poll thread continues to hold the proc and will finish
	 * processing.
	 */
	if (sqp->sq_state & SQS_GET_PKTS) {
		ASSERT(!(sqp->sq_state & (SQS_POLL_THR_QUIESCED |
		    SQS_POLL_QUIESCE_DONE)));
		sqp->sq_state &= ~proc_type;
		return;
	}

	/*
	 * If we are the worker thread and no work is left, send the poll
	 * thread down once more to see if something arrived. Otherwise,
	 * turn the interrupts back on and we are done.
	 */
	if ((proc_type == SQS_WORKER) && (sqp->sq_state & SQS_POLLING)) {
		/*
		 * Do one last check to see if anything arrived
		 * in the NIC. We leave the SQS_PROC set to ensure
		 * that the poll thread keeps the PROC and can decide
		 * if it needs to turn polling off or continue
		 * processing.
		 *
		 * If we drop the SQS_PROC here and the poll thread comes
		 * up empty handed, it can not safely turn polling off
		 * since someone else could have acquired the PROC
		 * and started draining. The previously running poll
		 * thread and the current thread doing drain would end
		 * up in a race for turning polling on/off and more
		 * complex code would be required to deal with it.
		 *
		 * It's a lot simpler for the drain to hand the SQS_PROC
		 * to the poll thread (if running) and let the poll thread
		 * finish without worrying about racing with any other
		 * thread.
		 */
		ASSERT(!(sqp->sq_state & (SQS_POLL_THR_QUIESCED |
		    SQS_POLL_QUIESCE_DONE)));
		SQS_POLL_RING(sqp);
		sqp->sq_state &= ~proc_type;
	} else {
		/*
		 * The squeue is either not capable of polling or the
		 * attempt to blank (i.e., turn SQS_POLLING_ON()) was
		 * unsuccessful or the poll thread already finished
		 * processing and didn't find anything. Since there
		 * is nothing queued and we already turned polling on
		 * (for all threads doing a drain), we should turn
		 * polling off and relinquish the PROC.
		 */
		ASSERT(!(sqp->sq_state & (SQS_POLL_THR_QUIESCED |
		    SQS_POLL_QUIESCE_DONE)));
		SQS_POLLING_OFF(sqp, sq_poll_capable, sq_rx_ring);
		sqp->sq_state &= ~(SQS_PROC | proc_type);
		/*
		 * If we are not the worker and there is a pending quiesce
		 * event, wake up the worker.
		 */
		if ((proc_type != SQS_WORKER) &&
		    (sqp->sq_state & SQS_WORKER_THR_CONTROL)) {
			squeue_worker_wakeup(sqp);
		}
	}
}

/*
 * Quiesce, Restart, or Cleanup of the squeue poll thread.
 *
 * Quiesce and Restart: After an squeue poll thread has been quiesced, it does
 * not attempt to poll the underlying soft ring any more. The quiesce is
 * triggered by the mac layer when it wants to quiesce a soft ring. Typically
 * control operations such as changing the fanout of a NIC or VNIC (dladm
 * setlinkprop) need to quiesce data flow before changing the wiring.
 * The operation is done by the mac layer, but it calls back into IP to
 * quiesce the soft ring. After completing the operation (say increase or
 * decrease of the fanout) the mac layer then calls back into IP to restart
 * the quiesced soft ring.
 *
 * Cleanup: This is triggered when the squeue binding to a soft ring is
 * removed permanently. Typically interface plumb and unplumb would trigger
 * this. It can also be triggered from the mac layer when a soft ring is
 * being deleted say as the result of a fanout reduction. Since squeues are
 * never deleted, the cleanup marks the squeue as fit for recycling and
 * moves it to the zeroth squeue set.
 */
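/*
 * A sketch of the control handshake as implemented by
 * squeue_poll_thr_control() below: the requester sets
 * SQS_POLL_THR_QUIESCE (or, on a quiesced squeue, SQS_POLL_THR_RESTART)
 * and wakes the poll thread via sq_poll_cv; the poll thread acknowledges
 * by setting or clearing SQS_POLL_THR_QUIESCED (restoring SQS_POLL_CAPAB
 * on restart) and signalling sq_worker_cv.
 */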
static void
squeue_poll_thr_control(squeue_t *sqp)
{
	if (sqp->sq_state & SQS_POLL_THR_RESTART) {
		/* Restart implies a previous quiesce */
		ASSERT(sqp->sq_state & SQS_POLL_THR_QUIESCED);
		sqp->sq_state &= ~(SQS_POLL_THR_QUIESCED |
		    SQS_POLL_THR_RESTART);
		sqp->sq_state |= SQS_POLL_CAPAB;
		cv_signal(&sqp->sq_worker_cv);
		return;
	}

	if (sqp->sq_state & SQS_POLL_THR_QUIESCE) {
		sqp->sq_state |= SQS_POLL_THR_QUIESCED;
		sqp->sq_state &= ~SQS_POLL_THR_QUIESCE;
		cv_signal(&sqp->sq_worker_cv);
		return;
	}
}

/*
 * POLLING Notes
 *
 * With polling mode, we want to do as much processing as we possibly can
 * in worker thread context. The sweet spot is the worker thread keeps doing
 * work all the time in polling mode and writers etc. keep dumping packets
 * to the worker thread. Occasionally, we send the poll thread (running at
 * lower priority) down to the NIC to get a chain of packets to feed to the
 * worker. Sending the poll thread down to the NIC depends on three criteria:
 *
 * 1) It is always driven from squeue_drain and only if the worker thread is
 *	doing the drain.
 * 2) We cleared the backlog once and more packets arrived in between.
 *	Before starting the drain again, send the poll thread down if
 *	the drain is being done by the worker thread.
 * 3) Before exiting the squeue_drain, if the poll thread is not already
 *	working and we are the worker thread, try to poll one more time.
 *
 * For latency's sake, we do allow any thread calling squeue_enter
 * to process its packet provided:
 *
 * 1) Nothing is queued
 * 2) If more packets arrived in between, the non-worker threads are allowed
 *	to do the drain till their time quantum expires, provided
 *	SQS_GET_PKTS wasn't set in between.
 *
 * Avoiding deadlocks with interrupts
 * ==================================
 *
 * One of the big problems is that we can't send the poll thread down while
 * holding the sq_lock since the thread can block. So we drop the sq_lock
 * before calling sq_get_pkts(). We keep holding the SQS_PROC as long as
 * the poll thread is running so that no other thread can acquire the
 * perimeter in between. If the squeue_drain gets done (no more work
 * left), it leaves the SQS_PROC set if the poll thread is running.
 */

/*
 * This is the squeue poll thread. In poll mode, it polls the underlying
 * TCP softring and feeds packets into the squeue. The worker thread then
 * drains the squeue. The poll thread also responds to control signals for
 * quiescing, restarting, or cleanup of an squeue. These are driven by
 * control operations like plumb/unplumb or as a result of dynamic Rx ring
 * related operations that are driven from the mac layer.
 */
static void
squeue_polling_thread(squeue_t *sqp)
{
	kmutex_t *lock = &sqp->sq_lock;
	kcondvar_t *async = &sqp->sq_poll_cv;
	ip_mac_rx_t sq_get_pkts;
	ip_accept_t ip_accept;
	ill_rx_ring_t *sq_rx_ring;
	ill_t *sq_ill;
	mblk_t *head, *tail, *mp;
	uint_t cnt;
	void *sq_mac_handle;
	callb_cpr_t cprinfo;
	size_t bytes_to_pickup;
	uint32_t ctl_state;

	CALLB_CPR_INIT(&cprinfo, lock, callb_generic_cpr, "sq_poll");
	mutex_enter(lock);

	for (;;) {
		CALLB_CPR_SAFE_BEGIN(&cprinfo);
		cv_wait(async, lock);
		CALLB_CPR_SAFE_END(&cprinfo, lock);

		ctl_state = sqp->sq_state & (SQS_POLL_THR_CONTROL |
		    SQS_POLL_THR_QUIESCED);
		if (ctl_state != 0) {
			/*
			 * If the squeue is quiesced, then wait for a control
			 * request. A quiesced squeue must not poll the
			 * underlying soft ring.
			 */
			if (ctl_state == SQS_POLL_THR_QUIESCED)
				continue;
			/*
			 * Act on control requests to quiesce, cleanup or
			 * restart an squeue
			 */
			squeue_poll_thr_control(sqp);
			continue;
		}

		if (!(sqp->sq_state & SQS_POLL_CAPAB))
			continue;

		ASSERT((sqp->sq_state &
		    (SQS_PROC|SQS_POLLING|SQS_GET_PKTS)) ==
		    (SQS_PROC|SQS_POLLING|SQS_GET_PKTS));

poll_again:
		sq_rx_ring = sqp->sq_rx_ring;
		sq_get_pkts = sq_rx_ring->rr_rx;
		sq_mac_handle = sq_rx_ring->rr_rx_handle;
		ip_accept = sq_rx_ring->rr_ip_accept;
		sq_ill = sq_rx_ring->rr_ill;
		bytes_to_pickup = MAX_BYTES_TO_PICKUP;
		mutex_exit(lock);
		head = sq_get_pkts(sq_mac_handle, bytes_to_pickup);
		mp = NULL;
		if (head != NULL) {
			/*
			 * We got the packet chain from the mac layer. It
			 * would be nice to be able to process it inline
			 * for better performance but we need to give
			 * IP a chance to look at this chain to ensure
			 * that packets are really meant for this squeue
			 * and do the IP processing.
			 */
			mp = ip_accept(sq_ill, sq_rx_ring, sqp, head,
			    &tail, &cnt);
		}
		mutex_enter(lock);
		if (mp != NULL) {
			/*
			 * The ip_accept function has already added an
			 * ip_recv_attr_t mblk if that is needed.
			 */
			ENQUEUE_CHAIN(sqp, mp, tail, cnt);
		}
		ASSERT((sqp->sq_state &
		    (SQS_PROC|SQS_POLLING|SQS_GET_PKTS)) ==
		    (SQS_PROC|SQS_POLLING|SQS_GET_PKTS));
9867c478bd9Sstevel@tonic-gate 
987da14cebeSEric Cheng 		if (sqp->sq_first != NULL && !(sqp->sq_state & SQS_WORKER)) {
9887c478bd9Sstevel@tonic-gate 			/*
989da14cebeSEric Cheng 			 * We have packets to process and the worker thread
990da14cebeSEric Cheng 			 * is not running. Check to see if the poll thread is
991da14cebeSEric Cheng 			 * allowed to process; let it do the processing only if
992da14cebeSEric Cheng 			 * it picked up some packets from the NIC, otherwise
993da14cebeSEric Cheng 			 * wake up the worker thread.
9947c478bd9Sstevel@tonic-gate 			 */
995da14cebeSEric Cheng 			if (mp != NULL) {
996da14cebeSEric Cheng 				hrtime_t  now;
997da14cebeSEric Cheng 
998da14cebeSEric Cheng 				now = gethrtime();
999da14cebeSEric Cheng 				sqp->sq_run = curthread;
1000da14cebeSEric Cheng 				sqp->sq_drain(sqp, SQS_POLL_PROC, now +
1001da14cebeSEric Cheng 				    squeue_drain_ns);
1002da14cebeSEric Cheng 				sqp->sq_run = NULL;
1003da14cebeSEric Cheng 
1004da14cebeSEric Cheng 				if (sqp->sq_first == NULL)
1005da14cebeSEric Cheng 					goto poll_again;
10067c478bd9Sstevel@tonic-gate 
10077c478bd9Sstevel@tonic-gate 				/*
1008da14cebeSEric Cheng 				 * Couldn't do the entire drain because the
1009da14cebeSEric Cheng 				 * time limit expired, let the
1010da14cebeSEric Cheng 				 * worker thread take over.
10117c478bd9Sstevel@tonic-gate 				 */
10127c478bd9Sstevel@tonic-gate 			}
10137c478bd9Sstevel@tonic-gate 
1014da14cebeSEric Cheng 			/*
1015da14cebeSEric Cheng 			 * Set SQS_PROC_HELD so the worker thread can
1016da14cebeSEric Cheng 			 * distinguish where it is called from. We could
1017da14cebeSEric Cheng 			 * remove the SQS_PROC flag here and turn off
1018da14cebeSEric Cheng 			 * polling so that it wouldn't matter who gets the
1019da14cebeSEric Cheng 			 * processing, but we get better performance this way
1020da14cebeSEric Cheng 			 * and save the cost of turning polling off and
1021da14cebeSEric Cheng 			 * possibly back on as soon as we start draining again.
1022da14cebeSEric Cheng 			 *
1023da14cebeSEric Cheng 			 * We can't remove the SQS_PROC flag without turning
1024da14cebeSEric Cheng 			 * polling off until we can guarantee that control
1025da14cebeSEric Cheng 			 * will return to squeue_drain immediately.
1026da14cebeSEric Cheng 			 */
1027da14cebeSEric Cheng 			sqp->sq_state |= SQS_PROC_HELD;
1028da14cebeSEric Cheng 			sqp->sq_state &= ~SQS_GET_PKTS;
1029233fee3fSPatrick Mooney 			squeue_worker_wakeup(sqp);
1030da14cebeSEric Cheng 		} else if (sqp->sq_first == NULL &&
1031da14cebeSEric Cheng 		    !(sqp->sq_state & SQS_WORKER)) {
1032da14cebeSEric Cheng 			/*
1033da14cebeSEric Cheng 			 * Nothing queued and worker thread not running.
1034da14cebeSEric Cheng 			 * Since we hold the proc, no other thread is
1035da14cebeSEric Cheng 			 * processing the squeue. This means that there
1036da14cebeSEric Cheng 			 * is no work to be done and nothing is queued
1037da14cebeSEric Cheng 			 * in the squeue or in the NIC. Turn polling off and go
1038da14cebeSEric Cheng 			 * back to interrupt mode.
1039da14cebeSEric Cheng 			 */
1040da14cebeSEric Cheng 			sqp->sq_state &= ~(SQS_PROC|SQS_GET_PKTS);
1041da14cebeSEric Cheng 			/* LINTED: constant in conditional context */
1042da14cebeSEric Cheng 			SQS_POLLING_OFF(sqp, B_TRUE, sq_rx_ring);
10434cc34124SThirumalai Srinivasan 
10444cc34124SThirumalai Srinivasan 			/*
10454cc34124SThirumalai Srinivasan 			 * If there is a pending control operation,
10464cc34124SThirumalai Srinivasan 			 * wake up the worker, since it is currently
10474cc34124SThirumalai Srinivasan 			 * not running.
10484cc34124SThirumalai Srinivasan 			 */
1049233fee3fSPatrick Mooney 			if (sqp->sq_state & SQS_WORKER_THR_CONTROL) {
1050233fee3fSPatrick Mooney 				squeue_worker_wakeup(sqp);
1051233fee3fSPatrick Mooney 			}
1052da14cebeSEric Cheng 		} else {
1053da14cebeSEric Cheng 			/*
1054da14cebeSEric Cheng 			 * Worker thread is already running. We don't need
1055da14cebeSEric Cheng 			 * to do anything. Indicate that poll thread is done.
1056da14cebeSEric Cheng 			 */
1057da14cebeSEric Cheng 			sqp->sq_state &= ~SQS_GET_PKTS;
1058da14cebeSEric Cheng 		}
1059da14cebeSEric Cheng 		if (sqp->sq_state & SQS_POLL_THR_CONTROL) {
1060da14cebeSEric Cheng 			/*
1061da14cebeSEric Cheng 			 * Act on control requests to quiesce, clean up, or
1062da14cebeSEric Cheng 			 * restart a squeue.
1063da14cebeSEric Cheng 			 */
1064da14cebeSEric Cheng 			squeue_poll_thr_control(sqp);
10657c478bd9Sstevel@tonic-gate 		}
10667c478bd9Sstevel@tonic-gate 	}
10677c478bd9Sstevel@tonic-gate }
10687c478bd9Sstevel@tonic-gate 
1069da14cebeSEric Cheng /*
1070da14cebeSEric Cheng  * The squeue worker thread acts on any control requests to quiesce, clean up,
1071da14cebeSEric Cheng  * or restart an ill_rx_ring_t by calling this function. The worker thread
1072da14cebeSEric Cheng  * synchronizes with the squeue poll thread to complete the request and finally
1073da14cebeSEric Cheng  * wakes up the requestor when the request is completed.
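 *
 * A rough sketch of the quiesce handshake implemented below (the poll thread
 * side lives in squeue_poll_thr_control(), which marks the poll thread
 * SQS_POLL_THR_QUIESCED once it has stopped polling):
 *
 *	worker thread				poll thread
 *	-------------				-----------
 *	sq_state |= SQS_PROC|SQS_WORKER
 *	sq_state |= SQS_POLL_THR_QUIESCE
 *	cv_signal(&sqp->sq_poll_cv)
 *						sq_state |= SQS_POLL_THR_QUIESCED
 *						cv_signal(&sqp->sq_worker_cv)
 *	sq_state |= SQS_POLL_QUIESCE_DONE
 *	cv_signal(&sqp->sq_ctrlop_done_cv)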
1074da14cebeSEric Cheng  */
1075da14cebeSEric Cheng static void
1076da14cebeSEric Cheng squeue_worker_thr_control(squeue_t *sqp)
10777c478bd9Sstevel@tonic-gate {
1078da14cebeSEric Cheng 	ill_t	*ill;
1079da14cebeSEric Cheng 	ill_rx_ring_t	*rx_ring;
10807c478bd9Sstevel@tonic-gate 
1081da14cebeSEric Cheng 	ASSERT(MUTEX_HELD(&sqp->sq_lock));
10827c478bd9Sstevel@tonic-gate 
1083da14cebeSEric Cheng 	if (sqp->sq_state & SQS_POLL_RESTART) {
1084da14cebeSEric Cheng 		/* Restart implies a previous quiesce. */
1085da14cebeSEric Cheng 		ASSERT((sqp->sq_state & (SQS_PROC_HELD |
1086da14cebeSEric Cheng 		    SQS_POLL_QUIESCE_DONE | SQS_PROC | SQS_WORKER)) ==
1087da14cebeSEric Cheng 		    (SQS_POLL_QUIESCE_DONE | SQS_PROC | SQS_WORKER));
1088da14cebeSEric Cheng 		/*
1089da14cebeSEric Cheng 		 * Request the squeue poll thread to restart and wait till
1090da14cebeSEric Cheng 		 * it actually restarts.
1091da14cebeSEric Cheng 		 */
1092da14cebeSEric Cheng 		sqp->sq_state &= ~SQS_POLL_QUIESCE_DONE;
1093da14cebeSEric Cheng 		sqp->sq_state |= SQS_POLL_THR_RESTART;
1094da14cebeSEric Cheng 		cv_signal(&sqp->sq_poll_cv);
1095da14cebeSEric Cheng 		while (sqp->sq_state & SQS_POLL_THR_QUIESCED)
1096da14cebeSEric Cheng 			cv_wait(&sqp->sq_worker_cv, &sqp->sq_lock);
1097da14cebeSEric Cheng 		sqp->sq_state &= ~(SQS_POLL_RESTART | SQS_PROC |
1098da14cebeSEric Cheng 		    SQS_WORKER);
1099da14cebeSEric Cheng 		/*
1100da14cebeSEric Cheng 		 * Signal any waiter that is waiting for the restart
1101da14cebeSEric Cheng 		 * to complete
1102da14cebeSEric Cheng 		 */
1103da14cebeSEric Cheng 		sqp->sq_state |= SQS_POLL_RESTART_DONE;
1104da14cebeSEric Cheng 		cv_signal(&sqp->sq_ctrlop_done_cv);
1105da14cebeSEric Cheng 		return;
1106da14cebeSEric Cheng 	}
11077c478bd9Sstevel@tonic-gate 
1108da14cebeSEric Cheng 	if (sqp->sq_state & SQS_PROC_HELD) {
1109da14cebeSEric Cheng 		/* The squeue poll thread handed control to us */
1110da14cebeSEric Cheng 		ASSERT(sqp->sq_state & SQS_PROC);
1111da14cebeSEric Cheng 	}
11127c478bd9Sstevel@tonic-gate 
1113da14cebeSEric Cheng 	/*
1114da14cebeSEric Cheng 	 * Prevent any other thread from processing the squeue
1115da14cebeSEric Cheng 	 * while we carry out the control actions, by setting SQS_PROC,
1116da14cebeSEric Cheng 	 * but allow ourselves to reenter by setting SQS_WORKER.
1117da14cebeSEric Cheng 	 */
1118da14cebeSEric Cheng 	sqp->sq_state |= (SQS_PROC | SQS_WORKER);
1119da14cebeSEric Cheng 
1120da14cebeSEric Cheng 	/* Signal the squeue poll thread and wait for it to quiesce itself */
1121da14cebeSEric Cheng 	if (!(sqp->sq_state & SQS_POLL_THR_QUIESCED)) {
1122da14cebeSEric Cheng 		sqp->sq_state |= SQS_POLL_THR_QUIESCE;
1123da14cebeSEric Cheng 		cv_signal(&sqp->sq_poll_cv);
1124da14cebeSEric Cheng 		while (!(sqp->sq_state & SQS_POLL_THR_QUIESCED))
1125da14cebeSEric Cheng 			cv_wait(&sqp->sq_worker_cv, &sqp->sq_lock);
1126da14cebeSEric Cheng 	}
1127da14cebeSEric Cheng 
1128da14cebeSEric Cheng 	rx_ring = sqp->sq_rx_ring;
1129da14cebeSEric Cheng 	ill = rx_ring->rr_ill;
1130da14cebeSEric Cheng 	/*
1131da14cebeSEric Cheng 	 * The lock hierarchy is as follows.
1132da14cebeSEric Cheng 	 * cpu_lock -> ill_lock -> sqset_lock -> sq_lock
1133da14cebeSEric Cheng 	 */
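	/*
	 * To honor that ordering, ill_lock cannot be acquired while sq_lock
	 * is held; drop sq_lock first and re-take it after ill_lock.
	 */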
11347c478bd9Sstevel@tonic-gate 	mutex_exit(&sqp->sq_lock);
1135da14cebeSEric Cheng 	mutex_enter(&ill->ill_lock);
1136da14cebeSEric Cheng 	mutex_enter(&sqp->sq_lock);
11377c478bd9Sstevel@tonic-gate 
1138da14cebeSEric Cheng 	SQS_POLLING_OFF(sqp, (sqp->sq_state & SQS_POLL_CAPAB) != 0,
1139da14cebeSEric Cheng 	    sqp->sq_rx_ring);
1140da14cebeSEric Cheng 	sqp->sq_state &= ~(SQS_POLL_CAPAB | SQS_GET_PKTS | SQS_PROC_HELD);
1141da14cebeSEric Cheng 	if (sqp->sq_state & SQS_POLL_CLEANUP) {
1142da14cebeSEric Cheng 		/*
1143da14cebeSEric Cheng 		 * Disassociate this squeue from its ill_rx_ring_t.
1144da14cebeSEric Cheng 		 * The rr_sqp, sq_rx_ring fields are protected by the
1145da14cebeSEric Cheng 		 * corresponding squeue, ill_lock* and sq_lock. Holding any
1146da14cebeSEric Cheng 		 * of them will ensure that the ring to squeue mapping does
1147da14cebeSEric Cheng 		 * not change.
1148da14cebeSEric Cheng 		 */
1149da14cebeSEric Cheng 		ASSERT(!(sqp->sq_state & SQS_DEFAULT));
11507c478bd9Sstevel@tonic-gate 
1151da14cebeSEric Cheng 		sqp->sq_rx_ring = NULL;
1152da14cebeSEric Cheng 		rx_ring->rr_sqp = NULL;
1153da14cebeSEric Cheng 
1154da14cebeSEric Cheng 		sqp->sq_state &= ~(SQS_POLL_CLEANUP | SQS_POLL_THR_QUIESCED |
1155da14cebeSEric Cheng 		    SQS_POLL_QUIESCE_DONE);
1156da14cebeSEric Cheng 		sqp->sq_ill = NULL;
1157da14cebeSEric Cheng 
1158da14cebeSEric Cheng 		rx_ring->rr_rx_handle = NULL;
1159da14cebeSEric Cheng 		rx_ring->rr_intr_handle = NULL;
1160da14cebeSEric Cheng 		rx_ring->rr_intr_enable = NULL;
1161da14cebeSEric Cheng 		rx_ring->rr_intr_disable = NULL;
1162da14cebeSEric Cheng 		sqp->sq_state |= SQS_POLL_CLEANUP_DONE;
1163da14cebeSEric Cheng 	} else {
1164da14cebeSEric Cheng 		sqp->sq_state &= ~SQS_POLL_QUIESCE;
1165da14cebeSEric Cheng 		sqp->sq_state |= SQS_POLL_QUIESCE_DONE;
1166da14cebeSEric Cheng 	}
1167da14cebeSEric Cheng 	/*
1168da14cebeSEric Cheng 	 * Signal any waiter that is waiting for the quiesce or cleanup
1169da14cebeSEric Cheng 	 * to complete and also wait for it to actually see and reset the
1170da14cebeSEric Cheng 	 * SQS_POLL_CLEANUP_DONE.
1171da14cebeSEric Cheng 	 */
1172da14cebeSEric Cheng 	cv_signal(&sqp->sq_ctrlop_done_cv);
1173da14cebeSEric Cheng 	mutex_exit(&ill->ill_lock);
1174da14cebeSEric Cheng 	if (sqp->sq_state & SQS_POLL_CLEANUP_DONE) {
1175da14cebeSEric Cheng 		cv_wait(&sqp->sq_worker_cv, &sqp->sq_lock);
1176da14cebeSEric Cheng 		sqp->sq_state &= ~(SQS_PROC | SQS_WORKER);
1177da14cebeSEric Cheng 	}
11787c478bd9Sstevel@tonic-gate }
11797c478bd9Sstevel@tonic-gate 
1180da14cebeSEric Cheng static void
1181da14cebeSEric Cheng squeue_worker(squeue_t *sqp)
11827c478bd9Sstevel@tonic-gate {
1183da14cebeSEric Cheng 	kmutex_t *lock = &sqp->sq_lock;
1184da14cebeSEric Cheng 	kcondvar_t *async = &sqp->sq_worker_cv;
1185da14cebeSEric Cheng 	callb_cpr_t cprinfo;
1186da14cebeSEric Cheng 	hrtime_t now;
1187da14cebeSEric Cheng 
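	/*
	 * Register this thread with the CPR (checkpoint/resume) framework
	 * so that the system can be safely suspended while the worker is
	 * blocked in cv_wait() below.
	 */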
1188da14cebeSEric Cheng 	CALLB_CPR_INIT(&cprinfo, lock, callb_generic_cpr, "sq_worker");
1189da14cebeSEric Cheng 	mutex_enter(lock);
1190da14cebeSEric Cheng 
1191da14cebeSEric Cheng 	for (;;) {
1192da14cebeSEric Cheng 		for (;;) {
1193da14cebeSEric Cheng 			/*
1194da14cebeSEric Cheng 			 * If the poll thread has handed control to us,
1195da14cebeSEric Cheng 			 * we need to break out of the wait.
1196da14cebeSEric Cheng 			 */
1197da14cebeSEric Cheng 			if (sqp->sq_state & SQS_PROC_HELD)
1198da14cebeSEric Cheng 				break;
1199da14cebeSEric Cheng 
1200da14cebeSEric Cheng 			/*
1201da14cebeSEric Cheng 			 * If the squeue is not being processed and we either
1202da14cebeSEric Cheng 			 * have messages to drain or some thread has signaled
1203da14cebeSEric Cheng 			 * some control activity, we need to break out.
1204da14cebeSEric Cheng 			 */
1205da14cebeSEric Cheng 			if (!(sqp->sq_state & SQS_PROC) &&
1206da14cebeSEric Cheng 			    ((sqp->sq_state & SQS_WORKER_THR_CONTROL) ||
1207da14cebeSEric Cheng 			    (sqp->sq_first != NULL)))
1208da14cebeSEric Cheng 				break;
1209da14cebeSEric Cheng 
1210da14cebeSEric Cheng 			/*
1211da14cebeSEric Cheng 			 * If we have started some control action, then check
1212da14cebeSEric Cheng 			 * for the SQS_WORKER flag (since we don't
1213da14cebeSEric Cheng 			 * release the squeue) to make sure we own the squeue
1214da14cebeSEric Cheng 			 * and break out.
1215da14cebeSEric Cheng 			 */
1216da14cebeSEric Cheng 			if ((sqp->sq_state & SQS_WORKER_THR_CONTROL) &&
1217da14cebeSEric Cheng 			    (sqp->sq_state & SQS_WORKER))
1218da14cebeSEric Cheng 				break;
1219da14cebeSEric Cheng 
1220da14cebeSEric Cheng 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
1221da14cebeSEric Cheng 			cv_wait(async, lock);
1222da14cebeSEric Cheng 			CALLB_CPR_SAFE_END(&cprinfo, lock);
1223da14cebeSEric Cheng 		}
1224da14cebeSEric Cheng 		if (sqp->sq_state & SQS_WORKER_THR_CONTROL) {
1225da14cebeSEric Cheng 			squeue_worker_thr_control(sqp);
1226da14cebeSEric Cheng 			continue;
1227da14cebeSEric Cheng 		}
1228da14cebeSEric Cheng 		ASSERT(!(sqp->sq_state & (SQS_POLL_THR_QUIESCED |
1229da14cebeSEric Cheng 		    SQS_POLL_CLEANUP_DONE | SQS_POLL_QUIESCE_DONE |
1230da14cebeSEric Cheng 		    SQS_WORKER_THR_CONTROL | SQS_POLL_THR_CONTROL)));
1231da14cebeSEric Cheng 
1232da14cebeSEric Cheng 		if (sqp->sq_state & SQS_PROC_HELD)
1233da14cebeSEric Cheng 			sqp->sq_state &= ~SQS_PROC_HELD;
1234da14cebeSEric Cheng 
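		/*
		 * Drain the squeue. The third argument to sq_drain is an
		 * absolute deadline: the drain gives up once gethrtime()
		 * passes now + squeue_drain_ns, leaving any remaining work
		 * queued for the next pass.
		 */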
1235da14cebeSEric Cheng 		now = gethrtime();
1236da14cebeSEric Cheng 		sqp->sq_run = curthread;
1237da14cebeSEric Cheng 		sqp->sq_drain(sqp, SQS_WORKER, now +  squeue_drain_ns);
1238da14cebeSEric Cheng 		sqp->sq_run = NULL;
1239da14cebeSEric Cheng 	}
12407c478bd9Sstevel@tonic-gate }
12417c478bd9Sstevel@tonic-gate 
12427c478bd9Sstevel@tonic-gate uintptr_t *
12437c478bd9Sstevel@tonic-gate squeue_getprivate(squeue_t *sqp, sqprivate_t p)
12447c478bd9Sstevel@tonic-gate {
12457c478bd9Sstevel@tonic-gate 	ASSERT(p < SQPRIVATE_MAX);
12467c478bd9Sstevel@tonic-gate 
12477c478bd9Sstevel@tonic-gate 	return (&sqp->sq_private[p]);
12487c478bd9Sstevel@tonic-gate }
12490f1702c5SYu Xiangning 
12500f1702c5SYu Xiangning /* ARGSUSED */
12510f1702c5SYu Xiangning void
1252bd670b35SErik Nordmark squeue_wakeup_conn(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
12530f1702c5SYu Xiangning {
12540f1702c5SYu Xiangning 	conn_t *connp = (conn_t *)arg;
12550f1702c5SYu Xiangning 	squeue_t *sqp = connp->conn_sqp;
12560f1702c5SYu Xiangning 
12570f1702c5SYu Xiangning 	/*
12580f1702c5SYu Xiangning 	 * Mark the squeue as paused before waking up the thread stuck
12590f1702c5SYu Xiangning 	 * in squeue_synch_enter().
12600f1702c5SYu Xiangning 	 */
12610f1702c5SYu Xiangning 	mutex_enter(&sqp->sq_lock);
12620f1702c5SYu Xiangning 	sqp->sq_state |= SQS_PAUSE;
12630f1702c5SYu Xiangning 
12640f1702c5SYu Xiangning 	/*
12650f1702c5SYu Xiangning 	 * Notify the thread that it's OK to proceed; that is done by
12660f1702c5SYu Xiangning 	 * clearing the MSGWAITSYNC flag. The synch thread will free the mblk.
12670f1702c5SYu Xiangning 	 */
12680f1702c5SYu Xiangning 	ASSERT(mp->b_flag & MSGWAITSYNC);
12690f1702c5SYu Xiangning 	mp->b_flag &= ~MSGWAITSYNC;
12700f1702c5SYu Xiangning 	cv_broadcast(&connp->conn_sq_cv);
12710f1702c5SYu Xiangning 
12720f1702c5SYu Xiangning 	/*
12730f1702c5SYu Xiangning 	 * We are doing something on behalf of another thread, so we have to
12740f1702c5SYu Xiangning 	 * pause and wait until it finishes.
12750f1702c5SYu Xiangning 	 */
12760f1702c5SYu Xiangning 	while (sqp->sq_state & SQS_PAUSE) {
12770f1702c5SYu Xiangning 		cv_wait(&sqp->sq_synch_cv, &sqp->sq_lock);
12780f1702c5SYu Xiangning 	}
12790f1702c5SYu Xiangning 	mutex_exit(&sqp->sq_lock);
12800f1702c5SYu Xiangning }
12810f1702c5SYu Xiangning 
12820f1702c5SYu Xiangning int
12839ee3959aSAnders Persson squeue_synch_enter(conn_t *connp, mblk_t *use_mp)
12840f1702c5SYu Xiangning {
12859ee3959aSAnders Persson 	squeue_t *sqp;
12869ee3959aSAnders Persson 
12879ee3959aSAnders Persson again:
12889ee3959aSAnders Persson 	sqp = connp->conn_sqp;
12899ee3959aSAnders Persson 
12900f1702c5SYu Xiangning 	mutex_enter(&sqp->sq_lock);
12910f1702c5SYu Xiangning 	if (sqp->sq_first == NULL && !(sqp->sq_state & SQS_PROC)) {
12920f1702c5SYu Xiangning 		/*
12930f1702c5SYu Xiangning 		 * We are OK to proceed if the squeue is empty, and
12940f1702c5SYu Xiangning 		 * no one owns the squeue.
12950f1702c5SYu Xiangning 		 *
12960f1702c5SYu Xiangning 		 * The caller won't own the squeue as this is called from the
12970f1702c5SYu Xiangning 		 * application.
12980f1702c5SYu Xiangning 		 */
12990f1702c5SYu Xiangning 		ASSERT(sqp->sq_run == NULL);
13000f1702c5SYu Xiangning 
13010f1702c5SYu Xiangning 		sqp->sq_state |= SQS_PROC;
13020f1702c5SYu Xiangning 		sqp->sq_run = curthread;
13030f1702c5SYu Xiangning 		mutex_exit(&sqp->sq_lock);
13040f1702c5SYu Xiangning 
13059ee3959aSAnders Persson 		/*
13069ee3959aSAnders Persson 		 * Handle squeue switching. The conn's squeue can only change
13079ee3959aSAnders Persson 		 * while there is a thread in the squeue, which is why we do
13089ee3959aSAnders Persson 		 * the check after entering the squeue. If it has changed, exit
13099ee3959aSAnders Persson 		 * this squeue and redo everything with the new squeue.
13109ee3959aSAnders Persson 		 */
13119ee3959aSAnders Persson 		if (sqp != connp->conn_sqp) {
13129ee3959aSAnders Persson 			mutex_enter(&sqp->sq_lock);
13139ee3959aSAnders Persson 			sqp->sq_state &= ~SQS_PROC;
13149ee3959aSAnders Persson 			sqp->sq_run = NULL;
13159ee3959aSAnders Persson 			mutex_exit(&sqp->sq_lock);
13169ee3959aSAnders Persson 			goto again;
13179ee3959aSAnders Persson 		}
13180f1702c5SYu Xiangning #if SQUEUE_DEBUG
13190f1702c5SYu Xiangning 		sqp->sq_curmp = NULL;
13200f1702c5SYu Xiangning 		sqp->sq_curproc = NULL;
13210f1702c5SYu Xiangning 		sqp->sq_connp = connp;
13220f1702c5SYu Xiangning #endif
13230f1702c5SYu Xiangning 		connp->conn_on_sqp = B_TRUE;
13240f1702c5SYu Xiangning 		return (0);
13250f1702c5SYu Xiangning 	} else {
13260f1702c5SYu Xiangning 		mblk_t  *mp;
13270f1702c5SYu Xiangning 
1328f3124163SAnders Persson 		mp = (use_mp == NULL) ? allocb(0, BPRI_MED) : use_mp;
13290f1702c5SYu Xiangning 		if (mp == NULL) {
13300f1702c5SYu Xiangning 			mutex_exit(&sqp->sq_lock);
13310f1702c5SYu Xiangning 			return (ENOMEM);
13320f1702c5SYu Xiangning 		}
13330f1702c5SYu Xiangning 
13340f1702c5SYu Xiangning 		/*
13350f1702c5SYu Xiangning 		 * We mark the mblk as awaiting synchronous squeue access
13360f1702c5SYu Xiangning 		 * by setting the MSGWAITSYNC flag. Once squeue_wakeup_conn
13370f1702c5SYu Xiangning 		 * fires, MSGWAITSYNC is cleared, at which point we know we
13380f1702c5SYu Xiangning 		 * have exclusive access.
13390f1702c5SYu Xiangning 		 */
13400f1702c5SYu Xiangning 		mp->b_flag |= MSGWAITSYNC;
13410f1702c5SYu Xiangning 
13420f1702c5SYu Xiangning 		CONN_INC_REF(connp);
13430f1702c5SYu Xiangning 		SET_SQUEUE(mp, squeue_wakeup_conn, connp);
13440f1702c5SYu Xiangning 		ENQUEUE_CHAIN(sqp, mp, mp, 1);
13450f1702c5SYu Xiangning 
13460f1702c5SYu Xiangning 		ASSERT(sqp->sq_run != curthread);
13470f1702c5SYu Xiangning 
13480f1702c5SYu Xiangning 		/* Wait until the enqueued mblk gets processed. */
13490f1702c5SYu Xiangning 		while (mp->b_flag & MSGWAITSYNC)
13500f1702c5SYu Xiangning 			cv_wait(&connp->conn_sq_cv, &sqp->sq_lock);
13510f1702c5SYu Xiangning 		mutex_exit(&sqp->sq_lock);
13520f1702c5SYu Xiangning 
1353f3124163SAnders Persson 		if (use_mp == NULL)
1354f3124163SAnders Persson 			freeb(mp);
13550f1702c5SYu Xiangning 
13560f1702c5SYu Xiangning 		return (0);
13570f1702c5SYu Xiangning 	}
13580f1702c5SYu Xiangning }
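
/*
 * A minimal usage sketch for the synchronous-access pair (the conn_t pointer
 * is a stand-in for whatever connection the caller holds; SQ_NODRAIN and
 * SQ_PROCESS are the two flags squeue_synch_exit() accepts, per the ASSERT
 * below):
 *
 *	if (squeue_synch_enter(connp, NULL) == 0) {
 *		... exclusive access to connp's squeue ...
 *		squeue_synch_exit(connp, SQ_NODRAIN);
 *	}
 */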
13590f1702c5SYu Xiangning 
1360196b393bSPatrick Mooney /*
1361196b393bSPatrick Mooney  * If possible, attempt to immediately process a single queued request, should
1362196b393bSPatrick Mooney  * it match the supplied conn_t reference.  This is primarily intended to elide
1363196b393bSPatrick Mooney  * squeue worker thread wake-ups during local TCP connect() or close()
1364196b393bSPatrick Mooney  * operations where the response is placed on the squeue during processing.
1365196b393bSPatrick Mooney  */
1366196b393bSPatrick Mooney static void
1367196b393bSPatrick Mooney squeue_try_drain_one(squeue_t *sqp, conn_t *compare_conn)
13680f1702c5SYu Xiangning {
1369196b393bSPatrick Mooney 	mblk_t *next, *mp = sqp->sq_first;
1370196b393bSPatrick Mooney 	conn_t *connp;
1371196b393bSPatrick Mooney 	sqproc_t proc;
1372196b393bSPatrick Mooney 	ip_recv_attr_t iras, *ira = NULL;
13739ee3959aSAnders Persson 
1374196b393bSPatrick Mooney 	ASSERT(MUTEX_HELD(&sqp->sq_lock));
1375196b393bSPatrick Mooney 	ASSERT((sqp->sq_state & SQS_PROC) == 0);
1376196b393bSPatrick Mooney 	ASSERT(sqp->sq_run == NULL);
1377196b393bSPatrick Mooney 	VERIFY(mp != NULL);
13780f1702c5SYu Xiangning 
1379196b393bSPatrick Mooney 	/*
1380196b393bSPatrick Mooney 	 * There is no guarantee that compare_conn references a valid object at
1381196b393bSPatrick Mooney 	 * this time, so under no circumstance may it be dereferenced unless it
1382196b393bSPatrick Mooney 	 * matches the squeue entry.
1383196b393bSPatrick Mooney 	 */
1384196b393bSPatrick Mooney 	connp = (conn_t *)mp->b_prev;
1385196b393bSPatrick Mooney 	if (connp != compare_conn) {
1386196b393bSPatrick Mooney 		return;
1387196b393bSPatrick Mooney 	}
13880f1702c5SYu Xiangning 
1389196b393bSPatrick Mooney 	next = mp->b_next;
1390196b393bSPatrick Mooney 	proc = (sqproc_t)mp->b_queue;
1391196b393bSPatrick Mooney 
1392196b393bSPatrick Mooney 	ASSERT(proc != NULL);
1393196b393bSPatrick Mooney 	ASSERT(sqp->sq_count > 0);
1394196b393bSPatrick Mooney 
1395196b393bSPatrick Mooney 	/* Dequeue item from squeue */
1396196b393bSPatrick Mooney 	if (next == NULL) {
1397196b393bSPatrick Mooney 		sqp->sq_first = NULL;
1398196b393bSPatrick Mooney 		sqp->sq_last = NULL;
13990f1702c5SYu Xiangning 	} else {
1400196b393bSPatrick Mooney 		sqp->sq_first = next;
1401196b393bSPatrick Mooney 	}
1402196b393bSPatrick Mooney 	sqp->sq_count--;
1403196b393bSPatrick Mooney 
1404196b393bSPatrick Mooney 	sqp->sq_state |= SQS_PROC;
1405196b393bSPatrick Mooney 	sqp->sq_run = curthread;
1406196b393bSPatrick Mooney 	mutex_exit(&sqp->sq_lock);
1407196b393bSPatrick Mooney 
1408196b393bSPatrick Mooney 	/* Prep mblk_t and retrieve ira if needed */
1409196b393bSPatrick Mooney 	mp->b_prev = NULL;
1410196b393bSPatrick Mooney 	mp->b_queue = NULL;
1411196b393bSPatrick Mooney 	mp->b_next = NULL;
1412196b393bSPatrick Mooney 	if (ip_recv_attr_is_mblk(mp)) {
1413196b393bSPatrick Mooney 		mblk_t	*attrmp = mp;
1414196b393bSPatrick Mooney 
1415196b393bSPatrick Mooney 		ASSERT(attrmp->b_cont != NULL);
1416196b393bSPatrick Mooney 
1417196b393bSPatrick Mooney 		mp = attrmp->b_cont;
1418196b393bSPatrick Mooney 		attrmp->b_cont = NULL;
1419196b393bSPatrick Mooney 
1420196b393bSPatrick Mooney 		ASSERT(mp->b_queue == NULL);
1421196b393bSPatrick Mooney 		ASSERT(mp->b_prev == NULL);
1422196b393bSPatrick Mooney 
1423196b393bSPatrick Mooney 		if (!ip_recv_attr_from_mblk(attrmp, &iras)) {
1424196b393bSPatrick Mooney 			/* ill_t or ip_stack_t disappeared */
1425196b393bSPatrick Mooney 			ip_drop_input("ip_recv_attr_from_mblk", mp, NULL);
1426196b393bSPatrick Mooney 			ira_cleanup(&iras, B_TRUE);
1427196b393bSPatrick Mooney 			CONN_DEC_REF(connp);
1428196b393bSPatrick Mooney 			goto done;
1429196b393bSPatrick Mooney 		}
1430196b393bSPatrick Mooney 		ira = &iras;
1431196b393bSPatrick Mooney 	}
1432196b393bSPatrick Mooney 
1433196b393bSPatrick Mooney 	SQUEUE_DBG_SET(sqp, mp, proc, connp, mp->b_tag);
1434196b393bSPatrick Mooney 	connp->conn_on_sqp = B_TRUE;
1435196b393bSPatrick Mooney 	DTRACE_PROBE3(squeue__proc__start, squeue_t *, sqp, mblk_t *, mp,
1436196b393bSPatrick Mooney 	    conn_t *, connp);
1437196b393bSPatrick Mooney 	(*proc)(connp, mp, sqp, ira);
1438196b393bSPatrick Mooney 	DTRACE_PROBE2(squeue__proc__end, squeue_t *, sqp, conn_t *, connp);
1439196b393bSPatrick Mooney 	connp->conn_on_sqp = B_FALSE;
1440196b393bSPatrick Mooney 	CONN_DEC_REF(connp);
1441196b393bSPatrick Mooney 	SQUEUE_DBG_CLEAR(sqp);
1442196b393bSPatrick Mooney 
1443*3a7e2f8dSAndy Fiddaman 	if (ira != NULL)
1444*3a7e2f8dSAndy Fiddaman 		ira_cleanup(ira, B_TRUE);
1445*3a7e2f8dSAndy Fiddaman 
1446196b393bSPatrick Mooney done:
1447196b393bSPatrick Mooney 	mutex_enter(&sqp->sq_lock);
1448196b393bSPatrick Mooney 	sqp->sq_state &= ~(SQS_PROC);
1449196b393bSPatrick Mooney 	sqp->sq_run = NULL;
1450196b393bSPatrick Mooney }
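
/*
 * Design note: squeue_try_drain_one() deliberately processes at most the
 * single entry at the head of the queue, and only when that entry belongs
 * to the conn_t that is leaving the squeue. Anything else stays queued for
 * the worker thread, which squeue_synch_exit() wakes if the queue is still
 * non-empty.
 */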
1451196b393bSPatrick Mooney 
1452196b393bSPatrick Mooney void
1453196b393bSPatrick Mooney squeue_synch_exit(conn_t *connp, int flag)
1454196b393bSPatrick Mooney {
1455196b393bSPatrick Mooney 	squeue_t *sqp = connp->conn_sqp;
1456196b393bSPatrick Mooney 
1457196b393bSPatrick Mooney 	ASSERT(flag == SQ_NODRAIN || flag == SQ_PROCESS);
1458196b393bSPatrick Mooney 
1459196b393bSPatrick Mooney 	mutex_enter(&sqp->sq_lock);
1460196b393bSPatrick Mooney 	if (sqp->sq_run != curthread) {
14610f1702c5SYu Xiangning 		/*
14620f1702c5SYu Xiangning 		 * The caller doesn't own the squeue; clear the SQS_PAUSE flag
14630f1702c5SYu Xiangning 		 * and wake up the squeue owner so that the owner can continue
14640f1702c5SYu Xiangning 		 * processing.
14650f1702c5SYu Xiangning 		 */
14660f1702c5SYu Xiangning 		ASSERT(sqp->sq_state & SQS_PAUSE);
14670f1702c5SYu Xiangning 		sqp->sq_state &= ~SQS_PAUSE;
14680f1702c5SYu Xiangning 
14690f1702c5SYu Xiangning 		/* There should be only one thread blocking on sq_synch_cv. */
14700f1702c5SYu Xiangning 		cv_signal(&sqp->sq_synch_cv);
1471196b393bSPatrick Mooney 		mutex_exit(&sqp->sq_lock);
1472196b393bSPatrick Mooney 		return;
1473196b393bSPatrick Mooney 	}
1474196b393bSPatrick Mooney 
1475196b393bSPatrick Mooney 	ASSERT(sqp->sq_state & SQS_PROC);
1476196b393bSPatrick Mooney 
1477196b393bSPatrick Mooney 	sqp->sq_state &= ~SQS_PROC;
1478196b393bSPatrick Mooney 	sqp->sq_run = NULL;
1479196b393bSPatrick Mooney 	connp->conn_on_sqp = B_FALSE;
1480196b393bSPatrick Mooney 
1481196b393bSPatrick Mooney 	/* If the caller opted in, attempt to process the head squeue item. */
1482196b393bSPatrick Mooney 	if (flag == SQ_PROCESS && sqp->sq_first != NULL) {
1483196b393bSPatrick Mooney 		squeue_try_drain_one(sqp, connp);
1484196b393bSPatrick Mooney 	}
1485196b393bSPatrick Mooney 
1486196b393bSPatrick Mooney 	/* Wake up the worker if further requests are pending. */
1487196b393bSPatrick Mooney 	if (sqp->sq_first != NULL) {
1488196b393bSPatrick Mooney 		squeue_worker_wakeup(sqp);
14890f1702c5SYu Xiangning 	}
1490233fee3fSPatrick Mooney 	mutex_exit(&sqp->sq_lock);
14910f1702c5SYu Xiangning }
1492