xref: /illumos-gate/usr/src/uts/common/os/putnext.c (revision 5d3b8cb7)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved  	*/


/*
 *		UNIX Device Driver Interface functions
 *	This file contains the C-versions of putnext() and put().
 *	Assembly language versions exist for some architectures.
 */

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/debug.h>
#include <sys/t_lock.h>
#include <sys/stream.h>
#include <sys/thread.h>
#include <sys/strsubr.h>
#include <sys/ddi.h>
#include <sys/vtrace.h>
#include <sys/cmn_err.h>
#include <sys/strft.h>
#include <sys/stack.h>
#include <sys/archsystm.h>

/*
 * Streams with many modules may create long chains of calls via putnext()
 * which may exhaust stack space. When putnext detects that the stack space
 * left is too small (less than PUT_STACK_NEEDED), the call chain is broken
 * and further processing is delegated to the background thread via a call
 * to putnext_tail(). Unfortunately there is no generic solution with a fixed
 * stack size, and putnext() is a recursive function, so this hack is a
 * necessary evil.
 *
 * The redzone value is chosen dependent on the default stack size which is 8K
 * on 32-bit kernels and on x86 and 16K on 64-bit kernels. The values are
 * chosen empirically. For 64-bit kernels it is 5000 and for 32-bit kernels
 * it is 3000. Experiments showed that 2500 is not enough for either 32-bit
 * or 64-bit kernels.
 *
 * The redzone value is a tuneable rather than a constant to allow adjustments
 * in the field.
 *
 * The check in PUT_STACK_NOTENOUGH is taken from the segkp_map_red()
 * function. It is possible to define it as a generic function exported by
 * seg_kp, but
 *
 * a) It may sound like an open invitation to use the facility indiscriminately.
 * b) It adds an extra function call in the putnext path.
 *
 * We keep a global counter `put_stack_notenough' which keeps track of how
 * many times the stack-switching hack was used.
 */

static ulong_t put_stack_notenough;

#ifdef	_LP64
#define	PUT_STACK_NEEDED 5000
#else
#define	PUT_STACK_NEEDED 3000
#endif

int put_stack_needed = PUT_STACK_NEEDED;
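
/*
 * Since put_stack_needed is an ordinary kernel global, it can be adjusted
 * in the field without recompiling; for example (the value shown is purely
 * illustrative), adding a line such as the following to /etc/system raises
 * the redzone at the next boot:
 *
 *	set put_stack_needed = 0x2000
 */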

#if defined(STACK_GROWTH_DOWN)
#define	PUT_STACK_NOTENOUGH() 					\
	(((STACK_BIAS + (uintptr_t)getfp() -			\
	    (uintptr_t)curthread->t_stkbase) < put_stack_needed) && \
	++put_stack_notenough)
#else
#error	"STACK_GROWTH_DOWN undefined"
#endif

boolean_t	UseFastlocks = B_FALSE;

/*
 * function: putnext()
 * purpose:  call the put routine of the queue linked to qp
 *
 * Note: this function is written to perform well on modern computer
 * architectures by, e.g., preloading values into registers and "smearing"
 * out code.
 *
 * A note on the fastput mechanism.  The most significant bit of a
 * putcount is considered the "FASTPUT" bit.  If set, then there is
 * nothing stopping a concurrent put from occurring (note that putcounts
 * are only allowed on CIPUT perimeters).  If, however, it is cleared,
 * then we need to take the normal lock path by acquiring the SQLOCK.
 * This is a slowlock.  When a thread starts exclusiveness, e.g. wants
 * writer access, it will clear the FASTPUT bit, causing new threads
 * to take the slowlock path.  This ensures that putcounts will not
 * increase in value, so the want-writer does not need to constantly
 * acquire the putlocks to sum the putcounts.  This does have the
 * possibility of having the count drop right after reading, but that
 * is no different than acquiring, reading and then releasing.  However,
 * in this mode, it cannot go up, so eventually they will drop to zero
 * and the want-writer can proceed.
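 *
 * As a purely illustrative sketch of that want-writer side (schematic
 * only; the actual exclusivity code lives elsewhere in the STREAMS
 * framework), the sequence amounts to clearing the FASTPUT bit in every
 * bucket first, then summing the per-bucket putcounts while waiting for
 * them to drain:
 *
 *	for (i = 0; i <= sq->sq_nciputctrl; i++) {
 *		cip = &sq->sq_ciputctrl[i];
 *		mutex_enter(&cip->ciputctrl_lock);
 *		cip->ciputctrl_count &= ~SQ_FASTPUT;
 *		count += cip->ciputctrl_count;
 *		mutex_exit(&cip->ciputctrl_lock);
 *	}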
 *
 * If the FASTPUT bit is set, or in the slowlock path we see that there
 * are no writers or want-writers, we make the choice of calling the
 * putproc, or a "fast-fill_syncq".  The fast-fill is a fill with
 * immediate intention to drain.  This is done because there are
 * messages already at the queue waiting to drain.  To preserve message
 * ordering, we need to put this message at the end, and pick up the
 * messages at the beginning.  We call the macro that actually
 * enqueues the message on the queue, and then call qdrain_syncq.  If
 * there is already a drainer, we just return.  We could make that
 * check before calling qdrain_syncq, but it is a little clearer
 * to have qdrain_syncq do this (we might try the above optimization
 * as this behavior evolves).  qdrain_syncq assumes that SQ_EXCL is set
 * already if this is a non-CIPUT perimeter, and that an appropriate
 * claim has been made.  So we do all that work before dropping the
 * SQLOCK with our claim.
 *
 * If we cannot proceed with the putproc/fast-fill, we just fall
 * through to the qfill_syncq, and then tail processing.  If state
 * has changed in that cycle, or wakeups are needed, it will occur
 * there.
 */
void
putnext(queue_t *qp, mblk_t *mp)
{
	queue_t		*fqp = qp; /* For strft tracing */
	syncq_t		*sq;
	uint16_t	flags;
	uint16_t	drain_mask;
	struct qinit	*qi;
	int		(*putproc)();
	struct stdata	*stp;
	int		ix;
	boolean_t	queued = B_FALSE;
	kmutex_t	*sdlock = NULL;
	kmutex_t	*sqciplock = NULL;
	ushort_t	*sqcipcount = NULL;

	TRACE_2(TR_FAC_STREAMS_FR, TR_PUTNEXT_START,
	    "putnext_start:(%p, %p)", qp, mp);

	ASSERT(mp->b_datap->db_ref != 0);
	ASSERT(mp->b_next == NULL && mp->b_prev == NULL);
	stp = STREAM(qp);
	ASSERT(stp != NULL);
	if (stp->sd_ciputctrl != NULL) {
		ix = CPU->cpu_seqid & stp->sd_nciputctrl;
		sdlock = &stp->sd_ciputctrl[ix].ciputctrl_lock;
		mutex_enter(sdlock);
	} else {
		mutex_enter(sdlock = &stp->sd_lock);
	}
	qp = qp->q_next;
	sq = qp->q_syncq;
	ASSERT(sq != NULL);
	ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
	qi = qp->q_qinfo;

	if (sq->sq_ciputctrl != NULL) {
		/* fastlock: */
		ASSERT(sq->sq_flags & SQ_CIPUT);
		ix = CPU->cpu_seqid & sq->sq_nciputctrl;
		sqciplock = &sq->sq_ciputctrl[ix].ciputctrl_lock;
		sqcipcount = &sq->sq_ciputctrl[ix].ciputctrl_count;
		mutex_enter(sqciplock);
		if (!((*sqcipcount) & SQ_FASTPUT) ||
		    (sq->sq_flags & (SQ_STAYAWAY|SQ_EXCL|SQ_EVENTS))) {
			mutex_exit(sqciplock);
			sqciplock = NULL;
			goto slowlock;
		}
		mutex_exit(sdlock);
		(*sqcipcount)++;
		ASSERT(*sqcipcount != 0);
		queued = qp->q_sqflags & Q_SQQUEUED;
		mutex_exit(sqciplock);
	} else {
	slowlock:
		ASSERT(sqciplock == NULL);
		mutex_enter(SQLOCK(sq));
		mutex_exit(sdlock);
		flags = sq->sq_flags;
		/*
		 * We are going to drop SQLOCK, so make a claim to prevent syncq
		 * from closing.
		 */
		sq->sq_count++;
		ASSERT(sq->sq_count != 0);		/* Wraparound */
		/*
		 * If there are writers or exclusive waiters, there is not much
		 * we can do.  Place the message on the syncq and schedule a
		 * background thread to drain it.
		 *
		 * Also if we are approaching end of stack, fill the syncq and
		 * switch processing to a background thread - see comments on
		 * top.
		 */
		if ((flags & (SQ_STAYAWAY|SQ_EXCL|SQ_EVENTS)) ||
		    (sq->sq_needexcl != 0) || PUT_STACK_NOTENOUGH()) {

			TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
			    "putnext_end:(%p, %p, %p) SQ_EXCL fill",
			    qp, mp, sq);

			/*
			 * NOTE: qfill_syncq will need QLOCK. It is safe to drop
			 * SQLOCK because positive sq_count keeps the syncq from
			 * closing.
			 */
			mutex_exit(SQLOCK(sq));

			qfill_syncq(sq, qp, mp);
			/*
			 * NOTE: after the call to qfill_syncq() qp may be
			 * closed; neither qp nor sq should be referenced at
			 * this point.
			 *
			 * This ASSERT is located here to prevent stack frame
			 * consumption in the DEBUG code.
			 */
			ASSERT(sqciplock == NULL);
			return;
		}

		queued = qp->q_sqflags & Q_SQQUEUED;
		/*
		 * If not a concurrent perimeter, we need to acquire
		 * it exclusively.  It could not have been previously
		 * set since we held the SQLOCK before testing
		 * SQ_GOAWAY above (which includes SQ_EXCL).
		 * We do this here because we hold the SQLOCK, and need
		 * to make this state change BEFORE dropping it.
		 */
		if (!(flags & SQ_CIPUT)) {
			ASSERT((sq->sq_flags & SQ_EXCL) == 0);
			ASSERT(!(sq->sq_type & SQ_CIPUT));
			sq->sq_flags |= SQ_EXCL;
		}
		mutex_exit(SQLOCK(sq));
	}

	ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT)));
	ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));

	/*
	 * We now have a claim on the syncq, we are either going to
	 * put the message on the syncq and then drain it, or we are
	 * going to call the putproc().
	 */
	putproc = qi->qi_putp;
	if (!queued) {
		STR_FTEVENT_MSG(mp, fqp, FTEV_PUTNEXT, mp->b_rptr -
		    mp->b_datap->db_base);
		(*putproc)(qp, mp);
		ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
		ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));
	} else {
		mutex_enter(QLOCK(qp));
		/*
		 * If there are no messages in front of us, just call putproc(),
		 * otherwise enqueue the message and drain the queue.
		 */
		if (qp->q_syncqmsgs == 0) {
			mutex_exit(QLOCK(qp));
			STR_FTEVENT_MSG(mp, fqp, FTEV_PUTNEXT, mp->b_rptr -
			    mp->b_datap->db_base);
			(*putproc)(qp, mp);
			ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
		} else {
			/*
			 * We are doing a fill with the intent to
			 * drain (meaning we are filling because
			 * there are messages in front of us and we
			 * need to preserve message ordering).
			 * Therefore, put the message on the queue
			 * and call qdrain_syncq (must be done with
			 * the QLOCK held).
			 */
			STR_FTEVENT_MSG(mp, fqp, FTEV_PUTNEXT,
			    mp->b_rptr - mp->b_datap->db_base);

#ifdef DEBUG
			/*
			 * These two values were in the original code for
			 * all syncq messages.  This is unnecessary in
			 * the current implementation, but was retained
			 * in debug mode as it is useful to know where
			 * problems occur.
			 */
			mp->b_queue = qp;
			mp->b_prev = (mblk_t *)putproc;
#endif
			SQPUT_MP(qp, mp);
			qdrain_syncq(sq, qp);
			ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));
		}
	}
	/*
	 * Before we release our claim, we need to see if any
	 * events were posted. If the syncq is SQ_EXCL && SQ_QUEUED,
	 * we were responsible for going exclusive and, therefore,
	 * are responsible for draining.
	 */
	if (sq->sq_flags & (SQ_EXCL)) {
		drain_mask = 0;
	} else {
		drain_mask = SQ_QUEUED;
	}

	if (sqciplock != NULL) {
		mutex_enter(sqciplock);
		flags = sq->sq_flags;
		ASSERT(flags & SQ_CIPUT);
		/* SQ_EXCL could have been set by qwriter_inner */
		if ((flags & (SQ_EXCL|SQ_TAIL)) || sq->sq_needexcl) {
			/*
			 * We need SQLOCK to handle
			 * wakeups/drains/flags change.  sqciplock
			 * is needed to decrement sqcipcount.
			 * SQLOCK has to be grabbed before sqciplock
			 * for lock ordering purposes.
			 * After sqcipcount is decremented some lock
			 * still needs to be held to make sure
			 * syncq won't get freed on us.
			 *
			 * To prevent deadlocks we try to grab SQLOCK and if it
			 * is held already we drop sqciplock, acquire SQLOCK and
			 * then reacquire sqciplock.
			 */
			if (mutex_tryenter(SQLOCK(sq)) == 0) {
				mutex_exit(sqciplock);
				mutex_enter(SQLOCK(sq));
				mutex_enter(sqciplock);
			}
			flags = sq->sq_flags;
			ASSERT(*sqcipcount != 0);
			(*sqcipcount)--;
			mutex_exit(sqciplock);
		} else {
			ASSERT(*sqcipcount != 0);
			(*sqcipcount)--;
			mutex_exit(sqciplock);
			TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
			    "putnext_end:(%p, %p, %p) done", qp, mp, sq);
			return;
		}
	} else {
		mutex_enter(SQLOCK(sq));
		flags = sq->sq_flags;
		ASSERT(sq->sq_count != 0);
		sq->sq_count--;
	}
	if ((flags & (SQ_TAIL)) || sq->sq_needexcl) {
		putnext_tail(sq, qp, (flags & ~drain_mask));
		/*
		 * The only purpose of this ASSERT is to preserve the calling
		 * stack in a DEBUG kernel.
		 */
		ASSERT(sq != NULL);
		return;
	}
	ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT)) || queued);
	ASSERT((flags & (SQ_EXCL|SQ_CIPUT)) || queued);
	/*
	 * Safe to always drop SQ_EXCL:
	 *	Not SQ_CIPUT means we set SQ_EXCL above
	 *	For SQ_CIPUT SQ_EXCL will only be set if the put
	 *	procedure did a qwriter(INNER) in which case
	 *	nobody else is in the inner perimeter and we
	 *	are exiting.
	 *
	 * I would like to make the following assertion:
	 *
	 * ASSERT((flags & (SQ_EXCL|SQ_CIPUT)) != (SQ_EXCL|SQ_CIPUT) ||
	 *	sq->sq_count == 0);
	 *
	 * which indicates that if we are both putshared and exclusive,
	 * we became exclusive while executing the putproc, and the only
	 * claim on the syncq was the one we dropped a few lines above.
	 * But other threads that enter putnext while the syncq is exclusive
	 * need to make a claim as they may need to drop SQLOCK in the
	 * has_writers case to avoid deadlocks.  If these threads are
	 * delayed or preempted, it is possible that the writer thread can
	 * find out that there are other claims, making the (sq_count == 0)
	 * test invalid.
	 */

	sq->sq_flags = flags & ~SQ_EXCL;
	mutex_exit(SQLOCK(sq));
	TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
	    "putnext_end:(%p, %p, %p) done", qp, mp, sq);
}
413 
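/*
 * For reference, a typical caller of putnext() looks like the sketch below
 * (module and function names are hypothetical; see putnext(9F) for the
 * authoritative interface description).  A module's put procedure forwards
 * the messages it does not consume to the next queue in the stream:
 *
 *	static int
 *	xxxrput(queue_t *q, mblk_t *mp)
 *	{
 *		switch (DB_TYPE(mp)) {
 *		case M_DATA:
 *			xxxprocess(q, mp);	(consume or transform)
 *			break;
 *		default:
 *			putnext(q, mp);		(pass everything else along)
 *			break;
 *		}
 *		return (0);
 *	}
 */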
414 
/*
 * Wrapper for the qi_putp entry in the module ops vector;
 * implements an asynchronous putnext().
 * Note that unlike putnext(), this routine is NOT optimized for the
 * fastpath.  Calling this routine will grab whatever locks are necessary
 * to protect the stream head, q_next, and syncq's.
 * And since it is in the normal locks path, we do not use putlocks if
 * they exist (though this can be changed by swapping the value of
 * UseFastlocks).
 */
void
put(queue_t *qp, mblk_t *mp)
{
	queue_t		*fqp = qp; /* For strft tracing */
	syncq_t		*sq;
	uint16_t	flags;
	uint16_t	drain_mask;
	struct qinit	*qi;
	int		(*putproc)();
	int		ix;
	boolean_t	queued = B_FALSE;
	kmutex_t	*sqciplock = NULL;
	ushort_t	*sqcipcount = NULL;

	TRACE_2(TR_FAC_STREAMS_FR, TR_PUT_START,
	    "put:(%X, %X)", qp, mp);
	ASSERT(mp->b_datap->db_ref != 0);
	ASSERT(mp->b_next == NULL && mp->b_prev == NULL);

	sq = qp->q_syncq;
	ASSERT(sq != NULL);
	qi = qp->q_qinfo;

	if (UseFastlocks && sq->sq_ciputctrl != NULL) {
		/* fastlock: */
		ASSERT(sq->sq_flags & SQ_CIPUT);
		ix = CPU->cpu_seqid & sq->sq_nciputctrl;
		sqciplock = &sq->sq_ciputctrl[ix].ciputctrl_lock;
		sqcipcount = &sq->sq_ciputctrl[ix].ciputctrl_count;
		mutex_enter(sqciplock);
		if (!((*sqcipcount) & SQ_FASTPUT) ||
		    (sq->sq_flags & (SQ_STAYAWAY|SQ_EXCL|SQ_EVENTS))) {
			mutex_exit(sqciplock);
			sqciplock = NULL;
			goto slowlock;
		}
		(*sqcipcount)++;
		ASSERT(*sqcipcount != 0);
		queued = qp->q_sqflags & Q_SQQUEUED;
		mutex_exit(sqciplock);
	} else {
	slowlock:
		ASSERT(sqciplock == NULL);
		mutex_enter(SQLOCK(sq));
		flags = sq->sq_flags;
		/*
		 * We are going to drop SQLOCK, so make a claim to prevent syncq
		 * from closing.
		 */
		sq->sq_count++;
		ASSERT(sq->sq_count != 0);		/* Wraparound */
		/*
		 * If there are writers or exclusive waiters, there is not much
		 * we can do.  Place the message on the syncq and schedule a
		 * background thread to drain it.
		 *
		 * Also if we are approaching end of stack, fill the syncq and
		 * switch processing to a background thread - see comments on
		 * top.
		 */
		if ((flags & (SQ_STAYAWAY|SQ_EXCL|SQ_EVENTS)) ||
		    (sq->sq_needexcl != 0) || PUT_STACK_NOTENOUGH()) {

			TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
			    "putnext_end:(%p, %p, %p) SQ_EXCL fill",
			    qp, mp, sq);

			/*
			 * NOTE: qfill_syncq will need QLOCK. It is safe to drop
			 * SQLOCK because positive sq_count keeps the syncq from
			 * closing.
			 */
			mutex_exit(SQLOCK(sq));

			qfill_syncq(sq, qp, mp);
			/*
			 * NOTE: after the call to qfill_syncq() qp may be
			 * closed; neither qp nor sq should be referenced at
			 * this point.
			 *
			 * This ASSERT is located here to prevent stack frame
			 * consumption in the DEBUG code.
			 */
			ASSERT(sqciplock == NULL);
			return;
		}

		queued = qp->q_sqflags & Q_SQQUEUED;
		/*
		 * If not a concurrent perimeter, we need to acquire
		 * it exclusively.  It could not have been previously
		 * set since we held the SQLOCK before testing
		 * SQ_GOAWAY above (which includes SQ_EXCL).
		 * We do this here because we hold the SQLOCK, and need
		 * to make this state change BEFORE dropping it.
		 */
		if (!(flags & SQ_CIPUT)) {
			ASSERT((sq->sq_flags & SQ_EXCL) == 0);
			ASSERT(!(sq->sq_type & SQ_CIPUT));
			sq->sq_flags |= SQ_EXCL;
		}
		mutex_exit(SQLOCK(sq));
	}

	ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT)));
	ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));

	/*
	 * We now have a claim on the syncq, we are either going to
	 * put the message on the syncq and then drain it, or we are
	 * going to call the putproc().
	 */
	putproc = qi->qi_putp;
	if (!queued) {
		STR_FTEVENT_MSG(mp, fqp, FTEV_PUTNEXT, mp->b_rptr -
		    mp->b_datap->db_base);
		(*putproc)(qp, mp);
		ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
		ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));
	} else {
		mutex_enter(QLOCK(qp));
		/*
		 * If there are no messages in front of us, just call putproc(),
		 * otherwise enqueue the message and drain the queue.
		 */
		if (qp->q_syncqmsgs == 0) {
			mutex_exit(QLOCK(qp));
			STR_FTEVENT_MSG(mp, fqp, FTEV_PUTNEXT, mp->b_rptr -
			    mp->b_datap->db_base);
			(*putproc)(qp, mp);
			ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
		} else {
			/*
			 * We are doing a fill with the intent to
			 * drain (meaning we are filling because
			 * there are messages in front of us and we
			 * need to preserve message ordering).
			 * Therefore, put the message on the queue
			 * and call qdrain_syncq (must be done with
			 * the QLOCK held).
			 */
			STR_FTEVENT_MSG(mp, fqp, FTEV_PUTNEXT,
			    mp->b_rptr - mp->b_datap->db_base);

#ifdef DEBUG
			/*
			 * These two values were in the original code for
			 * all syncq messages.  This is unnecessary in
			 * the current implementation, but was retained
			 * in debug mode as it is useful to know where
			 * problems occur.
			 */
			mp->b_queue = qp;
			mp->b_prev = (mblk_t *)putproc;
#endif
			SQPUT_MP(qp, mp);
			qdrain_syncq(sq, qp);
			ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));
		}
	}
	/*
	 * Before we release our claim, we need to see if any
	 * events were posted. If the syncq is SQ_EXCL && SQ_QUEUED,
	 * we were responsible for going exclusive and, therefore,
	 * are responsible for draining.
	 */
	if (sq->sq_flags & (SQ_EXCL)) {
		drain_mask = 0;
	} else {
		drain_mask = SQ_QUEUED;
	}

	if (sqciplock != NULL) {
		mutex_enter(sqciplock);
		flags = sq->sq_flags;
		ASSERT(flags & SQ_CIPUT);
		/* SQ_EXCL could have been set by qwriter_inner */
		if ((flags & (SQ_EXCL|SQ_TAIL)) || sq->sq_needexcl) {
			/*
			 * We need SQLOCK to handle
			 * wakeups/drains/flags change.  sqciplock
			 * is needed to decrement sqcipcount.
			 * SQLOCK has to be grabbed before sqciplock
			 * for lock ordering purposes.
			 * After sqcipcount is decremented some lock
			 * still needs to be held to make sure
			 * syncq won't get freed on us.
			 *
			 * To prevent deadlocks we try to grab SQLOCK and if it
			 * is held already we drop sqciplock, acquire SQLOCK and
			 * then reacquire sqciplock.
			 */
			if (mutex_tryenter(SQLOCK(sq)) == 0) {
				mutex_exit(sqciplock);
				mutex_enter(SQLOCK(sq));
				mutex_enter(sqciplock);
			}
			flags = sq->sq_flags;
			ASSERT(*sqcipcount != 0);
			(*sqcipcount)--;
			mutex_exit(sqciplock);
		} else {
			ASSERT(*sqcipcount != 0);
			(*sqcipcount)--;
			mutex_exit(sqciplock);
			TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
			    "putnext_end:(%p, %p, %p) done", qp, mp, sq);
			return;
		}
	} else {
		mutex_enter(SQLOCK(sq));
		flags = sq->sq_flags;
		ASSERT(sq->sq_count != 0);
		sq->sq_count--;
	}
	if ((flags & (SQ_TAIL)) || sq->sq_needexcl) {
		putnext_tail(sq, qp, (flags & ~drain_mask));
		/*
		 * The only purpose of this ASSERT is to preserve the calling
		 * stack in a DEBUG kernel.
		 */
		ASSERT(sq != NULL);
		return;
	}
	ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT)) || queued);
	ASSERT((flags & (SQ_EXCL|SQ_CIPUT)) || queued);
	/*
	 * Safe to always drop SQ_EXCL:
	 *	Not SQ_CIPUT means we set SQ_EXCL above
	 *	For SQ_CIPUT SQ_EXCL will only be set if the put
	 *	procedure did a qwriter(INNER) in which case
	 *	nobody else is in the inner perimeter and we
	 *	are exiting.
	 *
	 * I would like to make the following assertion:
	 *
	 * ASSERT((flags & (SQ_EXCL|SQ_CIPUT)) != (SQ_EXCL|SQ_CIPUT) ||
	 *	sq->sq_count == 0);
	 *
	 * which indicates that if we are both putshared and exclusive,
	 * we became exclusive while executing the putproc, and the only
	 * claim on the syncq was the one we dropped a few lines above.
	 * But other threads that enter putnext while the syncq is exclusive
	 * need to make a claim as they may need to drop SQLOCK in the
	 * has_writers case to avoid deadlocks.  If these threads are
	 * delayed or preempted, it is possible that the writer thread can
	 * find out that there are other claims, making the (sq_count == 0)
	 * test invalid.
	 */

	sq->sq_flags = flags & ~SQ_EXCL;
	mutex_exit(SQLOCK(sq));
	TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
	    "putnext_end:(%p, %p, %p) done", qp, mp, sq);
}
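
/*
 * For reference (sketch only; the pattern shown is illustrative, not taken
 * from a particular driver): put() is used when the caller holds a queue
 * pointer and wants that queue's own put procedure run with the proper
 * perimeter entry protocol, rather than handing the message to q_next as
 * putnext() does.  For example, a module that must move a message from its
 * write side to its own read-side put procedure could do:
 *
 *	put(RD(q), mp);
 */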