/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/inline.h>
#include <sys/disp.h>
#include <sys/kmem.h>
#include <sys/cpuvar.h>
#include <sys/vtrace.h>
#include <sys/lockstat.h>
#include <sys/spl.h>
#include <sys/atomic.h>
#include <sys/cpu.h>

/*
 * We check CPU_ON_INTR(CPU) when exiting a disp lock, rather than when
 * entering it, for a purely pragmatic reason: when exiting a disp lock
 * we know that we must be at PIL 10, and thus not preemptible; therefore
 * we can safely load the CPU pointer without worrying about it changing.
 */
static void
disp_onintr_panic(void)
{
	panic("dispatcher invoked from high-level interrupt handler");
}

/* ARGSUSED */
void
disp_lock_init(disp_lock_t *lp, char *name)
{
	DISP_LOCK_INIT(lp);
}

/* ARGSUSED */
void
disp_lock_destroy(disp_lock_t *lp)
{
	DISP_LOCK_DESTROY(lp);
}

/*
 * Acquire a dispatcher lock without adjusting the spl; the caller is
 * expected to be at DISP_LEVEL or above already.
 */
void
disp_lock_enter_high(disp_lock_t *lp)
{
	lock_set(lp);
}

void
disp_lock_exit_high(disp_lock_t *lp)
{
	if (CPU_ON_INTR(CPU) != 0)
		disp_onintr_panic();
	ASSERT(DISP_LOCK_HELD(lp));
	lock_clear(lp);
}

void
disp_lock_enter(disp_lock_t *lp)
{
	/* Raise to DISP_LEVEL; the caller's spl is saved in t_oldspl. */
	lock_set_spl(lp, ipltospl(DISP_LEVEL), &curthread->t_oldspl);
}

void
disp_lock_exit(disp_lock_t *lp)
{
	if (CPU_ON_INTR(CPU) != 0)
		disp_onintr_panic();
	ASSERT(DISP_LOCK_HELD(lp));
	if (CPU->cpu_kprunrun) {
		/*
		 * A kernel preemption is pending; drop the lock and
		 * restore the spl before honoring it, so that we are
		 * never preempted while holding a dispatcher lock.
		 */
		lock_clear_splx(lp, curthread->t_oldspl);
		kpreempt(KPREEMPT_SYNC);
	} else {
		lock_clear_splx(lp, curthread->t_oldspl);
	}
}
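
/*
 * Illustrative pairing (an added sketch, not original to this file;
 * "dq_lock" is a hypothetical lock): callers bracket dispatcher
 * updates with the enter/exit pair, e.g.
 *
 *	disp_lock_enter(&dq_lock);
 *	...update dispatcher state...
 *	disp_lock_exit(&dq_lock);
 *
 * Since the saved spl lives in curthread->t_oldspl, the enter and the
 * exit must be performed by the same thread.
 */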

/*
 * Like disp_lock_exit(), but never yields to a pending kernel preemption.
 */
void
disp_lock_exit_nopreempt(disp_lock_t *lp)
{
	if (CPU_ON_INTR(CPU) != 0)
		disp_onintr_panic();
	ASSERT(DISP_LOCK_HELD(lp));
	lock_clear_splx(lp, curthread->t_oldspl);
}

/*
 * Thread_lock() - get the correct dispatcher lock for the thread.
 */
void
thread_lock(kthread_id_t t)
{
	int s = splhigh();

	if (CPU_ON_INTR(CPU) != 0)
		disp_onintr_panic();

	for (;;) {
		lock_t *volatile *tlpp = &t->t_lockp;
		lock_t *lp = *tlpp;
		if (lock_try(lp)) {
			/*
			 * t_lockp may have been changed under us while
			 * we acquired the lock; if so, drop the stale
			 * lock and retry.
			 */
			if (lp == *tlpp) {
				curthread->t_oldspl = (ushort_t)s;
				return;
			}
			lock_clear(lp);
		} else {
			hrtime_t spin_time =
			    LOCKSTAT_START_TIME(LS_THREAD_LOCK_SPIN);
			/*
			 * Lower spl and spin on lock with non-atomic load
			 * to avoid cache activity.  Spin until the lock
			 * becomes available or spontaneously changes.
			 */
			splx(s);
			while (lp == *tlpp && LOCK_HELD(lp)) {
				if (panicstr) {
					curthread->t_oldspl = splhigh();
					return;
				}
				SMT_PAUSE();
			}

			LOCKSTAT_RECORD_TIME(LS_THREAD_LOCK_SPIN,
			    lp, spin_time);
			s = splhigh();
		}
	}
}
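
/*
 * Illustrative use (an added sketch, not original to this file):
 * dispatcher clients pin a thread's scheduling state with
 * thread_lock()/thread_unlock(); the unlock side is a macro defined in
 * <sys/thread.h> that expands to disp_lock_exit() on t->t_lockp.
 *
 *	thread_lock(t);
 *	...examine or update t_state, t_pri, and friends...
 *	thread_unlock(t);
 */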

/*
 * Thread_lock_high() - get the correct dispatcher lock for the thread.
 *	This version is called when already at high spl.
 */
void
thread_lock_high(kthread_id_t t)
{
	if (CPU_ON_INTR(CPU) != 0)
		disp_onintr_panic();

	for (;;) {
		lock_t *volatile *tlpp = &t->t_lockp;
		lock_t *lp = *tlpp;
		if (lock_try(lp)) {
			if (lp == *tlpp)
				return;
			lock_clear(lp);
		} else {
			hrtime_t spin_time =
			    LOCKSTAT_START_TIME(LS_THREAD_LOCK_HIGH_SPIN);
			while (lp == *tlpp && LOCK_HELD(lp)) {
				if (panicstr)
					return;
				SMT_PAUSE();
			}
			LOCKSTAT_RECORD_TIME(LS_THREAD_LOCK_HIGH_SPIN,
			    lp, spin_time);
		}
	}
}
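
/*
 * Added note: since this variant neither raises nor saves the spl, the
 * usual release path is thread_unlock_high() (a <sys/thread.h> macro),
 * which clears the lock without touching the spl.
 */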

/*
 * Called by the THREAD_TRANSITION macro to change the thread state to
 * the intermediate state-in-transition state.
 */
void
thread_transition(kthread_id_t t)
{
	disp_lock_t	*lp;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(t->t_lockp != &transition_lock);

	lp = t->t_lockp;
	t->t_lockp = &transition_lock;
	disp_lock_exit_high(lp);
}
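
/*
 * Added note, based on how transition_lock is set up elsewhere in the
 * dispatcher: transition_lock is held permanently, so once t_lockp
 * points at it, any thread_lock() caller spins until the thread is
 * assigned a real dispatcher lock again.  Dropping the old lock above
 * is therefore safe; the thread remains effectively locked.
 */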

/*
 * Put thread in stop state, and set the lock pointer to the stop_lock.
 * This effectively drops the lock on the thread, since the stop_lock
 * isn't held.
 * Eventually, stop_lock could be hashed if there is too much contention.
 */
void
thread_stop(kthread_id_t t)
{
	disp_lock_t	*lp;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(t->t_lockp != &stop_lock);

	lp = t->t_lockp;
	t->t_state = TS_STOPPED;
	/*
	 * Ensure that t_state reaches global visibility before t_lockp
	 */
	membar_producer();
	t->t_lockp = &stop_lock;
	disp_lock_exit(lp);
}
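
/*
 * Added note: the membar_producer() in thread_stop() orders the store
 * to t_state before the store to t_lockp.  An observer that sees the
 * new t_lockp (&stop_lock) and then reads t_state with appropriate
 * read ordering therefore cannot see the old state paired with the
 * new lock pointer.
 */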