1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23 * Use is subject to license terms.
24 *
25 * Copyright 2020 Joyent, Inc.
26 */
27
28#include "assym.h"
29
30#include <sys/t_lock.h>
31#include <sys/mutex.h>
32#include <sys/mutex_impl.h>
33#include <sys/rwlock_impl.h>
34#include <sys/asm_linkage.h>
35#include <sys/machlock.h>
36#include <sys/machthread.h>
37#include <sys/lockstat.h>
38
39/* #define DEBUG */
40
41#ifdef DEBUG
42#include <sys/machparam.h>
43#endif /* DEBUG */
44
45/************************************************************************
46 *		ATOMIC OPERATIONS
47 */
48
49/*
50 * uint8_t	ldstub(uint8_t *cp)
51 *
52 * Store 0xFF at the specified location, and return its previous content.
53 */
54
	ENTRY(ldstub)
	retl
	ldstub	[%o0], %o0		! delay: atomically store 0xFF to *cp,
					! old byte value returned in %o0
	SET_SIZE(ldstub)
59
60/************************************************************************
61 *		MEMORY BARRIERS -- see atomic.h for full descriptions.
62 */
63
/*
 * MEMBAR_RETURN is the return path shared by the membar_*() routines
 * below; the membar itself executes in the delay slot.
 *
 * With SF_ERRATA_51 (Spitfire erratum workaround) we must not place the
 * membar in the delay slot of a retl, so we instead branch — membar in
 * the branch delay slot — to a separate retl/nop.
 */
#ifdef SF_ERRATA_51
	.align 32
	ENTRY(membar_return)
	retl
	nop
	SET_SIZE(membar_return)
#define	MEMBAR_RETURN	ba,pt %icc, membar_return
#else
#define	MEMBAR_RETURN	retl
#endif
74
	ENTRY(membar_enter)
	MEMBAR_RETURN			! return; membar runs in delay slot
	membar	#StoreLoad|#StoreStore	! acquire-side barrier
	SET_SIZE(membar_enter)
79
	ENTRY(membar_exit)
	MEMBAR_RETURN			! return; membar runs in delay slot
	membar	#LoadStore|#StoreStore	! release-side barrier
	SET_SIZE(membar_exit)
84
	ENTRY(membar_producer)
	MEMBAR_RETURN			! return; membar runs in delay slot
	membar	#StoreStore		! order prior stores before later stores
	SET_SIZE(membar_producer)
89
	ENTRY(membar_consumer)
	MEMBAR_RETURN			! return; membar runs in delay slot
	membar	#LoadLoad		! order prior loads before later loads
	SET_SIZE(membar_consumer)
94
95/************************************************************************
96 *		MINIMUM LOCKS
97 */
98
99/*
100 * lock_try(lp), ulock_try(lp)
101 * - returns non-zero on success.
102 * - doesn't block interrupts so don't use this to spin on a lock.
103 * - uses "0xFF is busy, anything else is free" model.
104 *
105 * ulock_try() is for a lock in the user address space.
106 */
107
	.align	32
	ENTRY(lock_try)
	ldstub	[%o0], %o1		! try to set lock, get value in %o1
	brnz,pn	%o1, 1f			! old value nonzero: lock was held
	membar	#LoadLoad		! delay: acquire ordering on success path
.lock_try_lockstat_patch_point:
	retl
	or	%o0, 1, %o0		! ensure lo32 != 0
1:
	retl
	clr	%o0			! delay: return 0 - lock not acquired
	SET_SIZE(lock_try)
120
	.align	32
	ENTRY(lock_spin_try)
	! identical to lock_try() but with no lockstat patch point
	ldstub	[%o0], %o1		! try to set lock, get value in %o1
	brnz,pn	%o1, 1f			! old value nonzero: lock was held
	membar	#LoadLoad		! delay: acquire ordering on success path
	retl
	or	%o0, 1, %o0		! ensure lo32 != 0
1:
	retl
	clr	%o0			! delay: return 0 - lock not acquired
	SET_SIZE(lock_spin_try)
132
	.align	32
	ENTRY(lock_set)
	ldstub	[%o0], %o1		! try to grab the lock
	brnz,pn	%o1, 1f			! go to C for the hard case
	membar	#LoadLoad		! delay: acquire ordering on success path
.lock_set_lockstat_patch_point:
	retl
	nop
1:
	sethi	%hi(lock_set_spin), %o2	! load up for jump to C
	jmp	%o2 + %lo(lock_set_spin)
	nop				! delay: do nothing
	SET_SIZE(lock_set)
146
	ENTRY(lock_clear)
	membar	#LoadStore|#StoreStore	! release: order critical section first
.lock_clear_lockstat_patch_point:
	retl
	clrb	[%o0]			! delay: store 0 to drop the lock
	SET_SIZE(lock_clear)
153
	.align	32
	ENTRY(ulock_try)
	ldstuba	[%o0]ASI_USER, %o1	! try to set lock, get value in %o1
	xor	%o1, 0xff, %o0		! return non-zero iff old value != 0xFF
	retl
	  membar	#LoadLoad	! delay: acquire ordering
	SET_SIZE(ulock_try)
161
	ENTRY(ulock_clear)
	membar  #LoadStore|#StoreStore	! release: order critical section first
	retl
	  stba	%g0, [%o0]ASI_USER	! delay: clear user-space lock
	SET_SIZE(ulock_clear)
167
168
169/*
170 * lock_set_spl(lp, new_pil, *old_pil_addr)
171 * 	Sets pil to new_pil, grabs lp, stores old pil in *old_pil_addr.
172 */
173
	ENTRY(lock_set_spl)
	rdpr	%pil, %o3			! %o3 = current pil
	cmp	%o3, %o1			! is current pil high enough?
	bl,a,pt %icc, 1f			! if not, write %pil in delay
	wrpr	%g0, %o1, %pil			! (annulled unless branch taken)
1:
	ldstub	[%o0], %o4			! try the lock
	brnz,pn	%o4, 2f				! go to C for the miss case
	membar	#LoadLoad			! delay: acquire ordering
.lock_set_spl_lockstat_patch_point:
	retl
	sth	%o3, [%o2]			! delay - save original pil
2:
	sethi	%hi(lock_set_spl_spin), %o5	! load up jmp to C
	jmp	%o5 + %lo(lock_set_spl_spin)	! jmp to lock_set_spl_spin
	nop					! delay: do nothing
	SET_SIZE(lock_set_spl)
191
192/*
193 * lock_clear_splx(lp, s)
194 */
195
	ENTRY(lock_clear_splx)
	ldn	[THREAD_REG + T_CPU], %o2	! get CPU pointer
	membar	#LoadStore|#StoreStore		! release before clearing lock
	ld	[%o2 + CPU_BASE_SPL], %o2	! %o2 = CPU's base spl
	clrb	[%o0]				! clear lock
	cmp	%o2, %o1			! compare new to base
	movl	%xcc, %o1, %o2			! use new pri if base is less
.lock_clear_splx_lockstat_patch_point:
	retl
	wrpr	%g0, %o2, %pil			! delay: set pil = max(base, s)
	SET_SIZE(lock_clear_splx)
207
208/*
209 * mutex_enter() and mutex_exit().
210 *
211 * These routines handle the simple cases of mutex_enter() (adaptive
212 * lock, not held) and mutex_exit() (adaptive lock, held, no waiters).
213 * If anything complicated is going on we punt to mutex_vector_enter().
214 *
215 * mutex_tryenter() is similar to mutex_enter() but returns zero if
216 * the lock cannot be acquired, nonzero on success.
217 *
218 * If mutex_exit() gets preempted in the window between checking waiters
219 * and clearing the lock, we can miss wakeups.  Disabling preemption
220 * in the mutex code is prohibitively expensive, so instead we detect
221 * mutex preemption by examining the trapped PC in the interrupt path.
222 * If we interrupt a thread in mutex_exit() that has not yet cleared
223 * the lock, pil_interrupt() resets its PC back to the beginning of
224 * mutex_exit() so it will check again for waiters when it resumes.
225 *
226 * The lockstat code below is activated when the lockstat driver
227 * calls lockstat_hot_patch() to hot-patch the kernel mutex code.
228 * Note that we don't need to test lockstat_event_mask here -- we won't
229 * patch this code in unless we're gathering ADAPTIVE_HOLD lockstats.
230 */
231
	.align	32
	ENTRY(mutex_enter)
	mov	THREAD_REG, %o1
	casx	[%o0], %g0, %o1			! try to acquire as adaptive:
						! owner NULL -> curthread
	brnz,pn	%o1, 1f				! locked or wrong type
	membar	#LoadLoad			! delay: acquire ordering
.mutex_enter_lockstat_patch_point:
	retl
	nop
1:
	sethi	%hi(mutex_vector_enter), %o2	! load up for jump to C
	jmp	%o2 + %lo(mutex_vector_enter)
	nop
	SET_SIZE(mutex_enter)
246
	ENTRY(mutex_tryenter)
	mov	THREAD_REG, %o1
	casx	[%o0], %g0, %o1			! try to acquire as adaptive
	brnz,pn	%o1, 1f				! locked or wrong type: go to C
	membar	#LoadLoad			! delay: acquire ordering
.mutex_tryenter_lockstat_patch_point:
	retl
	or	%o0, 1, %o0			! ensure lo32 != 0
1:
	sethi	%hi(mutex_vector_tryenter), %o2		! hi bits
	jmp	%o2 + %lo(mutex_vector_tryenter)	! go to C
	nop
	SET_SIZE(mutex_tryenter)
260
	ENTRY(mutex_adaptive_tryenter)
	! like mutex_tryenter() but never punts to C: simply fails instead
	mov	THREAD_REG, %o1
	casx	[%o0], %g0, %o1			! try to acquire as adaptive
	brnz,pn	%o1, 0f				! locked or wrong type
	membar	#LoadLoad			! delay: acquire ordering
	retl
	or	%o0, 1, %o0			! ensure lo32 != 0
0:
	retl
	mov	%g0, %o0			! delay: return 0 (failure)
	SET_SIZE(mutex_adaptive_tryenter)
272
	! these need to be together and cache aligned for performance.
	.align 64
	.global	mutex_exit_critical_size
	.global	mutex_exit_critical_start
	.global mutex_owner_running_critical_size
	.global mutex_owner_running_critical_start

/*
 * Size of the restartable window in mutex_exit(); pil_interrupt() uses
 * the start/size pair to detect a thread interrupted inside the window
 * and reset its PC to mutex_exit_critical_start.
 */
mutex_exit_critical_size = .mutex_exit_critical_end - mutex_exit_critical_start

	.align	32

	ENTRY(mutex_exit)
mutex_exit_critical_start:		! If we are interrupted, restart here
	ldn	[%o0], %o1		! get the owner field
	membar	#LoadStore|#StoreStore	! release before the clearing store
	cmp	THREAD_REG, %o1		! do we own lock with no waiters?
	be,a,pt	%ncc, 1f		! if so, drive on ...
	stn	%g0, [%o0]		! delay: clear lock if we owned it
.mutex_exit_critical_end:		! for pil_interrupt() hook
	ba,a,pt	%xcc, mutex_vector_exit	! go to C for the hard cases
1:
.mutex_exit_lockstat_patch_point:
	retl
	nop
	SET_SIZE(mutex_exit)
298
/*
 * Size of the restartable window in mutex_owner_running(); as with
 * mutex_exit(), pil_interrupt() restarts an interrupted thread at
 * mutex_owner_running_critical_start.
 */
mutex_owner_running_critical_size = .mutex_owner_running_critical_end - mutex_owner_running_critical_start

	.align  32

	ENTRY(mutex_owner_running)
mutex_owner_running_critical_start:	! If interrupted restart here
	ldn	[%o0], %o1		! get the owner field
	and	%o1, MUTEX_THREAD, %o1	! remove the waiters bit if any
	brz,pn	%o1, 1f			! lock unowned: return 0
	nop
	ldn	[%o1+T_CPU], %o2	! get owner->t_cpu
	ldn	[%o2+CPU_THREAD], %o3	! get owner->t_cpu->cpu_thread
.mutex_owner_running_critical_end:	! for pil_interrupt() hook
	cmp	%o1, %o3		! owner == running thread?
	be,a,pt	%xcc, 2f		! yes, go return cpu
	nop
1:
	retl
	mov	%g0, %o0		! return 0 (owner not running)
2:
	retl
	mov	%o2, %o0		! owner running, return cpu
	SET_SIZE(mutex_owner_running)
322
323/*
324 * rw_enter() and rw_exit().
325 *
326 * These routines handle the simple cases of rw_enter (write-locking an unheld
327 * lock or read-locking a lock that's neither write-locked nor write-wanted)
328 * and rw_exit (no waiters or not the last reader).  If anything complicated
329 * is going on we punt to rw_enter_sleep() and rw_exit_wakeup(), respectively.
330 */
331
	.align	16
	ENTRY(rw_enter)
	cmp	%o1, RW_WRITER			! entering as writer?
	be,a,pn	%icc, 2f			! if so, go do it ...
	or	THREAD_REG, RW_WRITE_LOCKED, %o5 ! delay: %o5 = owner
	! reader path: raise kernel priority request first
	ld	[THREAD_REG + T_KPRI_REQ], %o3	! begin THREAD_KPRI_REQUEST()
	ldn	[%o0], %o4			! %o4 = old lock value
	inc	%o3				! bump kpri
	st	%o3, [THREAD_REG + T_KPRI_REQ]	! store new kpri
1:
	andcc	%o4, RW_WRITE_CLAIMED, %g0	! write-locked or write-wanted?
	bz,pt	%xcc, 3f			! if not, try to grab read lock
	add	%o4, RW_READ_LOCK, %o5		! delay: increment hold count
	sethi	%hi(rw_enter_sleep), %o2	! load up jump
	jmp	%o2 + %lo(rw_enter_sleep)	! jmp to rw_enter_sleep
	nop					! delay: do nothing
3:
	casx	[%o0], %o4, %o5			! try to grab read lock
	cmp	%o4, %o5			! did we get it?
#ifdef sun4v
	! sun4v: no cas retry loop - any failure punts straight to C
	be,a,pt %xcc, 0f
	membar  #LoadLoad			! delay: acquire (annulled on miss)
	sethi	%hi(rw_enter_sleep), %o2	! load up jump
	jmp	%o2 + %lo(rw_enter_sleep)	! jmp to rw_enter_sleep
	nop					! delay: do nothing
0:
#else /* sun4v */
	bne,pn	%xcc, 1b			! if not, try again
	mov	%o5, %o4			! delay: %o4 = old lock value
	membar	#LoadLoad			! acquire ordering
#endif /* sun4v */
.rw_read_enter_lockstat_patch_point:
	retl
	nop
2:
	casx	[%o0], %g0, %o5			! try to grab write lock
	brz,pt %o5, 4f				! branch around if we got it
	membar	#LoadLoad			! done regardless of where we go
	sethi	%hi(rw_enter_sleep), %o2
	jmp	%o2 + %lo(rw_enter_sleep)	! jump to rw_enter_sleep if not
	nop					! delay: do nothing
4:
.rw_write_enter_lockstat_patch_point:
	retl
	nop
	SET_SIZE(rw_enter)
378
	.align	16
	ENTRY(rw_exit)
	ldn	[%o0], %o4			! %o4 = old lock value
	membar	#LoadStore|#StoreStore		! membar_exit()
	subcc	%o4, RW_READ_LOCK, %o5		! %o5 = new lock value if reader
	bnz,pn	%xcc, 2f			! not the simple last-reader case?
	clr	%o1				! delay: %o1 = 0, new value if writer
1:
	ld	[THREAD_REG + T_KPRI_REQ], %g1	! begin THREAD_KPRI_RELEASE()
	srl	%o4, RW_HOLD_COUNT_SHIFT, %o3	! %o3 = hold count (lockstat)
	casx	[%o0], %o4, %o5			! try to drop lock
	cmp	%o4, %o5			! did we succeed?
	bne,pn	%xcc, rw_exit_wakeup		! if not, go to C
	dec	%g1				! delay: drop kpri
.rw_read_exit_lockstat_patch_point:
	retl
	st	%g1, [THREAD_REG + T_KPRI_REQ]	! delay: store new kpri
2:
	andcc	%o4, RW_WRITE_LOCKED, %g0	! are we a writer?
	bnz,a,pt %xcc, 3f
	or	THREAD_REG, RW_WRITE_LOCKED, %o4 ! delay: %o4 = owner
	cmp	%o5, RW_READ_LOCK		! would lock still be held?
	bge,pt	%xcc, 1b			! if so, go ahead and drop it
	nop
	ba,pt	%xcc, rw_exit_wakeup		! otherwise, wake waiters
	nop
3:
	casx	[%o0], %o4, %o1			! try to drop write lock
	cmp	%o4, %o1			! did we succeed?
	bne,pn	%xcc, rw_exit_wakeup		! if not, go to C
	nop
.rw_write_exit_lockstat_patch_point:
	retl
	nop
	SET_SIZE(rw_exit)
414
/*
 * Instruction-word encodings used when hot-patching the lockstat patch
 * points (names indicate the instruction each word encodes).
 */
#define	RETL			0x81c3e008
#define	NOP			0x01000000
#define BA			0x10800000

#define	DISP22			((1 << 22) - 1)	/* mask for ba's 22-bit disp */
#define	ANNUL			0x20000000	/* annul bit for the branch */

/*
 * HOT_PATCH_COMMON(addr, event, normal_instr, annul, rs)
 *
 * Emitted inline in lockstat_hot_patch() below.  At patch time the
 * leading "ba 1f / rd %pc" captures the address of the probe trampoline
 * (the save...restore body that follows) and skips over it; the code at
 * 1: then computes a (possibly annulled) "ba" from the patch point addr
 * into that trampoline and calls hot_patch_kernel_text() to store either
 * that branch (event enabled in lockstat_probemap, per %i0) or
 * normal_instr at addr.
 *
 * The trampoline itself runs when a patched point branches here: it
 * bumps T_LOCKSTAT to mark the thread in-lockstat, re-checks the event
 * id, calls the function at *lockstat_probe with (id, lock, rs), then
 * returns 1 in %o0 so callers of mutex_tryenter()/lock_try() still see
 * success.  NOTE(review): no '!' comments may appear inside the macro -
 * they would comment out the continuation backslash.
 */
#define	HOT_PATCH_COMMON(addr, event, normal_instr, annul, rs)		\
	ba	1f;							\
	rd	%pc, %o0;						\
	save	%sp, -SA(MINFRAME), %sp;				\
	set	lockstat_probemap, %l1;					\
	ld	[%l1 + (event * DTRACE_IDSIZE)], %o0;			\
	brz,pn	%o0, 0f;						\
	ldub	[THREAD_REG + T_LOCKSTAT], %l0;				\
	add	%l0, 1, %l2;						\
	stub	%l2, [THREAD_REG + T_LOCKSTAT];				\
	set	lockstat_probe, %g1;					\
	ld	[%l1 + (event * DTRACE_IDSIZE)], %o0;			\
	brz,a,pn %o0, 0f;						\
	stub	%l0, [THREAD_REG + T_LOCKSTAT];				\
	ldn	[%g1], %g2;						\
	mov	rs, %o2;						\
	jmpl	%g2, %o7;						\
	mov	%i0, %o1;						\
	stub	%l0, [THREAD_REG + T_LOCKSTAT];				\
0:	ret;								\
	restore	%g0, 1, %o0;	/* for mutex_tryenter / lock_try */	\
1:	set	addr, %o1;						\
	sub	%o0, %o1, %o0;						\
	srl	%o0, 2, %o0;						\
	inc	%o0;							\
	set	DISP22, %o1;						\
	and	%o1, %o0, %o0;						\
	set	BA, %o1;						\
	or	%o1, %o0, %o0;						\
	sethi	%hi(annul), %o2;					\
	add	%o0, %o2, %o2;						\
	set	addr, %o0;						\
	set	normal_instr, %o1;					\
	ld	[%i0 + (event * DTRACE_IDSIZE)], %o3;			\
	tst	%o3;							\
	movnz	%icc, %o2, %o1;						\
	call	hot_patch_kernel_text;					\
	mov	4, %o2;							\
	membar	#Sync

/* Patch point whose replaced retl has a harmless delay slot. */
#define	HOT_PATCH(addr, event, normal_instr)	\
	HOT_PATCH_COMMON(addr, event, normal_instr, 0, %i1)

/* As HOT_PATCH, but passes an explicit third probe argument (arg). */
#define	HOT_PATCH_ARG(addr, event, normal_instr, arg)	\
	HOT_PATCH_COMMON(addr, event, normal_instr, 0, arg)

/* As HOT_PATCH, but the branch annuls the patched point's delay slot. */
#define HOT_PATCH_ANNULLED(addr, event, normal_instr)	\
	HOT_PATCH_COMMON(addr, event, normal_instr, ANNUL, %i1)
470
/*
 * void lockstat_hot_patch(void)
 *
 * For every lockstat patch point above, store either the normal
 * instruction (RETL) or a branch into the corresponding probe
 * trampoline, depending on whether the event is currently enabled
 * in lockstat_probemap (held in %i0 across all patches).
 */
	ENTRY(lockstat_hot_patch)
	save	%sp, -SA(MINFRAME), %sp
	set	lockstat_probemap, %i0
	HOT_PATCH(.mutex_enter_lockstat_patch_point,
		LS_MUTEX_ENTER_ACQUIRE, RETL)
	HOT_PATCH_ANNULLED(.mutex_tryenter_lockstat_patch_point,
		LS_MUTEX_TRYENTER_ACQUIRE, RETL)
	HOT_PATCH(.mutex_exit_lockstat_patch_point,
		LS_MUTEX_EXIT_RELEASE, RETL)
	HOT_PATCH(.rw_write_enter_lockstat_patch_point,
		LS_RW_ENTER_ACQUIRE, RETL)
	HOT_PATCH(.rw_read_enter_lockstat_patch_point,
		LS_RW_ENTER_ACQUIRE, RETL)
	HOT_PATCH_ARG(.rw_write_exit_lockstat_patch_point,
		LS_RW_EXIT_RELEASE, RETL, RW_WRITER)
	HOT_PATCH_ARG(.rw_read_exit_lockstat_patch_point,
		LS_RW_EXIT_RELEASE, RETL, RW_READER)
	HOT_PATCH(.lock_set_lockstat_patch_point,
		LS_LOCK_SET_ACQUIRE, RETL)
	HOT_PATCH_ANNULLED(.lock_try_lockstat_patch_point,
		LS_LOCK_TRY_ACQUIRE, RETL)
	HOT_PATCH(.lock_clear_lockstat_patch_point,
		LS_LOCK_CLEAR_RELEASE, RETL)
	HOT_PATCH(.lock_set_spl_lockstat_patch_point,
		LS_LOCK_SET_SPL_ACQUIRE, RETL)
	HOT_PATCH(.lock_clear_splx_lockstat_patch_point,
		LS_LOCK_CLEAR_SPLX_RELEASE, RETL)
	ret
	restore
	SET_SIZE(lockstat_hot_patch)
501
502/*
503 * asm_mutex_spin_enter(mutex_t *)
504 *
505 * For use by assembly interrupt handler only.
506 * Does not change spl, since the interrupt handler is assumed to be
507 * running at high level already.
508 * Traps may be off, so cannot panic.
509 * Does not keep statistics on the lock.
510 *
511 * Entry:	%l6 - points to mutex
512 * 		%l7 - address of call (returns to %l7+8)
513 * Uses:	%l6, %l5
514 */
	.align 16
	ENTRY_NP(asm_mutex_spin_enter)
	ldstub	[%l6 + M_SPINLOCK], %l5	! try to set lock, get value in %l5
1:
	tst	%l5
	bnz	3f			! lock already held - go spin
	nop
2:
	jmp	%l7 + 8			! return
	membar	#LoadLoad		! delay: acquire ordering
	!
	! Spin on lock without using an atomic operation to prevent the caches
	! from unnecessarily moving ownership of the line around.
	!
3:
	ldub	[%l6 + M_SPINLOCK], %l5
4:
	tst	%l5
	bz,a	1b			! lock appears to be free, try again
	ldstub	[%l6 + M_SPINLOCK], %l5	! delay slot - try to set lock

	! still held: if the system has panicked, pretend we got the lock
	! (cannot panic here; traps may be off)
	sethi	%hi(panicstr) , %l5
	ldn	[%l5 + %lo(panicstr)], %l5
	tst 	%l5
	bnz	2b			! after panic, feign success
	nop
	b	4b			! keep spinning on the cached copy
	ldub	[%l6 + M_SPINLOCK], %l5	! delay - reload lock
	SET_SIZE(asm_mutex_spin_enter)
544
545/*
546 * asm_mutex_spin_exit(mutex_t *)
547 *
548 * For use by assembly interrupt handler only.
549 * Does not change spl, since the interrupt handler is assumed to be
550 * running at high level already.
551 *
552 * Entry:	%l6 - points to mutex
553 * 		%l7 - address of call (returns to %l7+8)
554 * Uses:	none
555 */
	ENTRY_NP(asm_mutex_spin_exit)
	membar	#LoadStore|#StoreStore	! release before clearing the lock
	jmp	%l7 + 8			! return
	clrb	[%l6 + M_SPINLOCK]	! delay - clear lock
	SET_SIZE(asm_mutex_spin_exit)
561
562/*
563 * thread_onproc()
564 * Set thread in onproc state for the specified CPU.
565 * Also set the thread lock pointer to the CPU's onproc lock.
566 * Since the new lock isn't held, the store ordering is important.
567 * If not done in assembler, the compiler could reorder the stores.
568 */
569
	ENTRY(thread_onproc)
	! store order matters: state first, then the lock pointer (see
	! block comment above) - hence assembly rather than C
	set	TS_ONPROC, %o2		! TS_ONPROC state
	st	%o2, [%o0 + T_STATE]	! store state
	add	%o1, CPU_THREAD_LOCK, %o3 ! pointer to disp_lock while running
	retl				! return
	stn	%o3, [%o0 + T_LOCKP]	! delay - store new lock pointer
	SET_SIZE(thread_onproc)
577
/*
 * cas_delay(void *)
 * Delay function used in some mutex code - just do 3 no-op casx
 * operations on the given address (%g0 old == %g0 new, so nothing
 * is ever stored).
 */
	ENTRY(cas_delay)
	casx [%o0], %g0, %g0
	casx [%o0], %g0, %g0
	retl
	casx [%o0], %g0, %g0		! delay: third no-op cas
	SET_SIZE(cas_delay)
585
586/*
587 * alternative delay function for some niagara processors.   The rd
588 * instruction uses less resources than casx on those cpus.
589 */
	ENTRY(rdccr_delay)
	rd	%ccr, %g0		! read ccr, discard result
	rd	%ccr, %g0
	retl
	rd	%ccr, %g0		! delay: third read
	SET_SIZE(rdccr_delay)
596
597/*
598 * mutex_delay_default(void)
599 * Spins for approx a few hundred processor cycles and returns to caller.
600 */
601
	ENTRY(mutex_delay_default)
	mov	72,%o0			! loop iteration count
1:	brgz	%o0, 1b			! spin while counter > 0
	dec	%o0			! delay: decrement counter
	retl
	nop
	SET_SIZE(mutex_delay_default)
609
610