xref: /illumos-gate/usr/src/uts/intel/ml/lock_prim.s (revision f0089e39)
17c478bd9Sstevel@tonic-gate/*
27c478bd9Sstevel@tonic-gate * CDDL HEADER START
37c478bd9Sstevel@tonic-gate *
47c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the
5ee88d2b9Skchow * Common Development and Distribution License (the "License").
6ee88d2b9Skchow * You may not use this file except in compliance with the License.
77c478bd9Sstevel@tonic-gate *
87c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
97c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing.
107c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions
117c478bd9Sstevel@tonic-gate * and limitations under the License.
127c478bd9Sstevel@tonic-gate *
137c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each
147c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
157c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the
167c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying
177c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner]
187c478bd9Sstevel@tonic-gate *
197c478bd9Sstevel@tonic-gate * CDDL HEADER END
207c478bd9Sstevel@tonic-gate */
217c478bd9Sstevel@tonic-gate/*
222850d85bSmv * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
237c478bd9Sstevel@tonic-gate * Use is subject to license terms.
247c478bd9Sstevel@tonic-gate */
257c478bd9Sstevel@tonic-gate
269b0bb795SJohn Levon/*
279b0bb795SJohn Levon * Copyright 2019 Joyent, Inc.
289b0bb795SJohn Levon */
299b0bb795SJohn Levon
307c478bd9Sstevel@tonic-gate#include "assym.h"
317c478bd9Sstevel@tonic-gate
32575a7426Spt#include <sys/mutex_impl.h>
337c478bd9Sstevel@tonic-gate#include <sys/asm_linkage.h>
347c478bd9Sstevel@tonic-gate#include <sys/asm_misc.h>
357c478bd9Sstevel@tonic-gate#include <sys/regset.h>
367c478bd9Sstevel@tonic-gate#include <sys/rwlock_impl.h>
377c478bd9Sstevel@tonic-gate#include <sys/lockstat.h>
387c478bd9Sstevel@tonic-gate
397c478bd9Sstevel@tonic-gate/*
407c478bd9Sstevel@tonic-gate * lock_try(lp), ulock_try(lp)
417c478bd9Sstevel@tonic-gate *	- returns non-zero on success.
427c478bd9Sstevel@tonic-gate *	- doesn't block interrupts so don't use this to spin on a lock.
437c478bd9Sstevel@tonic-gate *
447c478bd9Sstevel@tonic-gate * ulock_try() is for a lock in the user address space.
457c478bd9Sstevel@tonic-gate */
467c478bd9Sstevel@tonic-gate
477c478bd9Sstevel@tonic-gate	.globl	kernelbase
487c478bd9Sstevel@tonic-gate
	/*
	 * int lock_try(lock_t *lp)
	 * Try once to grab the spin lock; returns non-zero on success.
	 * Does not raise the PIL, so never use it to spin on a lock.
	 */
	ENTRY(lock_try)
	movb	$-1, %dl		/* dl = 0xff, the "held" byte value */
	movzbq	%dl, %rax		/* rax = 0xff */
	xchgb	%dl, (%rdi)		/* atomic swap; dl = previous lock byte */
	xorb	%dl, %al		/* al = 0xff ^ old: non-zero iff lock was free */
.lock_try_lockstat_patch_point:
	ret				/* hot-patched to a nop by lockstat_hot_patch() */
	/* code below runs only once the ret above has been patched out */
	testb	%al, %al		/* did we acquire it? */
	jnz	0f			/* yes, fire the lockstat probe */
	ret
0:
	movq	%gs:CPU_THREAD, %rdx	/* rdx = thread addr */
	movq	%rdi, %rsi		/* rsi = lock addr */
	movl	$LS_LOCK_TRY_ACQUIRE, %edi /* edi = event */
	jmp	lockstat_wrapper	/* tail call; wrapper returns 1 in %eax */
	SET_SIZE(lock_try)
657c478bd9Sstevel@tonic-gate
	/*
	 * int lock_spin_try(lock_t *lp)
	 * Same single attempt as lock_try(), but with no lockstat patch
	 * point (used where probing is undesirable).  Non-zero on success.
	 */
	ENTRY(lock_spin_try)
	movb	$-1, %dl		/* dl = 0xff, the "held" byte value */
	movzbq	%dl, %rax		/* rax = 0xff */
	xchgb	%dl, (%rdi)		/* atomic swap; dl = previous lock byte */
	xorb	%dl, %al		/* al non-zero iff lock was previously free */
	ret
	SET_SIZE(lock_spin_try)
737c478bd9Sstevel@tonic-gate
	/*
	 * int ulock_try(lock_t *lp)
	 * Like lock_try(), but lp is a lock in the user address space:
	 * the held value is 1 rather than 0xff.  Returns non-zero if the
	 * lock was acquired.  DEBUG kernels panic if lp is not a user
	 * address (>= kernelbase).
	 */
	ENTRY(ulock_try)
#ifdef DEBUG
	movq	kernelbase(%rip), %rax
	cmpq	%rax, %rdi		/* test uaddr < kernelbase */
	jb	ulock_pass		/*	uaddr < kernelbase, proceed */

	movq	%rdi, %r12		/* preserve lock ptr for debugging */
	leaq	.ulock_panic_msg(%rip), %rdi
	pushq	%rbp			/* align stack properly */
	movq	%rsp, %rbp
	xorl	%eax, %eax		/* clear for varargs */
	call	panic			/* panic() does not return */

#endif /* DEBUG */

ulock_pass:
	movl	$1, %eax		/* 1 = user-level "held" value */
	xchgb	%al, (%rdi)		/* atomic swap; al = previous byte */
	xorb	$1, %al			/* al = 1 iff lock was previously free */
	ret
	SET_SIZE(ulock_try)

#ifdef DEBUG
	.data
.ulock_panic_msg:
	.string "ulock_try: Argument is above kernelbase"
	.text
#endif	/* DEBUG */
1027c478bd9Sstevel@tonic-gate
1037c478bd9Sstevel@tonic-gate/*
1047c478bd9Sstevel@tonic-gate * lock_clear(lp)
1057c478bd9Sstevel@tonic-gate *	- unlock lock without changing interrupt priority level.
1067c478bd9Sstevel@tonic-gate */
1077c478bd9Sstevel@tonic-gate
	/*
	 * void lock_clear(lock_t *lp)
	 * Release the lock without changing the interrupt priority level.
	 * A plain byte store is sufficient to release on x86.
	 */
	ENTRY(lock_clear)
	movb	$0, (%rdi)		/* release: clear the lock byte */
.lock_clear_lockstat_patch_point:
	ret				/* hot-patched to a nop when lockstat is active */
	/* lockstat-only path below the patched ret */
	movq	%rdi, %rsi			/* rsi = lock addr */
	movq	%gs:CPU_THREAD, %rdx		/* rdx = thread addr */
	movl	$LS_LOCK_CLEAR_RELEASE, %edi	/* edi = event */
	jmp	lockstat_wrapper
	SET_SIZE(lock_clear)
1177c478bd9Sstevel@tonic-gate
	/*
	 * void ulock_clear(lock_t *lp)
	 * Release a lock residing in the user address space.  DEBUG
	 * kernels panic if lp is not below kernelbase.
	 */
	ENTRY(ulock_clear)
#ifdef DEBUG
	movq	kernelbase(%rip), %rcx
	cmpq	%rcx, %rdi		/* test uaddr < kernelbase */
	jb	ulock_clr		/*	 uaddr < kernelbase, proceed */

	leaq	.ulock_clear_msg(%rip), %rdi
	pushq	%rbp			/* align stack properly */
	movq	%rsp, %rbp
	xorl	%eax, %eax		/* clear for varargs */
	call	panic			/* panic() does not return */
#endif

ulock_clr:
	movb	$0, (%rdi)		/* release: clear the lock byte */
	ret
	SET_SIZE(ulock_clear)

#ifdef DEBUG
	.data
.ulock_clear_msg:
	.string "ulock_clear: Argument is above kernelbase"
	.text
#endif	/* DEBUG */
1427c478bd9Sstevel@tonic-gate
1437c478bd9Sstevel@tonic-gate
1447c478bd9Sstevel@tonic-gate/*
1457c478bd9Sstevel@tonic-gate * lock_set_spl(lock_t *lp, int new_pil, u_short *old_pil)
1467c478bd9Sstevel@tonic-gate * Drops lp, sets pil to new_pil, stores old pil in *old_pil.
1477c478bd9Sstevel@tonic-gate */
1487c478bd9Sstevel@tonic-gate
	/*
	 * void lock_set_spl(lock_t *lp, int new_pil, u_short *old_pil)
	 * Raise the PIL to new_pil, then try to take lp.  On success the
	 * previous PIL is stored in *old_pil.  If the lock is held we fall
	 * back to lock_set_spl_spin() in C for the hard (spinning) case.
	 *
	 * Local frame (32 bytes, keeps 16-byte call alignment after the
	 * push):  8(%rsp) = new_pil, 16(%rsp) = old_pil ptr,
	 * 24(%rsp) = lock ptr.
	 */
	ENTRY(lock_set_spl)
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$32, %rsp
	movl	%esi, 8(%rsp)		/* save priority level */
	movq	%rdx, 16(%rsp)		/* save old pil ptr */
	movq	%rdi, 24(%rsp)		/* save lock pointer */
	movl	%esi, %edi		/* pass priority level */
	call	splr			/* raise priority level; %eax = old pil */
	movq	24(%rsp), %rdi		/* rdi = lock addr */
	movb	$-1, %dl
	xchgb	%dl, (%rdi)		/* try to set lock */
	testb	%dl, %dl		/* did we get the lock? ... */
	jnz	.lss_miss		/* ... no, go to C for the hard case */
	movq	16(%rsp), %rdx		/* rdx = old pil addr */
	movw	%ax, (%rdx)		/* store old pil */
	leave
.lock_set_spl_lockstat_patch_point:
	ret				/* hot-patched to a nop when lockstat is active */
	movq	%rdi, %rsi		/* rsi = lock addr */
	movq	%gs:CPU_THREAD, %rdx	/* rdx = thread addr */
	movl	$LS_LOCK_SET_SPL_ACQUIRE, %edi
	jmp	lockstat_wrapper
.lss_miss:
	/* tail call lock_set_spl_spin(lp, new_pil, old_pil, old_pil_value) */
	movl	8(%rsp), %esi		/* new_pil */
	movq	16(%rsp), %rdx		/* old_pil_addr */
	movl	%eax, %ecx		/* original pil */
	leave				/* unwind stack */
	jmp	lock_set_spl_spin	/* %rdi still holds the lock addr */
	SET_SIZE(lock_set_spl)
1797c478bd9Sstevel@tonic-gate
1807c478bd9Sstevel@tonic-gate/*
1817c478bd9Sstevel@tonic-gate * void
1827c478bd9Sstevel@tonic-gate * lock_init(lp)
1837c478bd9Sstevel@tonic-gate */
1847c478bd9Sstevel@tonic-gate
	/*
	 * void lock_init(lock_t *lp)
	 * Initialize the lock to the unheld state (zero byte).
	 */
	ENTRY(lock_init)
	movb	$0, (%rdi)
	ret
	SET_SIZE(lock_init)
1897c478bd9Sstevel@tonic-gate
1907c478bd9Sstevel@tonic-gate/*
1917c478bd9Sstevel@tonic-gate * void
1927c478bd9Sstevel@tonic-gate * lock_set(lp)
1937c478bd9Sstevel@tonic-gate */
1947c478bd9Sstevel@tonic-gate
	/*
	 * void lock_set(lock_t *lp)
	 * Acquire the lock, punting to lock_set_spin() in C if it is
	 * already held.
	 */
	ENTRY(lock_set)
	movb	$-1, %dl
	xchgb	%dl, (%rdi)		/* try to set lock */
	testb	%dl, %dl		/* did we get it? */
	jnz	lock_set_spin		/* no, go to C for the hard case */
.lock_set_lockstat_patch_point:
	ret				/* hot-patched to a nop when lockstat is active */
	movq	%rdi, %rsi		/* rsi = lock addr */
	movq	%gs:CPU_THREAD, %rdx	/* rdx = thread addr */
	movl	$LS_LOCK_SET_ACQUIRE, %edi
	jmp	lockstat_wrapper
	SET_SIZE(lock_set)
2077c478bd9Sstevel@tonic-gate
2087c478bd9Sstevel@tonic-gate/*
2097c478bd9Sstevel@tonic-gate * lock_clear_splx(lp, s)
2107c478bd9Sstevel@tonic-gate */
2117c478bd9Sstevel@tonic-gate
	/*
	 * void lock_clear_splx(lock_t *lp, int s)
	 * Release the lock and restore the saved PIL via splx(s).
	 *
	 * The patch point here is a 2-byte short jmp rather than a ret:
	 * lockstat_hot_patch() rewrites its rel8 displacement so the jmp
	 * targets .lock_clear_splx_lockstat instead of the fall-through
	 * label 0: (see the *_PATCH_VAL/_PATCH_POINT macros below).
	 */
	ENTRY(lock_clear_splx)
	movb	$0, (%rdi)		/* clear lock */
.lock_clear_splx_lockstat_patch_point:
	jmp	0f			/* displacement byte is hot-patched */
0:
	movl	%esi, %edi		/* arg for splx */
	jmp	splx			/* let splx do its thing */
.lock_clear_splx_lockstat:
	pushq	%rbp			/* align stack properly */
	movq	%rsp, %rbp
	subq	$16, %rsp		/* space to save args across splx */
	movq	%rdi, 8(%rsp)		/* save lock ptr across splx call */
	movl	%esi, %edi		/* arg for splx */
	call	splx			/* lower the priority */
	movq	8(%rsp), %rsi		/* rsi = lock ptr */
	leave				/* unwind stack */
	movq	%gs:CPU_THREAD, %rdx	/* rdx = thread addr */
	movl	$LS_LOCK_CLEAR_SPLX_RELEASE, %edi
	jmp	lockstat_wrapper
	SET_SIZE(lock_clear_splx)

/*
 * PATCH_VAL is the rel8 displacement that redirects the jmp above to
 * .lock_clear_splx_lockstat ("- 2" accounts for the jmp's own 2-byte
 * encoding); PATCH_POINT is the address of that displacement byte
 * (opcode + 1).  Two spellings cover the assembler expression syntax.
 */
#if defined(__GNUC_AS__)
#define	LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_VAL	\
	(.lock_clear_splx_lockstat - .lock_clear_splx_lockstat_patch_point - 2)

#define LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_POINT	\
	(.lock_clear_splx_lockstat_patch_point + 1)
#else
#define	LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_VAL	\
	[.lock_clear_splx_lockstat - .lock_clear_splx_lockstat_patch_point - 2]

#define LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_POINT	\
	[.lock_clear_splx_lockstat_patch_point + 1]
#endif
246ae115bc7Smrj
2477c478bd9Sstevel@tonic-gate/*
2487c478bd9Sstevel@tonic-gate * mutex_enter() and mutex_exit().
2497c478bd9Sstevel@tonic-gate *
2507c478bd9Sstevel@tonic-gate * These routines handle the simple cases of mutex_enter() (adaptive
2517c478bd9Sstevel@tonic-gate * lock, not held) and mutex_exit() (adaptive lock, held, no waiters).
2527c478bd9Sstevel@tonic-gate * If anything complicated is going on we punt to mutex_vector_enter().
2537c478bd9Sstevel@tonic-gate *
2547c478bd9Sstevel@tonic-gate * mutex_tryenter() is similar to mutex_enter() but returns zero if
2557c478bd9Sstevel@tonic-gate * the lock cannot be acquired, nonzero on success.
2567c478bd9Sstevel@tonic-gate *
2577c478bd9Sstevel@tonic-gate * If mutex_exit() gets preempted in the window between checking waiters
2587c478bd9Sstevel@tonic-gate * and clearing the lock, we can miss wakeups.  Disabling preemption
2597c478bd9Sstevel@tonic-gate * in the mutex code is prohibitively expensive, so instead we detect
2607c478bd9Sstevel@tonic-gate * mutex preemption by examining the trapped PC in the interrupt path.
2617c478bd9Sstevel@tonic-gate * If we interrupt a thread in mutex_exit() that has not yet cleared
2627c478bd9Sstevel@tonic-gate * the lock, cmnint() resets its PC back to the beginning of
2637c478bd9Sstevel@tonic-gate * mutex_exit() so it will check again for waiters when it resumes.
2647c478bd9Sstevel@tonic-gate *
2657c478bd9Sstevel@tonic-gate * The lockstat code below is activated when the lockstat driver
2667c478bd9Sstevel@tonic-gate * calls lockstat_hot_patch() to hot-patch the kernel mutex code.
2677c478bd9Sstevel@tonic-gate * Note that we don't need to test lockstat_event_mask here -- we won't
2687c478bd9Sstevel@tonic-gate * patch this code in unless we're gathering ADAPTIVE_HOLD lockstats.
2697c478bd9Sstevel@tonic-gate */
2707c478bd9Sstevel@tonic-gate
	/*
	 * void mutex_enter(kmutex_t *lp)
	 * Fast path for an unheld adaptive mutex: atomically swing the
	 * owner word from 0 to curthread.  Any complication (held lock,
	 * waiters, spin type) goes to mutex_vector_enter() in C.
	 */
	ENTRY_NP(mutex_enter)
	movq	%gs:CPU_THREAD, %rdx		/* rdx = thread ptr */
	xorl	%eax, %eax			/* rax = 0 (unheld adaptive) */
	lock
	cmpxchgq %rdx, (%rdi)			/* 0 -> curthread if unheld */
	jnz	mutex_vector_enter		/* lost the race: hard case */
.mutex_enter_lockstat_patch_point:
#if defined(OPTERON_WORKAROUND_6323525)
.mutex_enter_6323525_patch_point:
	ret					/* nop space for lfence */
	nop
	nop
.mutex_enter_lockstat_6323525_patch_point:	/* new patch point if lfence */
	nop
#else	/* OPTERON_WORKAROUND_6323525 */
	ret					/* hot-patched to a nop when lockstat is active */
#endif	/* OPTERON_WORKAROUND_6323525 */
	movq	%rdi, %rsi
	movl	$LS_MUTEX_ENTER_ACQUIRE, %edi
/*
 * Common lockstat probe trampoline; reached by falling through a
 * patched-out ret above or by an explicit jmp from other primitives.
 * expects %rdx=thread, %rsi=lock, %edi=lockstat event
 */
	ALTENTRY(lockstat_wrapper)
	incb	T_LOCKSTAT(%rdx)		/* curthread->t_lockstat++ */
	leaq	lockstat_probemap(%rip), %rax
	movl	(%rax, %rdi, DTRACE_IDSIZE), %eax /* eax = probemap[event] */
	testl	%eax, %eax			/* check for non-zero probe */
	jz	1f
	pushq	%rbp				/* align stack properly */
	movq	%rsp, %rbp
	movl	%eax, %edi			/* probe id is arg 0 */
	movq	lockstat_probe, %rax
	INDIRECT_CALL_REG(rax)			/* indirect call via register */
	leave					/* unwind stack */
1:
	movq	%gs:CPU_THREAD, %rdx		/* reload thread ptr */
	decb	T_LOCKSTAT(%rdx)		/* curthread->t_lockstat-- */
	movl	$1, %eax			/* return success if tryenter */
	ret
	SET_SIZE(lockstat_wrapper)
	SET_SIZE(mutex_enter)
3127c478bd9Sstevel@tonic-gate
3137c478bd9Sstevel@tonic-gate/*
3147c478bd9Sstevel@tonic-gate * expects %rcx=thread, %rdx=arg, %rsi=lock, %edi=lockstat event
3157c478bd9Sstevel@tonic-gate */
/*
 * Lockstat trampoline for probes that carry an extra argument (e.g. the
 * RW_READER/RW_WRITER flavor for rwlock events).
 * expects %rcx=thread, %rdx=arg, %rsi=lock, %edi=lockstat event
 */
	ENTRY(lockstat_wrapper_arg)
	incb	T_LOCKSTAT(%rcx)		/* curthread->t_lockstat++ */
	leaq	lockstat_probemap(%rip), %rax
	movl	(%rax, %rdi, DTRACE_IDSIZE), %eax /* eax = probemap[event] */
	testl	%eax, %eax			/* check for non-zero probe */
	jz	1f
	pushq	%rbp				/* align stack properly */
	movq	%rsp, %rbp
	movl	%eax, %edi			/* probe id is arg 0 */
	movq	lockstat_probe, %rax
	INDIRECT_CALL_REG(rax)			/* indirect call via register */
	leave					/* unwind stack */
1:
	movq	%gs:CPU_THREAD, %rdx		/* reload thread ptr */
	decb	T_LOCKSTAT(%rdx)		/* curthread->t_lockstat-- */
	movl	$1, %eax			/* return success if tryenter */
	ret
	SET_SIZE(lockstat_wrapper_arg)
3347c478bd9Sstevel@tonic-gate
3357c478bd9Sstevel@tonic-gate
	/*
	 * int mutex_tryenter(kmutex_t *lp)
	 * Like mutex_enter() but never blocks: returns non-zero on
	 * success, and defers failure/odd cases to mutex_vector_tryenter().
	 */
	ENTRY(mutex_tryenter)
	movq	%gs:CPU_THREAD, %rdx		/* rdx = thread ptr */
	xorl	%eax, %eax			/* rax = 0 (unheld adaptive) */
	lock
	cmpxchgq %rdx, (%rdi)			/* 0 -> curthread if unheld */
	jnz	mutex_vector_tryenter		/* held: C decides the result */
	not	%eax				/* return success (nonzero) */
#if defined(OPTERON_WORKAROUND_6323525)
.mutex_tryenter_lockstat_patch_point:
.mutex_tryenter_6323525_patch_point:
	ret					/* nop space for lfence */
	nop
	nop
.mutex_tryenter_lockstat_6323525_patch_point:	/* new patch point if lfence */
	nop
#else	/* OPTERON_WORKAROUND_6323525 */
.mutex_tryenter_lockstat_patch_point:
	ret					/* hot-patched to a nop when lockstat is active */
#endif	/* OPTERON_WORKAROUND_6323525 */
	movq	%rdi, %rsi
	movl	$LS_MUTEX_ENTER_ACQUIRE, %edi
	jmp	lockstat_wrapper		/* wrapper returns 1 in %eax */
	SET_SIZE(mutex_tryenter)
3597c478bd9Sstevel@tonic-gate
	/*
	 * int mutex_adaptive_tryenter(mutex_impl_t *lp)
	 * Single CAS attempt on an adaptive mutex with no C fallback and
	 * no lockstat patch point: returns non-zero on success, zero on
	 * failure.
	 */
	ENTRY(mutex_adaptive_tryenter)
	movq	%gs:CPU_THREAD, %rdx		/* rdx = thread ptr */
	xorl	%eax, %eax			/* rax = 0 (unheld adaptive) */
	lock
	cmpxchgq %rdx, (%rdi)			/* 0 -> curthread if unheld */
	jnz	0f				/* held: report failure */
	not	%eax				/* return success (nonzero) */
#if defined(OPTERON_WORKAROUND_6323525)
.mutex_atryenter_6323525_patch_point:
	ret					/* nop space for lfence */
	nop
	nop
	nop
#else	/* OPTERON_WORKAROUND_6323525 */
	ret
#endif	/* OPTERON_WORKAROUND_6323525 */
0:
	xorl	%eax, %eax			/* return failure */
	ret
	SET_SIZE(mutex_adaptive_tryenter)
3807c478bd9Sstevel@tonic-gate
	/*
	 * cpu_t *mutex_owner_running(mutex_impl_t *lp)
	 * If lp's owner thread is currently running on a CPU, return that
	 * CPU; otherwise return NULL.
	 *
	 * The span between mutex_owner_running_critical_start and
	 * .mutex_owner_running_critical_end (exported below via
	 * mutex_owner_running_critical_size) dereferences a thread pointer
	 * that may be stale; the interrupt path appears to treat a trapped
	 * PC in this window specially (cf. the mutex_exit restart scheme
	 * described above) — the exact mechanism lives in the interrupt
	 * code, not here.
	 */
	.globl	mutex_owner_running_critical_start

	ENTRY(mutex_owner_running)
mutex_owner_running_critical_start:
	movq	(%rdi), %r11		/* get owner field */
	andq	$MUTEX_THREAD, %r11	/* remove waiters bit */
	cmpq	$0, %r11		/* if free, skip */
	je	1f			/* go return 0 */
	movq	T_CPU(%r11), %r8	/* get owner->t_cpu */
	movq	CPU_THREAD(%r8), %r9	/* get t_cpu->cpu_thread */
.mutex_owner_running_critical_end:
	cmpq	%r11, %r9	/* owner == running thread? */
	je	2f		/* yes, go return cpu */
1:
	xorq	%rax, %rax	/* return 0 */
	ret
2:
	movq	%r8, %rax		/* return cpu */
	ret
	SET_SIZE(mutex_owner_running)

	/* byte length of the critical window, consumed by the trap code */
	.globl	mutex_owner_running_critical_size
	.type	mutex_owner_running_critical_size, @object
	.align	CPTRSIZE
mutex_owner_running_critical_size:
	.quad	.mutex_owner_running_critical_end - mutex_owner_running_critical_start
	SET_SIZE(mutex_owner_running_critical_size)
408575a7426Spt
	/*
	 * void mutex_exit(kmutex_t *lp)
	 * Fast path: if curthread owns the lock and there are no waiters
	 * (owner word == curthread exactly), clear it with a plain store.
	 * Otherwise punt to mutex_vector_exit().
	 *
	 * Per the block comment above: if an interrupt lands while the PC
	 * is inside [mutex_exit_critical_start, .mutex_exit_critical_end),
	 * cmnint() rewinds the PC to mutex_exit_critical_start so the
	 * waiters check is redone — this is how missed wakeups are avoided
	 * without disabling preemption.
	 */
	.globl	mutex_exit_critical_start

	ENTRY(mutex_exit)
mutex_exit_critical_start:		/* If interrupted, restart here */
	movq	%gs:CPU_THREAD, %rdx
	cmpq	%rdx, (%rdi)
	jne	mutex_vector_exit		/* wrong type or wrong owner */
	movq	$0, (%rdi)			/* clear owner AND lock */
.mutex_exit_critical_end:
.mutex_exit_lockstat_patch_point:
	ret					/* hot-patched to a nop when lockstat is active */
	movq	%rdi, %rsi
	movl	$LS_MUTEX_EXIT_RELEASE, %edi
	jmp	lockstat_wrapper
	SET_SIZE(mutex_exit)

	/* byte length of the critical window, consumed by cmnint() */
	.globl	mutex_exit_critical_size
	.type	mutex_exit_critical_size, @object
	.align	CPTRSIZE
mutex_exit_critical_size:
	.quad	.mutex_exit_critical_end - mutex_exit_critical_start
	SET_SIZE(mutex_exit_critical_size)
4317c478bd9Sstevel@tonic-gate
4327c478bd9Sstevel@tonic-gate/*
4337c478bd9Sstevel@tonic-gate * rw_enter() and rw_exit().
4347c478bd9Sstevel@tonic-gate *
4357c478bd9Sstevel@tonic-gate * These routines handle the simple cases of rw_enter (write-locking an unheld
4367c478bd9Sstevel@tonic-gate * lock or read-locking a lock that's neither write-locked nor write-wanted)
4377c478bd9Sstevel@tonic-gate * and rw_exit (no waiters or not the last reader).  If anything complicated
4387c478bd9Sstevel@tonic-gate * is going on we punt to rw_enter_sleep() and rw_exit_wakeup(), respectively.
4397c478bd9Sstevel@tonic-gate */
4407c478bd9Sstevel@tonic-gate
	/*
	 * void rw_enter(krwlock_t *lp, krw_t rw)
	 * Fast paths: read-lock a lock that is neither write-locked nor
	 * write-wanted (CAS to bump the hold count), or write-lock an
	 * unheld lock (CAS 0 -> curthread|RW_WRITE_LOCKED).  Contended
	 * cases go to rw_enter_sleep().
	 */
	ENTRY(rw_enter)
	cmpl	$RW_WRITER, %esi
	je	.rw_write_enter
	movq	(%rdi), %rax			/* rax = old rw_wwwh value */
	testl	$RW_WRITE_LOCKED|RW_WRITE_WANTED, %eax
	jnz	rw_enter_sleep			/* writer active/waiting: sleep path */
	leaq	RW_READ_LOCK(%rax), %rdx	/* rdx = new rw_wwwh value (hold +1) */
	lock
	cmpxchgq %rdx, (%rdi)			/* try to grab read lock */
	jnz	rw_enter_sleep			/* raced with an update: sleep path */
.rw_read_enter_lockstat_patch_point:
	ret					/* hot-patched to a nop when lockstat is active */
	movq	%gs:CPU_THREAD, %rcx		/* rcx = thread ptr */
	movq	%rdi, %rsi			/* rsi = lock ptr */
	movl	$LS_RW_ENTER_ACQUIRE, %edi
	movl	$RW_READER, %edx
	jmp	lockstat_wrapper_arg
.rw_write_enter:
	movq	%gs:CPU_THREAD, %rdx
	orq	$RW_WRITE_LOCKED, %rdx		/* rdx = write-locked value */
	xorl	%eax, %eax			/* rax = unheld value */
	lock
	cmpxchgq %rdx, (%rdi)			/* try to grab write lock */
	jnz	rw_enter_sleep			/* held: sleep path */

#if defined(OPTERON_WORKAROUND_6323525)
.rw_write_enter_lockstat_patch_point:
.rw_write_enter_6323525_patch_point:
	ret					/* nop space for lfence */
	nop
	nop
.rw_write_enter_lockstat_6323525_patch_point:	/* new patch point if lfence */
	nop
#else	/* OPTERON_WORKAROUND_6323525 */
.rw_write_enter_lockstat_patch_point:
	ret					/* hot-patched to a nop when lockstat is active */
#endif	/* OPTERON_WORKAROUND_6323525 */

	movq	%gs:CPU_THREAD, %rcx		/* rcx = thread ptr */
	movq	%rdi, %rsi			/* rsi = lock ptr */
	movl	$LS_RW_ENTER_ACQUIRE, %edi
	movl	$RW_WRITER, %edx
	jmp	lockstat_wrapper_arg
	SET_SIZE(rw_enter)
4857c478bd9Sstevel@tonic-gate
	/*
	 * void rw_exit(krwlock_t *lp)
	 * Fast paths: drop a read hold (not the last reader, or last with
	 * no waiters) or release a write lock with no waiters.  Anything
	 * else — including a last reader with waiters — goes to
	 * rw_exit_wakeup().
	 */
	ENTRY(rw_exit)
	movq	(%rdi), %rax			/* rax = old rw_wwwh value */
	cmpl	$RW_READ_LOCK, %eax		/* single-reader, no waiters? */
	jne	.rw_not_single_reader
	xorl	%edx, %edx			/* rdx = new value (unheld) */
.rw_read_exit:
	lock
	cmpxchgq %rdx, (%rdi)			/* try to drop read lock */
	jnz	rw_exit_wakeup			/* value changed under us: hard case */
.rw_read_exit_lockstat_patch_point:
	ret					/* hot-patched to a nop when lockstat is active */
	movq	%gs:CPU_THREAD, %rcx		/* rcx = thread ptr */
	movq	%rdi, %rsi			/* rsi = lock ptr */
	movl	$LS_RW_EXIT_RELEASE, %edi
	movl	$RW_READER, %edx
	jmp	lockstat_wrapper_arg
.rw_not_single_reader:
	testl	$RW_WRITE_LOCKED, %eax	/* write-locked? */
	jnz	.rw_write_exit
	leaq	-RW_READ_LOCK(%rax), %rdx	/* rdx = new value (hold -1) */
	cmpl	$RW_READ_LOCK, %edx
	jge	.rw_read_exit		/* not last reader, safe to drop */
	jmp	rw_exit_wakeup			/* last reader with waiters */
.rw_write_exit:
	movq	%gs:CPU_THREAD, %rax		/* rax = thread ptr */
	xorl	%edx, %edx			/* rdx = new value (unheld) */
	orq	$RW_WRITE_LOCKED, %rax		/* rax = write-locked value */
	lock
	cmpxchgq %rdx, (%rdi)			/* try to drop write lock */
	jnz	rw_exit_wakeup			/* waiters appeared: hard case */
.rw_write_exit_lockstat_patch_point:
	ret					/* hot-patched to a nop when lockstat is active */
	movq	%gs:CPU_THREAD, %rcx		/* rcx = thread ptr */
	movq	%rdi, %rsi			/* rsi = lock ptr */
	movl	$LS_RW_EXIT_RELEASE, %edi
	movl	$RW_WRITER, %edx
	jmp	lockstat_wrapper_arg
	SET_SIZE(rw_exit)
5247c478bd9Sstevel@tonic-gate
#if defined(OPTERON_WORKAROUND_6323525)

/*
 * If it is necessary to patch the lock enter routines with the lfence
 * workaround, workaround_6323525_patched is set to a non-zero value so that
 * the lockstat_hat_patch routine can patch to the new location of the 'ret'
 * instruction.
 */
	DGDEF3(workaround_6323525_patched, 4, 4)
	.long	0

/*
 * HOT_MUTEX_PATCH(srcaddr, dstaddr, size)
 * Copy `size` bytes from srcaddr over the instruction stream at dstaddr,
 * one byte at a time via hot_patch_kernel_text(), walking BACKWARDS so
 * the leading byte (the live 'ret') is overwritten last (see the comment
 * in patch_workaround_6323525 below).
 * Register use: %rbx = bytes remaining, %r12 = src cursor, %r13 = dst
 * cursor (all callee-saved; preserved by the caller below).
 */
#define HOT_MUTEX_PATCH(srcaddr, dstaddr, size)	\
	movq	$size, %rbx;			\
	movq	$dstaddr, %r13;			\
	addq	%rbx, %r13;			\
	movq	$srcaddr, %r12;			\
	addq	%rbx, %r12;			\
0:						\
	decq	%r13;				\
	decq	%r12;				\
	movzbl	(%r12), %esi;			\
	movq	$1, %rdx;			\
	movq	%r13, %rdi;			\
	call	hot_patch_kernel_text;		\
	decq	%rbx;				\
	testq	%rbx, %rbx;			\
	jg	0b;

/*
 * patch_workaround_6323525: provide workaround for 6323525
 *
 * The workaround is to place a fencing instruction (lfence) between the
 * mutex operation and the subsequent read-modify-write instruction.
 *
 * This routine hot patches the lfence instruction on top of the space
 * reserved by nops in the lock enter routines.
 */
	ENTRY_NP(patch_workaround_6323525)
	pushq	%rbp
	movq	%rsp, %rbp
	pushq	%r12			/* callee-saved regs used by the macro */
	pushq	%r13
	pushq	%rbx

	/*
	 * lockstat_hot_patch() to use the alternate lockstat workaround
	 * 6323525 patch points (points past the lfence instruction to the
	 * new ret) when workaround_6323525_patched is set.
	 */
	movl	$1, workaround_6323525_patched

	/*
	 * patch ret/nop/nop/nop to lfence/ret at the end of the lock enter
	 * routines. The 4 bytes are patched in reverse order so that the
	 * the existing ret is overwritten last. This provides lock enter
	 * sanity during the intermediate patching stages.
	 */
	HOT_MUTEX_PATCH(_lfence_insn, .mutex_enter_6323525_patch_point, 4)
	HOT_MUTEX_PATCH(_lfence_insn, .mutex_tryenter_6323525_patch_point, 4)
	HOT_MUTEX_PATCH(_lfence_insn, .mutex_atryenter_6323525_patch_point, 4)
	HOT_MUTEX_PATCH(_lfence_insn, .rw_write_enter_6323525_patch_point, 4)

	popq	%rbx
	popq	%r13
	popq	%r12
	movq	%rbp, %rsp
	popq	%rbp
	ret
_lfence_insn:
	/* template bytes copied by HOT_MUTEX_PATCH: lfence followed by ret */
	lfence
	ret
	SET_SIZE(patch_workaround_6323525)


#endif	/* OPTERON_WORKAROUND_6323525 */
600ee88d2b9Skchow
601ee88d2b9Skchow
6027c478bd9Sstevel@tonic-gate#define	HOT_PATCH(addr, event, active_instr, normal_instr, len)	\
6037c478bd9Sstevel@tonic-gate	movq	$normal_instr, %rsi;		\
6047c478bd9Sstevel@tonic-gate	movq	$active_instr, %rdi;		\
6057c478bd9Sstevel@tonic-gate	leaq	lockstat_probemap(%rip), %rax;	\
6066a0b1217SPatrick Mooney	movl	_MUL(event, DTRACE_IDSIZE)(%rax), %eax;	\
6077c478bd9Sstevel@tonic-gate	testl	%eax, %eax;			\
6087c478bd9Sstevel@tonic-gate	jz	9f;				\
6097c478bd9Sstevel@tonic-gate	movq	%rdi, %rsi;			\
6107c478bd9Sstevel@tonic-gate9:						\
6117c478bd9Sstevel@tonic-gate	movq	$len, %rdx;			\
6127c478bd9Sstevel@tonic-gate	movq	$addr, %rdi;			\
6137c478bd9Sstevel@tonic-gate	call	hot_patch_kernel_text
6147c478bd9Sstevel@tonic-gate
6157c478bd9Sstevel@tonic-gate	ENTRY(lockstat_hot_patch)
6167c478bd9Sstevel@tonic-gate	pushq	%rbp			/* align stack properly */
6177c478bd9Sstevel@tonic-gate	movq	%rsp, %rbp
618ee88d2b9Skchow
619ee88d2b9Skchow#if defined(OPTERON_WORKAROUND_6323525)
620ee88d2b9Skchow	cmpl	$0, workaround_6323525_patched
621ee88d2b9Skchow	je	1f
622ee88d2b9Skchow	HOT_PATCH(.mutex_enter_lockstat_6323525_patch_point,
623ee88d2b9Skchow		LS_MUTEX_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
624ee88d2b9Skchow	HOT_PATCH(.mutex_tryenter_lockstat_6323525_patch_point,
625ee88d2b9Skchow		LS_MUTEX_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
626ee88d2b9Skchow	HOT_PATCH(.rw_write_enter_lockstat_6323525_patch_point,
627ee88d2b9Skchow		LS_RW_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
628ee88d2b9Skchow	jmp	2f
629ee88d2b9Skchow1:
630ee88d2b9Skchow	HOT_PATCH(.mutex_enter_lockstat_patch_point,
631ee88d2b9Skchow		LS_MUTEX_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
632ee88d2b9Skchow	HOT_PATCH(.mutex_tryenter_lockstat_patch_point,
633ee88d2b9Skchow		LS_MUTEX_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
634ee88d2b9Skchow	HOT_PATCH(.rw_write_enter_lockstat_patch_point,
635ee88d2b9Skchow		LS_RW_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
636ee88d2b9Skchow2:
637ee88d2b9Skchow#else	/* OPTERON_WORKAROUND_6323525 */
6387c478bd9Sstevel@tonic-gate	HOT_PATCH(.mutex_enter_lockstat_patch_point,
6397c478bd9Sstevel@tonic-gate		LS_MUTEX_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
6407c478bd9Sstevel@tonic-gate	HOT_PATCH(.mutex_tryenter_lockstat_patch_point,
6417c478bd9Sstevel@tonic-gate		LS_MUTEX_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
6427c478bd9Sstevel@tonic-gate	HOT_PATCH(.rw_write_enter_lockstat_patch_point,
6437c478bd9Sstevel@tonic-gate		LS_RW_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
644ee88d2b9Skchow#endif	/* !OPTERON_WORKAROUND_6323525 */
645ee88d2b9Skchow	HOT_PATCH(.mutex_exit_lockstat_patch_point,
646ee88d2b9Skchow		LS_MUTEX_EXIT_RELEASE, NOP_INSTR, RET_INSTR, 1)
6477c478bd9Sstevel@tonic-gate	HOT_PATCH(.rw_read_enter_lockstat_patch_point,
6487c478bd9Sstevel@tonic-gate		LS_RW_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
6497c478bd9Sstevel@tonic-gate	HOT_PATCH(.rw_write_exit_lockstat_patch_point,
6507c478bd9Sstevel@tonic-gate		LS_RW_EXIT_RELEASE, NOP_INSTR, RET_INSTR, 1)
6517c478bd9Sstevel@tonic-gate	HOT_PATCH(.rw_read_exit_lockstat_patch_point,
6527c478bd9Sstevel@tonic-gate		LS_RW_EXIT_RELEASE, NOP_INSTR, RET_INSTR, 1)
6537c478bd9Sstevel@tonic-gate	HOT_PATCH(.lock_set_lockstat_patch_point,
6547c478bd9Sstevel@tonic-gate		LS_LOCK_SET_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
6557c478bd9Sstevel@tonic-gate	HOT_PATCH(.lock_try_lockstat_patch_point,
6567c478bd9Sstevel@tonic-gate		LS_LOCK_TRY_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
6577c478bd9Sstevel@tonic-gate	HOT_PATCH(.lock_clear_lockstat_patch_point,
6587c478bd9Sstevel@tonic-gate		LS_LOCK_CLEAR_RELEASE, NOP_INSTR, RET_INSTR, 1)
6597c478bd9Sstevel@tonic-gate	HOT_PATCH(.lock_set_spl_lockstat_patch_point,
6607c478bd9Sstevel@tonic-gate		LS_LOCK_SET_SPL_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
6617c478bd9Sstevel@tonic-gate
6627c478bd9Sstevel@tonic-gate	HOT_PATCH(LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_POINT,
6637c478bd9Sstevel@tonic-gate		LS_LOCK_CLEAR_SPLX_RELEASE,
6647c478bd9Sstevel@tonic-gate		LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_VAL, 0, 1);
6657c478bd9Sstevel@tonic-gate	leave			/* unwind stack */
6667c478bd9Sstevel@tonic-gate	ret
6677c478bd9Sstevel@tonic-gate	SET_SIZE(lockstat_hot_patch)
6687c478bd9Sstevel@tonic-gate
6697c478bd9Sstevel@tonic-gate	ENTRY(membar_enter)
6707c478bd9Sstevel@tonic-gate	ALTENTRY(membar_exit)
6712850d85bSmv	ALTENTRY(membar_sync)
6727c478bd9Sstevel@tonic-gate	mfence			/* lighter weight than lock; xorq $0,(%rsp) */
6737c478bd9Sstevel@tonic-gate	ret
6742850d85bSmv	SET_SIZE(membar_sync)
6757c478bd9Sstevel@tonic-gate	SET_SIZE(membar_exit)
6767c478bd9Sstevel@tonic-gate	SET_SIZE(membar_enter)
6777c478bd9Sstevel@tonic-gate
6787c478bd9Sstevel@tonic-gate	ENTRY(membar_producer)
6797c478bd9Sstevel@tonic-gate	sfence
6807c478bd9Sstevel@tonic-gate	ret
6817c478bd9Sstevel@tonic-gate	SET_SIZE(membar_producer)
6827c478bd9Sstevel@tonic-gate
6837c478bd9Sstevel@tonic-gate	ENTRY(membar_consumer)
6847c478bd9Sstevel@tonic-gate	lfence
6857c478bd9Sstevel@tonic-gate	ret
6867c478bd9Sstevel@tonic-gate	SET_SIZE(membar_consumer)
6877c478bd9Sstevel@tonic-gate
6887c478bd9Sstevel@tonic-gate/*
6897c478bd9Sstevel@tonic-gate * thread_onproc()
6907c478bd9Sstevel@tonic-gate * Set thread in onproc state for the specified CPU.
6917c478bd9Sstevel@tonic-gate * Also set the thread lock pointer to the CPU's onproc lock.
6927c478bd9Sstevel@tonic-gate * Since the new lock isn't held, the store ordering is important.
6937c478bd9Sstevel@tonic-gate * If not done in assembler, the compiler could reorder the stores.
6947c478bd9Sstevel@tonic-gate */
6957c478bd9Sstevel@tonic-gate
6967c478bd9Sstevel@tonic-gate	ENTRY(thread_onproc)
6977c478bd9Sstevel@tonic-gate	addq	$CPU_THREAD_LOCK, %rsi	/* pointer to disp_lock while running */
6987c478bd9Sstevel@tonic-gate	movl	$ONPROC_THREAD, T_STATE(%rdi)	/* set state to TS_ONPROC */
6997c478bd9Sstevel@tonic-gate	movq	%rsi, T_LOCKP(%rdi)	/* store new lock pointer */
7007c478bd9Sstevel@tonic-gate	ret
7017c478bd9Sstevel@tonic-gate	SET_SIZE(thread_onproc)
7027c478bd9Sstevel@tonic-gate
703575a7426Spt/*
704575a7426Spt * mutex_delay_default(void)
705575a7426Spt * Spins for approx a few hundred processor cycles and returns to caller.
706575a7426Spt */
707575a7426Spt
708575a7426Spt	ENTRY(mutex_delay_default)
709575a7426Spt	movq	$92,%r11
710575a7426Spt0:	decq	%r11
711575a7426Spt	jg	0b
712575a7426Spt	ret
713575a7426Spt	SET_SIZE(mutex_delay_default)
714575a7426Spt
715