/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright 2019 Joyent, Inc.
 */

#include "assym.h"

#include <sys/mutex_impl.h>
#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/regset.h>
#include <sys/rwlock_impl.h>
#include <sys/lockstat.h>

/*
 * lock_try(lp), ulock_try(lp)
 *	- returns non-zero on success.
 *	- doesn't block interrupts so don't use this to spin on a lock.
 *
 * ulock_try() is for a lock in the user address space.
 */

	.globl	kernelbase

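/*
 * A rough C-level sketch of the lock_try() fast path below, for
 * illustration only (the hand-written assembly is the implementation;
 * atomic_swap_uchar() stands in for the xchgb instruction):
 *
 *	int
 *	lock_try(lock_t *lp)
 *	{
 *		return (atomic_swap_uchar((uchar_t *)lp, 0xff) == 0);
 *	}
 *
 * That is, atomically store 0xff into the lock byte and succeed only if
 * the byte was previously zero (lock free).  ulock_try() is the same
 * operation on a byte in user space (storing 1 rather than 0xff), and
 * under DEBUG it panics if the address is not below kernelbase.
 */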
	ENTRY(lock_try)
	movb	$-1, %dl
	movzbq	%dl, %rax
	xchgb	%dl, (%rdi)
	xorb	%dl, %al
.lock_try_lockstat_patch_point:
	ret
	testb	%al, %al
	jnz	0f
	ret
0:
	movq	%gs:CPU_THREAD, %rdx	/* rdx = thread addr */
	movq	%rdi, %rsi		/* rsi = lock addr */
	movl	$LS_LOCK_TRY_ACQUIRE, %edi /* edi = event */
	jmp	lockstat_wrapper
	SET_SIZE(lock_try)

	ENTRY(lock_spin_try)
	movb	$-1, %dl
	movzbq	%dl, %rax
	xchgb	%dl, (%rdi)
	xorb	%dl, %al
	ret
	SET_SIZE(lock_spin_try)

	ENTRY(ulock_try)
#ifdef DEBUG
	movq	kernelbase(%rip), %rax
	cmpq	%rax, %rdi		/* test uaddr < kernelbase */
	jb	ulock_pass		/*	uaddr < kernelbase, proceed */

	movq	%rdi, %r12		/* preserve lock ptr for debugging */
	leaq	.ulock_panic_msg(%rip), %rdi
	pushq	%rbp			/* align stack properly */
	movq	%rsp, %rbp
	xorl	%eax, %eax		/* clear for varargs */
	call	panic

#endif /* DEBUG */

ulock_pass:
	movl	$1, %eax
	xchgb	%al, (%rdi)
	xorb	$1, %al
	ret
	SET_SIZE(ulock_try)

#ifdef DEBUG
	.data
.ulock_panic_msg:
	.string "ulock_try: Argument is above kernelbase"
	.text
#endif	/* DEBUG */

/*
 * lock_clear(lp)
 *	- unlock lock without changing interrupt priority level.
 */
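/*
 * Note (sketch): the release is just a plain byte store -- effectively
 * "*(volatile uchar_t *)lp = 0" -- so the movb below is the entire fast
 * path.  The ret at the lockstat patch point is hot-patched to a nop by
 * the lockstat driver, letting execution fall through to the
 * LS_LOCK_CLEAR_RELEASE event code (see the mutex comments further down
 * for the hot-patching scheme).
 */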

	ENTRY(lock_clear)
	movb	$0, (%rdi)
.lock_clear_lockstat_patch_point:
	ret
	movq	%rdi, %rsi			/* rsi = lock addr */
	movq	%gs:CPU_THREAD, %rdx		/* rdx = thread addr */
	movl	$LS_LOCK_CLEAR_RELEASE, %edi	/* edi = event */
	jmp	lockstat_wrapper
	SET_SIZE(lock_clear)

	ENTRY(ulock_clear)
#ifdef DEBUG
	movq	kernelbase(%rip), %rcx
	cmpq	%rcx, %rdi		/* test uaddr < kernelbase */
	jb	ulock_clr		/*	 uaddr < kernelbase, proceed */

	leaq	.ulock_clear_msg(%rip), %rdi
	pushq	%rbp			/* align stack properly */
	movq	%rsp, %rbp
	xorl	%eax, %eax		/* clear for varargs */
	call	panic
#endif

ulock_clr:
	movb	$0, (%rdi)
	ret
	SET_SIZE(ulock_clear)

#ifdef DEBUG
	.data
.ulock_clear_msg:
	.string "ulock_clear: Argument is above kernelbase"
	.text
#endif	/* DEBUG */


/*
 * lock_set_spl(lock_t *lp, int new_pil, u_short *old_pil)
 * Sets pil to new_pil, grabs lp, stores old pil in *old_pil.
 */
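/*
 * Equivalent logic as a C sketch (illustration only; splr() raises the
 * priority level and lock_set_spl_spin() is the C slow path the code
 * below tail-calls when the lock is already held):
 *
 *	void
 *	lock_set_spl(lock_t *lp, int new_pil, ushort_t *old_pil)
 *	{
 *		int old = splr(new_pil);
 *
 *		if (atomic_swap_uchar((uchar_t *)lp, 0xff) == 0) {
 *			*old_pil = (ushort_t)old;
 *			return;
 *		}
 *		lock_set_spl_spin(lp, new_pil, old_pil, old);
 *	}
 */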

	ENTRY(lock_set_spl)
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$32, %rsp
	movl	%esi, 8(%rsp)		/* save priority level */
	movq	%rdx, 16(%rsp)		/* save old pil ptr */
	movq	%rdi, 24(%rsp)		/* save lock pointer */
	movl	%esi, %edi		/* pass priority level */
	call	splr			/* raise priority level */
	movq	24(%rsp), %rdi		/* rdi = lock addr */
	movb	$-1, %dl
	xchgb	%dl, (%rdi)		/* try to set lock */
	testb	%dl, %dl		/* did we get the lock? ... */
	jnz	.lss_miss		/* ... no, go to C for the hard case */
	movq	16(%rsp), %rdx		/* rdx = old pil addr */
	movw	%ax, (%rdx)		/* store old pil */
	leave
.lock_set_spl_lockstat_patch_point:
	ret
	movq	%rdi, %rsi		/* rsi = lock addr */
	movq	%gs:CPU_THREAD, %rdx	/* rdx = thread addr */
	movl	$LS_LOCK_SET_SPL_ACQUIRE, %edi
	jmp	lockstat_wrapper
.lss_miss:
	movl	8(%rsp), %esi		/* new_pil */
	movq	16(%rsp), %rdx		/* old_pil_addr */
	movl	%eax, %ecx		/* original pil */
	leave				/* unwind stack */
	jmp	lock_set_spl_spin
	SET_SIZE(lock_set_spl)

/*
 * void
 * lock_init(lp)
 */

	ENTRY(lock_init)
	movb	$0, (%rdi)
	ret
	SET_SIZE(lock_init)

/*
 * void
 * lock_set(lp)
 */

	ENTRY(lock_set)
	movb	$-1, %dl
	xchgb	%dl, (%rdi)		/* try to set lock */
	testb	%dl, %dl		/* did we get it? */
	jnz	lock_set_spin		/* no, go to C for the hard case */
.lock_set_lockstat_patch_point:
	ret
	movq	%rdi, %rsi		/* rsi = lock addr */
	movq	%gs:CPU_THREAD, %rdx	/* rdx = thread addr */
	movl	$LS_LOCK_SET_ACQUIRE, %edi
	jmp	lockstat_wrapper
	SET_SIZE(lock_set)

/*
 * lock_clear_splx(lp, s)
 */
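/*
 * In C terms (sketch only), this clears the lock and then restores the
 * caller's saved priority level via splx():
 *
 *	void
 *	lock_clear_splx(lock_t *lp, int s)
 *	{
 *		*(volatile uchar_t *)lp = 0;
 *		splx(s);
 *	}
 *
 * When lockstat is active, the short jmp at the patch point below is
 * re-aimed at .lock_clear_splx_lockstat so the release event is recorded
 * after splx() returns.
 */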

	ENTRY(lock_clear_splx)
	movb	$0, (%rdi)		/* clear lock */
.lock_clear_splx_lockstat_patch_point:
	jmp	0f
0:
	movl	%esi, %edi		/* arg for splx */
	jmp	splx			/* let splx do its thing */
.lock_clear_splx_lockstat:
	pushq	%rbp			/* align stack properly */
	movq	%rsp, %rbp
	subq	$16, %rsp		/* space to save args across splx */
	movq	%rdi, 8(%rsp)		/* save lock ptr across splx call */
	movl	%esi, %edi		/* arg for splx */
	call	splx			/* lower the priority */
	movq	8(%rsp), %rsi		/* rsi = lock ptr */
	leave				/* unwind stack */
	movq	%gs:CPU_THREAD, %rdx	/* rdx = thread addr */
	movl	$LS_LOCK_CLEAR_SPLX_RELEASE, %edi
	jmp	lockstat_wrapper
	SET_SIZE(lock_clear_splx)

#if defined(__GNUC_AS__)
#define	LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_VAL	\
	(.lock_clear_splx_lockstat - .lock_clear_splx_lockstat_patch_point - 2)

#define LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_POINT	\
	(.lock_clear_splx_lockstat_patch_point + 1)
#else
#define	LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_VAL	\
	[.lock_clear_splx_lockstat - .lock_clear_splx_lockstat_patch_point - 2]

#define LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_POINT	\
	[.lock_clear_splx_lockstat_patch_point + 1]
#endif
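/*
 * The macros above locate the hot-patch site for lock_clear_splx(): the
 * "jmp 0f" at the patch point assembles to a two-byte short jump, so the
 * byte at (patch point + 1) is its 8-bit displacement, and the patch
 * value is the displacement needed to reach .lock_clear_splx_lockstat
 * instead (target minus the address just past the two-byte instruction).
 */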

/*
 * mutex_enter() and mutex_exit().
 *
 * These routines handle the simple cases of mutex_enter() (adaptive
 * lock, not held) and mutex_exit() (adaptive lock, held, no waiters).
 * If anything complicated is going on we punt to mutex_vector_enter().
 *
 * mutex_tryenter() is similar to mutex_enter() but returns zero if
 * the lock cannot be acquired, nonzero on success.
 *
 * If mutex_exit() gets preempted in the window between checking waiters
 * and clearing the lock, we can miss wakeups.  Disabling preemption
 * in the mutex code is prohibitively expensive, so instead we detect
 * mutex preemption by examining the trapped PC in the interrupt path.
 * If we interrupt a thread in mutex_exit() that has not yet cleared
 * the lock, cmnint() resets its PC back to the beginning of
 * mutex_exit() so it will check again for waiters when it resumes.
 *
 * The lockstat code below is activated when the lockstat driver
 * calls lockstat_hot_patch() to hot-patch the kernel mutex code.
 * Note that we don't need to test lockstat_event_mask here -- we won't
 * patch this code in unless we're gathering ADAPTIVE_HOLD lockstats.
 */
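/*
 * A rough C-level sketch of the mutex_enter()/mutex_tryenter() fast
 * paths below (illustration only; the owner word is the first 8 bytes
 * of the adaptive mutex and is 0 when the lock is free, so
 * atomic_cas_ptr() stands in for the "lock cmpxchgq" and curthread for
 * the %gs:CPU_THREAD load):
 *
 *	void
 *	mutex_enter(kmutex_t *lp)
 *	{
 *		if (atomic_cas_ptr((void **)lp, NULL, curthread) != NULL)
 *			mutex_vector_enter(lp);
 *	}
 *
 *	int
 *	mutex_tryenter(kmutex_t *lp)
 *	{
 *		if (atomic_cas_ptr((void **)lp, NULL, curthread) != NULL)
 *			return (mutex_vector_tryenter(lp));
 *		return (1);
 *	}
 */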

	ENTRY_NP(mutex_enter)
	movq	%gs:CPU_THREAD, %rdx		/* rdx = thread ptr */
	xorl	%eax, %eax			/* rax = 0 (unheld adaptive) */
	lock
	cmpxchgq %rdx, (%rdi)
	jnz	mutex_vector_enter
.mutex_enter_lockstat_patch_point:
#if defined(OPTERON_WORKAROUND_6323525)
.mutex_enter_6323525_patch_point:
	ret					/* nop space for lfence */
	nop
	nop
.mutex_enter_lockstat_6323525_patch_point:	/* new patch point if lfence */
	nop
#else	/* OPTERON_WORKAROUND_6323525 */
	ret
#endif	/* OPTERON_WORKAROUND_6323525 */
	movq	%rdi, %rsi
	movl	$LS_MUTEX_ENTER_ACQUIRE, %edi
/*
 * expects %rdx=thread, %rsi=lock, %edi=lockstat event
 */
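/*
 * In outline (sketch), the wrapper brackets the probe with the per-thread
 * t_lockstat count and fires the DTrace probe only if the event's entry
 * in lockstat_probemap[] is armed:
 *
 *	curthread->t_lockstat++;
 *	if ((id = lockstat_probemap[event]) != 0)
 *		(*lockstat_probe)(id, (uintptr_t)lock, ...);
 *	curthread->t_lockstat--;
 *	return (1);
 *
 * The constant return value of 1 is what lets the tryenter paths share
 * this code and still report success.
 */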
	ALTENTRY(lockstat_wrapper)
	incb	T_LOCKSTAT(%rdx)		/* curthread->t_lockstat++ */
	leaq	lockstat_probemap(%rip), %rax
	movl	(%rax, %rdi, DTRACE_IDSIZE), %eax
	testl	%eax, %eax			/* check for non-zero probe */
	jz	1f
	pushq	%rbp				/* align stack properly */
	movq	%rsp, %rbp
	movl	%eax, %edi
	movq	lockstat_probe, %rax
	INDIRECT_CALL_REG(rax)
	leave					/* unwind stack */
1:
	movq	%gs:CPU_THREAD, %rdx		/* reload thread ptr */
	decb	T_LOCKSTAT(%rdx)		/* curthread->t_lockstat-- */
	movl	$1, %eax			/* return success if tryenter */
	ret
	SET_SIZE(lockstat_wrapper)
	SET_SIZE(mutex_enter)

/*
 * expects %rcx=thread, %rdx=arg, %rsi=lock, %edi=lockstat event
 */
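/*
 * Same shape as lockstat_wrapper() above, except the extra value in %rdx
 * is passed through to the probe (as its argument after the lock address)
 * and the thread pointer arrives in %rcx rather than %rdx.
 */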
	ENTRY(lockstat_wrapper_arg)
	incb	T_LOCKSTAT(%rcx)		/* curthread->t_lockstat++ */
	leaq	lockstat_probemap(%rip), %rax
	movl	(%rax, %rdi, DTRACE_IDSIZE), %eax
	testl	%eax, %eax			/* check for non-zero probe */
	jz	1f
	pushq	%rbp				/* align stack properly */
	movq	%rsp, %rbp
	movl	%eax, %edi
	movq	lockstat_probe, %rax
	INDIRECT_CALL_REG(rax)
	leave					/* unwind stack */
1:
	movq	%gs:CPU_THREAD, %rdx		/* reload thread ptr */
	decb	T_LOCKSTAT(%rdx)		/* curthread->t_lockstat-- */
	movl	$1, %eax			/* return success if tryenter */
	ret
	SET_SIZE(lockstat_wrapper_arg)


	ENTRY(mutex_tryenter)
	movq	%gs:CPU_THREAD, %rdx		/* rdx = thread ptr */
	xorl	%eax, %eax			/* rax = 0 (unheld adaptive) */
	lock
	cmpxchgq %rdx, (%rdi)
	jnz	mutex_vector_tryenter
	not	%eax				/* return success (nonzero) */
#if defined(OPTERON_WORKAROUND_6323525)
.mutex_tryenter_lockstat_patch_point:
.mutex_tryenter_6323525_patch_point:
	ret					/* nop space for lfence */
	nop
	nop
.mutex_tryenter_lockstat_6323525_patch_point:	/* new patch point if lfence */
	nop
#else	/* OPTERON_WORKAROUND_6323525 */
.mutex_tryenter_lockstat_patch_point:
	ret
#endif	/* OPTERON_WORKAROUND_6323525 */
	movq	%rdi, %rsi
	movl	$LS_MUTEX_ENTER_ACQUIRE, %edi
	jmp	lockstat_wrapper
	SET_SIZE(mutex_tryenter)

	ENTRY(mutex_adaptive_tryenter)
	movq	%gs:CPU_THREAD, %rdx		/* rdx = thread ptr */
	xorl	%eax, %eax			/* rax = 0 (unheld adaptive) */
	lock
	cmpxchgq %rdx, (%rdi)
	jnz	0f
	not	%eax				/* return success (nonzero) */
#if defined(OPTERON_WORKAROUND_6323525)
.mutex_atryenter_6323525_patch_point:
	ret					/* nop space for lfence */
	nop
	nop
	nop
#else	/* OPTERON_WORKAROUND_6323525 */
	ret
#endif	/* OPTERON_WORKAROUND_6323525 */
0:
	xorl	%eax, %eax			/* return failure */
	ret
	SET_SIZE(mutex_adaptive_tryenter)

	.globl	mutex_owner_running_critical_start

	ENTRY(mutex_owner_running)
mutex_owner_running_critical_start:
	movq	(%rdi), %r11		/* get owner field */
	andq	$MUTEX_THREAD, %r11	/* remove waiters bit */
	cmpq	$0, %r11		/* if free, skip */