Lines Matching refs:lp

242 mutex_panic(char *msg, mutex_impl_t *lp)  in mutex_panic()  argument
247 if (atomic_cas_ptr(&panic_mutex_addr, NULL, lp) == NULL) in mutex_panic()
248 panic_mutex = *lp; in mutex_panic()
251 msg, (void *)lp, (void *)MUTEX_OWNER(&panic_mutex), in mutex_panic()
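
The mutex_panic() fragments above show a compare-and-swap being used so that only the first thread to detect a bad lock records it (address and snapshot) before panicking. Below is a minimal userland sketch of that record-once pattern, assuming C11 atomics; fake_mutex_t, panic_mutex_addr and mutex_panic_sketch() are illustrative names for this sketch, not the kernel's definitions.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { void *owner; } fake_mutex_t;           /* stand-in lock type */

static _Atomic(fake_mutex_t *) panic_mutex_addr;        /* first bad lock seen */
static fake_mutex_t panic_mutex;                        /* snapshot of it */

static void
mutex_panic_sketch(const char *msg, fake_mutex_t *lp)
{
    fake_mutex_t *expected = NULL;

    /* Only the first caller wins the CAS and takes the snapshot. */
    if (atomic_compare_exchange_strong(&panic_mutex_addr, &expected, lp))
        panic_mutex = *lp;

    fprintf(stderr, "%s, lp=%p owner=%p\n", msg, (void *)lp,
        panic_mutex.owner);
    abort();
}
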
343 mutex_vector_enter(mutex_impl_t *lp) in mutex_vector_enter() argument
351 volatile mutex_impl_t *vlp = (volatile mutex_impl_t *)lp; in mutex_vector_enter()
357 if (MUTEX_TYPE_SPIN(lp)) { in mutex_vector_enter()
358 lock_set_spl(&lp->m_spin.m_spinlock, lp->m_spin.m_minspl, in mutex_vector_enter()
359 &lp->m_spin.m_oldspl); in mutex_vector_enter()
363 if (!MUTEX_TYPE_ADAPTIVE(lp)) { in mutex_vector_enter()
364 mutex_panic("mutex_enter: bad mutex", lp); in mutex_vector_enter()
377 mutex_panic("mutex_enter: adaptive at high PIL", lp); in mutex_vector_enter()
393 if (mutex_adaptive_tryenter(lp)) { in mutex_vector_enter()
412 mutex_panic("recursive mutex_enter", lp); in mutex_vector_enter()
421 if (mutex_owner_running(lp) != NULL) { in mutex_vector_enter()
429 ts = turnstile_lookup(lp); in mutex_vector_enter()
430 MUTEX_SET_WAITERS(lp); in mutex_vector_enter()
437 if (mutex_owner_running(lp) != NULL) { in mutex_vector_enter()
438 turnstile_exit(lp); in mutex_vector_enter()
448 (void) turnstile_block(ts, TS_WRITER_Q, lp, in mutex_vector_enter()
454 turnstile_exit(lp); in mutex_vector_enter()
458 ASSERT(MUTEX_OWNER(lp) == curthread); in mutex_vector_enter()
465 LOCKSTAT_RECORD(LS_MUTEX_ENTER_BLOCK, lp, sleep_time); in mutex_vector_enter()
470 LOCKSTAT_RECORD_TIME(LS_MUTEX_ENTER_SPIN, lp, in mutex_vector_enter()
474 LOCKSTAT_RECORD0(LS_MUTEX_ENTER_ACQUIRE, lp); in mutex_vector_enter()
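
The mutex_vector_enter() fragments trace the adaptive path: try the lock, keep spinning while mutex_owner_running() says the holder is still on a CPU, and otherwise sleep on a turnstile until the holder drops the lock. The following is a simplified userland analogue of that spin-then-block flow, assuming pthreads and C11 atomics; amutex_t and the bounded spin that stands in for the owner-running check are assumptions of the sketch, not the kernel implementation.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
    atomic_uintptr_t owner;    /* 0 == unowned, else the owning thread */
    pthread_mutex_t  mtx;      /* protects the sleep queue (turnstile stand-in) */
    pthread_cond_t   cv;       /* where contending threads block */
} amutex_t;

#define AMUTEX_INIT { 0, PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER }

static bool
amutex_tryenter(amutex_t *lp)
{
    uintptr_t unowned = 0;

    /* Assumes pthread_t converts to an integer, as on most platforms. */
    return atomic_compare_exchange_strong(&lp->owner, &unowned,
        (uintptr_t)pthread_self());
}

static void
amutex_enter(amutex_t *lp)
{
    /* Fast path plus a bounded spin; the kernel instead spins only
     * while the current owner is known to be running on a CPU. */
    for (int i = 0; i < 1000; i++)
        if (amutex_tryenter(lp))
            return;

    /* Slow path: block, re-trying the lock each time we are woken. */
    pthread_mutex_lock(&lp->mtx);
    while (!amutex_tryenter(lp))
        pthread_cond_wait(&lp->cv, &lp->mtx);
    pthread_mutex_unlock(&lp->mtx);
}

static void
amutex_exit(amutex_t *lp)
{
    atomic_store(&lp->owner, (uintptr_t)0);
    pthread_mutex_lock(&lp->mtx);
    pthread_cond_broadcast(&lp->cv);    /* wake any blocked waiters */
    pthread_mutex_unlock(&lp->mtx);
}

Usage: declare `amutex_t m = AMUTEX_INIT;` and bracket critical sections with amutex_enter(&m)/amutex_exit(&m). As with adaptive mutexes, there is no ownership hand-off: a thread arriving while the lock is briefly free may barge ahead of sleeping waiters, which the broadcast-and-retry loop tolerates.
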
482 mutex_vector_tryenter(mutex_impl_t *lp) in mutex_vector_tryenter() argument
486 if (MUTEX_TYPE_ADAPTIVE(lp)) in mutex_vector_tryenter()
489 if (!MUTEX_TYPE_SPIN(lp)) { in mutex_vector_tryenter()
490 mutex_panic("mutex_tryenter: bad mutex", lp); in mutex_vector_tryenter()
494 s = splr(lp->m_spin.m_minspl); in mutex_vector_tryenter()
495 if (lock_try(&lp->m_spin.m_spinlock)) { in mutex_vector_tryenter()
496 lp->m_spin.m_oldspl = (ushort_t)s; in mutex_vector_tryenter()
508 mutex_vector_exit(mutex_impl_t *lp) in mutex_vector_exit() argument
512 if (MUTEX_TYPE_SPIN(lp)) { in mutex_vector_exit()
513 lock_clear_splx(&lp->m_spin.m_spinlock, lp->m_spin.m_oldspl); in mutex_vector_exit()
517 if (MUTEX_OWNER(lp) != curthread) { in mutex_vector_exit()
518 mutex_panic("mutex_exit: not owner", lp); in mutex_vector_exit()
522 ts = turnstile_lookup(lp); in mutex_vector_exit()
523 MUTEX_CLEAR_LOCK_AND_WAITERS(lp); in mutex_vector_exit()
525 turnstile_exit(lp); in mutex_vector_exit()
528 LOCKSTAT_RECORD0(LS_MUTEX_EXIT_RELEASE, lp); in mutex_vector_exit()
534 const mutex_impl_t *lp = (const mutex_impl_t *)mp; in mutex_owned() local
539 if (MUTEX_TYPE_ADAPTIVE(lp)) in mutex_owned()
540 return (MUTEX_OWNER(lp) == curthread); in mutex_owned()
541 return (LOCK_HELD(&lp->m_spin.m_spinlock)); in mutex_owned()
547 const mutex_impl_t *lp = (const mutex_impl_t *)mp; in mutex_owner() local
550 if (MUTEX_TYPE_ADAPTIVE(lp) && (t = MUTEX_OWNER(lp)) != MUTEX_NO_OWNER) in mutex_owner()
569 mutex_impl_t *lp = (mutex_impl_t *)mp; in mutex_init() local
575 MUTEX_SET_TYPE(lp, MUTEX_SPIN); in mutex_init()
576 LOCK_INIT_CLEAR(&lp->m_spin.m_spinlock); in mutex_init()
577 LOCK_INIT_HELD(&lp->m_spin.m_dummylock); in mutex_init()
578 lp->m_spin.m_minspl = (int)(intptr_t)ibc; in mutex_init()
583 if (((uintptr_t)lp & (uintptr_t)(MUTEX_ALIGN - 1)) && in mutex_init()
601 (void *)lp, MUTEX_ALIGN, in mutex_init()
613 MUTEX_SET_TYPE(lp, MUTEX_ADAPTIVE); in mutex_init()
614 MUTEX_CLEAR_LOCK_AND_WAITERS(lp); in mutex_init()
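
The alignment test in the mutex_init() fragments above masks the lock's address with MUTEX_ALIGN - 1 to detect a misaligned mutex. A standalone sketch of that check, assuming MUTEX_ALIGN is a power of two (the value 8 below is an assumption of the sketch, not the kernel's definition):

#include <stdint.h>
#include <stdio.h>

#define MUTEX_ALIGN 8   /* assumed value; must be a power of two */

/* Nonzero when p is not MUTEX_ALIGN-aligned, mirroring the
 * ((uintptr_t)lp & (MUTEX_ALIGN - 1)) test above. */
static int
is_misaligned(const void *p)
{
    return ((uintptr_t)p & (uintptr_t)(MUTEX_ALIGN - 1)) != 0;
}

int
main(void)
{
    _Alignas(MUTEX_ALIGN) char buf[16];

    /* Prints "0 1": buf is aligned, buf + 1 is not. */
    printf("%d %d\n", is_misaligned(&buf[0]), is_misaligned(&buf[1]));
    return 0;
}
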
621 mutex_impl_t *lp = (mutex_impl_t *)mp; in mutex_destroy() local
623 if (lp->m_owner == 0 && !MUTEX_HAS_WAITERS(lp)) { in mutex_destroy()
624 MUTEX_DESTROY(lp); in mutex_destroy()
625 } else if (MUTEX_TYPE_SPIN(lp)) { in mutex_destroy()
626 LOCKSTAT_RECORD0(LS_MUTEX_DESTROY_RELEASE, lp); in mutex_destroy()
627 MUTEX_DESTROY(lp); in mutex_destroy()
628 } else if (MUTEX_TYPE_ADAPTIVE(lp)) { in mutex_destroy()
629 LOCKSTAT_RECORD0(LS_MUTEX_DESTROY_RELEASE, lp); in mutex_destroy()
630 if (MUTEX_OWNER(lp) != curthread) in mutex_destroy()
631 mutex_panic("mutex_destroy: not owner", lp); in mutex_destroy()
632 if (MUTEX_HAS_WAITERS(lp)) { in mutex_destroy()
633 turnstile_t *ts = turnstile_lookup(lp); in mutex_destroy()
634 turnstile_exit(lp); in mutex_destroy()
636 mutex_panic("mutex_destroy: has waiters", lp); in mutex_destroy()
638 MUTEX_DESTROY(lp); in mutex_destroy()
640 mutex_panic("mutex_destroy: bad mutex", lp); in mutex_destroy()
648 lock_set_spin(lock_t *lp) in lock_set_spin() argument
658 panic("lock_set: %p lock held and only one CPU", (void *)lp); in lock_set_spin()
662 while (LOCK_HELD(lp) || !lock_spin_try(lp)) { in lock_set_spin()
676 LOCKSTAT_RECORD_TIME(LS_LOCK_SET_SPIN, lp, spin_time); in lock_set_spin()
678 LOCKSTAT_RECORD0(LS_LOCK_SET_ACQUIRE, lp); in lock_set_spin()
682 lock_set_spl_spin(lock_t *lp, int new_pil, ushort_t *old_pil_addr, int old_pil) in lock_set_spl_spin() argument
693 (void *)lp); in lock_set_spl_spin()
701 while (LOCK_HELD(lp)) { in lock_set_spl_spin()
717 } while (!lock_spin_try(lp)); in lock_set_spl_spin()
721 LOCKSTAT_RECORD_TIME(LS_LOCK_SET_SPL_SPIN, lp, spin_time); in lock_set_spl_spin()
723 LOCKSTAT_RECORD0(LS_LOCK_SET_SPL_ACQUIRE, lp); in lock_set_spl_spin()
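
Both lock_set_spin() and lock_set_spl_spin() above spin with the test-and-test-and-set idiom: poll the lock word with a cheap load (LOCK_HELD) and only attempt the atomic acquisition (lock_spin_try) once it looks free. A minimal userland sketch of that loop, assuming C11 atomics; the kernel versions additionally handle backoff, panic on a single-CPU deadlock, and, for lock_set_spl_spin(), priority levels.

#include <stdatomic.h>
#include <stdbool.h>

typedef atomic_bool spin_lock_t;      /* true == held */

static bool
lock_try_sketch(spin_lock_t *lp)
{
    /* True if we moved the lock from free to held. */
    return !atomic_exchange(lp, true);
}

static void
lock_set_spin_sketch(spin_lock_t *lp)
{
    /* Spin on plain loads until the lock looks free, then attempt the
     * exchange; loop again if another thread beat us to it. */
    while (atomic_load_explicit(lp, memory_order_relaxed) ||
        !lock_try_sketch(lp))
        ;   /* a real lock would pause or back off here */
}

static void
lock_clear_sketch(spin_lock_t *lp)
{
    atomic_store(lp, false);    /* release the lock */
}
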