arch/i386/include/mutex.h, lines 43-55:

#define MUTEX_ASSERT_LOCKED(mtx) do {					\
	if ((mtx)->mtx_lock != 1 ||					\
	    (mtx)->mtx_owner != curcpu())				\
		panic("mutex %p not held in %s", (mtx), __func__);	\
} while (0)

#define MUTEX_ASSERT_UNLOCKED(mtx) do {					\
	if ((mtx)->mtx_lock == 1 &&					\
	    (mtx)->mtx_owner == curcpu())				\
		panic("mutex %p held in %s", (mtx), __func__);		\
} while (0)

#define MUTEX_OLDIPL(mtx)	(mtx)->mtx_oldipl
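On i386 a kernel mutex records both a lock word (mtx_lock) and the owning
CPU (mtx_owner), so these assertions catch "not locked at all" as well as
"locked by a different CPU", and the panic message names the offending
function. A minimal usage sketch, assuming a hypothetical driver softc
(foo_softc, foo_bump() and foo_intr() are invented for illustration;
struct mutex, mtx_enter()/mtx_leave() and the assertion macros above are
the real interfaces):

#include <sys/mutex.h>

struct foo_softc {
	struct mutex	sc_lock;	/* protects sc_count */
	int		sc_count;
};

/* Internal helper: the caller must hold sc_lock. */
void
foo_bump(struct foo_softc *sc)
{
	MUTEX_ASSERT_LOCKED(&sc->sc_lock);	/* panics on misuse */
	sc->sc_count++;
}

void
foo_intr(struct foo_softc *sc)
{
	MUTEX_ASSERT_UNLOCKED(&sc->sc_lock);	/* no recursion expected */
	mtx_enter(&sc->sc_lock);	/* takes mtx_lock, records curcpu() */
	foo_bump(sc);
	mtx_leave(&sc->sc_lock);
}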
dev/acpi/dsdt.c, acpi_mutex_acquire(), lines 476-502:

	struct acpi_mutex *mtx = val->v_mutex;
	...
	rw_enter_write(&mtx->amt_lock);
	while (mtx->amt_ref_count) {
		rw_exit_write(&mtx->amt_lock);
		...
		ts = tsleep(mtx, PWAIT, mtx->amt_name, timeout / hz);
		...
		rw_enter_write(&mtx->amt_lock);
	}
	...
	mtx->amt_ref_count++;
	rw_exit_write(&mtx->amt_lock);
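The ACPI mutex is a sleeping lock built from two primitives: an rwlock
(amt_lock) that only guards the reference count, and tsleep()/wakeup()
keyed on the mutex's address for blocking. Contenders drop the rwlock
before sleeping so the holder can take it back to release. A sketch of
the whole acquire path under those assumptions; the EWOULDBLOCK branch
below fills a gap in the search output and is an assumption, not the
verbatim dsdt.c code:

#include <sys/param.h>
#include <sys/systm.h>		/* tsleep(), wakeup() */
#include <sys/rwlock.h>
#include <sys/errno.h>

int
acpi_mutex_acquire_sketch(struct acpi_mutex *mtx, int timeout)
{
	int ts;

	rw_enter_write(&mtx->amt_lock);
	while (mtx->amt_ref_count) {
		rw_exit_write(&mtx->amt_lock);
		/* sleep on the mutex address until a release wakes us */
		ts = tsleep(mtx, PWAIT, mtx->amt_name, timeout / hz);
		if (ts == EWOULDBLOCK)
			return (1);	/* timed out, mutex not acquired */
		rw_enter_write(&mtx->amt_lock);
	}
	mtx->amt_ref_count++;		/* we own the mutex now */
	rw_exit_write(&mtx->amt_lock);
	return (0);
}

Because amt_ref_count is re-checked under the rwlock after every wakeup,
spurious wakeups are harmless: all sleepers wake, race to retake
amt_lock, and the losers simply go back to sleep.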
dev/acpi/dsdt.c, acpi_mutex_release(), lines 513-531:

	struct acpi_mutex *mtx = val->v_mutex;
	...
	rw_enter_write(&mtx->amt_lock);

	if (mtx->amt_ref_count == 0) {
		printf("acpi_mutex_release underflow %s\n", mtx->amt_name);
		...
	}

	mtx->amt_ref_count--;
	wakeup(mtx);		/* wake all of them up */

	rw_exit_write(&mtx->amt_lock);
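Release is the mirror image: decrement under the rwlock, then wakeup()
every sleeper so the loop in the acquire path can re-test the count. A
sketch with the underflow escape filled in as an assumption (the elided
lines presumably bail out before the decrement):

void
acpi_mutex_release_sketch(struct acpi_mutex *mtx)
{
	rw_enter_write(&mtx->amt_lock);
	if (mtx->amt_ref_count == 0) {
		/* releasing a mutex that was never acquired */
		printf("acpi_mutex_release underflow %s\n", mtx->amt_name);
		rw_exit_write(&mtx->amt_lock);
		return;
	}
	mtx->amt_ref_count--;
	wakeup(mtx);	/* wake all of them up; they recheck the count */
	rw_exit_write(&mtx->amt_lock);
}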
dev/ic/athvar.h, struct ath_softc, lines 223-283:

	struct mtx	sc_mtx;		/* master lock (recursive) */
	...
	struct mtx	sc_txbuflock;	/* txbuf lock */
	...
	struct mtx	sc_txqlock;	/* lock on txq and txlink */
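ath(4) splits its locking by subsystem rather than taking one big lock:
a recursive master lock for overall driver state, one lock for the free
transmit-buffer list, and one for the transmit queue and its txlink
pointer. Drivers in this family usually hide the fields behind wrapper
macros; the sketch below is modeled on the FreeBSD driver this code
descends from (the macro names and the FreeBSD mtx_lock()/mtx_unlock()
calls matching the "struct mtx" type are assumptions; a port would map
them onto its host's own mutex primitives):

/* Hypothetical per-subsystem lock wrappers, FreeBSD-style mutex(9). */
#define	ATH_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
#define	ATH_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define	ATH_TXBUF_LOCK(_sc)	mtx_lock(&(_sc)->sc_txbuflock)
#define	ATH_TXBUF_UNLOCK(_sc)	mtx_unlock(&(_sc)->sc_txbuflock)
#define	ATH_TXQ_LOCK(_sc)	mtx_lock(&(_sc)->sc_txqlock)
#define	ATH_TXQ_UNLOCK(_sc)	mtx_unlock(&(_sc)->sc_txqlock)

Keeping the txbuf and txq locks separate from the master lock lets the
transmit-completion path recycle buffers without stalling control paths,
and the master lock's "recursive" comment suggests it tolerates
re-entrant acquisition so nested control paths don't self-deadlock.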