/*	$OpenBSD: lock.h,v 1.5 2007/05/29 18:18:20 tom Exp $	*/
/*	$NetBSD: lock.h,v 1.1.2.2 2000/05/03 14:40:55 sommerfeld Exp $	*/

/*-
 * Copyright (c) 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-dependent spin lock operations.
 */

#ifndef _I386_LOCK_H_
#define _I386_LOCK_H_

typedef __volatile int __cpu_simple_lock_t;

#define __SIMPLELOCK_LOCKED	1
#define __SIMPLELOCK_UNLOCKED	0

/*
 * Compiler barrier: prevents the compiler from reordering code around
 * this "instruction", acting as a sequence point for code generation.
 * It does not order memory accesses at the hardware level.
 * XXX something similar will move to <sys/cdefs.h> or thereabouts.
 */

#define __lockbarrier() __asm __volatile("": : :"memory")

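/*
 * Spin hook: the "pause" instruction tells the CPU that this is a
 * spin-wait loop, which saves power and avoids the memory-order
 * mis-speculation penalty otherwise taken when the lock word changes.
 */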
#define SPINLOCK_SPIN_HOOK	__asm __volatile("pause": : :"memory")

#ifdef LOCKDEBUG

extern void __cpu_simple_lock_init(__cpu_simple_lock_t *);
extern void __cpu_simple_lock(__cpu_simple_lock_t *);
extern int __cpu_simple_lock_try(__cpu_simple_lock_t *);
extern void __cpu_simple_unlock(__cpu_simple_lock_t *);

#else

#include <machine/atomic.h>

static __inline void __cpu_simple_lock_init(__cpu_simple_lock_t *)
	__attribute__((__unused__));
static __inline void __cpu_simple_lock(__cpu_simple_lock_t *)
	__attribute__((__unused__));
static __inline int __cpu_simple_lock_try(__cpu_simple_lock_t *)
	__attribute__((__unused__));
static __inline void __cpu_simple_unlock(__cpu_simple_lock_t *)
	__attribute__((__unused__));

/* Mark the lock released; no atomic operation is needed to initialize. */
static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *lockp)
{
	*lockp = __SIMPLELOCK_UNLOCKED;
	__lockbarrier();
}

/*
 * Acquire the lock: atomically swap in LOCKED and spin (with the pause
 * hint) until the old value returned was UNLOCKED, i.e. we are the one
 * that took the lock.
 */
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *lockp)
{
	while (i386_atomic_testset_i(lockp, __SIMPLELOCK_LOCKED)
	    == __SIMPLELOCK_LOCKED)
		SPINLOCK_SPIN_HOOK;
	__lockbarrier();
}

/* Try once to acquire the lock; return non-zero on success. */
static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *lockp)
{
	int r = (i386_atomic_testset_i(lockp, __SIMPLELOCK_LOCKED)
	    == __SIMPLELOCK_UNLOCKED);

	__lockbarrier();

	return (r);
}

/*
 * Release the lock.  The barrier keeps the compiler from sinking
 * critical-section accesses below the store that clears the lock.
 */
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *lockp)
{
	__lockbarrier();
	*lockp = __SIMPLELOCK_UNLOCKED;
}

#endif /* !LOCKDEBUG */
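
/*
 * Illustrative sketch (not part of this interface): typical use of the
 * simple-lock primitives by machine-independent code.
 *
 *	__cpu_simple_lock_t slock;
 *
 *	__cpu_simple_lock_init(&slock);
 *	__cpu_simple_lock(&slock);
 *	... critical section ...
 *	__cpu_simple_unlock(&slock);
 */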

/*
 * rw_cas is the compare-and-swap primitive used by the machine-independent
 * rwlock code; on i386 it is provided as a function.
 */
#ifdef _KERNEL
extern int rw_cas_486(volatile unsigned long *, unsigned long, unsigned long);
#define rw_cas rw_cas_486
#endif

#endif /* _I386_LOCK_H_ */