This source file includes the following definitions:
- lock_printf
- lockinit
- lockstatus
- lockmgr
- lockmgr_printinfo
- simple_lock_init
- _simple_lock
- _simple_lock_held
- _simple_lock_try
- _simple_unlock
- simple_lock_dump
- simple_lock_freecheck
- simple_lock_switchcheck
- simple_lock_only_held
- _kernel_lock_init
- _kernel_lock
- _kernel_unlock
- _kernel_proc_lock
- _kernel_proc_unlock

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <sys/sched.h>

#include <machine/cpu.h>

#ifndef spllock
#define spllock()	splhigh()
#endif

#ifdef MULTIPROCESSOR
#define CPU_NUMBER()	cpu_number()
#else
#define CPU_NUMBER()	0
#endif

void record_stacktrace(int *, int);
void playback_stacktrace(int *, int);

#ifdef DDB
#ifdef MULTIPROCESSOR
int simple_lock_debugger = 1;	/* more serious on MP */
#else
int simple_lock_debugger = 0;
#endif
#define SLOCK_DEBUGGER()	if (simple_lock_debugger) Debugger()
#define SLOCK_TRACE()						\
	db_stack_trace_print((db_expr_t)__builtin_frame_address(0), \
	    TRUE, 65535, "", lock_printf);
#else
#define SLOCK_DEBUGGER()	/* nothing */
#define SLOCK_TRACE()		/* nothing */
#endif

/*
 * Acquire a resource: loop until the "wanted" condition clears,
 * sleeping on the lock (or, when draining, on its flags word).
 */
#define ACQUIRE(lkp, error, extflags, drain, wanted)		\
do {								\
	for (error = 0; wanted; ) {				\
		if ((drain))					\
			(lkp)->lk_flags |= LK_WAITDRAIN;	\
		else						\
			(lkp)->lk_waitcount++;			\
		/* XXX Cast away volatile. */			\
		error = tsleep((drain) ?			\
		    (void *)&(lkp)->lk_flags : (void *)(lkp),	\
		    (lkp)->lk_prio, (lkp)->lk_wmesg, (lkp)->lk_timo); \
		if ((drain) == 0)				\
			(lkp)->lk_waitcount--;			\
		if (error)					\
			break;					\
		if ((extflags) & LK_SLEEPFAIL) {		\
			error = ENOLCK;				\
			break;					\
		}						\
	}							\
} while (0)

#define SETHOLDER(lkp, pid, cpu_id)				\
	(lkp)->lk_lockholder = (pid)

#define WEHOLDIT(lkp, pid, cpu_id)				\
	((lkp)->lk_lockholder == (pid))

#define WAKEUP_WAITER(lkp)					\
do {								\
	if ((lkp)->lk_waitcount) {				\
		/* XXX Cast away volatile. */			\
		wakeup((void *)(lkp));				\
	}							\
} while (0)

#if defined(LOCKDEBUG)
#if defined(MULTIPROCESSOR)
struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER;

#define SPINLOCK_LIST_LOCK()					\
	__cpu_simple_lock(&spinlock_list_slock.lock_data)

#define SPINLOCK_LIST_UNLOCK()					\
	__cpu_simple_unlock(&spinlock_list_slock.lock_data)
#else
#define SPINLOCK_LIST_LOCK()	/* nothing */

#define SPINLOCK_LIST_UNLOCK()	/* nothing */
#endif

TAILQ_HEAD(, lock) spinlock_list =
    TAILQ_HEAD_INITIALIZER(spinlock_list);
#endif

/* Lock-holder bookkeeping hooks; no-ops in this configuration. */
#define HAVEIT(lkp)						\
do {								\
} while (0)

#define DONTHAVEIT(lkp)						\
do {								\
} while (0)

#if defined(LOCKDEBUG)
#include <sys/syslog.h>		/* assumed: vlog() and LOG_DEBUG used below */
#include <sys/stdarg.h>		/* assumed for va_list; older trees use <machine/stdarg.h> */

/*
 * Lock debug output routine; a nonzero lock_debug_syslog routes
 * messages to syslog instead of the console.  The variable is
 * assumed missing from this listing and is declared here.
 */
int lock_debug_syslog = 0;

void
lock_printf(const char *fmt, ...)
{
	char b[150];
	va_list ap;

	va_start(ap, fmt);
	if (lock_debug_syslog)
		vlog(LOG_DEBUG, fmt, ap);
	else {
		vsnprintf(b, sizeof(b), fmt, ap);
		printf_nolog("%s", b);
	}
	va_end(ap);
}
#endif

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, int prio, char *wmesg, int timo, int flags)
{

	bzero(lkp, sizeof(struct lock));
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	lkp->lk_lockholder = LK_NOPROC;
	lkp->lk_prio = prio;
	lkp->lk_timo = timo;
	lkp->lk_wmesg = wmesg;
#if defined(LOCKDEBUG)
	lkp->lk_lock_file = NULL;
	lkp->lk_unlock_file = NULL;
#endif
}
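
/*
 * Illustrative usage sketch (not part of the original file): how a
 * caller might initialize a lockmgr lock and then take and release it
 * in shared mode.  The names example_lock, example_init, example_read
 * and the LOCK_EXAMPLES guard are hypothetical; PVFS is the usual VFS
 * sleep priority.
 */
#ifdef LOCK_EXAMPLES
struct lock example_lock;

void
example_init(void)
{
	/* Sleep at PVFS priority, wait message "exlock", no timeout. */
	lockinit(&example_lock, PVFS, "exlock", 0, 0);
}

int
example_read(void)
{
	int error;

	/* Shared request: sleeps while an exclusive holder exists. */
	error = lockmgr(&example_lock, LK_SHARED, NULL);
	if (error)
		return (error);
	/* ... read the protected data ... */
	return (lockmgr(&example_lock, LK_RELEASE, NULL));
}
#endif /* LOCK_EXAMPLES */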

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp)
{
	int lock_type = 0;

	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	return (lock_type);
}

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set
 * the LK_WANT_EXCL flag (preventing further shared locks) and wait for
 * already shared locks to clear.
 */
int
lockmgr(__volatile struct lock *lkp, u_int flags, struct simplelock *interlkp)
{
	int error;
	pid_t pid;
	int extflags;
	cpuid_t cpu_id;
	struct proc *p = curproc;

	error = 0;
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

#ifdef DIAGNOSTIC
	if (p == NULL)
		panic("lockmgr: process context required");
#endif

	pid = p->p_pid;
	cpu_id = CPU_NUMBER();

	/*
	 * Once a lock has drained, the LK_DRAINING flag is set and an
	 * exclusive lock is returned.  The only valid operation thereafter
	 * is a single release of that exclusive lock.  This final release
	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag.  Any
	 * further requests of any sort will panic.
	 */
	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
#ifdef DIAGNOSTIC
		if (lkp->lk_flags & LK_DRAINED)
			panic("lockmgr: using decommissioned lock");
		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
		    WEHOLDIT(lkp, pid, cpu_id) == 0)
			panic("lockmgr: non-release on draining lock: %d",
			    flags & LK_TYPE_MASK);
#endif
		lkp->lk_flags &= ~LK_DRAINING;
		lkp->lk_flags |= LK_DRAINED;
	}

	/*
	 * LK_CANRECURSE and LK_RECURSEFAIL are mutually exclusive.
	 */
	if ((lkp->lk_flags & (LK_CANRECURSE|LK_RECURSEFAIL)) ==
	    (LK_CANRECURSE|LK_RECURSEFAIL))
		panic("lockmgr: make up your mind");

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL));
			if (error)
				break;
			lkp->lk_sharecount++;
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;

		if (WEHOLDIT(lkp, pid, cpu_id) == 0 ||
		    lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
#if defined(LOCKDEBUG)
		/*
		 * file/line are not parameters here; they are assumed to
		 * be supplied by a LOCKDEBUG wrapper macro.
		 */
		lkp->lk_unlock_file = file;
		lkp->lk_unlock_line = line;
#endif
		DONTHAVEIT(lkp);
		WAKEUP_WAITER(lkp);
		break;

	case LK_EXCLUSIVE:
		if (WEHOLDIT(lkp, pid, cpu_id)) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0) {
				if (extflags & LK_RECURSEFAIL) {
					error = EDEADLK;
					break;
				} else
					panic("lockmgr: locking against myself");
			}
			lkp->lk_exclusivecount++;
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL)) ||
		    lkp->lk_sharecount != 0)) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks to finish.
		 */
		ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount != 0);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, cpu_id);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
				panic("lockmgr: pid %d, not exclusive lock "
				    "holder %d unlocking",
				    pid, lkp->lk_lockholder);
			}
			lkp->lk_exclusivecount--;
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
#if defined(LOCKDEBUG)
				lkp->lk_unlock_file = file;
				lkp->lk_unlock_line = line;
#endif
				DONTHAVEIT(lkp);
			}
		} else if (lkp->lk_sharecount != 0) {
			lkp->lk_sharecount--;
		}
#ifdef DIAGNOSTIC
		else
			panic("lockmgr: release of unlocked lock!");
#endif
		WAKEUP_WAITER(lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do.
		 */
		if (WEHOLDIT(lkp, pid, cpu_id))
			panic("lockmgr: draining against myself");
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
			error = EBUSY;
			break;
		}
		ACQUIRE(lkp, error, extflags, 1,
		    ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL)) ||
		    lkp->lk_sharecount != 0 ||
		    lkp->lk_waitcount != 0));
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, cpu_id);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		lkp->lk_exclusivecount = 1;
		break;

	default:
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) != 0 &&
	    ((lkp->lk_flags &
	    (LK_HAVE_EXCL | LK_WANT_EXCL)) == 0 &&
	    lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	return (error);
}
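
/*
 * Illustrative sketch (not part of the original file): with LK_NOWAIT
 * an exclusive request returns EBUSY instead of sleeping, which suits
 * callers that can defer the work.  The function name and the
 * LOCK_EXAMPLES guard are hypothetical.
 */
#ifdef LOCK_EXAMPLES
int
example_try_modify(struct lock *lkp)
{
	int error;

	error = lockmgr(lkp, LK_EXCLUSIVE | LK_NOWAIT, NULL);
	if (error)
		return (error);		/* EBUSY: held or wanted by others */
	/* ... modify the protected data ... */
	return (lockmgr(lkp, LK_RELEASE, NULL));
}
#endif /* LOCK_EXAMPLES */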

#ifdef DIAGNOSTIC
/*
 * Print out information about state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(__volatile struct lock *lkp)
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL) {
		printf(" lock type %s: EXCL (count %d) by ",
		    lkp->lk_wmesg, lkp->lk_exclusivecount);
		printf("pid %d", lkp->lk_lockholder);
	} else
		printf(" not locked");
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}
#endif

#if defined(LOCKDEBUG)
TAILQ_HEAD(, simplelock) simplelock_list =
    TAILQ_HEAD_INITIALIZER(simplelock_list);

#if defined(MULTIPROCESSOR)
struct simplelock simplelock_list_slock = SIMPLELOCK_INITIALIZER;

#define SLOCK_LIST_LOCK()					\
	__cpu_simple_lock(&simplelock_list_slock.lock_data)

#define SLOCK_LIST_UNLOCK()					\
	__cpu_simple_unlock(&simplelock_list_slock.lock_data)

#define SLOCK_COUNT(x)						\
	curcpu()->ci_simple_locks += (x)
#else
u_long simple_locks;

#define SLOCK_LIST_LOCK()	/* nothing */

#define SLOCK_LIST_UNLOCK()	/* nothing */

#define SLOCK_COUNT(x)		simple_locks += (x)
#endif

#ifdef MULTIPROCESSOR
#define SLOCK_MP()		lock_printf("on cpu %ld\n",	\
				    (u_long) cpu_number())
#else
#define SLOCK_MP()		/* nothing */
#endif

#define SLOCK_WHERE(str, alp, id, l)				\
do {								\
	lock_printf("\n");					\
	lock_printf(str);					\
	lock_printf("lock: %p, currently at: %s:%d\n", (alp), (id), (l)); \
	SLOCK_MP();						\
	if ((alp)->lock_file != NULL)				\
		lock_printf("last locked: %s:%d\n", (alp)->lock_file, \
		    (alp)->lock_line);				\
	if ((alp)->unlock_file != NULL)				\
		lock_printf("last unlocked: %s:%d\n", (alp)->unlock_file, \
		    (alp)->unlock_line);			\
	SLOCK_TRACE()						\
	SLOCK_DEBUGGER();					\
} while (0)

/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
void
simple_lock_init(struct simplelock *alp)
{

#if defined(MULTIPROCESSOR)
	__cpu_simple_lock_init(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_UNLOCKED;
#endif
	alp->lock_file = NULL;
	alp->lock_line = 0;
	alp->unlock_file = NULL;
	alp->unlock_line = 0;
	alp->lock_holder = LK_NOCPU;
}

void
_simple_lock(__volatile struct simplelock *alp, const char *id, int l)
{
	cpuid_t cpu_id = CPU_NUMBER();
	int s;

	s = spllock();

	/*
	 * Recursion check.  Safe without the lock held: if another CPU
	 * holds it we take no action, and if we hold it ourselves the
	 * fields are stable while we are at spllock().
	 */
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
#if defined(MULTIPROCESSOR)
		if (alp->lock_holder == cpu_id) {
			SLOCK_WHERE("simple_lock: locking against myself\n",
			    alp, id, l);
			goto out;
		}
#else
		SLOCK_WHERE("simple_lock: lock held\n", alp, id, l);
		goto out;
#endif
	}

#if defined(MULTIPROCESSOR)
	/* Acquire the lock before modifying any fields. */
	splx(s);
	__cpu_simple_lock(&alp->lock_data);
	s = spllock();
#else
	alp->lock_data = __SIMPLELOCK_LOCKED;
#endif

	if (alp->lock_holder != LK_NOCPU) {
		SLOCK_WHERE("simple_lock: uninitialized lock\n",
		    alp, id, l);
	}
	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_id;

	SLOCK_LIST_LOCK();
	/* XXX Cast away volatile. */
	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
}

int
_simple_lock_held(__volatile struct simplelock *alp)
{
	cpuid_t cpu_id = CPU_NUMBER();
	int s, locked = 0;

	s = spllock();

#if defined(MULTIPROCESSOR)
	if (__cpu_simple_lock_try(&alp->lock_data) == 0)
		locked = (alp->lock_holder == cpu_id);
	else
		__cpu_simple_unlock(&alp->lock_data);
#else
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
		locked = 1;
		KASSERT(alp->lock_holder == cpu_id);
	}
#endif

	splx(s);

	return (locked);
}

int
_simple_lock_try(__volatile struct simplelock *alp, const char *id, int l)
{
	cpuid_t cpu_id = CPU_NUMBER();
	int s, rv = 0;

	s = spllock();

	/*
	 * If the lock is busy, fail (reporting recursion against
	 * ourselves where we can detect it); otherwise take it.
	 */
#if defined(MULTIPROCESSOR)
	if ((rv = __cpu_simple_lock_try(&alp->lock_data)) == 0) {
		if (alp->lock_holder == cpu_id)
			SLOCK_WHERE("simple_lock_try: locking against myself\n",
			    alp, id, l);
		goto out;
	}
#else
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
		SLOCK_WHERE("simple_lock_try: lock held\n", alp, id, l);
		goto out;
	}
	alp->lock_data = __SIMPLELOCK_LOCKED;
#endif

	/*
	 * At this point, we have acquired the lock.
	 */
	rv = 1;

	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_id;

	SLOCK_LIST_LOCK();
	/* XXX Cast away volatile. */
	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
	return (rv);
}
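
/*
 * Illustrative sketch (not part of the original file): the try variant
 * returns nonzero on success, letting a caller back off rather than
 * spin.  Callers normally go through the simple_lock_try() and
 * simple_unlock() wrappers, assumed to pass __FILE__/__LINE__ under
 * LOCKDEBUG; the names below are hypothetical.
 */
#ifdef LOCK_EXAMPLES
int
example_poll(struct simplelock *slp)
{
	if (simple_lock_try(slp) == 0)
		return (0);		/* busy; try again later */
	/* ... short critical section, no sleeping ... */
	simple_unlock(slp);
	return (1);
}
#endif /* LOCK_EXAMPLES */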

void
_simple_unlock(__volatile struct simplelock *alp, const char *id, int l)
{
	int s;

	s = spllock();

	/*
	 * This is safe because we think we hold the lock,
	 * and if we don't, we don't take any action.
	 */
	if (alp->lock_data == __SIMPLELOCK_UNLOCKED) {
		SLOCK_WHERE("simple_unlock: lock not held\n",
		    alp, id, l);
		goto out;
	}

	SLOCK_LIST_LOCK();
	TAILQ_REMOVE(&simplelock_list, alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(-1);

	alp->list.tqe_next = NULL;	/* sanity */
	alp->list.tqe_prev = NULL;	/* sanity */

	alp->unlock_file = id;
	alp->unlock_line = l;

#if defined(MULTIPROCESSOR)
	alp->lock_holder = LK_NOCPU;
	/* Now that we've modified all fields, release the lock. */
	__cpu_simple_unlock(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_UNLOCKED;
	KASSERT(alp->lock_holder == CPU_NUMBER());
	alp->lock_holder = LK_NOCPU;
#endif

 out:
	splx(s);
}
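
/*
 * Illustrative sketch (not part of the original file): simple locks
 * guard short critical sections and must not be held across a sleep.
 * The simple_lock()/simple_unlock() wrappers used here are assumed to
 * expand to the _simple_* functions above under LOCKDEBUG; the data
 * structure is hypothetical, and ec_slock must first be set up with
 * simple_lock_init().
 */
#ifdef LOCK_EXAMPLES
struct example_counter {
	struct simplelock ec_slock;
	int ec_count;
};

void
example_counter_bump(struct example_counter *ec)
{
	simple_lock(&ec->ec_slock);
	ec->ec_count++;
	simple_unlock(&ec->ec_slock);
}
#endif /* LOCK_EXAMPLES */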

void
simple_lock_dump(void)
{
	struct simplelock *alp;
	int s;

	s = spllock();
	SLOCK_LIST_LOCK();
	lock_printf("all simple locks:\n");
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		lock_printf("%p CPU %lu %s:%d\n", alp, alp->lock_holder,
		    alp->lock_file, alp->lock_line);
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}

void
simple_lock_freecheck(void *start, void *end)
{
	struct simplelock *alp;
	int s;

	s = spllock();
	SLOCK_LIST_LOCK();
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		if ((void *)alp >= start && (void *)alp < end) {
			lock_printf("freeing simple_lock %p CPU %lu %s:%d\n",
			    alp, alp->lock_holder, alp->lock_file,
			    alp->lock_line);
			SLOCK_DEBUGGER();
		}
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}

/*
 * At switch time we must be holding exactly one lock: the sched_lock.
 */
#ifdef notyet
void
simple_lock_switchcheck(void)
{

	simple_lock_only_held(&sched_lock, "switching");
}
#endif

void
simple_lock_only_held(volatile struct simplelock *lp, const char *where)
{
	struct simplelock *alp;
	cpuid_t cpu_id = CPU_NUMBER();
	int s;

	if (lp) {
		LOCK_ASSERT(simple_lock_held(lp));
	}
	s = spllock();
	SLOCK_LIST_LOCK();
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		if (alp == lp)
			continue;
		if (alp->lock_holder == cpu_id)
			break;
	}
	SLOCK_LIST_UNLOCK();
	splx(s);

	if (alp != NULL) {
		lock_printf("\n%s with held simple_lock %p "
		    "CPU %lu %s:%d\n",
		    where, alp, alp->lock_holder, alp->lock_file,
		    alp->lock_line);
		SLOCK_TRACE();
		SLOCK_DEBUGGER();
	}
}
#endif /* LOCKDEBUG */

#if defined(MULTIPROCESSOR)
/*
 * Functions for manipulating the kernel_lock.  We put them here
 * so that they show up in profiles.
 */

struct __mp_lock kernel_lock;

void
_kernel_lock_init(void)
{
	__mp_lock_init(&kernel_lock);
}

/*
 * Acquire/release the kernel lock.  Intended for use in the scheduler
 * and the lower half of the kernel.
 */
void
_kernel_lock(void)
{
	SCHED_ASSERT_UNLOCKED();
	__mp_lock(&kernel_lock);
}

void
_kernel_unlock(void)
{
	__mp_unlock(&kernel_lock);
}

/*
 * Acquire/release the kernel_lock on behalf of a process.  Intended for
 * use in the top half of the kernel.
 */
void
_kernel_proc_lock(struct proc *p)
{
	SCHED_ASSERT_UNLOCKED();
	__mp_lock(&kernel_lock);
	atomic_setbits_int(&p->p_flag, P_BIGLOCK);
}

void
_kernel_proc_unlock(struct proc *p)
{
	atomic_clearbits_int(&p->p_flag, P_BIGLOCK);
	__mp_unlock(&kernel_lock);
}
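
/*
 * Illustrative sketch (not part of the original file): code without
 * process context takes the kernel lock directly around MP-unsafe
 * work, while process context uses the _kernel_proc_* variants above
 * so that P_BIGLOCK is kept in sync.  The handler below is
 * hypothetical.
 */
#ifdef LOCK_EXAMPLES
int
example_intr(void *arg)
{
	_kernel_lock();
	/* ... touch MP-unsafe kernel state ... */
	_kernel_unlock();
	return (1);
}
#endif /* LOCK_EXAMPLES */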

#ifdef MP_LOCKDEBUG
/* CPU-dependent timing; needs to be settable from ddb. */
int __mp_lock_spinout = 200000000;
#endif

#endif /* MULTIPROCESSOR */