This source file includes the following definitions:
- lf_init
- lf_alloc
- lf_free
- lf_advlock
- lf_setlock
- lf_clearlock
- lf_getlock
- lf_getblock
- lf_findoverlap
- lf_split
- lf_wakelock
- lf_print
- lf_printlist
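
These routines implement POSIX (fcntl-style) and BSD (flock-style) advisory byte-range locking over a per-file list of struct lockf records. For orientation, here is a minimal sketch of how a filesystem might hand an advisory-locking request to lf_advlock(); the struct example_inode type and its i_lockf/i_size fields are illustrative assumptions, not definitions from this file.

/*
 * Sketch only: a hypothetical per-file structure as a filesystem
 * might define it.  Only lf_advlock() below is real; the names
 * example_inode, i_lockf and i_size are assumptions for illustration.
 */
struct example_inode {
        struct lockf    *i_lockf;       /* head of this file's lock list */
        off_t            i_size;        /* file size, used for SEEK_END */
};

int
example_advlock(struct example_inode *ip, caddr_t id, int op,
    struct flock *fl, int flags)
{
        /*
         * lf_advlock() converts *fl into a byte range (resolving
         * SEEK_END against i_size) and performs op on the lock list
         * rooted at i_lockf.
         */
        return (lf_advlock(&ip->i_lockf, ip->i_size, id, op, fl, flags));
}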

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/pool.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>

struct pool lockfpool;

int maxlockdepth = MAXDEPTH;

#define SELF 0x1
#define OTHERS 0x2

#ifdef LOCKF_DEBUG

#define DEBUG_SETLOCK 0x01
#define DEBUG_CLEARLOCK 0x02
#define DEBUG_GETLOCK 0x04
#define DEBUG_FINDOVR 0x08
#define DEBUG_SPLIT 0x10
#define DEBUG_WAKELOCK 0x20

int lockf_debug = DEBUG_SETLOCK|DEBUG_CLEARLOCK|DEBUG_WAKELOCK;

#define DPRINTF(args, level) if (lockf_debug & (level)) printf args
#else
#define DPRINTF(args, level)
#endif

void
lf_init(void)
{
        pool_init(&lockfpool, sizeof(struct lockf), 0, 0, 0,
            "lockfpl", &pool_allocator_nointr);
}

struct lockf *lf_alloc(uid_t, int);
void lf_free(struct lockf *);

/*
 * Per-uid limit on the number of lockf structures.  lf_alloc() refuses
 * new locks for a non-root uid once ui_lockcnt exceeds maxlocksperuid
 * (twice that for unlock requests, which get extra slack so releases
 * can still succeed); a zero "allowfail" argument, as used by
 * lf_split(), bypasses the check entirely.
 */
int maxlocksperuid = 1024;

struct lockf *
lf_alloc(uid_t uid, int allowfail)
{
        struct uidinfo *uip;
        struct lockf *lock;

        uip = uid_find(uid);
        if (uid && allowfail && uip->ui_lockcnt >
            (allowfail == 1 ? maxlocksperuid : (maxlocksperuid * 2)))
                return (NULL);
        uip->ui_lockcnt++;
        lock = pool_get(&lockfpool, PR_WAITOK);
        lock->lf_uid = uid;
        return (lock);
}

void
lf_free(struct lockf *lock)
{
        struct uidinfo *uip;

        uip = uid_find(lock->lf_uid);
        uip->ui_lockcnt--;
        pool_put(&lockfpool, lock);
}

/*
 * Do an advisory lock operation on the byte range described by *fl.
 */
int
lf_advlock(struct lockf **head, off_t size, caddr_t id, int op,
    struct flock *fl, int flags)
{
        struct proc *p = curproc;
        struct lockf *lock;
        off_t start, end;
        int error;

        /*
         * Convert the flock structure into a start and end.
         */
        switch (fl->l_whence) {

        case SEEK_SET:
        case SEEK_CUR:
                /*
                 * Caller is responsible for adding any necessary offset
                 * when SEEK_CUR is used.
                 */
                start = fl->l_start;
                break;

        case SEEK_END:
                start = size + fl->l_start;
                break;

        default:
                return (EINVAL);
        }
        if (start < 0)
                return (EINVAL);
        if (fl->l_len == 0)
                end = -1;
        else {
                end = start + fl->l_len - 1;
                if (end < start)
                        return (EINVAL);
        }

        /*
         * Avoid the common case of unlocking when there are no locks.
         */
        if (*head == NULL) {
                if (op != F_SETLK) {
                        fl->l_type = F_UNLCK;
                        return (0);
                }
        }

        /*
         * Create the lockf structure.
         */
        lock = lf_alloc(p->p_ucred->cr_uid, op != F_UNLCK ? 1 : 2);
        if (!lock)
                return (ENOMEM);
        lock->lf_start = start;
        lock->lf_end = end;
        lock->lf_id = id;
        lock->lf_head = head;
        lock->lf_type = fl->l_type;
        lock->lf_next = NULL;
        TAILQ_INIT(&lock->lf_blkhd);
        lock->lf_flags = flags;

        /*
         * Do the requested operation.
         */
        switch (op) {

        case F_SETLK:
                return (lf_setlock(lock));

        case F_UNLCK:
                error = lf_clearlock(lock);
                lf_free(lock);
                return (error);

        case F_GETLK:
                error = lf_getlock(lock, fl);
                lf_free(lock);
                return (error);

        default:
                lf_free(lock);
                return (EINVAL);
        }
        /* NOTREACHED */
}

/*
 * Set a byte-range lock.
 */
int
lf_setlock(struct lockf *lock)
{
        struct lockf *block;
        struct lockf **head = lock->lf_head;
        struct lockf **prev, *overlap, *ltmp;
        static char lockstr[] = "lockf";
        int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
        if (lockf_debug & DEBUG_SETLOCK)
                lf_print("lf_setlock", lock);
#endif

        /*
         * Set the sleep priority.
         */
        priority = PLOCK;
        if (lock->lf_type == F_WRLCK)
                priority += 4;
        priority |= PCATCH;
        /*
         * Scan the lock list for this file, looking for locks that
         * would block us.
         */
        while ((block = lf_getblock(lock)) != NULL) {
                /*
                 * Free the structure and return if nonblocking.
                 */
                if ((lock->lf_flags & F_WAIT) == 0) {
                        lf_free(lock);
                        return (EAGAIN);
                }
                /*
                 * We are blocked.  Since flock style locks cover the
                 * whole file, there is no chance for deadlock.  For
                 * POSIX byte-range locks we must check for deadlock.
                 *
                 * Deadlock detection is done by looking through the
                 * wait channels to see if there is a cycle that
                 * involves us; maxlockdepth bounds the length of the
                 * chain we are willing to follow.
                 */
                if ((lock->lf_flags & F_POSIX) &&
                    (block->lf_flags & F_POSIX)) {
                        struct proc *wproc;
                        struct lockf *waitblock;
                        int i = 0;

                        /* The blocking lock's owner is itself waiting. */
                        wproc = (struct proc *)block->lf_id;
                        while (wproc->p_wchan &&
                            (wproc->p_wmesg == lockstr) &&
                            (i++ < maxlockdepth)) {
                                waitblock = (struct lockf *)wproc->p_wchan;
                                /* Get the lock blocking that process. */
                                waitblock = waitblock->lf_next;
                                if ((waitblock->lf_flags & F_POSIX) == 0)
                                        break;
                                wproc = (struct proc *)waitblock->lf_id;
                                if (wproc == (struct proc *)lock->lf_id) {
                                        lf_free(lock);
                                        return (EDEADLK);
                                }
                        }
                }
                /*
                 * For flock type locks, we must first remove any
                 * shared locks that we hold before we sleep waiting
                 * for an exclusive lock.
                 */
                if ((lock->lf_flags & F_FLOCK) &&
                    lock->lf_type == F_WRLCK) {
                        lock->lf_type = F_UNLCK;
                        (void) lf_clearlock(lock);
                        lock->lf_type = F_WRLCK;
                }
                /*
                 * Add our lock to the blocked list and sleep until we're
                 * free.  Remember who blocked us (for deadlock detection).
                 */
                lock->lf_next = block;
#ifdef LOCKF_DEBUG
                if (lockf_debug & DEBUG_SETLOCK) {
                        lf_print("lf_setlock", lock);
                        lf_print("lf_setlock: blocking on", block);
                }
#endif
                TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
                error = tsleep(lock, priority, lockstr, 0);
#if 0
                if (error) {
                        /*
                         * Delete ourselves from the waiting-to-lock list.
                         */
                        TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
                        lf_free(lock);
                        return (error);
                }
#else
                if (lock->lf_next != NULL) {
                        TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
                        lock->lf_next = NULL;
                }
                if (error) {
                        lf_free(lock);
                        return (error);
                }
#endif
        }
        /*
         * No blocks!  Add the lock, downgrading or upgrading any
         * overlapping locks this process already owns.
         *
         * Skip over locks owned by other processes and handle any
         * locks that overlap and are owned by ourselves.
         */
        prev = head;
        block = *head;
        needtolink = 1;
        for (;;) {
                ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
                if (ovcase)
                        block = overlap->lf_next;
                /*
                 * Six cases:
                 *      0) no overlap
                 *      1) overlap == lock
                 *      2) overlap contains lock
                 *      3) lock contains overlap
                 *      4) overlap starts before lock
                 *      5) overlap ends after lock
                 */
                switch (ovcase) {
                case 0: /* no overlap */
                        if (needtolink) {
                                *prev = lock;
                                lock->lf_next = overlap;
                        }
                        break;

                case 1: /* overlap == lock */
                        /*
                         * If downgrading the lock, others may be
                         * able to acquire it.
                         */
                        if (lock->lf_type == F_RDLCK &&
                            overlap->lf_type == F_WRLCK)
                                lf_wakelock(overlap);
                        overlap->lf_type = lock->lf_type;
                        lf_free(lock);
                        lock = overlap; /* for debug output below */
                        break;

                case 2: /* overlap contains lock */
                        /*
                         * Check for common starting point and different types.
                         */
                        if (overlap->lf_type == lock->lf_type) {
                                lf_free(lock);
                                lock = overlap; /* for debug output below */
                                break;
                        }
                        if (overlap->lf_start == lock->lf_start) {
                                *prev = lock;
                                lock->lf_next = overlap;
                                overlap->lf_start = lock->lf_end + 1;
                        } else
                                lf_split(overlap, lock);
                        lf_wakelock(overlap);
                        break;

                case 3: /* lock contains overlap */
                        /*
                         * If downgrading the lock, others may be able to
                         * acquire it; otherwise take over the blocked list.
                         */
                        if (lock->lf_type == F_RDLCK &&
                            overlap->lf_type == F_WRLCK) {
                                lf_wakelock(overlap);
                        } else {
                                while ((ltmp =
                                    TAILQ_FIRST(&overlap->lf_blkhd))) {
                                        TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
                                            lf_block);
                                        ltmp->lf_next = lock;
                                        TAILQ_INSERT_TAIL(&lock->lf_blkhd,
                                            ltmp, lf_block);
                                }
                        }
                        /*
                         * Add the new lock if necessary and delete the overlap.
                         */
                        if (needtolink) {
                                *prev = lock;
                                lock->lf_next = overlap->lf_next;
                                prev = &lock->lf_next;
                                needtolink = 0;
                        } else
                                *prev = overlap->lf_next;
                        lf_free(overlap);
                        continue;

                case 4: /* overlap starts before lock */
                        /*
                         * Add the new lock after the overlap on the list.
                         */
                        lock->lf_next = overlap->lf_next;
                        overlap->lf_next = lock;
                        overlap->lf_end = lock->lf_start - 1;
                        prev = &lock->lf_next;
                        lf_wakelock(overlap);
                        needtolink = 0;
                        continue;

                case 5: /* overlap ends after lock */
                        /*
                         * Add the new lock before the overlap.
                         */
                        if (needtolink) {
                                *prev = lock;
                                lock->lf_next = overlap;
                        }
                        overlap->lf_start = lock->lf_end + 1;
                        lf_wakelock(overlap);
                        break;
                }
                break;
        }
#ifdef LOCKF_DEBUG
        if (lockf_debug & DEBUG_SETLOCK) {
                lf_print("lf_setlock: got the lock", lock);
        }
#endif
        return (0);
}

/*
 * Remove a byte-range lock.
 *
 * Generally, find the lock (or an overlap to that lock) and remove it
 * (or shrink it), then wake up anyone we can.
 */
int
lf_clearlock(struct lockf *lock)
{
        struct lockf **head = lock->lf_head;
        struct lockf *lf = *head;
        struct lockf *overlap, **prev;
        int ovcase;

        if (lf == NULL)
                return (0);
#ifdef LOCKF_DEBUG
        if (lockf_debug & DEBUG_CLEARLOCK)
                lf_print("lf_clearlock", lock);
#endif
        prev = head;
        while ((ovcase = lf_findoverlap(lf, lock, SELF,
            &prev, &overlap)) != 0) {
                /*
                 * Wake up the list of locks to be retried.
                 */
                lf_wakelock(overlap);

                switch (ovcase) {

                case 1: /* overlap == lock */
                        *prev = overlap->lf_next;
                        lf_free(overlap);
                        break;

                case 2: /* overlap contains lock: split it */
                        if (overlap->lf_start == lock->lf_start) {
                                overlap->lf_start = lock->lf_end + 1;
                                break;
                        }
                        lf_split(overlap, lock);
                        overlap->lf_next = lock->lf_next;
                        break;

                case 3: /* lock contains overlap */
                        *prev = overlap->lf_next;
                        lf = overlap->lf_next;
                        lf_free(overlap);
                        continue;

                case 4: /* overlap starts before lock */
                        overlap->lf_end = lock->lf_start - 1;
                        prev = &overlap->lf_next;
                        lf = overlap->lf_next;
                        continue;

                case 5: /* overlap ends after lock */
                        overlap->lf_start = lock->lf_end + 1;
                        break;
                }
                break;
        }
        return (0);
}

/*
 * Check whether there is a blocking lock, and if so describe it in *fl.
 */
int
lf_getlock(struct lockf *lock, struct flock *fl)
{
        struct lockf *block;

#ifdef LOCKF_DEBUG
        if (lockf_debug & DEBUG_CLEARLOCK)
                lf_print("lf_getlock", lock);
#endif

        if ((block = lf_getblock(lock)) != NULL) {
                fl->l_type = block->lf_type;
                fl->l_whence = SEEK_SET;
                fl->l_start = block->lf_start;
                if (block->lf_end == -1)
                        fl->l_len = 0;
                else
                        fl->l_len = block->lf_end - block->lf_start + 1;
                if (block->lf_flags & F_POSIX)
                        fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
                else
                        fl->l_pid = -1;
        } else {
                fl->l_type = F_UNLCK;
        }
        return (0);
}

/*
 * Walk the list of locks and return the first blocking lock,
 * or NULL if there is none.
 */
struct lockf *
lf_getblock(struct lockf *lock)
{
        struct lockf **prev, *overlap, *lf;

        prev = lock->lf_head;
        lf = *prev;
        while (lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != 0) {
                /*
                 * We've found an overlap, see if it blocks us.
                 */
                if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
                        return (overlap);
                /*
                 * Nope, keep on looking for an overlap that blocks us.
                 */
                lf = overlap->lf_next;
        }
        return (NULL);
}

/*
 * Walk the list of locks to find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There may be
 * more than one.
 */
int
lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
    struct lockf ***prev, struct lockf **overlap)
{
        off_t start, end;

#ifdef LOCKF_DEBUG
        if (lf && lockf_debug & DEBUG_FINDOVR)
                lf_print("lf_findoverlap: looking for overlap in", lock);
#endif

        *overlap = lf;
        start = lock->lf_start;
        end = lock->lf_end;
        while (lf != NULL) {
                if (((type & SELF) && lf->lf_id != lock->lf_id) ||
                    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
                        *prev = &lf->lf_next;
                        *overlap = lf = lf->lf_next;
                        continue;
                }
#ifdef LOCKF_DEBUG
                if (lockf_debug & DEBUG_FINDOVR)
                        lf_print("\tchecking", lf);
#endif
                /*
                 * OK, check for overlap.
                 *
                 * Six cases:
                 *      0) no overlap
                 *      1) overlap == lock
                 *      2) overlap contains lock
                 *      3) lock contains overlap
                 *      4) overlap starts before lock
                 *      5) overlap ends after lock
                 */
                if ((lf->lf_end != -1 && start > lf->lf_end) ||
                    (end != -1 && lf->lf_start > end)) {
                        DPRINTF(("no overlap\n"), DEBUG_FINDOVR);
                        if ((type & SELF) && end != -1 && lf->lf_start > end)
                                return (0);
                        *prev = &lf->lf_next;
                        *overlap = lf = lf->lf_next;
                        continue;
                }

                if ((lf->lf_start == start) && (lf->lf_end == end)) {
                        DPRINTF(("overlap == lock\n"), DEBUG_FINDOVR);
                        return (1);
                }

                if ((lf->lf_start <= start) &&
                    (lf->lf_end == -1 ||
                    (end != -1 && lf->lf_end >= end))) {
                        DPRINTF(("overlap contains lock\n"), DEBUG_FINDOVR);
                        return (2);
                }

                if (start <= lf->lf_start &&
                    (end == -1 ||
                    (lf->lf_end != -1 && end >= lf->lf_end))) {
                        DPRINTF(("lock contains overlap\n"), DEBUG_FINDOVR);
                        return (3);
                }

                if ((lf->lf_start < start) &&
                    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
                        DPRINTF(("overlap starts before lock\n"),
                            DEBUG_FINDOVR);
                        return (4);
                }

                if ((lf->lf_start > start) &&
                    (end != -1) &&
                    ((lf->lf_end > end) || (lf->lf_end == -1))) {
                        DPRINTF(("overlap ends after lock\n"), DEBUG_FINDOVR);
                        return (5);
                }
                panic("lf_findoverlap: default");
        }
        return (0);
}

/*
 * Split a lock and a contained region into two or three locks
 * as necessary.
 */
void
lf_split(struct lockf *lock1, struct lockf *lock2)
{
        struct lockf *splitlock;

#ifdef LOCKF_DEBUG
        if (lockf_debug & DEBUG_SPLIT) {
                lf_print("lf_split", lock1);
                lf_print("splitting from", lock2);
        }
#endif

        /*
         * Check to see if splitting into only two pieces.
         */
        if (lock1->lf_start == lock2->lf_start) {
                lock1->lf_start = lock2->lf_end + 1;
                lock2->lf_next = lock1;
                return;
        }
        if (lock1->lf_end == lock2->lf_end) {
                lock1->lf_end = lock2->lf_start - 1;
                lock2->lf_next = lock1->lf_next;
                lock1->lf_next = lock2;
                return;
        }
        /*
         * Make a new lock consisting of the last part of
         * the encompassing lock.
         */
        splitlock = lf_alloc(lock1->lf_uid, 0);
        memcpy(splitlock, lock1, sizeof(*splitlock));
        splitlock->lf_start = lock2->lf_end + 1;
        splitlock->lf_block.tqe_next = NULL;
        TAILQ_INIT(&splitlock->lf_blkhd);
        lock1->lf_end = lock2->lf_start - 1;

        /*
         * OK, now link it in.
         */
        lock2->lf_next = splitlock;
        lock1->lf_next = lock2;
}

/*
 * Wake up everyone blocked on this lock.
 */
void
lf_wakelock(struct lockf *lock)
{
        struct lockf *wakelock;

        while ((wakelock = TAILQ_FIRST(&lock->lf_blkhd))) {
                TAILQ_REMOVE(&lock->lf_blkhd, wakelock, lf_block);
                wakelock->lf_next = NULL;
                wakeup_one(wakelock);
        }
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(char *tag, struct lockf *lock)
{
        struct lockf *block;

        printf("%s: lock %p for ", tag, lock);
        if (lock->lf_flags & F_POSIX)
                printf("proc %d", ((struct proc *)(lock->lf_id))->p_pid);
        else
                printf("id %p", lock->lf_id);
        printf(" %s, start %llx, end %llx",
            lock->lf_type == F_RDLCK ? "shared" :
            lock->lf_type == F_WRLCK ? "exclusive" :
            lock->lf_type == F_UNLCK ? "unlock" :
            "unknown", lock->lf_start, lock->lf_end);
        block = TAILQ_FIRST(&lock->lf_blkhd);
        if (block)
                printf(" block");
        TAILQ_FOREACH(block, &lock->lf_blkhd, lf_block)
                printf(" %p,", block);
        printf("\n");
}

void
lf_printlist(char *tag, struct lockf *lock)
{
        struct lockf *lf;

        printf("%s: Lock list:\n", tag);
        for (lf = *lock->lf_head; lf; lf = lf->lf_next) {
                printf("\tlock %p for ", lf);
                if (lf->lf_flags & F_POSIX)
                        printf("proc %d", ((struct proc *)(lf->lf_id))->p_pid);
                else
                        printf("id %p", lf->lf_id);
                printf(" %s, start %llx, end %llx",
                    lf->lf_type == F_RDLCK ? "shared" :
                    lf->lf_type == F_WRLCK ? "exclusive" :
                    lf->lf_type == F_UNLCK ? "unlock" :
                    "unknown", lf->lf_start, lf->lf_end);
                printf("\n");
        }
}
#endif
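
For reference, a small userland sketch of the fcntl(2) interface that these routines ultimately service; the file path and byte range are arbitrary illustrative values. Note how l_whence, l_start and l_len map onto the start/end computation in lf_advlock(): l_len == 0 means "to end of file" (end = -1 above), a blocking request (F_SETLKW) corresponds to the F_WAIT path, and a non-blocking F_SETLK fails with EAGAIN when lf_getblock() finds a conflicting lock.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        struct flock fl;
        int fd;

        /* Illustrative path; any writable file will do. */
        fd = open("/tmp/lockf-example", O_RDWR | O_CREAT, 0644);
        if (fd == -1)
                return (1);

        fl.l_type = F_WRLCK;            /* exclusive lock */
        fl.l_whence = SEEK_SET;         /* l_start is an absolute offset */
        fl.l_start = 0;
        fl.l_len = 0;                   /* 0 = lock to end of file */

        /* Blocking request; F_SETLK would fail instead of sleeping. */
        if (fcntl(fd, F_SETLKW, &fl) == -1) {
                perror("fcntl");
                return (1);
        }

        /* ... critical section ... */

        fl.l_type = F_UNLCK;            /* release the range */
        fcntl(fd, F_SETLK, &fl);
        close(fd);
        return (0);
}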