This source file includes the following definitions.
- generic_softclock
- initclocks
- virttimer_trampoline
- proftimer_trampoline
- hardclock
- hzto
- tvtohz
- startprofclock
- stopprofclock
- statclock
- sysctl_clockrate
- getmicrotime
- nanotime
- getnanotime
- nanouptime
- getnanouptime
- microuptime
- getmicrouptime
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dkstat.h>
#include <sys/timeout.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <uvm/uvm_extern.h>
#include <sys/sysctl.h>
#include <sys/sched.h>
#ifdef __HAVE_TIMECOUNTER
#include <sys/timetc.h>
#endif

#include <machine/cpu.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif
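
/*
 * BUMPTIME(tp, usec): advance the timeval pointed to by tp by usec
 * microseconds, carrying any overflow from tv_usec into tv_sec.  Used by
 * hardclock() below to step the legacy global clocks.
 */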
#define BUMPTIME(t, usec) { \
	volatile struct timeval *tp = (t); \
	long us; \
\
	tp->tv_usec = us = tp->tv_usec + (usec); \
	if (us >= 1000000) { \
		tp->tv_usec = us - 1000000; \
		tp->tv_sec++; \
	} \
}

int stathz;
int schedhz;
int profhz;
int profprocs;
int ticks;
static int psdiv, pscnt;
int psratio;

long cp_time[CPUSTATES];

#ifndef __HAVE_TIMECOUNTER
int tickfix, tickfixinterval;
static int tickfixcnt;

volatile time_t time_second;
volatile time_t time_uptime;

volatile struct timeval time
    __attribute__((__aligned__(__alignof__(quad_t))));
volatile struct timeval mono_time;
#endif

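/*
 * Soft interrupt glue: with generic soft interrupts, the soft clock runs
 * as a registered handler that simply calls softclock() to process any
 * expired timeouts.
 */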
#ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
void *softclock_si;
void generic_softclock(void *);

void
generic_softclock(void *ignore)
{
	softclock();
}
#endif
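
/*
 * initclocks: initialize the clock frequencies, start the hardware clocks
 * via the machine-dependent cpu_initclocks(), and set up the soft clock
 * interrupt and (if configured) the timecounter layer.
 */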
void
initclocks(void)
{
	int i;
#ifdef __HAVE_TIMECOUNTER
	extern void inittimecounter(void);
#endif

#ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
	softclock_si = softintr_establish(IPL_SOFTCLOCK, generic_softclock, NULL);
	if (softclock_si == NULL)
		panic("initclocks: unable to register softclock intr");
#endif

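	/*
	 * Start the prof/stat divisors at 1 (no profiling) and let the
	 * machine-dependent code start the hardware clocks.
	 */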
	psdiv = pscnt = 1;
	cpu_initclocks();

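	/*
	 * If no profiling clock rate was set, default profhz to the
	 * statistics clock rate (or hz), and compute the prof/stat ratio.
	 */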
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;
#ifdef __HAVE_TIMECOUNTER
	inittimecounter();
#endif
}
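
/*
 * Timeout trampolines: deliver the signal associated with an expired
 * virtual or profiling interval timer to the owning process.
 */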
void
virttimer_trampoline(void *v)
{
	struct proc *p = v;

	psignal(p, SIGVTALRM);
}

void
proftimer_trampoline(void *v)
{
	struct proc *p = v;

	psignal(p, SIGPROF);
}
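
/*
 * hardclock: the real-time clock interrupt handler, called hz times per
 * second from the machine-dependent clock interrupt code.
 */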
void
hardclock(struct clockframe *frame)
{
	struct proc *p;
#ifndef __HAVE_TIMECOUNTER
	int delta;
	extern int tickdelta;
	extern long timedelta;
	extern int64_t ntp_tick_permanent;
	extern int64_t ntp_tick_acc;
#endif
	struct cpu_info *ci = curcpu();

	p = curproc;
	if (p && ((p->p_flag & (P_SYSTEM | P_WEXIT)) == 0)) {
		struct pstats *pstats;

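		/*
		 * Run the process's virtual (user-time only) and profiling
		 * interval timers; when one expires, schedule the timeout
		 * that posts the corresponding signal.
		 */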
		pstats = p->p_stats;
		if (CLKF_USERMODE(frame) &&
		    timerisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
			timeout_add(&pstats->p_virt_to, 1);
		if (timerisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
			timeout_add(&pstats->p_prof_to, 1);
	}

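	/*
	 * If no separate statistics clock is available, run it from here;
	 * then do round-robin scheduling bookkeeping for this CPU.
	 */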
	if (stathz == 0)
		statclock(frame);

	if (--ci->ci_schedstate.spc_rrticks <= 0)
		roundrobin(ci);

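	/*
	 * The remaining work updates the global time of day; only the
	 * primary CPU does that, so secondary CPUs are done here.
	 */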
	if (CPU_IS_PRIMARY(ci) == 0)
		return;

#ifndef __HAVE_TIMECOUNTER
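	/*
	 * Without timecounters, advance the clock by delta microseconds per
	 * tick: normally just `tick', with an extra microsecond added every
	 * tickfixinterval when hz does not divide 1000000 evenly, plus any
	 * pending adjtime(2) correction applied tickdelta at a time.
	 */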
	delta = tick;

	if (tickfix) {
		tickfixcnt += tickfix;
		if (tickfixcnt >= tickfixinterval) {
			delta++;
			tickfixcnt -= tickfixinterval;
		}
	}

	if (timedelta != 0) {
		delta += tickdelta;
		timedelta -= tickdelta;
	}

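	/*
	 * Apply the permanent NTP frequency correction: ntp_tick_acc
	 * accumulates ntp_tick_permanent each tick, and every time a whole
	 * unit (1000LL << 32) builds up in either direction, delta is
	 * nudged by one microsecond.
	 */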
	if (ntp_tick_permanent != 0) {
		ntp_tick_acc += ntp_tick_permanent;
		while (ntp_tick_acc >= (1000LL << 32)) {
			delta++;
			ntp_tick_acc -= (1000LL << 32);
		}
		while (ntp_tick_acc <= -(1000LL << 32)) {
			delta--;
			ntp_tick_acc += (1000LL << 32);
		}
	}

	BUMPTIME(&time, delta);
	BUMPTIME(&mono_time, delta);
	time_second = time.tv_sec;
	time_uptime = mono_time.tv_sec;
#else
	tc_ticktock();
#endif

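	/*
	 * Update the timeout state for this tick; if any timeouts have
	 * become runnable, schedule the soft clock interrupt to process
	 * them.
	 */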
	if (timeout_hardclock_update()) {
#ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
		softintr_schedule(softclock_si);
#else
		setsoftclock();
#endif
	}
}
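
/*
 * hzto: compute the number of clock ticks from now until the absolute
 * time *tv, clamped to INT_MAX.  One extra tick is added to account for
 * the partially elapsed tick currently in progress.
 */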
int
hzto(struct timeval *tv)
{
	struct timeval now;
	unsigned long ticks;
	long sec, usec;

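	/*
	 * Compute the remaining interval and convert it to ticks, using
	 * whichever form of the arithmetic cannot overflow a long:
	 * microsecond precision for small intervals, seconds * hz for
	 * larger ones, and LONG_MAX as a saturating fallback.
	 */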
	getmicrotime(&now);
	sec = tv->tv_sec - now.tv_sec;
	usec = tv->tv_usec - now.tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0 || (sec == 0 && usec <= 0)) {
		ticks = 0;
	} else if (sec <= LONG_MAX / 1000000)
		ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1))
		    / tick + 1;
	else if (sec <= LONG_MAX / hz)
		ticks = sec * hz
		    + ((unsigned long)usec + (tick - 1)) / tick + 1;
	else
		ticks = LONG_MAX;
	if (ticks > INT_MAX)
		ticks = INT_MAX;
	return ((int)ticks);
}
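
/*
 * tvtohz: like hzto(), but convert a relative timeval (an interval, not
 * an absolute time) into a tick count, clamped to INT_MAX.
 */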
int
tvtohz(struct timeval *tv)
{
	unsigned long ticks;
	long sec, usec;

	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (sec < 0 || (sec == 0 && usec <= 0))
		ticks = 0;
	else if (sec <= LONG_MAX / 1000000)
		ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1))
		    / tick + 1;
	else if (sec <= LONG_MAX / hz)
		ticks = sec * hz
		    + ((unsigned long)usec + (tick - 1)) / tick + 1;
	else
		ticks = LONG_MAX;
	if (ticks > INT_MAX)
		ticks = INT_MAX;
	return ((int)ticks);
}
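
/*
 * startprofclock: begin statistical profiling for process p.  When the
 * first profiled process appears, switch the statistics clock to the
 * higher profiling rate profhz.
 */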
void
startprofclock(struct proc *p)
{
	int s;

	if ((p->p_flag & P_PROFIL) == 0) {
		atomic_setbits_int(&p->p_flag, P_PROFIL);
		if (++profprocs == 1 && stathz != 0) {
			s = splstatclock();
			psdiv = pscnt = psratio;
			setstatclockrate(profhz);
			splx(s);
		}
	}
}
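
/*
 * stopprofclock: stop statistical profiling for process p.  When the last
 * profiled process goes away, drop the statistics clock back to stathz.
 */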
void
stopprofclock(struct proc *p)
{
	int s;

	if (p->p_flag & P_PROFIL) {
		atomic_clearbits_int(&p->p_flag, P_PROFIL);
		if (--profprocs == 0 && stathz != 0) {
			s = splstatclock();
			psdiv = pscnt = 1;
			setstatclockrate(stathz);
			splx(s);
		}
	}
}
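
/*
 * statclock: statistics clock interrupt.  Charges the current tick to
 * user, nice, system, interrupt or idle time, drives user and kernel
 * profiling, and does periodic scheduler bookkeeping.  Runs at stathz,
 * or at profhz while any process is being profiled.
 */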
void
statclock(struct clockframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	int i;
#endif
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	struct proc *p = curproc;

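	/*
	 * Notice a change in the prof/stat divisor and switch this CPU's
	 * statistics clock rate to match.
	 */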
	if (spc->spc_psdiv != psdiv) {
		spc->spc_psdiv = psdiv;
		spc->spc_pscnt = psdiv;
		if (psdiv == 1) {
			setstatclockrate(stathz);
		} else {
			setstatclockrate(profhz);
		}
	}

	if (CLKF_USERMODE(frame)) {
		if (p->p_flag & P_PROFIL)
			addupc_intr(p, CLKF_PC(frame));
		if (--spc->spc_pscnt > 0)
			return;

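		/*
		 * Came from user mode: charge the tick to the process as
		 * user time, or as nice time if it runs at reduced priority.
		 */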
		p->p_uticks++;
		if (p->p_nice > NZERO)
			spc->spc_cp_time[CP_NICE]++;
		else
			spc->spc_cp_time[CP_USER]++;
	} else {
#ifdef GPROF
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
#if defined(PROC_PC)
		if (p != NULL && p->p_flag & P_PROFIL)
			addupc_intr(p, PROC_PC(p));
#endif
		if (--spc->spc_pscnt > 0)
			return;

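		/*
		 * Came from kernel mode: charge the tick to interrupt time,
		 * to system time on behalf of the current process, or to
		 * idle time when no process is running.
		 */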
		if (CLKF_INTR(frame)) {
			if (p != NULL)
				p->p_iticks++;
			spc->spc_cp_time[CP_INTR]++;
		} else if (p != NULL) {
			p->p_sticks++;
			spc->spc_cp_time[CP_SYS]++;
		} else
			spc->spc_cp_time[CP_IDLE]++;
	}
	spc->spc_pscnt = psdiv;

	if (p != NULL) {
		p->p_cpticks++;
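		/*
		 * If no separate scheduler clock is provided, call
		 * schedclock() here on every fourth statistics clock tick.
		 */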
		if (schedhz == 0) {
			if ((++curcpu()->ci_schedstate.spc_schedticks & 3) == 0)
				schedclock(p);
		}
	}
}
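
/*
 * sysctl_clockrate: return the current clock configuration (hz, tick,
 * tickadj, profhz, stathz) for the kern.clockrate sysctl.
 */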
int
sysctl_clockrate(char *where, size_t *sizep)
{
	struct clockinfo clkinfo;

	clkinfo.tick = tick;
	clkinfo.tickadj = tickadj;
	clkinfo.hz = hz;
	clkinfo.profhz = profhz;
	clkinfo.stathz = stathz ? stathz : hz;
	return (sysctl_rdstruct(where, sizep, NULL, &clkinfo, sizeof(clkinfo)));
}

#ifndef __HAVE_TIMECOUNTER
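/*
 * Time-of-day interfaces for kernels built without the timecounter
 * framework: these are layered on the hardclock()-maintained globals
 * `time' and `mono_time' and on the machine-dependent microtime().
 */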
void
getmicrotime(struct timeval *tvp)
{
	int s;

	s = splhigh();
	*tvp = time;
	splx(s);
}

void
nanotime(struct timespec *tsp)
{
	struct timeval tv;

	microtime(&tv);
	TIMEVAL_TO_TIMESPEC(&tv, tsp);
}

void
getnanotime(struct timespec *tsp)
{
	struct timeval tv;

	getmicrotime(&tv);
	TIMEVAL_TO_TIMESPEC(&tv, tsp);
}

void
nanouptime(struct timespec *tsp)
{
	struct timeval tv;

	microuptime(&tv);
	TIMEVAL_TO_TIMESPEC(&tv, tsp);
}

void
getnanouptime(struct timespec *tsp)
{
	struct timeval tv;

	getmicrouptime(&tv);
	TIMEVAL_TO_TIMESPEC(&tv, tsp);
}

void
microuptime(struct timeval *tvp)
{
	struct timeval tv;

	microtime(&tv);
	timersub(&tv, &boottime, tvp);
}

void
getmicrouptime(struct timeval *tvp)
{
	int s;

	s = splhigh();
	*tvp = mono_time;
	splx(s);
}
#endif