This source file includes following definitions.
- hfsc_pfattach
- hfsc_add_altq
- hfsc_remove_altq
- hfsc_add_queue
- hfsc_remove_queue
- hfsc_getqstats
- hfsc_clear_interface
- hfsc_request
- hfsc_purge
- hfsc_class_create
- hfsc_class_destroy
- hfsc_nextclass
- hfsc_enqueue
- hfsc_dequeue
- hfsc_addq
- hfsc_getq
- hfsc_pollq
- hfsc_purgeq
- set_active
- set_passive
- init_ed
- update_ed
- update_d
- init_vf
- update_vf
- update_cfmin
- ellist_alloc
- ellist_destroy
- ellist_insert
- ellist_remove
- ellist_update
- ellist_get_mindl
- actlist_alloc
- actlist_destroy
- actlist_insert
- actlist_remove
- actlist_update
- actlist_firstfit
- seg_x2y
- seg_y2x
- m2sm
- m2ism
- d2dx
- sm2m
- dx2d
- sc2isc
- rtsc_init
- rtsc_y2x
- rtsc_x2y
- rtsc_min
- get_class_stats
- clh_to_clp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45 #include <sys/param.h>
46 #include <sys/malloc.h>
47 #include <sys/mbuf.h>
48 #include <sys/socket.h>
49 #include <sys/systm.h>
50 #include <sys/errno.h>
51 #include <sys/queue.h>
52
53 #include <net/if.h>
54 #include <netinet/in.h>
55
56 #include <net/pfvar.h>
57 #include <altq/altq.h>
58 #include <altq/altq_hfsc.h>
59
60
61
62
/*
 * Forward declarations for the HFSC scheduler internals.
 */

/* interface / class management */
static int hfsc_clear_interface(struct hfsc_if *);
static int hfsc_request(struct ifaltq *, int, void *);
static void hfsc_purge(struct hfsc_if *);
static struct hfsc_class *hfsc_class_create(struct hfsc_if *,
    struct service_curve *, struct service_curve *, struct service_curve *,
    struct hfsc_class *, int, int, int);
static int hfsc_class_destroy(struct hfsc_class *);
static struct hfsc_class *hfsc_nextclass(struct hfsc_class *);
static int hfsc_enqueue(struct ifaltq *, struct mbuf *,
    struct altq_pktattr *);
static struct mbuf *hfsc_dequeue(struct ifaltq *, int);

/* per-class packet queue operations */
static int hfsc_addq(struct hfsc_class *, struct mbuf *);
static struct mbuf *hfsc_getq(struct hfsc_class *);
static struct mbuf *hfsc_pollq(struct hfsc_class *);
static void hfsc_purgeq(struct hfsc_class *);

static void update_cfmin(struct hfsc_class *);
static void set_active(struct hfsc_class *, int);
static void set_passive(struct hfsc_class *);

/* eligible/deadline (real-time) and virtual-time (link-sharing) updates */
static void init_ed(struct hfsc_class *, int);
static void update_ed(struct hfsc_class *, int);
static void update_d(struct hfsc_class *, int);
static void init_vf(struct hfsc_class *, int);
static void update_vf(struct hfsc_class *, int, u_int64_t);
/* eligible list (classes with pending real-time service) */
static ellist_t *ellist_alloc(void);
static void ellist_destroy(ellist_t *);
static void ellist_insert(struct hfsc_class *);
static void ellist_remove(struct hfsc_class *);
static void ellist_update(struct hfsc_class *);
struct hfsc_class *ellist_get_mindl(ellist_t *, u_int64_t);
/* active-children list (per parent, for link-sharing) */
static actlist_t *actlist_alloc(void);
static void actlist_destroy(actlist_t *);
static void actlist_insert(struct hfsc_class *);
static void actlist_remove(struct hfsc_class *);
static void actlist_update(struct hfsc_class *);

static struct hfsc_class *actlist_firstfit(struct hfsc_class *,
    u_int64_t);

/* fixed-point service-curve arithmetic helpers */
static __inline u_int64_t seg_x2y(u_int64_t, u_int64_t);
static __inline u_int64_t seg_y2x(u_int64_t, u_int64_t);
static __inline u_int64_t m2sm(u_int);
static __inline u_int64_t m2ism(u_int);
static __inline u_int64_t d2dx(u_int);
static u_int sm2m(u_int64_t);
static u_int dx2d(u_int64_t);

static void sc2isc(struct service_curve *, struct internal_sc *);
static void rtsc_init(struct runtime_sc *, struct internal_sc *,
    u_int64_t, u_int64_t);
static u_int64_t rtsc_y2x(struct runtime_sc *, u_int64_t);
static u_int64_t rtsc_x2y(struct runtime_sc *, u_int64_t);
static void rtsc_min(struct runtime_sc *, struct internal_sc *,
    u_int64_t, u_int64_t);

static void get_class_stats(struct hfsc_classstats *,
    struct hfsc_class *);
static struct hfsc_class *clh_to_clp(struct hfsc_if *, u_int32_t);
123
124
125
126
/* a class is an interior (parent) node iff it has at least one child */
#define is_a_parent_class(cl)	((cl)->cl_children != NULL)

/* largest representable 64-bit time value: "never" */
#define HT_INFINITY	0xffffffffffffffffLL
130
/*
 * Attach the HFSC discipline (state previously created by
 * hfsc_add_altq() and stored in a->altq_disc) to the interface's
 * send queue.  Returns 0 or an errno value from altq_attach().
 */
int
hfsc_pfattach(struct pf_altq *a)
{
	struct ifnet *ifp;
	int s, error;

	if ((ifp = ifunit(a->ifname)) == NULL || a->altq_disc == NULL)
		return (EINVAL);
	/* block network interrupts while swapping the ifqueue handlers */
	s = splnet();
	error = altq_attach(&ifp->if_snd, ALTQT_HFSC, a->altq_disc,
	    hfsc_enqueue, hfsc_dequeue, hfsc_request, NULL, NULL);
	splx(s);
	return (error);
}
145
/*
 * Allocate and initialize the per-interface HFSC state (hfsc_if).
 * The discipline is hooked up to the ifqueue later by hfsc_pfattach().
 * Returns 0, or EINVAL/ENODEV/ENOMEM on failure.
 */
int
hfsc_add_altq(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct ifnet *ifp;

	if ((ifp = ifunit(a->ifname)) == NULL)
		return (EINVAL);
	if (!ALTQ_IS_READY(&ifp->if_snd))
		return (ENODEV);

	MALLOC(hif, struct hfsc_if *, sizeof(struct hfsc_if),
	    M_DEVBUF, M_WAITOK);
	if (hif == NULL)
		return (ENOMEM);
	bzero(hif, sizeof(struct hfsc_if));

	/* list of classes eligible for real-time service */
	hif->hif_eligible = ellist_alloc();
	if (hif->hif_eligible == NULL) {
		FREE(hif, M_DEVBUF);
		return (ENOMEM);
	}

	hif->hif_ifq = &ifp->if_snd;

	/* keep the state in pf_altq for later pfattach/add_queue calls */
	a->altq_disc = hif;

	return (0);
}
176
/*
 * Tear down the per-interface HFSC state: destroy every class, then
 * the root class, the eligible list, and finally the hfsc_if itself.
 */
int
hfsc_remove_altq(struct pf_altq *a)
{
	struct hfsc_if *hif;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);
	a->altq_disc = NULL;

	/* destroy all leaves first, then the now-childless root */
	(void)hfsc_clear_interface(hif);
	(void)hfsc_class_destroy(hif->hif_rootclass);

	ellist_destroy(hif->hif_eligible);

	FREE(hif, M_DEVBUF);

	return (0);
}
195
/*
 * Create a new HFSC class from a pf_altq request.  The three service
 * curves (real-time, link-sharing, upper-limit) come from the
 * hfsc_opts in the request.  Returns 0, or EINVAL/EBUSY/ENOMEM.
 */
int
hfsc_add_queue(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl, *parent;
	struct hfsc_opts *opts;
	struct service_curve rtsc, lssc, ulsc;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);

	opts = &a->pq_u.hfsc_opts;

	/* only the very first class may have no parent (it becomes root) */
	if (a->parent_qid == HFSC_NULLCLASS_HANDLE &&
	    hif->hif_rootclass == NULL)
		parent = NULL;
	else if ((parent = clh_to_clp(hif, a->parent_qid)) == NULL)
		return (EINVAL);

	if (a->qid == 0)
		return (EINVAL);

	/* class handles must be unique per interface */
	if (clh_to_clp(hif, a->qid) != NULL)
		return (EBUSY);

	rtsc.m1 = opts->rtsc_m1;	/* real-time service curve */
	rtsc.d = opts->rtsc_d;
	rtsc.m2 = opts->rtsc_m2;
	lssc.m1 = opts->lssc_m1;	/* link-sharing service curve */
	lssc.d = opts->lssc_d;
	lssc.m2 = opts->lssc_m2;
	ulsc.m1 = opts->ulsc_m1;	/* upper-limit service curve */
	ulsc.d = opts->ulsc_d;
	ulsc.m2 = opts->ulsc_m2;

	cl = hfsc_class_create(hif, &rtsc, &lssc, &ulsc,
	    parent, a->qlimit, opts->flags, a->qid);
	if (cl == NULL)
		return (ENOMEM);

	return (0);
}
238
239 int
240 hfsc_remove_queue(struct pf_altq *a)
241 {
242 struct hfsc_if *hif;
243 struct hfsc_class *cl;
244
245 if ((hif = a->altq_disc) == NULL)
246 return (EINVAL);
247
248 if ((cl = clh_to_clp(hif, a->qid)) == NULL)
249 return (EINVAL);
250
251 return (hfsc_class_destroy(cl));
252 }
253
/*
 * Copy a class's statistics out to userland.  *nbytes must be at
 * least sizeof(struct hfsc_classstats) on entry and is set to the
 * number of bytes copied on success.
 */
int
hfsc_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct hfsc_classstats stats;
	int error = 0;

	if ((hif = altq_lookup(a->ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, a->qid)) == NULL)
		return (EINVAL);

	if (*nbytes < sizeof(stats))
		return (EINVAL);

	get_class_stats(&stats, cl);

	if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0)
		return (error);
	*nbytes = sizeof(stats);
	return (0);
}
278
279
280
281
282
/*
 * Destroy every class under the root.  hfsc_class_destroy() refuses
 * to destroy a class that still has children (EBUSY), so repeatedly
 * walk the tree, destroy the first leaf found, and start over until
 * only the root remains.
 */
static int
hfsc_clear_interface(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	/* clear out the classes */
	while (hif->hif_rootclass != NULL &&
	    (cl = hif->hif_rootclass->cl_children) != NULL) {
		/*
		 * remove the first leaf class found in the hierarchy,
		 * then start over
		 */
		for (; cl != NULL; cl = hfsc_nextclass(cl)) {
			if (!is_a_parent_class(cl)) {
				(void)hfsc_class_destroy(cl);
				break;
			}
		}
	}

	return (0);
}
305
306 static int
307 hfsc_request(struct ifaltq *ifq, int req, void *arg)
308 {
309 struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
310
311 switch (req) {
312 case ALTRQ_PURGE:
313 hfsc_purge(hif);
314 break;
315 }
316 return (0);
317 }
318
319
/* discard all the queued packets on the interface */
static void
hfsc_purge(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
		if (!qempty(cl->cl_q))
			hfsc_purgeq(cl);
	if (ALTQ_IS_ENABLED(hif->hif_ifq))
		hif->hif_ifq->ifq_len = 0;
}
331
/*
 * Allocate and link a new class into the hierarchy.
 *
 *   rsc/fsc/usc - real-time, link-sharing and upper-limit service
 *                 curves; a curve with m1 == m2 == 0 (or NULL) is
 *                 not installed.
 *   parent      - parent class, or NULL for the root class.
 *   qlimit      - queue length limit (0 selects a default of 50).
 *   flags       - HFCF_* flags (RED/RIO/ECN/default class, ...).
 *   qid         - class handle, used as a hash into hif_class_tbl.
 *
 * Returns the new class, or NULL on failure (table full, allocation
 * failure, or RED requested without ALTQ_RED compiled in).
 */
struct hfsc_class *
hfsc_class_create(struct hfsc_if *hif, struct service_curve *rsc,
    struct service_curve *fsc, struct service_curve *usc,
    struct hfsc_class *parent, int qlimit, int flags, int qid)
{
	struct hfsc_class *cl, *p;
	int i, s;

	if (hif->hif_classes >= HFSC_MAX_CLASSES)
		return (NULL);

#ifndef ALTQ_RED
	if (flags & HFCF_RED) {
#ifdef ALTQ_DEBUG
		printf("hfsc_class_create: RED not configured for HFSC!\n");
#endif
		return (NULL);
	}
#endif

	MALLOC(cl, struct hfsc_class *, sizeof(struct hfsc_class),
	    M_DEVBUF, M_WAITOK);
	if (cl == NULL)
		return (NULL);
	bzero(cl, sizeof(struct hfsc_class));

	MALLOC(cl->cl_q, class_queue_t *, sizeof(class_queue_t),
	    M_DEVBUF, M_WAITOK);
	if (cl->cl_q == NULL)
		goto err_ret;
	bzero(cl->cl_q, sizeof(class_queue_t));

	/* list of this class's active children (for link-sharing) */
	cl->cl_actc = actlist_alloc();
	if (cl->cl_actc == NULL)
		goto err_ret;

	if (qlimit == 0)
		qlimit = 50;	/* use default */
	qlimit(cl->cl_q) = qlimit;
	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	cl->cl_flags = flags;
#ifdef ALTQ_RED
	if (flags & (HFCF_RED|HFCF_RIO)) {
		int red_flags, red_pkttime;
		u_int m2;

		/*
		 * estimate the per-packet transmission time from the
		 * largest m2 rate among the installed service curves
		 */
		m2 = 0;
		if (rsc != NULL && rsc->m2 > m2)
			m2 = rsc->m2;
		if (fsc != NULL && fsc->m2 > m2)
			m2 = fsc->m2;
		if (usc != NULL && usc->m2 > m2)
			m2 = usc->m2;

		red_flags = 0;
		if (flags & HFCF_ECN)
			red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
		if (flags & HFCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		if (m2 < 8)
			red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			red_pkttime = (int64_t)hif->hif_ifq->altq_ifp->if_mtu
			    * 1000 * 1000 * 1000 / (m2 / 8);
		if (flags & HFCF_RED) {
			cl->cl_red = red_alloc(0, 0,
			    qlimit(cl->cl_q) * 10/100,
			    qlimit(cl->cl_q) * 30/100,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->cl_red = (red_t *)rio_alloc(0, NULL,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */

	/* convert the user-visible curves into internal fixed point */
	if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0)) {
		MALLOC(cl->cl_rsc, struct internal_sc *,
		    sizeof(struct internal_sc), M_DEVBUF, M_WAITOK);
		if (cl->cl_rsc == NULL)
			goto err_ret;
		sc2isc(rsc, cl->cl_rsc);
		rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
		rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
	}
	if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0)) {
		MALLOC(cl->cl_fsc, struct internal_sc *,
		    sizeof(struct internal_sc), M_DEVBUF, M_WAITOK);
		if (cl->cl_fsc == NULL)
			goto err_ret;
		sc2isc(fsc, cl->cl_fsc);
		rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
	}
	if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0)) {
		MALLOC(cl->cl_usc, struct internal_sc *,
		    sizeof(struct internal_sc), M_DEVBUF, M_WAITOK);
		if (cl->cl_usc == NULL)
			goto err_ret;
		sc2isc(usc, cl->cl_usc);
		rtsc_init(&cl->cl_ulimit, cl->cl_usc, 0, 0);
	}

	cl->cl_id = hif->hif_classid++;
	cl->cl_handle = qid;
	cl->cl_hif = hif;
	cl->cl_parent = parent;

	s = splnet();
	hif->hif_classes++;

	/*
	 * insert into the class table.  the preferred slot is the
	 * handle hashed modulo the table size; if occupied, fall back
	 * to the first free slot (clh_to_clp does the matching scan).
	 */
	i = qid % HFSC_MAX_CLASSES;
	if (hif->hif_class_tbl[i] == NULL)
		hif->hif_class_tbl[i] = cl;
	else {
		for (i = 0; i < HFSC_MAX_CLASSES; i++)
			if (hif->hif_class_tbl[i] == NULL) {
				hif->hif_class_tbl[i] = cl;
				break;
			}
		if (i == HFSC_MAX_CLASSES) {
			splx(s);
			goto err_ret;
		}
	}

	if (flags & HFCF_DEFAULTCLASS)
		hif->hif_defaultclass = cl;

	if (parent == NULL) {
		/* this is the root class */
		hif->hif_rootclass = cl;
	} else {
		/* append to the tail of the parent's sibling list */
		if ((p = parent->cl_children) == NULL)
			parent->cl_children = cl;
		else {
			while (p->cl_siblings != NULL)
				p = p->cl_siblings;
			p->cl_siblings = cl;
		}
	}
	splx(s);

	return (cl);

 err_ret:
	/* unwind everything allocated so far */
	if (cl->cl_actc != NULL)
		actlist_destroy(cl->cl_actc);
	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}
	if (cl->cl_fsc != NULL)
		FREE(cl->cl_fsc, M_DEVBUF);
	if (cl->cl_rsc != NULL)
		FREE(cl->cl_rsc, M_DEVBUF);
	if (cl->cl_usc != NULL)
		FREE(cl->cl_usc, M_DEVBUF);
	if (cl->cl_q != NULL)
		FREE(cl->cl_q, M_DEVBUF);
	FREE(cl, M_DEVBUF);
	return (NULL);
}
516
/*
 * Destroy a class: purge its queue, unlink it from its parent's
 * child list and from the class table, and free all its resources.
 * A class that still has children cannot be destroyed (EBUSY).
 */
static int
hfsc_class_destroy(struct hfsc_class *cl)
{
	int i, s;

	if (cl == NULL)
		return (0);

	if (is_a_parent_class(cl))
		return (EBUSY);

	s = splnet();

	if (!qempty(cl->cl_q))
		hfsc_purgeq(cl);

	if (cl->cl_parent == NULL) {
		/* this is the root class; nothing to unlink */
	} else {
		/* unlink from the parent's singly-linked sibling list */
		struct hfsc_class *p = cl->cl_parent->cl_children;

		if (p == cl)
			cl->cl_parent->cl_children = cl->cl_siblings;
		else do {
			if (p->cl_siblings == cl) {
				p->cl_siblings = cl->cl_siblings;
				break;
			}
		} while ((p = p->cl_siblings) != NULL);
		ASSERT(p != NULL);
	}

	/* remove from the class table (linear scan, no hash assumption) */
	for (i = 0; i < HFSC_MAX_CLASSES; i++)
		if (cl->cl_hif->hif_class_tbl[i] == cl) {
			cl->cl_hif->hif_class_tbl[i] = NULL;
			break;
		}

	cl->cl_hif->hif_classes--;
	splx(s);

	actlist_destroy(cl->cl_actc);

	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}

	/* clear cached pointers in the interface state */
	if (cl == cl->cl_hif->hif_rootclass)
		cl->cl_hif->hif_rootclass = NULL;
	if (cl == cl->cl_hif->hif_defaultclass)
		cl->cl_hif->hif_defaultclass = NULL;

	if (cl->cl_usc != NULL)
		FREE(cl->cl_usc, M_DEVBUF);
	if (cl->cl_fsc != NULL)
		FREE(cl->cl_fsc, M_DEVBUF);
	if (cl->cl_rsc != NULL)
		FREE(cl->cl_rsc, M_DEVBUF);
	FREE(cl->cl_q, M_DEVBUF);
	FREE(cl, M_DEVBUF);

	return (0);
}
587
588
589
590
591
592
593
594 static struct hfsc_class *
595 hfsc_nextclass(struct hfsc_class *cl)
596 {
597 if (cl->cl_children != NULL)
598 cl = cl->cl_children;
599 else if (cl->cl_siblings != NULL)
600 cl = cl->cl_siblings;
601 else {
602 while ((cl = cl->cl_parent) != NULL)
603 if (cl->cl_siblings) {
604 cl = cl->cl_siblings;
605 break;
606 }
607 }
608
609 return (cl);
610 }
611
612
613
614
615
/*
 * altq enqueue hook: classify the packet (via the pf qid tag in the
 * mbuf header), queue it on the class, and if the class just became
 * backlogged, activate it in the scheduler.
 * Returns 0 on success, ENOBUFS when the packet is dropped.
 */
static int
hfsc_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	int len;

	/* grab class set by classifier */
	if ((m->m_flags & M_PKTHDR) == 0) {
		/* should not happen */
		printf("altq: packet for %s does not have pkthdr\n",
		    ifq->altq_ifp->if_xname);
		m_freem(m);
		return (ENOBUFS);
	}
	/* parent classes never queue packets; fall back to the default */
	if ((cl = clh_to_clp(hif, m->m_pkthdr.pf.qid)) == NULL ||
	    is_a_parent_class(cl)) {
		cl = hif->hif_defaultclass;
		if (cl == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		cl->cl_pktattr = NULL;
	}

	len = m_pktlen(m);
	if (hfsc_addq(cl, m) != 0) {
		/* drop occurred.  mbuf was freed in hfsc_addq. */
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, len);
		return (ENOBUFS);
	}
	IFQ_INC_LEN(ifq);
	cl->cl_hif->hif_packets++;

	/* successfully queued. */
	if (qlen(cl->cl_q) == 1)
		set_active(cl, m_pktlen(m));

	return (0);
}
656
657
658
659
660
661
662
663
664
665
/*
 * altq dequeue hook.  op is either ALTDQ_POLL (peek at the next
 * packet and remember the chosen class in hif_pollcache) or
 * ALTDQ_REMOVE (actually dequeue it).
 *
 * Class selection: a class that is eligible by its real-time curve
 * (cl_e <= now) and has the smallest deadline wins; otherwise the
 * tree is descended by link-sharing, picking at each level the
 * active child with the smallest virtual time whose fit-time has
 * passed (actlist_firstfit).
 */
static struct mbuf *
hfsc_dequeue(struct ifaltq *ifq, int op)
{
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct mbuf *m;
	int len, next_len;
	int realtime = 0;
	u_int64_t cur_time;

	if (hif->hif_packets == 0)
		/* no packet in the tree */
		return (NULL);

	cur_time = read_machclk();

	if (op == ALTDQ_REMOVE && hif->hif_pollcache != NULL) {
		/* use the class chosen by the preceding poll */
		cl = hif->hif_pollcache;
		hif->hif_pollcache = NULL;
		/* check if the class was scheduled by real-time criteria */
		if (cl->cl_rsc != NULL)
			realtime = (cl->cl_e <= cur_time);
	} else {
		/*
		 * if there are eligible classes, use real-time criteria.
		 * find the class with the minimum deadline among
		 * the eligible classes.
		 */
		if ((cl = ellist_get_mindl(hif->hif_eligible, cur_time))
		    != NULL) {
			realtime = 1;
		} else {
#ifdef ALTQ_DEBUG
			int fits = 0;
#endif
			/*
			 * use link-sharing criteria: get the class
			 * with the minimum vt in the hierarchy
			 */
			cl = hif->hif_rootclass;
			while (is_a_parent_class(cl)) {

				cl = actlist_firstfit(cl, cur_time);
				if (cl == NULL) {
#ifdef ALTQ_DEBUG
					if (fits > 0)
						printf("%d fit but none found\n",fits);
#endif
					return (NULL);
				}
				/*
				 * update parent's cl_cvtmin.
				 * don't update if the new vt is smaller.
				 */
				if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
					cl->cl_parent->cl_cvtmin = cl->cl_vt;
#ifdef ALTQ_DEBUG
				fits++;
#endif
			}
		}

		if (op == ALTDQ_POLL) {
			hif->hif_pollcache = cl;
			m = hfsc_pollq(cl);
			return (m);
		}
	}

	m = hfsc_getq(cl);
	if (m == NULL)
		panic("hfsc_dequeue:");
	len = m_pktlen(m);
	cl->cl_hif->hif_packets--;
	IFQ_DEC_LEN(ifq);
	PKTCNTR_ADD(&cl->cl_stats.xmit_cnt, len);

	update_vf(cl, len, cur_time);
	/* cl_cumul tracks bytes sent under real-time service only */
	if (realtime)
		cl->cl_cumul += len;

	if (!qempty(cl->cl_q)) {
		if (cl->cl_rsc != NULL) {
			/* update ed for the next packet at the head */
			next_len = m_pktlen(qhead(cl->cl_q));

			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

	return (m);
}
765
/*
 * Append a packet to the class queue, delegating to RED/RIO when
 * configured, otherwise tail-dropping when the queue is full.
 * Returns 0 on success, -1 when the packet was dropped (the mbuf is
 * freed here in that case).
 */
static int
hfsc_addq(struct hfsc_class *cl, struct mbuf *m)
{

#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_addq((rio_t *)cl->cl_red, cl->cl_q,
		    m, cl->cl_pktattr);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
#endif
	if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
		m_freem(m);
		return (-1);
	}

	if (cl->cl_flags & HFCF_CLEARDSCP)
		write_dsfield(m, cl->cl_pktattr, 0);

	_addq(cl->cl_q, m);

	return (0);
}
791
/* dequeue the packet at the head of the class queue (RED/RIO aware) */
static struct mbuf *
hfsc_getq(struct hfsc_class *cl)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_getq(cl->cl_red, cl->cl_q);
#endif
	return _getq(cl->cl_q);
}
805
/* peek at the head packet without removing it */
static struct mbuf *
hfsc_pollq(struct hfsc_class *cl)
{
	return qhead(cl->cl_q);
}
811
/*
 * Drop every packet queued on the class, counting them as drops and
 * keeping the interface queue length in sync, then deactivate the
 * class.
 */
static void
hfsc_purgeq(struct hfsc_class *cl)
{
	struct mbuf *m;

	if (qempty(cl->cl_q))
		return;

	while ((m = _getq(cl->cl_q)) != NULL) {
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, m_pktlen(m));
		m_freem(m);
		cl->cl_hif->hif_packets--;
		IFQ_DEC_LEN(cl->cl_hif->hif_ifq);
	}
	ASSERT(qlen(cl->cl_q) == 0);

	/* len == 0, q now empty: walks up the tree releasing the class */
	update_vf(cl, 0, 0);
	set_passive(cl);
}
831
/*
 * The class has just become backlogged (first packet queued):
 * initialize its real-time (eligible/deadline) and link-sharing
 * (virtual time) state as applicable.
 */
static void
set_active(struct hfsc_class *cl, int len)
{
	if (cl->cl_rsc != NULL)
		init_ed(cl, len);
	if (cl->cl_fsc != NULL)
		init_vf(cl, len);

	cl->cl_stats.period++;
}
842
/*
 * The class queue has drained: take it off the eligible list.
 * Removal from the parent's active-children list is NOT done here —
 * update_vf() handles that when cl_nactive drops to zero.
 */
static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_rsc != NULL)
		ellist_remove(cl);

	/*
	 * actlist is handled in update_vf() so that update_vf() can be
	 * called from the top of the dequeue path.
	 */
}
854
/*
 * Initialize the eligible time (cl_e) and deadline (cl_d) of a class
 * that has just become active, and insert it into the eligible list.
 * next_len is the length of the packet at the head of the queue.
 */
static void
init_ed(struct hfsc_class *cl, int next_len)
{
	u_int64_t cur_time;

	cur_time = read_machclk();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * the eligible curve starts as a copy of the deadline curve.
	 * for a concave curve (sm1 <= sm2), the first segment is
	 * dropped so that eligibility follows the second slope only.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d from the cumulative bytes already served */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_insert(cl);
}
882
/*
 * Recompute eligible time and deadline after a real-time dequeue and
 * reposition the class in the (sorted) eligible list.
 */
static void
update_ed(struct hfsc_class *cl, int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_update(cl);
}
891
/*
 * After a link-sharing (non-real-time) dequeue only the deadline
 * moves; cl_cumul and therefore cl_e are unchanged.
 */
static void
update_d(struct hfsc_class *cl, int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}
897
/*
 * Initialize the virtual-time state of a newly backlogged class, and
 * of every ancestor that becomes active because of it (go_active
 * propagates up only while each level's active count was zero).
 */
static void
init_vf(struct hfsc_class *cl, int len)
{
	struct hfsc_class *max_cl, *p;
	u_int64_t vt, f, cur_time;
	int go_active;

	cur_time = 0;	/* read lazily, only if an upper-limit curve needs it */
	go_active = 1;
	for ( ; cl->cl_parent != NULL; cl = cl->cl_parent) {

		/* this level becomes active only if it had no active children */
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			max_cl = actlist_last(cl->cl_parent->cl_actc);
			if (max_cl != NULL) {
				/*
				 * set vt to the average of the min and max
				 * classes.  if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child to become active in this
				 * period: reset the virtual-time frame.
				 * cvtmax is folded into every child's
				 * vtoff so vt can restart from 0.
				 */
				vt = cl->cl_parent->cl_cvtmax;
				for (p = cl->cl_parent->cl_children; p != NULL;
				    p = p->cl_siblings)
					p->cl_vtoff += vt;
				cl->cl_vt = 0;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
			}
			cl->cl_initvt = cl->cl_vt;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, cl->cl_fsc, vt, cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			actlist_insert(cl);

			if (cl->cl_usc != NULL) {
				/* update the upper-limit curve */
				if (cur_time == 0)
					cur_time = read_machclk();

				rtsc_min(&cl->cl_ulimit, cl->cl_usc, cur_time,
				    cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
				    cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		/* fit-time is the larger of own limit and children's min */
		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}
988
/*
 * Propagate a dequeue of len bytes up the tree: update each
 * ancestor's totals and virtual time, deactivate levels whose last
 * active child just drained (go_passive), and recompute fit-times.
 * Called with len == 0 / cur_time == 0 from hfsc_purgeq() to release
 * the class without accounting any transmission.
 */
static void
update_vf(struct hfsc_class *cl, int len, u_int64_t cur_time)
{
	u_int64_t f, myf_bound, delta;
	int go_passive;

	go_passive = qempty(cl->cl_q);

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {

		cl->cl_total += len;

		/* classes without a link-sharing curve only track totals */
		if (cl->cl_fsc == NULL || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt list */
			actlist_remove(cl);

			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
		    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt list */
		actlist_update(cl);

		if (cl->cl_usc != NULL) {
			cl->cl_myf = cl->cl_myfadj
			    + rtsc_y2x(&cl->cl_ulimit, cl->cl_total);

			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - machclk_per_tick;
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
		}

		/* cl_f is max(cl_myf, cl_cfmin) */
		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}
1073
1074 static void
1075 update_cfmin(struct hfsc_class *cl)
1076 {
1077 struct hfsc_class *p;
1078 u_int64_t cfmin;
1079
1080 if (TAILQ_EMPTY(cl->cl_actc)) {
1081 cl->cl_cfmin = 0;
1082 return;
1083 }
1084 cfmin = HT_INFINITY;
1085 TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
1086 if (p->cl_f == 0) {
1087 cl->cl_cfmin = 0;
1088 return;
1089 }
1090 if (p->cl_f < cfmin)
1091 cfmin = p->cl_f;
1092 }
1093 cl->cl_cfmin = cfmin;
1094 }
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105 static ellist_t *
1106 ellist_alloc(void)
1107 {
1108 ellist_t *head;
1109
1110 MALLOC(head, ellist_t *, sizeof(ellist_t), M_DEVBUF, M_WAITOK);
1111 TAILQ_INIT(head);
1112 return (head);
1113 }
1114
/* free an eligible-list head (must already be empty of classes) */
static void
ellist_destroy(ellist_t *head)
{
	FREE(head, M_DEVBUF);
}
1120
/*
 * Insert a class into the eligible list, kept sorted by ascending
 * eligible time (cl_e).
 */
static void
ellist_insert(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;
	struct hfsc_class *p;

	/* check the last entry first (common case: append at tail) */
	if ((p = TAILQ_LAST(hif->hif_eligible, _eligible)) == NULL ||
	    p->cl_e <= cl->cl_e) {
		TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
		return;
	}

	TAILQ_FOREACH(p, hif->hif_eligible, cl_ellist) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}
1142
/* take a class off the eligible list */
static void
ellist_remove(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;

	TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
}
1150
/*
 * Re-sort a class whose eligible time has increased.  Since cl_e only
 * grows here, the class can only move toward the tail, so the search
 * starts from its current successor rather than from the head.
 */
static void
ellist_update(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;
	struct hfsc_class *p, *last;

	/*
	 * the eligible time of a class increases monotonically.
	 * if the next entry has a larger eligible time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_ellist);
	if (p == NULL || cl->cl_e <= p->cl_e)
		return;

	/* check the last entry */
	last = TAILQ_LAST(hif->hif_eligible, _eligible);
	ASSERT(last != NULL);
	if (last->cl_e <= cl->cl_e) {
		TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
		TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}
1187
1188
1189 struct hfsc_class *
1190 ellist_get_mindl(ellist_t *head, u_int64_t cur_time)
1191 {
1192 struct hfsc_class *p, *cl = NULL;
1193
1194 TAILQ_FOREACH(p, head, cl_ellist) {
1195 if (p->cl_e > cur_time)
1196 break;
1197 if (cl == NULL || p->cl_d < cl->cl_d)
1198 cl = p;
1199 }
1200 return (cl);
1201 }
1202
1203
1204
1205
1206
1207
1208 static actlist_t *
1209 actlist_alloc(void)
1210 {
1211 actlist_t *head;
1212
1213 MALLOC(head, actlist_t *, sizeof(actlist_t), M_DEVBUF, M_WAITOK);
1214 TAILQ_INIT(head);
1215 return (head);
1216 }
1217
/* free an active-children list head (must already be empty) */
static void
actlist_destroy(actlist_t *head)
{
	FREE(head, M_DEVBUF);
}
/*
 * Insert a class into its parent's active-children list, kept sorted
 * by ascending virtual time (cl_vt).
 */
static void
actlist_insert(struct hfsc_class *cl)
{
	struct hfsc_class *p;

	/* check the last entry first (common case: append at tail) */
	if ((p = TAILQ_LAST(cl->cl_parent->cl_actc, _active)) == NULL
	    || p->cl_vt <= cl->cl_vt) {
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	TAILQ_FOREACH(p, cl->cl_parent->cl_actc, cl_actlist) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}
1243
/* take a class off its parent's active-children list */
static void
actlist_remove(struct hfsc_class *cl)
{
	TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
}
1249
/*
 * Re-sort a class whose virtual time has increased.  Like
 * ellist_update(), cl_vt only grows here, so the class can only move
 * toward the tail and the search starts from its current successor.
 */
static void
actlist_update(struct hfsc_class *cl)
{
	struct hfsc_class *p, *last;

	/*
	 * the virtual time of a class increases monotonically.
	 * if the next entry has a larger virtual time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_actlist);
	if (p == NULL || cl->cl_vt < p->cl_vt)
		return;

	/* check the last entry */
	last = TAILQ_LAST(cl->cl_parent->cl_actc, _active);
	ASSERT(last != NULL);
	if (last->cl_vt <= cl->cl_vt) {
		TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}
1286
1287 static struct hfsc_class *
1288 actlist_firstfit(struct hfsc_class *cl, u_int64_t cur_time)
1289 {
1290 struct hfsc_class *p;
1291
1292 TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
1293 if (p->cl_f <= cur_time)
1294 return (p);
1295 }
1296 return (NULL);
1297 }
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325 #define SM_SHIFT 24
1326 #define ISM_SHIFT 10
1327
1328 #define SM_MASK ((1LL << SM_SHIFT) - 1)
1329 #define ISM_MASK ((1LL << ISM_SHIFT) - 1)
1330
1331 static __inline u_int64_t
1332 seg_x2y(u_int64_t x, u_int64_t sm)
1333 {
1334 u_int64_t y;
1335
1336
1337
1338
1339
1340
1341 y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
1342 return (y);
1343 }
1344
/*
 * x = y * ism with ism in ISM_SHIFT fixed point (the inverse of
 * seg_x2y).  ism == HT_INFINITY encodes a zero slope, so any nonzero
 * y maps to infinity.
 */
static __inline u_int64_t
seg_y2x(u_int64_t y, u_int64_t ism)
{
	u_int64_t x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else {
		/* split into high/low parts to avoid overflow */
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
	}
	return (x);
}
1360
/* convert a rate in bits/sec to a slope in bytes/machclk-tick (fixed point) */
static __inline u_int64_t
m2sm(u_int m)
{
	u_int64_t sm;

	sm = ((u_int64_t)m << SM_SHIFT) / 8 / machclk_freq;
	return (sm);
}
1369
/*
 * convert a rate in bits/sec to an inverse slope in
 * machclk-ticks/byte (fixed point); a zero rate maps to HT_INFINITY
 */
static __inline u_int64_t
m2ism(u_int m)
{
	u_int64_t ism;

	if (m == 0)
		ism = HT_INFINITY;
	else
		ism = ((u_int64_t)machclk_freq << ISM_SHIFT) * 8 / m;
	return (ism);
}
1381
/* convert a duration in milliseconds to machclk ticks */
static __inline u_int64_t
d2dx(u_int d)
{
	u_int64_t dx;

	dx = ((u_int64_t)d * machclk_freq) / 1000;
	return (dx);
}
1390
/* convert a fixed-point slope back to a rate in bits/sec (inverse of m2sm) */
static u_int
sm2m(u_int64_t sm)
{
	u_int64_t m;

	m = (sm * 8 * machclk_freq) >> SM_SHIFT;
	return ((u_int)m);
}
1399
/* convert machclk ticks back to milliseconds (inverse of d2dx) */
static u_int
dx2d(u_int64_t dx)
{
	u_int64_t d;

	d = dx * 1000 / machclk_freq;
	return ((u_int)d);
}
1408
/*
 * Convert a user-visible two-slope service curve (m1 for d msec,
 * then m2) into the internal fixed-point representation.
 */
static void
sc2isc(struct service_curve *sc, struct internal_sc *isc)
{
	isc->sm1 = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx = d2dx(sc->d);
	isc->dy = seg_x2y(isc->dx, isc->sm1);	/* bytes served in first segment */
	isc->sm2 = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}
1419
1420
1421
1422
1423
/*
 * initialize the runtime service curve with the given internal
 * service curve anchored at the point (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc * isc, u_int64_t x,
    u_int64_t y)
{
	rtsc->x = x;
	rtsc->y = y;
	rtsc->sm1 = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx = isc->dx;
	rtsc->dy = isc->dy;
	rtsc->sm2 = isc->sm2;
	rtsc->ism2 = isc->ism2;
}
1437
1438
1439
1440
1441
/*
 * calculate the x-projection (time) of the runtime service curve for
 * a given y (cumulative bytes).  The curve has two segments: slope
 * sm1 for dx ticks / dy bytes starting at (x, y), then slope sm2.
 */
static u_int64_t
rtsc_y2x(struct runtime_sc *rtsc, u_int64_t y)
{
	u_int64_t x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return (x);
}
1462
/*
 * calculate the y-projection (cumulative bytes) of the runtime
 * service curve at time x (the inverse of rtsc_y2x).
 */
static u_int64_t
rtsc_x2y(struct runtime_sc *rtsc, u_int64_t x)
{
	u_int64_t y;

	if (x <= rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return (y);
}
1479
1480
1481
1482
1483
/*
 * update the runtime service curve to be the minimum of the current
 * runtime curve and the internal curve isc re-anchored at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u_int64_t x,
    u_int64_t y)
{
	u_int64_t y1, y2, dx, dy;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave.
	 * compute the two y values of the current rtsc:
	 *   y1: at x
	 *   y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect.
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y():
	 *   seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 */
	dx = ((y1 - y) << SM_SHIFT) / (isc->sm1 - isc->sm2);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
	return;
}
1544
/*
 * Snapshot a class's configuration, scheduler state and counters
 * into the userland-visible hfsc_classstats structure.
 */
static void
get_class_stats(struct hfsc_classstats *sp, struct hfsc_class *cl)
{
	sp->class_id = cl->cl_id;
	sp->class_handle = cl->cl_handle;

	/* installed service curves, converted back to user units */
	if (cl->cl_rsc != NULL) {
		sp->rsc.m1 = sm2m(cl->cl_rsc->sm1);
		sp->rsc.d = dx2d(cl->cl_rsc->dx);
		sp->rsc.m2 = sm2m(cl->cl_rsc->sm2);
	} else {
		sp->rsc.m1 = 0;
		sp->rsc.d = 0;
		sp->rsc.m2 = 0;
	}
	if (cl->cl_fsc != NULL) {
		sp->fsc.m1 = sm2m(cl->cl_fsc->sm1);
		sp->fsc.d = dx2d(cl->cl_fsc->dx);
		sp->fsc.m2 = sm2m(cl->cl_fsc->sm2);
	} else {
		sp->fsc.m1 = 0;
		sp->fsc.d = 0;
		sp->fsc.m2 = 0;
	}
	if (cl->cl_usc != NULL) {
		sp->usc.m1 = sm2m(cl->cl_usc->sm1);
		sp->usc.d = dx2d(cl->cl_usc->dx);
		sp->usc.m2 = sm2m(cl->cl_usc->sm2);
	} else {
		sp->usc.m1 = 0;
		sp->usc.d = 0;
		sp->usc.m2 = 0;
	}

	sp->total = cl->cl_total;
	sp->cumul = cl->cl_cumul;

	/* current scheduler state */
	sp->d = cl->cl_d;
	sp->e = cl->cl_e;
	sp->vt = cl->cl_vt;
	sp->f = cl->cl_f;

	sp->initvt = cl->cl_initvt;
	sp->vtperiod = cl->cl_vtperiod;
	sp->parentperiod = cl->cl_parentperiod;
	sp->nactive = cl->cl_nactive;
	sp->vtoff = cl->cl_vtoff;
	sp->cvtmax = cl->cl_cvtmax;
	sp->myf = cl->cl_myf;
	sp->cfmin = cl->cl_cfmin;
	sp->cvtmin = cl->cl_cvtmin;
	sp->myfadj = cl->cl_myfadj;
	sp->vtadj = cl->cl_vtadj;

	sp->cur_time = read_machclk();
	sp->machclk_freq = machclk_freq;

	/* queue state and counters */
	sp->qlength = qlen(cl->cl_q);
	sp->qlimit = qlimit(cl->cl_q);
	sp->xmit_cnt = cl->cl_stats.xmit_cnt;
	sp->drop_cnt = cl->cl_stats.drop_cnt;
	sp->period = cl->cl_stats.period;

	sp->qtype = qtype(cl->cl_q);
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		red_getstats(cl->cl_red, &sp->red[0]);
#endif
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
#endif
}
1618
1619
1620 static struct hfsc_class *
1621 clh_to_clp(struct hfsc_if *hif, u_int32_t chandle)
1622 {
1623 int i;
1624 struct hfsc_class *cl;
1625
1626 if (chandle == 0)
1627 return (NULL);
1628
1629
1630
1631
1632 i = chandle % HFSC_MAX_CLASSES;
1633 if ((cl = hif->hif_class_tbl[i]) != NULL && cl->cl_handle == chandle)
1634 return (cl);
1635 for (i = 0; i < HFSC_MAX_CLASSES; i++)
1636 if ((cl = hif->hif_class_tbl[i]) != NULL &&
1637 cl->cl_handle == chandle)
1638 return (cl);
1639 return (NULL);
1640 }