root/altq/altq_hfsc.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. hfsc_pfattach
  2. hfsc_add_altq
  3. hfsc_remove_altq
  4. hfsc_add_queue
  5. hfsc_remove_queue
  6. hfsc_getqstats
  7. hfsc_clear_interface
  8. hfsc_request
  9. hfsc_purge
  10. hfsc_class_create
  11. hfsc_class_destroy
  12. hfsc_nextclass
  13. hfsc_enqueue
  14. hfsc_dequeue
  15. hfsc_addq
  16. hfsc_getq
  17. hfsc_pollq
  18. hfsc_purgeq
  19. set_active
  20. set_passive
  21. init_ed
  22. update_ed
  23. update_d
  24. init_vf
  25. update_vf
  26. update_cfmin
  27. ellist_alloc
  28. ellist_destroy
  29. ellist_insert
  30. ellist_remove
  31. ellist_update
  32. ellist_get_mindl
  33. actlist_alloc
  34. actlist_destroy
  35. actlist_insert
  36. actlist_remove
  37. actlist_update
  38. actlist_firstfit
  39. seg_x2y
  40. seg_y2x
  41. m2sm
  42. m2ism
  43. d2dx
  44. sm2m
  45. dx2d
  46. sc2isc
  47. rtsc_init
  48. rtsc_y2x
  49. rtsc_x2y
  50. rtsc_min
  51. get_class_stats
  52. clh_to_clp

    1 /*      $OpenBSD: altq_hfsc.c,v 1.24 2007/05/28 17:16:38 henning Exp $  */
    2 /*      $KAME: altq_hfsc.c,v 1.17 2002/11/29 07:48:33 kjc Exp $ */
    3 
    4 /*
    5  * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
    6  *
    7  * Permission to use, copy, modify, and distribute this software and
    8  * its documentation is hereby granted (including for commercial or
    9  * for-profit use), provided that both the copyright notice and this
   10  * permission notice appear in all copies of the software, derivative
   11  * works, or modified versions, and any portions thereof.
   12  *
   13  * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
   14  * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
   15  * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
   16  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
   18  * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
   19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   20  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
   21  * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
   22  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
   23  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
   25  * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
   26  * DAMAGE.
   27  *
   28  * Carnegie Mellon encourages (but does not require) users of this
   29  * software to return any improvements or extensions that they make,
   30  * and to grant Carnegie Mellon the rights to redistribute these
   31  * changes without encumbrance.
   32  */
   33 /*
   34  * H-FSC is described in Proceedings of SIGCOMM'97,
   35  * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
   36  * Real-Time and Priority Service"
   37  * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
   38  *
   39  * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
   40  * when a class has an upperlimit, the fit-time is computed from the
   41  * upperlimit service curve.  the link-sharing scheduler does not schedule
   42  * a class whose fit-time exceeds the current time.
   43  */
   44 
   45 #include <sys/param.h>
   46 #include <sys/malloc.h>
   47 #include <sys/mbuf.h>
   48 #include <sys/socket.h>
   49 #include <sys/systm.h>
   50 #include <sys/errno.h>
   51 #include <sys/queue.h>
   52 
   53 #include <net/if.h>
   54 #include <netinet/in.h>
   55 
   56 #include <net/pfvar.h>
   57 #include <altq/altq.h>
   58 #include <altq/altq_hfsc.h>
   59 
   60 /*
   61  * function prototypes
   62  */
   63 static int                       hfsc_clear_interface(struct hfsc_if *);
   64 static int                       hfsc_request(struct ifaltq *, int, void *);
   65 static void                      hfsc_purge(struct hfsc_if *);
   66 static struct hfsc_class        *hfsc_class_create(struct hfsc_if *,
   67     struct service_curve *, struct service_curve *, struct service_curve *,
   68     struct hfsc_class *, int, int, int);
   69 static int                       hfsc_class_destroy(struct hfsc_class *);
   70 static struct hfsc_class        *hfsc_nextclass(struct hfsc_class *);
   71 static int                       hfsc_enqueue(struct ifaltq *, struct mbuf *,
   72                                     struct altq_pktattr *);
   73 static struct mbuf              *hfsc_dequeue(struct ifaltq *, int);
   74 
   75 static int               hfsc_addq(struct hfsc_class *, struct mbuf *);
   76 static struct mbuf      *hfsc_getq(struct hfsc_class *);
   77 static struct mbuf      *hfsc_pollq(struct hfsc_class *);
   78 static void              hfsc_purgeq(struct hfsc_class *);
   79 
   80 static void              update_cfmin(struct hfsc_class *);
   81 static void              set_active(struct hfsc_class *, int);
   82 static void              set_passive(struct hfsc_class *);
   83 
   84 static void              init_ed(struct hfsc_class *, int);
   85 static void              update_ed(struct hfsc_class *, int);
   86 static void              update_d(struct hfsc_class *, int);
   87 static void              init_vf(struct hfsc_class *, int);
   88 static void              update_vf(struct hfsc_class *, int, u_int64_t);
   89 static ellist_t         *ellist_alloc(void);
   90 static void              ellist_destroy(ellist_t *);
   91 static void              ellist_insert(struct hfsc_class *);
   92 static void              ellist_remove(struct hfsc_class *);
   93 static void              ellist_update(struct hfsc_class *);
   94 struct hfsc_class       *ellist_get_mindl(ellist_t *, u_int64_t);
   95 static actlist_t        *actlist_alloc(void);
   96 static void              actlist_destroy(actlist_t *);
   97 static void              actlist_insert(struct hfsc_class *);
   98 static void              actlist_remove(struct hfsc_class *);
   99 static void              actlist_update(struct hfsc_class *);
  100 
  101 static struct hfsc_class        *actlist_firstfit(struct hfsc_class *,
  102                                     u_int64_t);
  103 
  104 static __inline u_int64_t       seg_x2y(u_int64_t, u_int64_t);
  105 static __inline u_int64_t       seg_y2x(u_int64_t, u_int64_t);
  106 static __inline u_int64_t       m2sm(u_int);
  107 static __inline u_int64_t       m2ism(u_int);
  108 static __inline u_int64_t       d2dx(u_int);
  109 static u_int                    sm2m(u_int64_t);
  110 static u_int                    dx2d(u_int64_t);
  111 
  112 static void             sc2isc(struct service_curve *, struct internal_sc *);
  113 static void             rtsc_init(struct runtime_sc *, struct internal_sc *,
  114                             u_int64_t, u_int64_t);
  115 static u_int64_t        rtsc_y2x(struct runtime_sc *, u_int64_t);
  116 static u_int64_t        rtsc_x2y(struct runtime_sc *, u_int64_t);
  117 static void             rtsc_min(struct runtime_sc *, struct internal_sc *,
  118                             u_int64_t, u_int64_t);
  119 
  120 static void                      get_class_stats(struct hfsc_classstats *,
  121                                     struct hfsc_class *);
  122 static struct hfsc_class        *clh_to_clp(struct hfsc_if *, u_int32_t);
  123 
  124 /*
  125  * macros
  126  */
  127 #define is_a_parent_class(cl)   ((cl)->cl_children != NULL)
  128 
  129 #define HT_INFINITY     0xffffffffffffffffLL    /* infinite time value */
  130 
  131 int
  132 hfsc_pfattach(struct pf_altq *a)
  133 {
  134         struct ifnet *ifp;
  135         int s, error;
  136 
  137         if ((ifp = ifunit(a->ifname)) == NULL || a->altq_disc == NULL)
  138                 return (EINVAL);
  139         s = splnet();
  140         error = altq_attach(&ifp->if_snd, ALTQT_HFSC, a->altq_disc,
  141             hfsc_enqueue, hfsc_dequeue, hfsc_request, NULL, NULL);
  142         splx(s);
  143         return (error);
  144 }
  145 
  146 int
  147 hfsc_add_altq(struct pf_altq *a)
  148 {
  149         struct hfsc_if *hif;
  150         struct ifnet *ifp;
  151 
  152         if ((ifp = ifunit(a->ifname)) == NULL)
  153                 return (EINVAL);
  154         if (!ALTQ_IS_READY(&ifp->if_snd))
  155                 return (ENODEV);
  156 
  157         MALLOC(hif, struct hfsc_if *, sizeof(struct hfsc_if),
  158             M_DEVBUF, M_WAITOK);
  159         if (hif == NULL)
  160                 return (ENOMEM);
  161         bzero(hif, sizeof(struct hfsc_if));
  162 
  163         hif->hif_eligible = ellist_alloc();
  164         if (hif->hif_eligible == NULL) {
  165                 FREE(hif, M_DEVBUF);
  166                 return (ENOMEM);
  167         }
  168 
  169         hif->hif_ifq = &ifp->if_snd;
  170 
  171         /* keep the state in pf_altq */
  172         a->altq_disc = hif;
  173 
  174         return (0);
  175 }
  176 
  177 int
  178 hfsc_remove_altq(struct pf_altq *a)
  179 {
  180         struct hfsc_if *hif;
  181 
  182         if ((hif = a->altq_disc) == NULL)
  183                 return (EINVAL);
  184         a->altq_disc = NULL;
  185 
  186         (void)hfsc_clear_interface(hif);
  187         (void)hfsc_class_destroy(hif->hif_rootclass);
  188 
  189         ellist_destroy(hif->hif_eligible);
  190 
  191         FREE(hif, M_DEVBUF);
  192 
  193         return (0);
  194 }
  195 
  196 int
  197 hfsc_add_queue(struct pf_altq *a)
  198 {
  199         struct hfsc_if *hif;
  200         struct hfsc_class *cl, *parent;
  201         struct hfsc_opts *opts;
  202         struct service_curve rtsc, lssc, ulsc;
  203 
  204         if ((hif = a->altq_disc) == NULL)
  205                 return (EINVAL);
  206 
  207         opts = &a->pq_u.hfsc_opts;
  208 
  209         if (a->parent_qid == HFSC_NULLCLASS_HANDLE &&
  210             hif->hif_rootclass == NULL)
  211                 parent = NULL;
  212         else if ((parent = clh_to_clp(hif, a->parent_qid)) == NULL)
  213                 return (EINVAL);
  214 
  215         if (a->qid == 0)
  216                 return (EINVAL);
  217 
  218         if (clh_to_clp(hif, a->qid) != NULL)
  219                 return (EBUSY);
  220 
  221         rtsc.m1 = opts->rtsc_m1;
  222         rtsc.d  = opts->rtsc_d;
  223         rtsc.m2 = opts->rtsc_m2;
  224         lssc.m1 = opts->lssc_m1;
  225         lssc.d  = opts->lssc_d;
  226         lssc.m2 = opts->lssc_m2;
  227         ulsc.m1 = opts->ulsc_m1;
  228         ulsc.d  = opts->ulsc_d;
  229         ulsc.m2 = opts->ulsc_m2;
  230 
  231         cl = hfsc_class_create(hif, &rtsc, &lssc, &ulsc,
  232             parent, a->qlimit, opts->flags, a->qid);
  233         if (cl == NULL)
  234                 return (ENOMEM);
  235 
  236         return (0);
  237 }
  238 
  239 int
  240 hfsc_remove_queue(struct pf_altq *a)
  241 {
  242         struct hfsc_if *hif;
  243         struct hfsc_class *cl;
  244 
  245         if ((hif = a->altq_disc) == NULL)
  246                 return (EINVAL);
  247 
  248         if ((cl = clh_to_clp(hif, a->qid)) == NULL)
  249                 return (EINVAL);
  250 
  251         return (hfsc_class_destroy(cl));
  252 }
  253 
  254 int
  255 hfsc_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
  256 {
  257         struct hfsc_if *hif;
  258         struct hfsc_class *cl;
  259         struct hfsc_classstats stats;
  260         int error = 0;
  261 
  262         if ((hif = altq_lookup(a->ifname, ALTQT_HFSC)) == NULL)
  263                 return (EBADF);
  264 
  265         if ((cl = clh_to_clp(hif, a->qid)) == NULL)
  266                 return (EINVAL);
  267 
  268         if (*nbytes < sizeof(stats))
  269                 return (EINVAL);
  270 
  271         get_class_stats(&stats, cl);
  272 
  273         if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0)
  274                 return (error);
  275         *nbytes = sizeof(stats);
  276         return (0);
  277 }
  278 
  279 /*
  280  * bring the interface back to the initial state by discarding
  281  * all the filters and classes except the root class.
  282  */
  283 static int
  284 hfsc_clear_interface(struct hfsc_if *hif)
  285 {
  286         struct hfsc_class       *cl;
  287 
  288         /* clear out the classes */
  289         while (hif->hif_rootclass != NULL &&
  290             (cl = hif->hif_rootclass->cl_children) != NULL) {
  291                 /*
  292                  * remove the first leaf class found in the hierarchy
  293                  * then start over
  294                  */
  295                 for (; cl != NULL; cl = hfsc_nextclass(cl)) {
  296                         if (!is_a_parent_class(cl)) {
  297                                 (void)hfsc_class_destroy(cl);
  298                                 break;
  299                         }
  300                 }
  301         }
  302 
  303         return (0);
  304 }
  305 
  306 static int
  307 hfsc_request(struct ifaltq *ifq, int req, void *arg)
  308 {
  309         struct hfsc_if  *hif = (struct hfsc_if *)ifq->altq_disc;
  310 
  311         switch (req) {
  312         case ALTRQ_PURGE:
  313                 hfsc_purge(hif);
  314                 break;
  315         }
  316         return (0);
  317 }
  318 
  319 /* discard all the queued packets on the interface */
  320 static void
  321 hfsc_purge(struct hfsc_if *hif)
  322 {
  323         struct hfsc_class *cl;
  324 
  325         for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
  326                 if (!qempty(cl->cl_q))
  327                         hfsc_purgeq(cl);
  328         if (ALTQ_IS_ENABLED(hif->hif_ifq))
  329                 hif->hif_ifq->ifq_len = 0;
  330 }
  331 
  332 struct hfsc_class *
  333 hfsc_class_create(struct hfsc_if *hif, struct service_curve *rsc,
  334     struct service_curve *fsc, struct service_curve *usc,
  335     struct hfsc_class *parent, int qlimit, int flags, int qid)
  336 {
  337         struct hfsc_class *cl, *p;
  338         int i, s;
  339 
  340         if (hif->hif_classes >= HFSC_MAX_CLASSES)
  341                 return (NULL);
  342 
  343 #ifndef ALTQ_RED
  344         if (flags & HFCF_RED) {
  345 #ifdef ALTQ_DEBUG
  346                 printf("hfsc_class_create: RED not configured for HFSC!\n");
  347 #endif
  348                 return (NULL);
  349         }
  350 #endif
  351 
  352         MALLOC(cl, struct hfsc_class *, sizeof(struct hfsc_class),
  353                M_DEVBUF, M_WAITOK);
  354         if (cl == NULL)
  355                 return (NULL);
  356         bzero(cl, sizeof(struct hfsc_class));
  357 
  358         MALLOC(cl->cl_q, class_queue_t *, sizeof(class_queue_t),
  359                M_DEVBUF, M_WAITOK);
  360         if (cl->cl_q == NULL)
  361                 goto err_ret;
  362         bzero(cl->cl_q, sizeof(class_queue_t));
  363 
  364         cl->cl_actc = actlist_alloc();
  365         if (cl->cl_actc == NULL)
  366                 goto err_ret;
  367 
  368         if (qlimit == 0)
  369                 qlimit = 50;  /* use default */
  370         qlimit(cl->cl_q) = qlimit;
  371         qtype(cl->cl_q) = Q_DROPTAIL;
  372         qlen(cl->cl_q) = 0;
  373         cl->cl_flags = flags;
  374 #ifdef ALTQ_RED
  375         if (flags & (HFCF_RED|HFCF_RIO)) {
  376                 int red_flags, red_pkttime;
  377                 u_int m2;
  378 
  379                 m2 = 0;
  380                 if (rsc != NULL && rsc->m2 > m2)
  381                         m2 = rsc->m2;
  382                 if (fsc != NULL && fsc->m2 > m2)
  383                         m2 = fsc->m2;
  384                 if (usc != NULL && usc->m2 > m2)
  385                         m2 = usc->m2;
  386 
  387                 red_flags = 0;
  388                 if (flags & HFCF_ECN)
  389                         red_flags |= REDF_ECN;
  390 #ifdef ALTQ_RIO
  391                 if (flags & HFCF_CLEARDSCP)
  392                         red_flags |= RIOF_CLEARDSCP;
  393 #endif
  394                 if (m2 < 8)
  395                         red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
  396                 else
  397                         red_pkttime = (int64_t)hif->hif_ifq->altq_ifp->if_mtu
  398                                 * 1000 * 1000 * 1000 / (m2 / 8);
  399                 if (flags & HFCF_RED) {
  400                         cl->cl_red = red_alloc(0, 0,
  401                             qlimit(cl->cl_q) * 10/100,
  402                             qlimit(cl->cl_q) * 30/100,
  403                             red_flags, red_pkttime);
  404                         if (cl->cl_red != NULL)
  405                                 qtype(cl->cl_q) = Q_RED;
  406                 }
  407 #ifdef ALTQ_RIO
  408                 else {
  409                         cl->cl_red = (red_t *)rio_alloc(0, NULL,
  410                             red_flags, red_pkttime);
  411                         if (cl->cl_red != NULL)
  412                                 qtype(cl->cl_q) = Q_RIO;
  413                 }
  414 #endif
  415         }
  416 #endif /* ALTQ_RED */
  417 
  418         if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0)) {
  419                 MALLOC(cl->cl_rsc, struct internal_sc *,
  420                     sizeof(struct internal_sc), M_DEVBUF, M_WAITOK);
  421                 if (cl->cl_rsc == NULL)
  422                         goto err_ret;
  423                 sc2isc(rsc, cl->cl_rsc);
  424                 rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
  425                 rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
  426         }
  427         if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0)) {
  428                 MALLOC(cl->cl_fsc, struct internal_sc *,
  429                     sizeof(struct internal_sc), M_DEVBUF, M_WAITOK);
  430                 if (cl->cl_fsc == NULL)
  431                         goto err_ret;
  432                 sc2isc(fsc, cl->cl_fsc);
  433                 rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
  434         }
  435         if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0)) {
  436                 MALLOC(cl->cl_usc, struct internal_sc *,
  437                     sizeof(struct internal_sc), M_DEVBUF, M_WAITOK);
  438                 if (cl->cl_usc == NULL)
  439                         goto err_ret;
  440                 sc2isc(usc, cl->cl_usc);
  441                 rtsc_init(&cl->cl_ulimit, cl->cl_usc, 0, 0);
  442         }
  443 
  444         cl->cl_id = hif->hif_classid++;
  445         cl->cl_handle = qid;
  446         cl->cl_hif = hif;
  447         cl->cl_parent = parent;
  448 
  449         s = splnet();
  450         hif->hif_classes++;
  451 
  452         /*
  453          * find a free slot in the class table.  if the slot matching
  454          * the lower bits of qid is free, use this slot.  otherwise,
  455          * use the first free slot.
  456          */
  457         i = qid % HFSC_MAX_CLASSES;
  458         if (hif->hif_class_tbl[i] == NULL)
  459                 hif->hif_class_tbl[i] = cl;
  460         else {
  461                 for (i = 0; i < HFSC_MAX_CLASSES; i++)
  462                         if (hif->hif_class_tbl[i] == NULL) {
  463                                 hif->hif_class_tbl[i] = cl;
  464                                 break;
  465                         }
  466                 if (i == HFSC_MAX_CLASSES) {
  467                         splx(s);
  468                         goto err_ret;
  469                 }
  470         }
  471 
  472         if (flags & HFCF_DEFAULTCLASS)
  473                 hif->hif_defaultclass = cl;
  474 
  475         if (parent == NULL) {
  476                 /* this is root class */
  477                 hif->hif_rootclass = cl;
  478         } else {
  479                 /* add this class to the children list of the parent */
  480                 if ((p = parent->cl_children) == NULL)
  481                         parent->cl_children = cl;
  482                 else {
  483                         while (p->cl_siblings != NULL)
  484                                 p = p->cl_siblings;
  485                         p->cl_siblings = cl;
  486                 }
  487         }
  488         splx(s);
  489 
  490         return (cl);
  491 
  492  err_ret:
  493         if (cl->cl_actc != NULL)
  494                 actlist_destroy(cl->cl_actc);
  495         if (cl->cl_red != NULL) {
  496 #ifdef ALTQ_RIO
  497                 if (q_is_rio(cl->cl_q))
  498                         rio_destroy((rio_t *)cl->cl_red);
  499 #endif
  500 #ifdef ALTQ_RED
  501                 if (q_is_red(cl->cl_q))
  502                         red_destroy(cl->cl_red);
  503 #endif
  504         }
  505         if (cl->cl_fsc != NULL)
  506                 FREE(cl->cl_fsc, M_DEVBUF);
  507         if (cl->cl_rsc != NULL)
  508                 FREE(cl->cl_rsc, M_DEVBUF);
  509         if (cl->cl_usc != NULL)
  510                 FREE(cl->cl_usc, M_DEVBUF);
  511         if (cl->cl_q != NULL)
  512                 FREE(cl->cl_q, M_DEVBUF);
  513         FREE(cl, M_DEVBUF);
  514         return (NULL);
  515 }
  516 
/*
 * Tear down a leaf class: drain its queue, unlink it from its parent's
 * sibling list and the handle table, and release all attached resources.
 * Returns 0 on success (NULL input is a no-op), or EBUSY if the class
 * still has children.
 */
static int
hfsc_class_destroy(struct hfsc_class *cl)
{
	int i, s;

	if (cl == NULL)
		return (0);

	/* only leaf classes may be destroyed; children must go first */
	if (is_a_parent_class(cl))
		return (EBUSY);

	s = splnet();

	/* drop any packets still queued on this class */
	if (!qempty(cl->cl_q))
		hfsc_purgeq(cl);

	if (cl->cl_parent == NULL) {
		/* this is root class */
	} else {
		/* unlink cl from the parent's singly-linked children list */
		struct hfsc_class *p = cl->cl_parent->cl_children;

		if (p == cl)
			cl->cl_parent->cl_children = cl->cl_siblings;
		else do {
			if (p->cl_siblings == cl) {
				p->cl_siblings = cl->cl_siblings;
				break;
			}
		} while ((p = p->cl_siblings) != NULL);
		/* cl must have been on the list */
		ASSERT(p != NULL);
	}

	/* remove the class from the handle lookup table */
	for (i = 0; i < HFSC_MAX_CLASSES; i++)
		if (cl->cl_hif->hif_class_tbl[i] == cl) {
			cl->cl_hif->hif_class_tbl[i] = NULL;
			break;
		}

	cl->cl_hif->hif_classes--;
	splx(s);

	actlist_destroy(cl->cl_actc);

	/* destroy the RED/RIO state, if any, by its actual queue type */
	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}

	/* clear cached pointers in the interface state */
	if (cl == cl->cl_hif->hif_rootclass)
		cl->cl_hif->hif_rootclass = NULL;
	if (cl == cl->cl_hif->hif_defaultclass)
		cl->cl_hif->hif_defaultclass = NULL;

	if (cl->cl_usc != NULL)
		FREE(cl->cl_usc, M_DEVBUF);
	if (cl->cl_fsc != NULL)
		FREE(cl->cl_fsc, M_DEVBUF);
	if (cl->cl_rsc != NULL)
		FREE(cl->cl_rsc, M_DEVBUF);
	FREE(cl->cl_q, M_DEVBUF);
	FREE(cl, M_DEVBUF);

	return (0);
}
  587 
  588 /*
  589  * hfsc_nextclass returns the next class in the tree.
  590  *   usage:
  591  *      for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
  592  *              do_something;
  593  */
  594 static struct hfsc_class *
  595 hfsc_nextclass(struct hfsc_class *cl)
  596 {
  597         if (cl->cl_children != NULL)
  598                 cl = cl->cl_children;
  599         else if (cl->cl_siblings != NULL)
  600                 cl = cl->cl_siblings;
  601         else {
  602                 while ((cl = cl->cl_parent) != NULL)
  603                         if (cl->cl_siblings) {
  604                                 cl = cl->cl_siblings;
  605                                 break;
  606                         }
  607         }
  608 
  609         return (cl);
  610 }
  611 
  612 /*
  613  * hfsc_enqueue is an enqueue function to be registered to
  614  * (*altq_enqueue) in struct ifaltq.
  615  */
  616 static int
  617 hfsc_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
  618 {
  619         struct hfsc_if  *hif = (struct hfsc_if *)ifq->altq_disc;
  620         struct hfsc_class *cl;
  621         int len;
  622 
  623         /* grab class set by classifier */
  624         if ((m->m_flags & M_PKTHDR) == 0) {
  625                 /* should not happen */
  626                 printf("altq: packet for %s does not have pkthdr\n",
  627                     ifq->altq_ifp->if_xname);
  628                 m_freem(m);
  629                 return (ENOBUFS);
  630         }
  631         if ((cl = clh_to_clp(hif, m->m_pkthdr.pf.qid)) == NULL ||
  632                 is_a_parent_class(cl)) {
  633                 cl = hif->hif_defaultclass;
  634                 if (cl == NULL) {
  635                         m_freem(m);
  636                         return (ENOBUFS);
  637                 }
  638                 cl->cl_pktattr = NULL;
  639         }
  640 
  641         len = m_pktlen(m);
  642         if (hfsc_addq(cl, m) != 0) {
  643                 /* drop occurred.  mbuf was freed in hfsc_addq. */
  644                 PKTCNTR_ADD(&cl->cl_stats.drop_cnt, len);
  645                 return (ENOBUFS);
  646         }
  647         IFQ_INC_LEN(ifq);
  648         cl->cl_hif->hif_packets++;
  649 
  650         /* successfully queued. */
  651         if (qlen(cl->cl_q) == 1)
  652                 set_active(cl, m_pktlen(m));
  653 
  654         return (0);
  655 }
  656 
  657 /*
  658  * hfsc_dequeue is a dequeue function to be registered to
  659  * (*altq_dequeue) in struct ifaltq.
  660  *
  661  * note: ALTDQ_POLL returns the next packet without removing the packet
  662  *      from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
  663  *      ALTDQ_REMOVE must return the same packet if called immediately
  664  *      after ALTDQ_POLL.
  665  */
  666 static struct mbuf *
  667 hfsc_dequeue(struct ifaltq *ifq, int op)
  668 {
  669         struct hfsc_if  *hif = (struct hfsc_if *)ifq->altq_disc;
  670         struct hfsc_class *cl;
  671         struct mbuf *m;
  672         int len, next_len;
  673         int realtime = 0;
  674         u_int64_t cur_time;
  675 
  676         if (hif->hif_packets == 0)
  677                 /* no packet in the tree */
  678                 return (NULL);
  679 
  680         cur_time = read_machclk();
  681 
  682         if (op == ALTDQ_REMOVE && hif->hif_pollcache != NULL) {
  683 
  684                 cl = hif->hif_pollcache;
  685                 hif->hif_pollcache = NULL;
  686                 /* check if the class was scheduled by real-time criteria */
  687                 if (cl->cl_rsc != NULL)
  688                         realtime = (cl->cl_e <= cur_time);
  689         } else {
  690                 /*
  691                  * if there are eligible classes, use real-time criteria.
  692                  * find the class with the minimum deadline among
  693                  * the eligible classes.
  694                  */
  695                 if ((cl = ellist_get_mindl(hif->hif_eligible, cur_time))
  696                     != NULL) {
  697                         realtime = 1;
  698                 } else {
  699 #ifdef ALTQ_DEBUG
  700                         int fits = 0;
  701 #endif
  702                         /*
  703                          * use link-sharing criteria
  704                          * get the class with the minimum vt in the hierarchy
  705                          */
  706                         cl = hif->hif_rootclass;
  707                         while (is_a_parent_class(cl)) {
  708 
  709                                 cl = actlist_firstfit(cl, cur_time);
  710                                 if (cl == NULL) {
  711 #ifdef ALTQ_DEBUG
  712                                         if (fits > 0)
  713                                                 printf("%d fit but none found\n",fits);
  714 #endif
  715                                         return (NULL);
  716                                 }
  717                                 /*
  718                                  * update parent's cl_cvtmin.
  719                                  * don't update if the new vt is smaller.
  720                                  */
  721                                 if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
  722                                         cl->cl_parent->cl_cvtmin = cl->cl_vt;
  723 #ifdef ALTQ_DEBUG
  724                                 fits++;
  725 #endif
  726                         }
  727                 }
  728 
  729                 if (op == ALTDQ_POLL) {
  730                         hif->hif_pollcache = cl;
  731                         m = hfsc_pollq(cl);
  732                         return (m);
  733                 }
  734         }
  735 
  736         m = hfsc_getq(cl);
  737         if (m == NULL)
  738                 panic("hfsc_dequeue:");
  739         len = m_pktlen(m);
  740         cl->cl_hif->hif_packets--;
  741         IFQ_DEC_LEN(ifq);
  742         PKTCNTR_ADD(&cl->cl_stats.xmit_cnt, len);
  743 
  744         update_vf(cl, len, cur_time);
  745         if (realtime)
  746                 cl->cl_cumul += len;
  747 
  748         if (!qempty(cl->cl_q)) {
  749                 if (cl->cl_rsc != NULL) {
  750                         /* update ed */
  751                         next_len = m_pktlen(qhead(cl->cl_q));
  752 
  753                         if (realtime)
  754                                 update_ed(cl, next_len);
  755                         else
  756                                 update_d(cl, next_len);
  757                 }
  758         } else {
  759                 /* the class becomes passive */
  760                 set_passive(cl);
  761         }
  762 
  763         return (m);
  764 }
  765 
  766 static int
  767 hfsc_addq(struct hfsc_class *cl, struct mbuf *m)
  768 {
  769 
  770 #ifdef ALTQ_RIO
  771         if (q_is_rio(cl->cl_q))
  772                 return rio_addq((rio_t *)cl->cl_red, cl->cl_q,
  773                                 m, cl->cl_pktattr);
  774 #endif
  775 #ifdef ALTQ_RED
  776         if (q_is_red(cl->cl_q))
  777                 return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
  778 #endif
  779         if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
  780                 m_freem(m);
  781                 return (-1);
  782         }
  783 
  784         if (cl->cl_flags & HFCF_CLEARDSCP)
  785                 write_dsfield(m, cl->cl_pktattr, 0);
  786 
  787         _addq(cl->cl_q, m);
  788 
  789         return (0);
  790 }
  791 
  792 static struct mbuf *
  793 hfsc_getq(struct hfsc_class *cl)
  794 {
  795 #ifdef ALTQ_RIO
  796         if (q_is_rio(cl->cl_q))
  797                 return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
  798 #endif
  799 #ifdef ALTQ_RED
  800         if (q_is_red(cl->cl_q))
  801                 return red_getq(cl->cl_red, cl->cl_q);
  802 #endif
  803         return _getq(cl->cl_q);
  804 }
  805 
  806 static struct mbuf *
  807 hfsc_pollq(struct hfsc_class *cl)
  808 {
  809         return qhead(cl->cl_q);
  810 }
  811 
/*
 * discard every packet queued on class cl, charging each to the drop
 * counter and keeping the per-interface packet/length accounting in
 * sync, then detach the class from the scheduler's active lists.
 */
static void
hfsc_purgeq(struct hfsc_class *cl)
{
	struct mbuf *m;

	if (qempty(cl->cl_q))
		return;

	while ((m = _getq(cl->cl_q)) != NULL) {
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, m_pktlen(m));
		m_freem(m);
		cl->cl_hif->hif_packets--;
		IFQ_DEC_LEN(cl->cl_hif->hif_ifq);
	}
	ASSERT(qlen(cl->cl_q) == 0);

	update_vf(cl, 0, 0);	/* remove cl from the actlist */
	set_passive(cl);
}
  831 
/*
 * a class has become backlogged: initialize its real-time state (ed)
 * if it has a real-time service curve, its link-sharing state (vf) if
 * it has a fair-share curve, and start a new backlog period in the
 * statistics.  len is the length of the packet that just arrived.
 */
static void
set_active(struct hfsc_class *cl, int len)
{
	if (cl->cl_rsc != NULL)
		init_ed(cl, len);
	if (cl->cl_fsc != NULL)
		init_vf(cl, len);

	cl->cl_stats.period++;
}
  842 
/*
 * a class has gone idle: take it off the eligible list.  note this
 * only undoes the real-time (ed) side of set_active().
 */
static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_rsc != NULL)
		ellist_remove(cl);

	/*
	 * actlist is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from actlist
	 */
}
  854 
/*
 * a class becomes backlogged under the real-time criterion: refresh
 * the deadline and eligible runtime curves, compute the eligible time
 * e and the deadline d for the head packet (next_len bytes), and link
 * the class into the eligible list.
 */
static void
init_ed(struct hfsc_class *cl, int next_len)
{
	u_int64_t cur_time;

	cur_time = read_machclk();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
		/* convex (sm1 <= sm2): drop the first segment */
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_insert(cl);
}
  882 
/*
 * after a real-time dequeue, recompute the eligible time e and the
 * deadline d for the new head packet (next_len bytes) and re-sort the
 * class within the eligible list.
 */
static void
update_ed(struct hfsc_class *cl, int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_update(cl);
}
  891 
/*
 * non-realtime dequeue path: only the deadline d needs recomputing
 * (cl_cumul was not advanced, so e is unchanged).
 */
static void
update_d(struct hfsc_class *cl, int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}
  897 
/*
 * a class becomes backlogged under the link-sharing criterion: walk up
 * the hierarchy and, for each ancestor that itself just became active,
 * initialize its virtual time and virtual curve, insert it into its
 * parent's active-children list, and refresh its upper-limit (myf) and
 * fit (f) times.  len is unused beyond the signature here.
 */
static void
init_vf(struct hfsc_class *cl, int len)
{
	struct hfsc_class *max_cl, *p;
	u_int64_t vt, f, cur_time;
	int go_active;

	cur_time = 0;	/* read lazily, only if an ulimit curve needs it */
	go_active = 1;
	for ( ; cl->cl_parent != NULL; cl = cl->cl_parent) {

		/*
		 * once an ancestor was already active (nactive > 0 before
		 * the increment), all higher ancestors are active too.
		 */
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			max_cl = actlist_last(cl->cl_parent->cl_actc);
			if (max_cl != NULL) {
				/*
				 * set vt to the average of the min and max
				 * classes.  if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to vtoff of children
				 * to make a new vt (vtoff + vt) larger than
				 * the vt in the last period for all children.
				 */
				vt = cl->cl_parent->cl_cvtmax;
				for (p = cl->cl_parent->cl_children; p != NULL;
				     p = p->cl_siblings)
					p->cl_vtoff += vt;
				cl->cl_vt = 0;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
			}
			cl->cl_initvt = cl->cl_vt;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, cl->cl_fsc, vt, cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				/* the offset was absorbed into the curve */
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			actlist_insert(cl);

			if (cl->cl_usc != NULL) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = read_machclk();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, cl->cl_usc, cur_time,
				    cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
				    cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		/* fit time f is max(myf, cfmin of the children) */
		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}
  988 
/*
 * charge len bytes to cl and each of its ancestors under the
 * link-sharing criterion and recompute their virtual and fit times,
 * or take them off their parents' active lists when this was the last
 * active child.  update_vf(cl, 0, 0) is used to force a class passive
 * (see hfsc_purgeq()).
 */
static void
update_vf(struct hfsc_class *cl, int len, u_int64_t cur_time)
{
	u_int64_t f, myf_bound, delta;
	int go_passive;

	/* an empty queue means this leaf is done with its backlog period */
	go_passive = qempty(cl->cl_q);

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {

		cl->cl_total += len;

		if (cl->cl_fsc == NULL || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt list */
			actlist_remove(cl);

			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
		    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt list */
		actlist_update(cl);

		if (cl->cl_usc != NULL) {
			cl->cl_myf = cl->cl_myfadj
			    + rtsc_y2x(&cl->cl_ulimit, cl->cl_total);

			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			/*
			 * NOTE(review): cur_time may be 0 when invoked via
			 * hfsc_purgeq(); the unsigned subtractions below
			 * would then wrap.  confirm this branch cannot be
			 * reached on that path.
			 */
			myf_bound = cur_time - machclk_per_tick;
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
		}

		/* cl_f is max(cl_myf, cl_cfmin) */
		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}
 1073 
 1074 static void
 1075 update_cfmin(struct hfsc_class *cl)
 1076 {
 1077         struct hfsc_class *p;
 1078         u_int64_t cfmin;
 1079 
 1080         if (TAILQ_EMPTY(cl->cl_actc)) {
 1081                 cl->cl_cfmin = 0;
 1082                 return;
 1083         }
 1084         cfmin = HT_INFINITY;
 1085         TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
 1086                 if (p->cl_f == 0) {
 1087                         cl->cl_cfmin = 0;
 1088                         return;
 1089                 }
 1090                 if (p->cl_f < cfmin)
 1091                         cfmin = p->cl_f;
 1092         }
 1093         cl->cl_cfmin = cfmin;
 1094 }
 1095 
 1096 /*
 1097  * TAILQ based ellist and actlist implementation
 1098  * (ion wanted to make a calendar queue based implementation)
 1099  */
 1100 /*
 1101  * eligible list holds backlogged classes being sorted by their eligible times.
 1102  * there is one eligible list per interface.
 1103  */
 1104 
/*
 * allocate and initialize an empty eligible-list head.
 * M_WAITOK: the allocation may sleep until memory is available.
 */
static ellist_t *
ellist_alloc(void)
{
	ellist_t *head;

	MALLOC(head, ellist_t *, sizeof(ellist_t), M_DEVBUF, M_WAITOK);
	TAILQ_INIT(head);
	return (head);
}
 1114 
/* free an eligible-list head allocated by ellist_alloc() */
static void
ellist_destroy(ellist_t *head)
{
	FREE(head, M_DEVBUF);
}
 1120 
 1121 static void
 1122 ellist_insert(struct hfsc_class *cl)
 1123 {
 1124         struct hfsc_if  *hif = cl->cl_hif;
 1125         struct hfsc_class *p;
 1126 
 1127         /* check the last entry first */
 1128         if ((p = TAILQ_LAST(hif->hif_eligible, _eligible)) == NULL ||
 1129             p->cl_e <= cl->cl_e) {
 1130                 TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
 1131                 return;
 1132         }
 1133 
 1134         TAILQ_FOREACH(p, hif->hif_eligible, cl_ellist) {
 1135                 if (cl->cl_e < p->cl_e) {
 1136                         TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
 1137                         return;
 1138                 }
 1139         }
 1140         ASSERT(0); /* should not reach here */
 1141 }
 1142 
/* unlink cl from the interface's eligible list */
static void
ellist_remove(struct hfsc_class *cl)
{
	struct hfsc_if	*hif = cl->cl_hif;

	TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
}
 1150 
/*
 * re-sort cl within the eligible list after its eligible time grew.
 * since e never decreases during a backlogged period, the class can
 * only move toward the tail, so scanning starts at its successor.
 */
static void
ellist_update(struct hfsc_class *cl)
{
	struct hfsc_if	*hif = cl->cl_hif;
	struct hfsc_class *p, *last;

	/*
	 * the eligible time of a class increases monotonically.
	 * if the next entry has a larger eligible time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_ellist);
	if (p == NULL || cl->cl_e <= p->cl_e)
		return;

	/* check the last entry */
	last = TAILQ_LAST(hif->hif_eligible, _eligible);
	ASSERT(last != NULL);
	if (last->cl_e <= cl->cl_e) {
		TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
		TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}
 1187 
 1188 /* find the class with the minimum deadline among the eligible classes */
 1189 struct hfsc_class *
 1190 ellist_get_mindl(ellist_t *head, u_int64_t cur_time)
 1191 {
 1192         struct hfsc_class *p, *cl = NULL;
 1193 
 1194         TAILQ_FOREACH(p, head, cl_ellist) {
 1195                 if (p->cl_e > cur_time)
 1196                         break;
 1197                 if (cl == NULL || p->cl_d < cl->cl_d)
 1198                         cl = p;
 1199         }
 1200         return (cl);
 1201 }
 1202 
 1203 /*
 1204  * active children list holds backlogged child classes being sorted
 1205  * by their virtual time.
 1206  * each intermediate class has one active children list.
 1207  */
/*
 * allocate and initialize an empty active-children list head.
 * M_WAITOK: the allocation may sleep until memory is available.
 */
static actlist_t *
actlist_alloc(void)
{
	actlist_t *head;

	MALLOC(head, actlist_t *, sizeof(actlist_t), M_DEVBUF, M_WAITOK);
	TAILQ_INIT(head);
	return (head);
}
 1217 
/* free an active-children list head allocated by actlist_alloc() */
static void
actlist_destroy(actlist_t *head)
{
	FREE(head, M_DEVBUF);
}
 1223 static void
 1224 actlist_insert(struct hfsc_class *cl)
 1225 {
 1226         struct hfsc_class *p;
 1227 
 1228         /* check the last entry first */
 1229         if ((p = TAILQ_LAST(cl->cl_parent->cl_actc, _active)) == NULL
 1230             || p->cl_vt <= cl->cl_vt) {
 1231                 TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
 1232                 return;
 1233         }
 1234 
 1235         TAILQ_FOREACH(p, cl->cl_parent->cl_actc, cl_actlist) {
 1236                 if (cl->cl_vt < p->cl_vt) {
 1237                         TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
 1238                         return;
 1239                 }
 1240         }
 1241         ASSERT(0); /* should not reach here */
 1242 }
 1243 
/* unlink cl from its parent's active-children list */
static void
actlist_remove(struct hfsc_class *cl)
{
	TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
}
 1249 
/*
 * re-sort cl within its parent's active-children list after its
 * virtual time grew.  vt is monotonic while the class stays
 * backlogged, so it can only move toward the tail.
 */
static void
actlist_update(struct hfsc_class *cl)
{
	struct hfsc_class *p, *last;

	/*
	 * the virtual time of a class increases monotonically during its
	 * backlogged period.
	 * if the next entry has a larger virtual time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_actlist);
	if (p == NULL || cl->cl_vt < p->cl_vt)
		return;

	/* check the last entry */
	last = TAILQ_LAST(cl->cl_parent->cl_actc, _active);
	ASSERT(last != NULL);
	if (last->cl_vt <= cl->cl_vt) {
		TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}
 1286 
 1287 static struct hfsc_class *
 1288 actlist_firstfit(struct hfsc_class *cl, u_int64_t cur_time)
 1289 {
 1290         struct hfsc_class *p;
 1291 
 1292         TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
 1293                 if (p->cl_f <= cur_time)
 1294                         return (p);
 1295         }
 1296         return (NULL);
 1297 }
 1298 
 1299 /*
 1300  * service curve support functions
 1301  *
 1302  *  external service curve parameters
 1303  *      m: bits/sec
 1304  *      d: msec
 1305  *  internal service curve parameters
 1306  *      sm: (bytes/tsc_interval) << SM_SHIFT
 1307  *      ism: (tsc_count/byte) << ISM_SHIFT
 1308  *      dx: tsc_count
 1309  *
 1310  * SM_SHIFT and ISM_SHIFT are scaled in order to keep effective digits.
 1311  * we should be able to handle 100K-1Gbps linkspeed with 200Hz-1GHz CPU
 1312  * speed.  SM_SHIFT and ISM_SHIFT are selected to have at least 3 effective
 1313  * digits in decimal using the following table.
 1314  *
 1315  *  bits/sec    100Kbps     1Mbps     10Mbps     100Mbps    1Gbps
 1316  *  ----------+-------------------------------------------------------
 1317  *  bytes/nsec  12.5e-6    125e-6     1250e-6    12500e-6   125000e-6
 1318  *  sm(500MHz)  25.0e-6    250e-6     2500e-6    25000e-6   250000e-6
 1319  *  sm(200MHz)  62.5e-6    625e-6     6250e-6    62500e-6   625000e-6
 1320  *
 1321  *  nsec/byte   80000      8000       800        80         8
 1322  *  ism(500MHz) 40000      4000       400        40         4
 1323  *  ism(200MHz) 16000      1600       160        16         1.6
 1324  */
#define SM_SHIFT        24      /* fixed-point scale of sm (bytes/tick) */
#define ISM_SHIFT       10      /* fixed-point scale of ism (ticks/byte) */

#define SM_MASK         ((1LL << SM_SHIFT) - 1)         /* low sm bits */
#define ISM_MASK        ((1LL << ISM_SHIFT) - 1)        /* low ism bits */
 1330 
 1331 static __inline u_int64_t
 1332 seg_x2y(u_int64_t x, u_int64_t sm)
 1333 {
 1334         u_int64_t y;
 1335 
 1336         /*
 1337          * compute
 1338          *      y = x * sm >> SM_SHIFT
 1339          * but divide it for the upper and lower bits to avoid overflow
 1340          */
 1341         y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
 1342         return (y);
 1343 }
 1344 
 1345 static __inline u_int64_t
 1346 seg_y2x(u_int64_t y, u_int64_t ism)
 1347 {
 1348         u_int64_t x;
 1349 
 1350         if (y == 0)
 1351                 x = 0;
 1352         else if (ism == HT_INFINITY)
 1353                 x = HT_INFINITY;
 1354         else {
 1355                 x = (y >> ISM_SHIFT) * ism
 1356                     + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
 1357         }
 1358         return (x);
 1359 }
 1360 
 1361 static __inline u_int64_t
 1362 m2sm(u_int m)
 1363 {
 1364         u_int64_t sm;
 1365 
 1366         sm = ((u_int64_t)m << SM_SHIFT) / 8 / machclk_freq;
 1367         return (sm);
 1368 }
 1369 
 1370 static __inline u_int64_t
 1371 m2ism(u_int m)
 1372 {
 1373         u_int64_t ism;
 1374 
 1375         if (m == 0)
 1376                 ism = HT_INFINITY;
 1377         else
 1378                 ism = ((u_int64_t)machclk_freq << ISM_SHIFT) * 8 / m;
 1379         return (ism);
 1380 }
 1381 
 1382 static __inline u_int64_t
 1383 d2dx(u_int d)
 1384 {
 1385         u_int64_t dx;
 1386 
 1387         dx = ((u_int64_t)d * machclk_freq) / 1000;
 1388         return (dx);
 1389 }
 1390 
 1391 static u_int
 1392 sm2m(u_int64_t sm)
 1393 {
 1394         u_int64_t m;
 1395 
 1396         m = (sm * 8 * machclk_freq) >> SM_SHIFT;
 1397         return ((u_int)m);
 1398 }
 1399 
 1400 static u_int
 1401 dx2d(u_int64_t dx)
 1402 {
 1403         u_int64_t d;
 1404 
 1405         d = dx * 1000 / machclk_freq;
 1406         return ((u_int)d);
 1407 }
 1408 
/*
 * convert an external service curve (m1 bits/sec for d msec, then m2
 * bits/sec) into the internal machclk-based representation.
 */
static void
sc2isc(struct service_curve *sc, struct internal_sc *isc)
{
	isc->sm1 = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx = d2dx(sc->d);
	isc->dy = seg_x2y(isc->dx, isc->sm1);	/* bytes served at m1 over d */
	isc->sm2 = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}
 1419 
 1420 /*
 1421  * initialize the runtime service curve with the given internal
 1422  * service curve starting at (x, y).
 1423  */
 1424 static void
 1425 rtsc_init(struct runtime_sc *rtsc, struct internal_sc * isc, u_int64_t x,
 1426     u_int64_t y)
 1427 {
 1428         rtsc->x =       x;
 1429         rtsc->y =       y;
 1430         rtsc->sm1 =     isc->sm1;
 1431         rtsc->ism1 =    isc->ism1;
 1432         rtsc->dx =      isc->dx;
 1433         rtsc->dy =      isc->dy;
 1434         rtsc->sm2 =     isc->sm2;
 1435         rtsc->ism2 =    isc->ism2;
 1436 }
 1437 
 1438 /*
 1439  * calculate the y-projection of the runtime service curve by the
 1440  * given x-projection value
 1441  */
 1442 static u_int64_t
 1443 rtsc_y2x(struct runtime_sc *rtsc, u_int64_t y)
 1444 {
 1445         u_int64_t       x;
 1446 
 1447         if (y < rtsc->y)
 1448                 x = rtsc->x;
 1449         else if (y <= rtsc->y + rtsc->dy) {
 1450                 /* x belongs to the 1st segment */
 1451                 if (rtsc->dy == 0)
 1452                         x = rtsc->x + rtsc->dx;
 1453                 else
 1454                         x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
 1455         } else {
 1456                 /* x belongs to the 2nd segment */
 1457                 x = rtsc->x + rtsc->dx
 1458                     + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
 1459         }
 1460         return (x);
 1461 }
 1462 
 1463 static u_int64_t
 1464 rtsc_x2y(struct runtime_sc *rtsc, u_int64_t x)
 1465 {
 1466         u_int64_t       y;
 1467 
 1468         if (x <= rtsc->x)
 1469                 y = rtsc->y;
 1470         else if (x <= rtsc->x + rtsc->dx)
 1471                 /* y belongs to the 1st segment */
 1472                 y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
 1473         else
 1474                 /* y belongs to the 2nd segment */
 1475                 y = rtsc->y + rtsc->dy
 1476                     + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
 1477         return (y);
 1478 }
 1479 
 1480 /*
 1481  * update the runtime service curve by taking the minimum of the current
 1482  * runtime service curve and the service curve starting at (x, y).
 1483  */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u_int64_t x,
    u_int64_t y)
{
	u_int64_t	y1, y2, dx, dy;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		/* replace: a convex curve starting no higher than rtsc
		 * stays below it everywhere after x */
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 */
	dx = ((y1 - y) << SM_SHIFT) / (isc->sm1 - isc->sm2);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	/* the merged curve: isc's first segment until the intersection
	 * at (x + dx, y + dy), then its second segment */
	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
	return;
}
 1544 
 1545 static void
 1546 get_class_stats(struct hfsc_classstats *sp, struct hfsc_class *cl)
 1547 {
 1548         sp->class_id = cl->cl_id;
 1549         sp->class_handle = cl->cl_handle;
 1550 
 1551         if (cl->cl_rsc != NULL) {
 1552                 sp->rsc.m1 = sm2m(cl->cl_rsc->sm1);
 1553                 sp->rsc.d = dx2d(cl->cl_rsc->dx);
 1554                 sp->rsc.m2 = sm2m(cl->cl_rsc->sm2);
 1555         } else {
 1556                 sp->rsc.m1 = 0;
 1557                 sp->rsc.d = 0;
 1558                 sp->rsc.m2 = 0;
 1559         }
 1560         if (cl->cl_fsc != NULL) {
 1561                 sp->fsc.m1 = sm2m(cl->cl_fsc->sm1);
 1562                 sp->fsc.d = dx2d(cl->cl_fsc->dx);
 1563                 sp->fsc.m2 = sm2m(cl->cl_fsc->sm2);
 1564         } else {
 1565                 sp->fsc.m1 = 0;
 1566                 sp->fsc.d = 0;
 1567                 sp->fsc.m2 = 0;
 1568         }
 1569         if (cl->cl_usc != NULL) {
 1570                 sp->usc.m1 = sm2m(cl->cl_usc->sm1);
 1571                 sp->usc.d = dx2d(cl->cl_usc->dx);
 1572                 sp->usc.m2 = sm2m(cl->cl_usc->sm2);
 1573         } else {
 1574                 sp->usc.m1 = 0;
 1575                 sp->usc.d = 0;
 1576                 sp->usc.m2 = 0;
 1577         }
 1578 
 1579         sp->total = cl->cl_total;
 1580         sp->cumul = cl->cl_cumul;
 1581 
 1582         sp->d = cl->cl_d;
 1583         sp->e = cl->cl_e;
 1584         sp->vt = cl->cl_vt;
 1585         sp->f = cl->cl_f;
 1586 
 1587         sp->initvt = cl->cl_initvt;
 1588         sp->vtperiod = cl->cl_vtperiod;
 1589         sp->parentperiod = cl->cl_parentperiod;
 1590         sp->nactive = cl->cl_nactive;
 1591         sp->vtoff = cl->cl_vtoff;
 1592         sp->cvtmax = cl->cl_cvtmax;
 1593         sp->myf = cl->cl_myf;
 1594         sp->cfmin = cl->cl_cfmin;
 1595         sp->cvtmin = cl->cl_cvtmin;
 1596         sp->myfadj = cl->cl_myfadj;
 1597         sp->vtadj = cl->cl_vtadj;
 1598 
 1599         sp->cur_time = read_machclk();
 1600         sp->machclk_freq = machclk_freq;
 1601 
 1602         sp->qlength = qlen(cl->cl_q);
 1603         sp->qlimit = qlimit(cl->cl_q);
 1604         sp->xmit_cnt = cl->cl_stats.xmit_cnt;
 1605         sp->drop_cnt = cl->cl_stats.drop_cnt;
 1606         sp->period = cl->cl_stats.period;
 1607 
 1608         sp->qtype = qtype(cl->cl_q);
 1609 #ifdef ALTQ_RED
 1610         if (q_is_red(cl->cl_q))
 1611                 red_getstats(cl->cl_red, &sp->red[0]);
 1612 #endif
 1613 #ifdef ALTQ_RIO
 1614         if (q_is_rio(cl->cl_q))
 1615                 rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
 1616 #endif
 1617 }
 1618 
 1619 /* convert a class handle to the corresponding class pointer */
 1620 static struct hfsc_class *
 1621 clh_to_clp(struct hfsc_if *hif, u_int32_t chandle)
 1622 {
 1623         int i;
 1624         struct hfsc_class *cl;
 1625 
 1626         if (chandle == 0)
 1627                 return (NULL);
 1628         /*
 1629          * first, try the slot corresponding to the lower bits of the handle.
 1630          * if it does not match, do the linear table search.
 1631          */
 1632         i = chandle % HFSC_MAX_CLASSES;
 1633         if ((cl = hif->hif_class_tbl[i]) != NULL && cl->cl_handle == chandle)
 1634                 return (cl);
 1635         for (i = 0; i < HFSC_MAX_CLASSES; i++)
 1636                 if ((cl = hif->hif_class_tbl[i]) != NULL &&
 1637                     cl->cl_handle == chandle)
 1638                         return (cl);
 1639         return (NULL);
 1640 }

/* [<][>][^][v][top][bottom][index][help] */