root/altq/altq_subr.c


DEFINITIONS

This source file includes the following definitions.
  1. altq_lookup
  2. altq_attach
  3. altq_detach
  4. altq_enable
  5. altq_disable
  6. altq_assert
  7. tbr_dequeue
  8. tbr_set
  9. tbr_timeout
  10. tbr_get
  11. altq_pfattach
  12. altq_pfdetach
  13. altq_add
  14. altq_remove
  15. altq_add_queue
  16. altq_remove_queue
  17. altq_getqstats
  18. read_dsfield
  19. write_dsfield
  20. init_machclk
  21. rdtsc
  22. read_machclk

/*      $OpenBSD: altq_subr.c,v 1.21 2006/12/20 17:50:40 gwk Exp $      */
/*      $KAME: altq_subr.c,v 1.11 2002/01/11 08:11:49 kjc Exp $ */

/*
 * Copyright (C) 1997-2002
 *      Sony Computer Science Laboratories Inc.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/queue.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet/ip6.h>
#endif
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <net/pfvar.h>
#include <altq/altq.h>

/* machine dependent clock related includes */
#if defined(__i386__)
#include <machine/cpufunc.h>            /* for pentium tsc */
#include <machine/specialreg.h>         /* for CPUID_TSC */
#endif /* __i386__ */

/*
 * internal function prototypes
 */
static void     tbr_timeout(void *);
int (*altq_input)(struct mbuf *, int) = NULL;
static int tbr_timer = 0;       /* token bucket regulator timer */
static struct callout tbr_callout = CALLOUT_INITIALIZER;

/*
 * alternate queueing support routines
 */

/* look up the queue state by the interface name and the queueing type. */
void *
altq_lookup(name, type)
        char *name;
        int type;
{
        struct ifnet *ifp;

        if ((ifp = ifunit(name)) != NULL) {
                if (type != ALTQT_NONE && ifp->if_snd.altq_type == type)
                        return (ifp->if_snd.altq_disc);
        }

        return NULL;
}
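
/*
 * Usage sketch (hypothetical caller, not part of this file): fetch the
 * CBQ state attached to fxp0, if CBQ is the active discipline.
 *
 *      cbq_state_t *cbqp;
 *
 *      if ((cbqp = altq_lookup("fxp0", ALTQT_CBQ)) == NULL)
 *              return (EBADF);
 */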

int
altq_attach(ifq, type, discipline, enqueue, dequeue, request, clfier, classify)
        struct ifaltq *ifq;
        int type;
        void *discipline;
        int (*enqueue)(struct ifaltq *, struct mbuf *, struct altq_pktattr *);
        struct mbuf *(*dequeue)(struct ifaltq *, int);
        int (*request)(struct ifaltq *, int, void *);
        void *clfier;
        void *(*classify)(void *, struct mbuf *, int);
{
        if (!ALTQ_IS_READY(ifq))
                return ENXIO;

#if 0   /* pfaltq can override the existing discipline */
        if (ALTQ_IS_ENABLED(ifq))
                return EBUSY;
        if (ALTQ_IS_ATTACHED(ifq))
                return EEXIST;
#endif
        ifq->altq_type     = type;
        ifq->altq_disc     = discipline;
        ifq->altq_enqueue  = enqueue;
        ifq->altq_dequeue  = dequeue;
        ifq->altq_request  = request;
        ifq->altq_clfier   = clfier;
        ifq->altq_classify = classify;
        ifq->altq_flags &= (ALTQF_CANTCHANGE|ALTQF_ENABLED);

        return 0;
}

int
altq_detach(ifq)
        struct ifaltq *ifq;
{
        if (!ALTQ_IS_READY(ifq))
                return ENXIO;
        if (ALTQ_IS_ENABLED(ifq))
                return EBUSY;
        if (!ALTQ_IS_ATTACHED(ifq))
                return (0);

        ifq->altq_type     = ALTQT_NONE;
        ifq->altq_disc     = NULL;
        ifq->altq_enqueue  = NULL;
        ifq->altq_dequeue  = NULL;
        ifq->altq_request  = NULL;
        ifq->altq_clfier   = NULL;
        ifq->altq_classify = NULL;
        ifq->altq_flags &= ALTQF_CANTCHANGE;
        return 0;
}

int
altq_enable(ifq)
        struct ifaltq *ifq;
{
        int s;

        if (!ALTQ_IS_READY(ifq))
                return ENXIO;
        if (ALTQ_IS_ENABLED(ifq))
                return 0;

        s = splnet();
        IFQ_PURGE(ifq);
        ASSERT(ifq->ifq_len == 0);
        ifq->altq_flags |= ALTQF_ENABLED;
        if (ifq->altq_clfier != NULL)
                ifq->altq_flags |= ALTQF_CLASSIFY;
        splx(s);

        return 0;
}

int
altq_disable(ifq)
        struct ifaltq *ifq;
{
        int s;

        if (!ALTQ_IS_ENABLED(ifq))
                return 0;

        s = splnet();
        IFQ_PURGE(ifq);
        ASSERT(ifq->ifq_len == 0);
        ifq->altq_flags &= ~(ALTQF_ENABLED|ALTQF_CLASSIFY);
        splx(s);
        return 0;
}

void
altq_assert(file, line, failedexpr)
        const char *file, *failedexpr;
        int line;
{
        (void)printf("altq assertion \"%s\" failed: file \"%s\", line %d\n",
                     failedexpr, file, line);
        panic("altq assertion");
        /* NOTREACHED */
}

/*
 * internal representation of token bucket parameters
 *      rate:   byte_per_unittime << 32
 *              (((bits_per_sec) / 8) << 32) / machclk_freq
 *      depth:  byte << 32
 */
#define TBR_SHIFT       32
#define TBR_SCALE(x)    ((int64_t)(x) << TBR_SHIFT)
#define TBR_UNSCALE(x)  ((x) >> TBR_SHIFT)
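
/*
 * Worked example (illustrative numbers, not from this file): for a
 * 10 Mbps profile on a 1 GHz machine clock,
 *
 *      tbr_rate = TBR_SCALE(10000000 / 8) / 1000000000 ~= 5368709
 *
 * i.e., roughly 1.25 bytes of credit per 1000 clock ticks, with
 * TBR_UNSCALE() recovering whole bytes from the scaled value.
 */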

struct mbuf *
tbr_dequeue(ifq, op)
        struct ifaltq *ifq;
        int op;
{
        struct tb_regulator *tbr;
        struct mbuf *m;
        int64_t interval;
        u_int64_t now;

        tbr = ifq->altq_tbr;
        if (op == ALTDQ_REMOVE && tbr->tbr_lastop == ALTDQ_POLL) {
                /* if this is a remove after poll, bypass tbr check */
        } else {
                /* update token only when it is negative */
                if (tbr->tbr_token <= 0) {
                        now = read_machclk();
                        interval = now - tbr->tbr_last;
                        if (interval >= tbr->tbr_filluptime)
                                tbr->tbr_token = tbr->tbr_depth;
                        else {
                                tbr->tbr_token += interval * tbr->tbr_rate;
                                if (tbr->tbr_token > tbr->tbr_depth)
                                        tbr->tbr_token = tbr->tbr_depth;
                        }
                        tbr->tbr_last = now;
                }
                /* if token is still negative, don't allow dequeue */
                if (tbr->tbr_token <= 0)
                        return (NULL);
        }

        if (ALTQ_IS_ENABLED(ifq))
                m = (*ifq->altq_dequeue)(ifq, op);
        else {
                if (op == ALTDQ_POLL)
                        IF_POLL(ifq, m);
                else
                        IF_DEQUEUE(ifq, m);
        }

        if (m != NULL && op == ALTDQ_REMOVE)
                tbr->tbr_token -= TBR_SCALE(m_pktlen(m));
        tbr->tbr_lastop = op;
        return (m);
}
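
/*
 * Caller sketch (hypothetical driver start routine, not from this file):
 * drivers poll first and remove only once the hardware has room, which
 * is the remove-after-poll case that bypasses the token check above.
 * drv_txring_full() stands in for the driver's own resource check.
 *
 *      struct mbuf *m;
 *
 *      IFQ_POLL(&ifp->if_snd, m);
 *      if (m == NULL || drv_txring_full(sc))
 *              return;
 *      IFQ_DEQUEUE(&ifp->if_snd, m);
 */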

/*
 * set a token bucket regulator.
 * if the specified rate is zero, the token bucket regulator is deleted.
 */
int
tbr_set(ifq, profile)
        struct ifaltq *ifq;
        struct tb_profile *profile;
{
        struct tb_regulator *tbr, *otbr;

        if (machclk_freq == 0)
                init_machclk();
        if (machclk_freq == 0) {
                printf("tbr_set: no cpu clock available!\n");
                return (ENXIO);
        }

        if (profile->rate == 0) {
                /* delete this tbr */
                if ((tbr = ifq->altq_tbr) == NULL)
                        return (ENOENT);
                ifq->altq_tbr = NULL;
                FREE(tbr, M_DEVBUF);
                return (0);
        }

        MALLOC(tbr, struct tb_regulator *, sizeof(struct tb_regulator),
               M_DEVBUF, M_WAITOK);
        if (tbr == NULL)
                return (ENOMEM);
        bzero(tbr, sizeof(struct tb_regulator));

        tbr->tbr_rate = TBR_SCALE(profile->rate / 8) / machclk_freq;
        tbr->tbr_depth = TBR_SCALE(profile->depth);
        if (tbr->tbr_rate > 0)
                tbr->tbr_filluptime = tbr->tbr_depth / tbr->tbr_rate;
        else
                tbr->tbr_filluptime = 0xffffffffffffffffLL;
        tbr->tbr_token = tbr->tbr_depth;
        tbr->tbr_last = read_machclk();
        tbr->tbr_lastop = ALTDQ_REMOVE;

        otbr = ifq->altq_tbr;
        ifq->altq_tbr = tbr;    /* set the new tbr */

        if (otbr != NULL)
                FREE(otbr, M_DEVBUF);
        else {
                if (tbr_timer == 0) {
                        CALLOUT_RESET(&tbr_callout, 1, tbr_timeout, (void *)0);
                        tbr_timer = 1;
                }
        }
        return (0);
}
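
/*
 * Usage sketch (hypothetical caller, not from this file): install a
 * 10 Mbps regulator with a 1500-byte bucket, then delete it again by
 * passing a zero rate.
 *
 *      struct tb_profile prof;
 *      int error;
 *
 *      prof.rate = 10000000;   (bits per second)
 *      prof.depth = 1500;      (bucket depth in bytes)
 *      error = tbr_set(&ifp->if_snd, &prof);
 *
 *      prof.rate = 0;          (a zero rate deletes the regulator)
 *      error = tbr_set(&ifp->if_snd, &prof);
 */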

/*
 * tbr_timeout goes through the interface list, and kicks the drivers
 * if necessary.
 */
static void
tbr_timeout(arg)
        void *arg;
{
        struct ifnet *ifp;
        int active, s;

        active = 0;
        s = splnet();
        for (ifp = TAILQ_FIRST(&ifnet); ifp; ifp = TAILQ_NEXT(ifp, if_list)) {
                if (!TBR_IS_ENABLED(&ifp->if_snd))
                        continue;
                active++;
                if (!IFQ_IS_EMPTY(&ifp->if_snd) && ifp->if_start != NULL)
                        (*ifp->if_start)(ifp);
        }
        splx(s);
        if (active > 0)
                CALLOUT_RESET(&tbr_callout, 1, tbr_timeout, (void *)0);
        else
                tbr_timer = 0;  /* don't need tbr_timer anymore */
#if defined(__alpha__) && !defined(ALTQ_NOPCC)
        {
                /*
                 * XXX read out the machine dependent clock once a second
                 * to detect counter wrap-around.
                 */
                static u_int cnt;

                if (++cnt >= hz) {
                        (void)read_machclk();
                        cnt = 0;
                }
        }
#endif /* __alpha__ && !ALTQ_NOPCC */
}

/*
 * get token bucket regulator profile
 */
int
tbr_get(ifq, profile)
        struct ifaltq *ifq;
        struct tb_profile *profile;
{
        struct tb_regulator *tbr;

        if ((tbr = ifq->altq_tbr) == NULL) {
                profile->rate = 0;
                profile->depth = 0;
        } else {
                profile->rate =
                    (u_int)TBR_UNSCALE(tbr->tbr_rate * 8 * machclk_freq);
                profile->depth = (u_int)TBR_UNSCALE(tbr->tbr_depth);
        }
        return (0);
}

/*
 * attach a discipline to the interface.  if one already exists, it is
 * overridden.
 */
int
altq_pfattach(struct pf_altq *a)
{
        int error = 0;

        switch (a->scheduler) {
        case ALTQT_NONE:
                break;
#ifdef ALTQ_CBQ
        case ALTQT_CBQ:
                error = cbq_pfattach(a);
                break;
#endif
#ifdef ALTQ_PRIQ
        case ALTQT_PRIQ:
                error = priq_pfattach(a);
                break;
#endif
#ifdef ALTQ_HFSC
        case ALTQT_HFSC:
                error = hfsc_pfattach(a);
                break;
#endif
        default:
                error = ENXIO;
        }

        return (error);
}

/*
 * detach a discipline from the interface.
 * it is possible that the discipline was already overridden by another
 * discipline.
 */
int
altq_pfdetach(struct pf_altq *a)
{
        struct ifnet *ifp;
        int s, error = 0;

        if ((ifp = ifunit(a->ifname)) == NULL)
                return (EINVAL);

        /* if this discipline is no longer referenced, just return */
        if (a->altq_disc == NULL || a->altq_disc != ifp->if_snd.altq_disc)
                return (0);

        s = splnet();
        if (ALTQ_IS_ENABLED(&ifp->if_snd))
                error = altq_disable(&ifp->if_snd);
        if (error == 0)
                error = altq_detach(&ifp->if_snd);
        splx(s);

        return (error);
}

/*
 * add a discipline or a queue
 */
int
altq_add(struct pf_altq *a)
{
        int error = 0;

        if (a->qname[0] != 0)
                return (altq_add_queue(a));

        if (machclk_freq == 0)
                init_machclk();
        if (machclk_freq == 0)
                panic("altq_add: no cpu clock");

        switch (a->scheduler) {
#ifdef ALTQ_CBQ
        case ALTQT_CBQ:
                error = cbq_add_altq(a);
                break;
#endif
#ifdef ALTQ_PRIQ
        case ALTQT_PRIQ:
                error = priq_add_altq(a);
                break;
#endif
#ifdef ALTQ_HFSC
        case ALTQT_HFSC:
                error = hfsc_add_altq(a);
                break;
#endif
        default:
                error = ENXIO;
        }

        return (error);
}

/*
 * remove a discipline or a queue
 */
int
altq_remove(struct pf_altq *a)
{
        int error = 0;

        if (a->qname[0] != 0)
                return (altq_remove_queue(a));

        switch (a->scheduler) {
#ifdef ALTQ_CBQ
        case ALTQT_CBQ:
                error = cbq_remove_altq(a);
                break;
#endif
#ifdef ALTQ_PRIQ
        case ALTQT_PRIQ:
                error = priq_remove_altq(a);
                break;
#endif
#ifdef ALTQ_HFSC
        case ALTQT_HFSC:
                error = hfsc_remove_altq(a);
                break;
#endif
        default:
                error = ENXIO;
        }

        return (error);
}

/*
 * add a queue to the discipline
 */
int
altq_add_queue(struct pf_altq *a)
{
        int error = 0;

        switch (a->scheduler) {
#ifdef ALTQ_CBQ
        case ALTQT_CBQ:
                error = cbq_add_queue(a);
                break;
#endif
#ifdef ALTQ_PRIQ
        case ALTQT_PRIQ:
                error = priq_add_queue(a);
                break;
#endif
#ifdef ALTQ_HFSC
        case ALTQT_HFSC:
                error = hfsc_add_queue(a);
                break;
#endif
        default:
                error = ENXIO;
        }

        return (error);
}

/*
 * remove a queue from the discipline
 */
int
altq_remove_queue(struct pf_altq *a)
{
        int error = 0;

        switch (a->scheduler) {
#ifdef ALTQ_CBQ
        case ALTQT_CBQ:
                error = cbq_remove_queue(a);
                break;
#endif
#ifdef ALTQ_PRIQ
        case ALTQT_PRIQ:
                error = priq_remove_queue(a);
                break;
#endif
#ifdef ALTQ_HFSC
        case ALTQT_HFSC:
                error = hfsc_remove_queue(a);
                break;
#endif
        default:
                error = ENXIO;
        }

        return (error);
}

/*
 * get queue statistics
 */
int
altq_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
{
        int error = 0;

        switch (a->scheduler) {
#ifdef ALTQ_CBQ
        case ALTQT_CBQ:
                error = cbq_getqstats(a, ubuf, nbytes);
                break;
#endif
#ifdef ALTQ_PRIQ
        case ALTQT_PRIQ:
                error = priq_getqstats(a, ubuf, nbytes);
                break;
#endif
#ifdef ALTQ_HFSC
        case ALTQT_HFSC:
                error = hfsc_getqstats(a, ubuf, nbytes);
                break;
#endif
        default:
                error = ENXIO;
        }

        return (error);
}

/*
 * read and write diffserv field in IPv4 or IPv6 header
 */
u_int8_t
read_dsfield(m, pktattr)
        struct mbuf *m;
        struct altq_pktattr *pktattr;
{
        struct mbuf *m0;
        u_int8_t ds_field = 0;

        if (pktattr == NULL ||
            (pktattr->pattr_af != AF_INET && pktattr->pattr_af != AF_INET6))
                return ((u_int8_t)0);

        /* verify that pattr_hdr is within the mbuf data */
        for (m0 = m; m0 != NULL; m0 = m0->m_next)
                if ((pktattr->pattr_hdr >= m0->m_data) &&
                    (pktattr->pattr_hdr < m0->m_data + m0->m_len))
                        break;
        if (m0 == NULL) {
                /* ick, pattr_hdr is stale */
                pktattr->pattr_af = AF_UNSPEC;
#ifdef ALTQ_DEBUG
                printf("read_dsfield: can't locate header!\n");
#endif
                return ((u_int8_t)0);
        }

        if (pktattr->pattr_af == AF_INET) {
                struct ip *ip = (struct ip *)pktattr->pattr_hdr;

                if (ip->ip_v != 4)
                        return ((u_int8_t)0);   /* version mismatch! */
                ds_field = ip->ip_tos;
        }
#ifdef INET6
        else if (pktattr->pattr_af == AF_INET6) {
                struct ip6_hdr *ip6 = (struct ip6_hdr *)pktattr->pattr_hdr;
                u_int32_t flowlabel;

                flowlabel = ntohl(ip6->ip6_flow);
                if ((flowlabel >> 28) != 6)
                        return ((u_int8_t)0);   /* version mismatch! */
                ds_field = (flowlabel >> 20) & 0xff;
        }
#endif
        return (ds_field);
}
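
/*
 * Note (added for clarity): the value returned above is the whole TOS /
 * traffic-class octet; the 6-bit DSCP is its upper bits, e.g.
 *
 *      dscp = read_dsfield(m, pktattr) >> 2;
 */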

void
write_dsfield(m, pktattr, dsfield)
        struct mbuf *m;
        struct altq_pktattr *pktattr;
        u_int8_t dsfield;
{
        struct mbuf *m0;

        if (pktattr == NULL ||
            (pktattr->pattr_af != AF_INET && pktattr->pattr_af != AF_INET6))
                return;

        /* verify that pattr_hdr is within the mbuf data */
        for (m0 = m; m0 != NULL; m0 = m0->m_next)
                if ((pktattr->pattr_hdr >= m0->m_data) &&
                    (pktattr->pattr_hdr < m0->m_data + m0->m_len))
                        break;
        if (m0 == NULL) {
                /* ick, pattr_hdr is stale */
                pktattr->pattr_af = AF_UNSPEC;
#ifdef ALTQ_DEBUG
                printf("write_dsfield: can't locate header!\n");
#endif
                return;
        }

        if (pktattr->pattr_af == AF_INET) {
                struct ip *ip = (struct ip *)pktattr->pattr_hdr;
                u_int8_t old;
                int32_t sum;

                if (ip->ip_v != 4)
                        return;         /* version mismatch! */
                old = ip->ip_tos;
                dsfield |= old & 3;     /* leave CU bits */
                if (old == dsfield)
                        return;
                ip->ip_tos = dsfield;
                /*
                 * update checksum (from RFC1624)
                 *         HC' = ~(~HC + ~m + m')
                 */
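                /*
                 * (Added note) m here is the 16-bit header word that
                 * contains the TOS byte.  Its high (version/hlen) byte x
                 * is unchanged, and (~x & 0xff) + x == 0xff for any x,
                 * so ~m + m' reduces to 0xff00 + (~old & 0xff) + dsfield,
                 * the exact term added below.
                 */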
                sum = ~ntohs(ip->ip_sum) & 0xffff;
                sum += 0xff00 + (~old & 0xff) + dsfield;
                sum = (sum >> 16) + (sum & 0xffff);
                sum += (sum >> 16);  /* add carry */

                ip->ip_sum = htons(~sum & 0xffff);
        }
#ifdef INET6
        else if (pktattr->pattr_af == AF_INET6) {
                struct ip6_hdr *ip6 = (struct ip6_hdr *)pktattr->pattr_hdr;
                u_int32_t flowlabel;

                flowlabel = ntohl(ip6->ip6_flow);
                if ((flowlabel >> 28) != 6)
                        return;         /* version mismatch! */
                flowlabel = (flowlabel & 0xf03fffff) | (dsfield << 20);
                ip6->ip6_flow = htonl(flowlabel);
        }
#endif
        return;
}


/*
 * high resolution clock support taking advantage of a machine dependent
 * high resolution time counter (e.g., timestamp counter of intel pentium).
 * we assume
 *  - 64-bit-long monotonically-increasing counter
 *  - frequency range is 100M-4GHz (CPU speed)
 */
/* if pcc is not available or disabled, emulate 256MHz using microtime() */
#define MACHCLK_SHIFT   8
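
/*
 * Worked example (added for clarity): in the emulated mode below,
 * machclk_freq = 1000000 << 8 = 256000000, i.e., microtime()
 * microseconds shifted left by MACHCLK_SHIFT, so an emulated clock
 * value converts back to microseconds as val >> MACHCLK_SHIFT.
 */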

int machclk_usepcc;
u_int32_t machclk_freq = 0;
u_int32_t machclk_per_tick = 0;

#ifdef __alpha__
extern u_int64_t cycles_per_usec;       /* alpha cpu clock frequency */
#endif /* __alpha__ */

void
init_machclk(void)
{
        machclk_usepcc = 1;

#if (!defined(__i386__) && !defined(__alpha__)) || defined(ALTQ_NOPCC)
        machclk_usepcc = 0;
#endif
#if defined(__FreeBSD__) && defined(SMP)
        machclk_usepcc = 0;
#endif
#if defined(__NetBSD__) && defined(MULTIPROCESSOR)
        machclk_usepcc = 0;
#endif
#if defined(__OpenBSD__) && defined(__HAVE_TIMECOUNTER)
        /*
         * If we have timecounters, microtime is good enough and we can
         * avoid problems on machines with variable cycle counter
         * frequencies.
         */
        machclk_usepcc = 0;
#endif
#ifdef __i386__
        /* check if TSC is available */
        if (machclk_usepcc == 1 && (cpu_feature & CPUID_TSC) == 0)
                machclk_usepcc = 0;
#endif

        if (machclk_usepcc == 0) {
                /* emulate 256MHz using microtime() */
                machclk_freq = 1000000 << MACHCLK_SHIFT;
                machclk_per_tick = machclk_freq / hz;
#ifdef ALTQ_DEBUG
                printf("altq: emulate %uHz cpu clock\n", machclk_freq);
#endif
                return;
        }

        /*
         * if the clock frequency (of Pentium TSC or Alpha PCC) is
         * accessible, just use it.
         */
#if defined(__i386__) && (defined(I586_CPU) || defined(I686_CPU))
        /* XXX - this will break down with variable cpu frequency. */
        machclk_freq = cpuspeed * 1000000;
#endif
#if defined(__alpha__)
        machclk_freq = (u_int32_t)(cycles_per_usec * 1000000);
#endif /* __alpha__ */

        /*
         * if we don't know the clock frequency, measure it.
         */
        if (machclk_freq == 0) {
                static int      wait;
                struct timeval  tv_start, tv_end;
                u_int64_t       start, end, diff;
                int             timo;

                microtime(&tv_start);
                start = read_machclk();
                timo = hz;      /* 1 sec */
                (void)tsleep(&wait, PWAIT | PCATCH, "init_machclk", timo);
                microtime(&tv_end);
                end = read_machclk();
                diff = (u_int64_t)(tv_end.tv_sec - tv_start.tv_sec) * 1000000
                    + tv_end.tv_usec - tv_start.tv_usec;
                if (diff != 0)
                        machclk_freq = (u_int)((end - start) * 1000000 / diff);
        }

        machclk_per_tick = machclk_freq / hz;

#ifdef ALTQ_DEBUG
        printf("altq: CPU clock: %uHz\n", machclk_freq);
#endif
}

#if defined(__OpenBSD__) && defined(__i386__)
static __inline u_int64_t
rdtsc(void)
{
        u_int64_t rv;
        __asm __volatile(".byte 0x0f, 0x31" : "=A" (rv));
        return (rv);
}
#endif /* __OpenBSD__ && __i386__ */

u_int64_t
read_machclk(void)
{
        u_int64_t val;

        if (machclk_usepcc) {
#if defined(__i386__)
                val = rdtsc();
#elif defined(__alpha__)
                static u_int32_t last_pcc, upper;
                u_int32_t pcc;

                /*
                 * for alpha, make a 64bit counter value out of the 32bit
                 * alpha processor cycle counter.
                 * read_machclk must be called within a half of its
                 * wrap-around cycle (about 5 sec for 400MHz cpu) to properly
                 * detect a counter wrap-around.
                 * tbr_timeout calls read_machclk once a second.
                 */
                pcc = (u_int32_t)alpha_rpcc();
                if (pcc <= last_pcc)
                        upper++;
                last_pcc = pcc;
                val = ((u_int64_t)upper << 32) + pcc;
#else
                panic("read_machclk");
#endif
        } else {
                struct timeval tv;

                microuptime(&tv);
                val = (((u_int64_t)(tv.tv_sec) * 1000000
                    + tv.tv_usec) << MACHCLK_SHIFT);
        }
        return (val);
}
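
/*
 * Conversion sketch (hypothetical helper, not in this file): turn a
 * machclk delta into microseconds once machclk_freq is known.  Beware
 * of overflow for very large deltas; a real helper would split the
 * multiply.
 *
 *      static __inline u_int64_t
 *      machclk_to_usec(u_int64_t ticks)
 *      {
 *              return (ticks * 1000000 / machclk_freq);
 *      }
 */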
