root/kern/uipc_mbuf.c


DEFINITIONS

This source file includes the following definitions:
  1. mbinit
  2. nmbclust_update
  3. m_reclaim
  4. m_get
  5. m_gethdr
  6. m_getclr
  7. m_clget
  8. m_free
  9. m_freem
  10. m_prepend
  11. m_copym
  12. m_copym2
  13. m_copym0
  14. m_copydata
  15. m_copyback
  16. m_cat
  17. m_adj
  18. m_pullup
  19. m_pullup2
  20. m_getptr
  21. m_inject
  22. m_split
  23. m_devget
  24. m_zero
  25. m_apply

    1 /*      $OpenBSD: uipc_mbuf.c,v 1.85 2007/07/20 09:59:19 claudio Exp $  */
    2 /*      $NetBSD: uipc_mbuf.c,v 1.15.4.1 1996/06/13 17:11:44 cgd Exp $   */
    3 
    4 /*
    5  * Copyright (c) 1982, 1986, 1988, 1991, 1993
    6  *      The Regents of the University of California.  All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 3. Neither the name of the University nor the names of its contributors
   17  *    may be used to endorse or promote products derived from this software
   18  *    without specific prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   30  * SUCH DAMAGE.
   31  *
   32  *      @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94
   33  */
   34 
   35 /*
   36  *      @(#)COPYRIGHT   1.1 (NRL) 17 January 1995
   37  * 
   38  * NRL grants permission for redistribution and use in source and binary
   39  * forms, with or without modification, of the software and documentation
   40  * created at NRL provided that the following conditions are met:
   41  * 
   42  * 1. Redistributions of source code must retain the above copyright
   43  *    notice, this list of conditions and the following disclaimer.
   44  * 2. Redistributions in binary form must reproduce the above copyright
   45  *    notice, this list of conditions and the following disclaimer in the
   46  *    documentation and/or other materials provided with the distribution.
   47  * 3. All advertising materials mentioning features or use of this software
   48  *    must display the following acknowledgements:
   49  *      This product includes software developed by the University of
   50  *      California, Berkeley and its contributors.
   51  *      This product includes software developed at the Information
   52  *      Technology Division, US Naval Research Laboratory.
   53  * 4. Neither the name of the NRL nor the names of its contributors
   54  *    may be used to endorse or promote products derived from this software
   55  *    without specific prior written permission.
   56  * 
   57  * THE SOFTWARE PROVIDED BY NRL IS PROVIDED BY NRL AND CONTRIBUTORS ``AS
   58  * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   59  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
   60  * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL NRL OR
   61  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   62  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   63  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   64  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
   65  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   66  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   67  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   68  * 
   69  * The views and conclusions contained in the software and documentation
   70  * are those of the authors and should not be interpreted as representing
   71  * official policies, either expressed or implied, of the US Naval
   72  * Research Laboratory (NRL).
   73  */
   74 
   75 #include <sys/param.h>
   76 #include <sys/systm.h>
   77 #include <sys/proc.h>
   78 #include <sys/malloc.h>
   79 #define MBTYPES
   80 #include <sys/mbuf.h>
   81 #include <sys/kernel.h>
   82 #include <sys/syslog.h>
   83 #include <sys/domain.h>
   84 #include <sys/protosw.h>
   85 #include <sys/pool.h>
   86 
   87 #include <machine/cpu.h>
   88 
   89 #include <uvm/uvm_extern.h>
   90 
   91 struct  mbstat mbstat;          /* mbuf stats */
   92 struct  pool mbpool;            /* mbuf pool */
   93 struct  pool mclpool;           /* mbuf cluster pool */
   94 
   95 int max_linkhdr;                /* largest link-level header */
   96 int max_protohdr;               /* largest protocol header */
   97 int max_hdr;                    /* largest link+protocol header */
   98 int max_datalen;                /* MHLEN - max_hdr */
   99 
  100 struct mbuf *m_copym0(struct mbuf *, int, int, int, int);
  101 void    nmbclust_update(void);
  102 
  103 
  104 const char *mclpool_warnmsg =
  105     "WARNING: mclpool limit reached; increase kern.maxclusters";
  106 
  107 /*
  108  * Initialize the mbuf allocator.
  109  */
  110 void
  111 mbinit(void)
  112 {
  113         pool_init(&mbpool, MSIZE, 0, 0, 0, "mbpl", NULL);
  114         pool_init(&mclpool, MCLBYTES, 0, 0, 0, "mclpl", NULL);
  115 
  116         nmbclust_update();
  117 
  118         /*
  119          * Set a low water mark for both mbufs and clusters.  This should
  120          * help ensure that they can be allocated in a memory starvation
  121          * situation.  This is important for e.g. diskless systems which
  122          * must allocate mbufs in order for the pagedaemon to clean pages.
  123          */
  124         pool_setlowat(&mbpool, mblowat);
  125         pool_setlowat(&mclpool, mcllowat);
  126 }
  127 
  128 void
  129 nmbclust_update(void)
  130 {
  131         /*
  132          * Set the hard limit on the mclpool to the number of
   133          * mbuf clusters the kernel is to support.  Log the limit-reached
   134          * message at most once a minute.
  135          */
  136         (void)pool_sethardlimit(&mclpool, nmbclust, mclpool_warnmsg, 60);
  137         pool_sethiwat(&mbpool, nmbclust);
  138 }
  139 
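/*
 * Usage sketch (hypothetical caller): a handler that raises the cluster
 * limit and then propagates it to the pools.  The function name and the
 * validation policy are assumptions made for illustration.
 */
static void
example_set_maxclusters(int newval)
{
        /* Assumes the caller has already validated newval (newval >= nmbclust). */
        nmbclust = newval;
        nmbclust_update();
}
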
  140 void
  141 m_reclaim(void *arg, int flags)
  142 {
  143         struct domain *dp;
  144         struct protosw *pr;
  145         int s = splvm();
  146 
  147         for (dp = domains; dp; dp = dp->dom_next)
  148                 for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
  149                         if (pr->pr_drain)
  150                                 (*pr->pr_drain)();
  151         splx(s);
  152         mbstat.m_drain++;
  153 }
  154 
  155 /*
  156  * Space allocation routines.
  157  */
  158 struct mbuf *
  159 m_get(int nowait, int type)
  160 {
  161         struct mbuf *m;
  162         int s;
  163 
  164         s = splvm();
  165         m = pool_get(&mbpool, nowait == M_WAIT ? PR_WAITOK|PR_LIMITFAIL : 0);
  166         if (m) {
  167                 m->m_type = type;
  168                 mbstat.m_mtypes[type]++;
  169                 m->m_next = (struct mbuf *)NULL;
  170                 m->m_nextpkt = (struct mbuf *)NULL;
  171                 m->m_data = m->m_dat;
  172                 m->m_flags = 0;
  173         }
  174         splx(s);
  175         return (m);
  176 }
  177 
  178 struct mbuf *
  179 m_gethdr(int nowait, int type)
  180 {
  181         struct mbuf *m;
  182         int s;
  183 
  184         s = splvm();
  185         m = pool_get(&mbpool, nowait == M_WAIT ? PR_WAITOK|PR_LIMITFAIL : 0);
  186         if (m) {
  187                 m->m_type = type;
  188                 mbstat.m_mtypes[type]++;
  189                 m->m_next = (struct mbuf *)NULL;
  190                 m->m_nextpkt = (struct mbuf *)NULL;
  191                 m->m_data = m->m_pktdat;
  192                 m->m_flags = M_PKTHDR;
  193                 m->m_pkthdr.rcvif = NULL;
  194                 SLIST_INIT(&m->m_pkthdr.tags);
  195                 m->m_pkthdr.csum_flags = 0;
  196                 m->m_pkthdr.pf.hdr = NULL;
  197                 m->m_pkthdr.pf.rtableid = 0;
  198                 m->m_pkthdr.pf.qid = 0;
  199                 m->m_pkthdr.pf.tag = 0;
  200                 m->m_pkthdr.pf.flags = 0;
  201                 m->m_pkthdr.pf.routed = 0;
  202         }
  203         splx(s);
  204         return (m);
  205 }
  206 
  207 struct mbuf *
  208 m_getclr(int nowait, int type)
  209 {
  210         struct mbuf *m;
  211 
  212         MGET(m, nowait, type);
  213         if (m == NULL)
  214                 return (NULL);
  215         memset(mtod(m, caddr_t), 0, MLEN);
  216         return (m);
  217 }
  218 
  219 void
  220 m_clget(struct mbuf *m, int how)
  221 {
  222         int s;
  223 
  224         s = splvm();
  225         m->m_ext.ext_buf =
  226             pool_get(&mclpool, how == M_WAIT ? (PR_WAITOK|PR_LIMITFAIL) : 0);
  227         splx(s);
  228         if (m->m_ext.ext_buf != NULL) {
  229                 m->m_data = m->m_ext.ext_buf;
  230                 m->m_flags |= M_EXT|M_CLUSTER;
  231                 m->m_ext.ext_size = MCLBYTES;
  232                 m->m_ext.ext_free = NULL;
  233                 m->m_ext.ext_arg = NULL;
  234                 MCLINITREFERENCE(m);
  235         }
  236 }
  237 
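/*
 * Usage sketch (hypothetical caller): allocate a packet header mbuf and
 * attach a cluster when the payload does not fit in the internal data
 * area.  The function name, "buf" and "len" are assumptions made for
 * illustration; len is assumed to be at most MCLBYTES.
 */
static struct mbuf *
example_alloc_pkt(const void *buf, int len)
{
        struct mbuf *m;

        MGETHDR(m, M_DONTWAIT, MT_DATA);
        if (m == NULL)
                return (NULL);
        if (len > MHLEN) {
                /* MCLGET() sets M_EXT only on success. */
                MCLGET(m, M_DONTWAIT);
                if ((m->m_flags & M_EXT) == 0) {
                        m_freem(m);
                        return (NULL);
                }
        }
        memcpy(mtod(m, caddr_t), buf, len);
        m->m_len = m->m_pkthdr.len = len;
        return (m);
}
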
  238 struct mbuf *
  239 m_free(struct mbuf *m)
  240 {
  241         struct mbuf *n;
  242         int s;
  243 
  244         s = splvm();
  245         mbstat.m_mtypes[m->m_type]--;
  246         if (m->m_flags & M_PKTHDR)
  247                 m_tag_delete_chain(m);
  248         if (m->m_flags & M_EXT) {
  249                 if (MCLISREFERENCED(m))
  250                         _MCLDEREFERENCE(m);
  251                 else if (m->m_flags & M_CLUSTER)
  252                         pool_put(&mclpool, m->m_ext.ext_buf);
  253                 else if (m->m_ext.ext_free)
  254                         (*(m->m_ext.ext_free))(m->m_ext.ext_buf,
  255                             m->m_ext.ext_size, m->m_ext.ext_arg);
  256                 else
   257                         free(m->m_ext.ext_buf, m->m_ext.ext_type);
  258                 m->m_flags &= ~(M_CLUSTER|M_EXT);
  259                 m->m_ext.ext_size = 0;
  260         }
  261         n = m->m_next;
  262         pool_put(&mbpool, m);
  263         splx(s);
  264 
  265         return (n);
  266 }
  267 
  268 void
  269 m_freem(struct mbuf *m)
  270 {
  271         struct mbuf *n;
  272 
  273         if (m == NULL)
  274                 return;
  275         do {
  276                 MFREE(m, n);
  277         } while ((m = n) != NULL);
  278 }
  279 
  280 /*
  281  * Mbuffer utility routines.
  282  */
  283 
  284 /*
  285  * Lesser-used path for M_PREPEND:
  286  * allocate new mbuf to prepend to chain,
  287  * copy junk along.
  288  */
  289 struct mbuf *
  290 m_prepend(struct mbuf *m, int len, int how)
  291 {
  292         struct mbuf *mn;
  293 
  294         if (len > MHLEN)
  295                 panic("mbuf prepend length too big");
  296 
  297         MGET(mn, how, m->m_type);
  298         if (mn == NULL) {
  299                 m_freem(m);
  300                 return (NULL);
  301         }
  302         if (m->m_flags & M_PKTHDR)
  303                 M_MOVE_PKTHDR(mn, m);
  304         mn->m_next = m;
  305         m = mn;
  306         MH_ALIGN(m, len);
  307         m->m_len = len;
  308         return (m);
  309 }
  310 
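/*
 * Usage sketch (hypothetical caller): make room for a link-level header in
 * front of a packet.  Callers normally use the M_PREPEND() macro, which
 * reuses leading space when possible and falls back to m_prepend()
 * otherwise.  "hdrlen" is an assumption for illustration and must be at
 * most MHLEN.
 */
static struct mbuf *
example_prepend_hdr(struct mbuf *m, int hdrlen)
{
        /* On failure the chain has been freed and m is set to NULL. */
        M_PREPEND(m, hdrlen, M_DONTWAIT);
        if (m == NULL)
                return (NULL);
        memset(mtod(m, caddr_t), 0, hdrlen);    /* caller fills in the header */
        return (m);
}
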
  311 /*
  312  * Make a copy of an mbuf chain starting "off" bytes from the beginning,
  313  * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
  314  * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
  315  */
  316 int MCFail;
  317 
  318 struct mbuf *
  319 m_copym(struct mbuf *m, int off, int len, int wait)
  320 {
  321         return m_copym0(m, off, len, wait, 0);  /* shallow copy on M_EXT */
  322 }
  323 
  324 /*
  325  * m_copym2() is like m_copym(), except it COPIES cluster mbufs, instead
  326  * of merely bumping the reference count.
  327  */
  328 struct mbuf *
  329 m_copym2(struct mbuf *m, int off, int len, int wait)
  330 {
  331         return m_copym0(m, off, len, wait, 1);  /* deep copy */
  332 }
  333 
  334 struct mbuf *
  335 m_copym0(struct mbuf *m, int off, int len, int wait, int deep)
  336 {
  337         struct mbuf *n, **np;
  338         struct mbuf *top;
  339         int copyhdr = 0;
  340 
  341         if (off < 0 || len < 0)
  342                 panic("m_copym0: off %d, len %d", off, len);
  343         if (off == 0 && m->m_flags & M_PKTHDR)
  344                 copyhdr = 1;
  345         while (off > 0) {
  346                 if (m == NULL)
  347                         panic("m_copym0: null mbuf");
  348                 if (off < m->m_len)
  349                         break;
  350                 off -= m->m_len;
  351                 m = m->m_next;
  352         }
  353         np = &top;
  354         top = NULL;
  355         while (len > 0) {
  356                 if (m == NULL) {
  357                         if (len != M_COPYALL)
  358                                 panic("m_copym0: m == NULL and not COPYALL");
  359                         break;
  360                 }
  361                 MGET(n, wait, m->m_type);
  362                 *np = n;
  363                 if (n == NULL)
  364                         goto nospace;
  365                 if (copyhdr) {
  366                         M_DUP_PKTHDR(n, m);
  367                         if (len != M_COPYALL)
  368                                 n->m_pkthdr.len = len;
  369                         copyhdr = 0;
  370                 }
  371                 n->m_len = min(len, m->m_len - off);
  372                 if (m->m_flags & M_EXT) {
  373                         if (!deep) {
  374                                 n->m_data = m->m_data + off;
  375                                 n->m_ext = m->m_ext;
  376                                 MCLADDREFERENCE(m, n);
  377                         } else {
  378                                 /*
   379                                  * We are unsure how m was allocated, so
  380                                  * copy into multiple MCLBYTES cluster mbufs.
  381                                  */
  382                                 MCLGET(n, wait);
  383                                 n->m_len = 0;
  384                                 n->m_len = M_TRAILINGSPACE(n);
  385                                 n->m_len = min(n->m_len, len);
  386                                 n->m_len = min(n->m_len, m->m_len - off);
  387                                 memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off,
  388                                     (unsigned)n->m_len);
  389                         }
  390                 } else
  391                         memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off,
  392                             (unsigned)n->m_len);
  393                 if (len != M_COPYALL)
  394                         len -= n->m_len;
  395                 off += n->m_len;
  396 #ifdef DIAGNOSTIC
  397                 if (off > m->m_len)
  398                         panic("m_copym0 overrun");
  399 #endif
  400                 if (off == m->m_len) {
  401                         m = m->m_next;
  402                         off = 0;
  403                 }
  404                 np = &n->m_next;
  405         }
  406         if (top == NULL)
  407                 MCFail++;
  408         return (top);
  409 nospace:
  410         m_freem(top);
  411         MCFail++;
  412         return (NULL);
  413 }
  414 
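/*
 * Usage sketch (hypothetical caller): duplicate an entire packet, e.g. so
 * one copy can be queued while the other is passed on.  m_copym() shares
 * cluster data by reference, so the duplicate must be treated as
 * read-only; m_copym2() would be used if a writable copy is needed.
 */
static struct mbuf *
example_dup_pkt(struct mbuf *m, int wait)
{
        return (m_copym(m, 0, M_COPYALL, wait));
}
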
  415 /*
  416  * Copy data from an mbuf chain starting "off" bytes from the beginning,
  417  * continuing for "len" bytes, into the indicated buffer.
  418  */
  419 void
  420 m_copydata(struct mbuf *m, int off, int len, caddr_t cp)
  421 {
  422         unsigned count;
  423 
  424         if (off < 0)
  425                 panic("m_copydata: off %d < 0", off);
  426         if (len < 0)
  427                 panic("m_copydata: len %d < 0", len);
  428         while (off > 0) {
  429                 if (m == NULL)
  430                         panic("m_copydata: null mbuf in skip");
  431                 if (off < m->m_len)
  432                         break;
  433                 off -= m->m_len;
  434                 m = m->m_next;
  435         }
  436         while (len > 0) {
  437                 if (m == NULL)
  438                         panic("m_copydata: null mbuf");
  439                 count = min(m->m_len - off, len);
  440                 bcopy(mtod(m, caddr_t) + off, cp, count);
  441                 len -= count;
  442                 cp += count;
  443                 off = 0;
  444                 m = m->m_next;
  445         }
  446 }
  447 
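/*
 * Usage sketch (hypothetical caller): extract a fixed-size header that may
 * span several mbufs into a flat buffer.  "hdr" and "hdrlen" are
 * assumptions for illustration; the chain must hold at least off + hdrlen
 * bytes, since m_copydata() panics on a short chain.
 */
static void
example_peek_hdr(struct mbuf *m, int off, void *hdr, int hdrlen)
{
        m_copydata(m, off, hdrlen, (caddr_t)hdr);
}
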
  448 /*
  449  * Copy data from a buffer back into the indicated mbuf chain,
  450  * starting "off" bytes from the beginning, extending the mbuf
  451  * chain if necessary. The mbuf needs to be properly initialized
  452  * including the setting of m_len.
  453  */
  454 void
  455 m_copyback(struct mbuf *m0, int off, int len, const void *_cp)
  456 {
  457         int mlen;
  458         struct mbuf *m = m0, *n;
  459         int totlen = 0;
  460         caddr_t cp = (caddr_t)_cp;
  461 
  462         if (m0 == NULL)
  463                 return;
  464         while (off > (mlen = m->m_len)) {
  465                 off -= mlen;
  466                 totlen += mlen;
  467                 if (m->m_next == NULL) {
  468                         n = m_getclr(M_DONTWAIT, m->m_type);
  469                         if (n == NULL)
  470                                 goto out;
  471                         n->m_len = min(MLEN, len + off);
  472                         m->m_next = n;
  473                 }
  474                 m = m->m_next;
  475         }
  476         while (len > 0) {
   477                 mlen = min(m->m_len - off, len);
  478                 bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
  479                 cp += mlen;
  480                 len -= mlen;
  481                 mlen += off;
  482                 off = 0;
  483                 totlen += mlen;
  484                 if (len == 0)
  485                         break;
  486                 if (m->m_next == NULL) {
  487                         n = m_get(M_DONTWAIT, m->m_type);
  488                         if (n == NULL)
  489                                 break;
  490                         n->m_len = min(MLEN, len);
  491                         m->m_next = n;
  492                 }
  493                 m = m->m_next;
  494         }
  495 out:    if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
  496                 m->m_pkthdr.len = totlen;
  497 }
  498 
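/*
 * Usage sketch (hypothetical caller): overwrite two bytes at a given
 * offset in an existing chain, e.g. to patch a 16-bit checksum field.
 * The function name and parameters are assumptions for illustration.
 */
static void
example_patch_cksum(struct mbuf *m, int off, u_int16_t sum)
{
        m_copyback(m, off, sizeof(sum), &sum);
}
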
  499 /*
  500  * Concatenate mbuf chain n to m.
   501  * n may be copied into m (when n->m_len is small), so the data portion of
   502  * n can end up in an mbuf of a different type.
   503  * Therefore both chains should be of the same type (e.g. MT_DATA).
   504  * The packet header length (m_pkthdr.len), if present, is not updated.
  505  */
  506 void
  507 m_cat(struct mbuf *m, struct mbuf *n)
  508 {
  509         while (m->m_next)
  510                 m = m->m_next;
  511         while (n) {
  512                 if (M_READONLY(m) || n->m_len > M_TRAILINGSPACE(m)) {
  513                         /* just join the two chains */
  514                         m->m_next = n;
  515                         return;
  516                 }
  517                 /* splat the data from one into the other */
  518                 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
  519                     (u_int)n->m_len);
  520                 m->m_len += n->m_len;
  521                 n = m_free(n);
  522         }
  523 }
  524 
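/*
 * Usage sketch (hypothetical caller): append a trailer chain of the same
 * mbuf type and fix up the packet header length by hand, since m_cat()
 * does not touch m_pkthdr.  "trailerlen" is an assumption for
 * illustration.
 */
static void
example_append(struct mbuf *pkt, struct mbuf *trailer, int trailerlen)
{
        m_cat(pkt, trailer);
        if (pkt->m_flags & M_PKTHDR)
                pkt->m_pkthdr.len += trailerlen;
}
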
  525 void
  526 m_adj(struct mbuf *mp, int req_len)
  527 {
  528         int len = req_len;
  529         struct mbuf *m;
  530         int count;
  531 
  532         if ((m = mp) == NULL)
  533                 return;
  534         if (len >= 0) {
  535                 /*
  536                  * Trim from head.
  537                  */
  538                 while (m != NULL && len > 0) {
  539                         if (m->m_len <= len) {
  540                                 len -= m->m_len;
  541                                 m->m_len = 0;
  542                                 m = m->m_next;
  543                         } else {
  544                                 m->m_len -= len;
  545                                 m->m_data += len;
  546                                 len = 0;
  547                         }
  548                 }
  549                 m = mp;
  550                 if (mp->m_flags & M_PKTHDR)
  551                         m->m_pkthdr.len -= (req_len - len);
  552         } else {
  553                 /*
  554                  * Trim from tail.  Scan the mbuf chain,
  555                  * calculating its length and finding the last mbuf.
  556                  * If the adjustment only affects this mbuf, then just
  557                  * adjust and return.  Otherwise, rescan and truncate
  558                  * after the remaining size.
  559                  */
  560                 len = -len;
  561                 count = 0;
  562                 for (;;) {
  563                         count += m->m_len;
  564                         if (m->m_next == NULL)
  565                                 break;
  566                         m = m->m_next;
  567                 }
  568                 if (m->m_len >= len) {
  569                         m->m_len -= len;
  570                         if (mp->m_flags & M_PKTHDR)
  571                                 mp->m_pkthdr.len -= len;
  572                         return;
  573                 }
  574                 count -= len;
  575                 if (count < 0)
  576                         count = 0;
  577                 /*
  578                  * Correct length for chain is "count".
  579                  * Find the mbuf with last data, adjust its length,
  580                  * and toss data from remaining mbufs on chain.
  581                  */
  582                 m = mp;
  583                 if (m->m_flags & M_PKTHDR)
  584                         m->m_pkthdr.len = count;
  585                 for (; m; m = m->m_next) {
  586                         if (m->m_len >= count) {
  587                                 m->m_len = count;
  588                                 break;
  589                         }
  590                         count -= m->m_len;
  591                 }
  592                 while ((m = m->m_next) != NULL)
  593                         m->m_len = 0;
  594         }
  595 }
  596 
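/*
 * Usage sketch (hypothetical caller): strip a link-level header from the
 * front of a packet and a 4-byte trailer (e.g. a CRC) from the end.
 * Positive lengths trim from the head, negative lengths from the tail.
 * "hdrlen" is an assumption for illustration.
 */
static void
example_strip(struct mbuf *m, int hdrlen)
{
        m_adj(m, hdrlen);       /* trim from head */
        m_adj(m, -4);           /* trim from tail */
}
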
  597 /*
   598  * Rearrange an mbuf chain so that len bytes are contiguous
  599  * and in the data area of an mbuf (so that mtod and dtom
  600  * will work for a structure of size len).  Returns the resulting
  601  * mbuf chain on success, frees it and returns null on failure.
  602  * If there is room, it will add up to max_protohdr-len extra bytes to the
  603  * contiguous region in an attempt to avoid being called next time.
  604  */
  605 int MPFail;
  606 
  607 struct mbuf *
  608 m_pullup(struct mbuf *n, int len)
  609 {
  610         struct mbuf *m;
  611         int count;
  612         int space;
  613 
  614         /*
  615          * If first mbuf has no cluster, and has room for len bytes
  616          * without shifting current data, pullup into it,
  617          * otherwise allocate a new mbuf to prepend to the chain.
  618          */
  619         if ((n->m_flags & M_EXT) == 0 &&
  620             n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
  621                 if (n->m_len >= len)
  622                         return (n);
  623                 m = n;
  624                 n = n->m_next;
  625                 len -= m->m_len;
  626         } else {
  627                 if (len > MHLEN)
  628                         goto bad;
  629                 MGET(m, M_DONTWAIT, n->m_type);
  630                 if (m == NULL)
  631                         goto bad;
  632                 m->m_len = 0;
  633                 if (n->m_flags & M_PKTHDR)
  634                         M_MOVE_PKTHDR(m, n);
  635         }
  636         space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
  637         do {
  638                 count = min(min(max(len, max_protohdr), space), n->m_len);
  639                 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
  640                     (unsigned)count);
  641                 len -= count;
  642                 m->m_len += count;
  643                 n->m_len -= count;
  644                 space -= count;
  645                 if (n->m_len)
  646                         n->m_data += count;
  647                 else
  648                         n = m_free(n);
  649         } while (len > 0 && n);
  650         if (len > 0) {
  651                 (void)m_free(m);
  652                 goto bad;
  653         }
  654         m->m_next = n;
  655         return (m);
  656 bad:
  657         m_freem(n);
  658         MPFail++;
  659         return (NULL);
  660 }
  661 
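/*
 * Usage sketch (hypothetical caller): the usual protocol-input pattern,
 * making sure "hdrlen" contiguous bytes exist before casting with mtod().
 * On failure m_pullup() has already freed the chain.  The function name
 * and parameters are assumptions for illustration; hdrlen is assumed to
 * be at most MHLEN.
 */
static void *
example_hdr_ptr(struct mbuf **mp, int hdrlen)
{
        if ((*mp)->m_len < hdrlen) {
                *mp = m_pullup(*mp, hdrlen);
                if (*mp == NULL)
                        return (NULL);
        }
        return (mtod(*mp, void *));
}
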
  662 /*
  663  * m_pullup2() works like m_pullup, save that len can be <= MCLBYTES.
   664  * m_pullup2() only works on values of len such that MHLEN < len <= MCLBYTES;
   665  * it calls m_pullup() for values <= MHLEN.  It also only coalesces the
   666  * requested number of bytes.  (For those of us who expect unwieldy option
   667  * headers.)
  668  *
   669  * KEBE SAYS:  Remember that dtom() calls with data in clusters do not work!
  670  */
  671 struct mbuf *   
  672 m_pullup2(struct mbuf *n, int len)       
  673 {
  674         struct mbuf *m;
  675         int count;
  676 
  677         if (len <= MHLEN)
  678                 return m_pullup(n, len);
  679         if ((n->m_flags & M_EXT) != 0 &&
  680             n->m_data + len < &n->m_data[MCLBYTES] && n->m_next) {
  681                 if (n->m_len >= len)
  682                         return (n);
  683                 m = n;
  684                 n = n->m_next;
  685                 len -= m->m_len;
  686         } else {
  687                 if (len > MCLBYTES)
  688                         goto bad;
  689                 MGET(m, M_DONTWAIT, n->m_type);
  690                 if (m == NULL)
  691                         goto bad;
  692                 MCLGET(m, M_DONTWAIT);
  693                 if ((m->m_flags & M_EXT) == 0)
  694                         goto bad;
  695                 m->m_len = 0;
  696                 if (n->m_flags & M_PKTHDR) {
  697                         /* Too many adverse side effects. */
  698                         /* M_MOVE_PKTHDR(m, n); */
  699                         m->m_flags = (n->m_flags & M_COPYFLAGS) |
  700                             M_EXT | M_CLUSTER;
  701                         M_MOVE_HDR(m, n);
  702                         /* n->m_data is cool. */
  703                 }
  704         }
  705 
  706         do {
  707                 count = min(len, n->m_len);
  708                 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
  709                     (unsigned)count);
  710                 len -= count;
  711                 m->m_len += count;
  712                 n->m_len -= count;
  713                 if (n->m_len)
  714                         n->m_data += count;
  715                 else
  716                         n = m_free(n);
  717         } while (len > 0 && n);
  718         if (len > 0) {
  719                 (void)m_free(m);
  720                 goto bad;
  721         }
  722         m->m_next = n;
  723 
  724         return (m);
  725 bad:
  726         m_freem(n);
  727         MPFail++;
  728         return (NULL);
  729 }
  730 
  731 /*
  732  * Return a pointer to mbuf/offset of location in mbuf chain.
  733  */
  734 struct mbuf *
  735 m_getptr(struct mbuf *m, int loc, int *off)
  736 {
  737         while (loc >= 0) {
  738                 /* Normal end of search */
  739                 if (m->m_len > loc) {
  740                         *off = loc;
  741                         return (m);
  742                 }
  743                 else {
  744                         loc -= m->m_len;
  745 
  746                         if (m->m_next == NULL) {
  747                                 if (loc == 0) {
  748                                         /* Point at the end of valid data */
  749                                         *off = m->m_len;
  750                                         return (m);
  751                                 }
  752                                 else
  753                                         return (NULL);
  754                         } else
  755                                 m = m->m_next;
  756                 }
  757         }
  758 
  759         return (NULL);
  760 }
  761 
  762 /*
  763  * Inject a new mbuf chain of length siz in mbuf chain m0 at
  764  * position len0. Returns a pointer to the first injected mbuf, or
  765  * NULL on failure (m0 is left undisturbed). Note that if there is
  766  * enough space for an object of size siz in the appropriate position,
  767  * no memory will be allocated. Also, there will be no data movement in
   768  * the first len0 bytes (pointers into that region remain valid).
  769  *
  770  * XXX It is assumed that siz is less than the size of an mbuf at the moment.
  771  */
  772 struct mbuf *
  773 m_inject(struct mbuf *m0, int len0, int siz, int wait)
  774 {
  775         struct mbuf *m, *n, *n2 = NULL, *n3;
  776         unsigned len = len0, remain;
  777 
  778         if ((siz >= MHLEN) || (len0 <= 0))
  779                 return (NULL);
  780         for (m = m0; m && len > m->m_len; m = m->m_next)
  781                 len -= m->m_len;
  782         if (m == NULL)
  783                 return (NULL);
  784         remain = m->m_len - len;
  785         if (remain == 0) {
  786                 if ((m->m_next) && (M_LEADINGSPACE(m->m_next) >= siz)) {
  787                         m->m_next->m_len += siz;
  788                         if (m0->m_flags & M_PKTHDR)
  789                                 m0->m_pkthdr.len += siz;
  790                         m->m_next->m_data -= siz;
  791                         return m->m_next;
  792                 }
  793         } else {
  794                 n2 = m_copym2(m, len, remain, wait);
  795                 if (n2 == NULL)
  796                         return (NULL);
  797         }
  798 
  799         MGET(n, wait, MT_DATA);
  800         if (n == NULL) {
  801                 if (n2)
  802                         m_freem(n2);
  803                 return (NULL);
  804         }
  805 
  806         n->m_len = siz;
  807         if (m0->m_flags & M_PKTHDR)
  808                 m0->m_pkthdr.len += siz;
  809         m->m_len -= remain; /* Trim */
  810         if (n2) {
  811                 for (n3 = n; n3->m_next != NULL; n3 = n3->m_next)
  812                         ;
  813                 n3->m_next = n2;
  814         } else
  815                 n3 = n;
  816         for (; n3->m_next != NULL; n3 = n3->m_next)
  817                 ;
  818         n3->m_next = m->m_next;
  819         m->m_next = n;
  820         return n;
  821 }
  822 
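/*
 * Usage sketch (hypothetical caller): open a gap of "optlen" bytes after
 * an initial header of "hdrlen" bytes, e.g. to make room for an option or
 * extension header, and zero the injected region.  The names are
 * assumptions for illustration; optlen must be smaller than MHLEN.
 */
static struct mbuf *
example_insert_gap(struct mbuf *m0, int hdrlen, int optlen, int wait)
{
        struct mbuf *n;

        n = m_inject(m0, hdrlen, optlen, wait);
        if (n == NULL)
                return (NULL);          /* m0 is left undisturbed */
        memset(mtod(n, caddr_t), 0, optlen);
        return (n);
}
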
  823 /*
   824  * Partition an mbuf chain into two pieces, returning the tail --
  825  * all but the first len0 bytes.  In case of failure, it returns NULL and
  826  * attempts to restore the chain to its original state.
  827  */
  828 struct mbuf *
  829 m_split(struct mbuf *m0, int len0, int wait)
  830 {
  831         struct mbuf *m, *n;
  832         unsigned len = len0, remain, olen;
  833 
  834         for (m = m0; m && len > m->m_len; m = m->m_next)
  835                 len -= m->m_len;
  836         if (m == NULL)
  837                 return (NULL);
  838         remain = m->m_len - len;
  839         if (m0->m_flags & M_PKTHDR) {
  840                 MGETHDR(n, wait, m0->m_type);
  841                 if (n == NULL)
  842                         return (NULL);
  843                 M_DUP_PKTHDR(n, m0);
  844                 n->m_pkthdr.len -= len0;
  845                 olen = m0->m_pkthdr.len;
  846                 m0->m_pkthdr.len = len0;
  847                 if (m->m_flags & M_EXT)
  848                         goto extpacket;
  849                 if (remain > MHLEN) {
  850                         /* m can't be the lead packet */
  851                         MH_ALIGN(n, 0);
  852                         n->m_next = m_split(m, len, wait);
  853                         if (n->m_next == NULL) {
  854                                 (void) m_free(n);
  855                                 m0->m_pkthdr.len = olen;
  856                                 return (NULL);
  857                         } else
  858                                 return (n);
  859                 } else
  860                         MH_ALIGN(n, remain);
  861         } else if (remain == 0) {
  862                 n = m->m_next;
  863                 m->m_next = NULL;
  864                 return (n);
  865         } else {
  866                 MGET(n, wait, m->m_type);
  867                 if (n == NULL)
  868                         return (NULL);
  869                 M_ALIGN(n, remain);
  870         }
  871 extpacket:
  872         if (m->m_flags & M_EXT) {
  873                 n->m_ext = m->m_ext;
  874                 MCLADDREFERENCE(m, n);
  875                 n->m_data = m->m_data + len;
  876         } else {
  877                 bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
  878         }
  879         n->m_len = remain;
  880         m->m_len = len;
  881         n->m_next = m->m_next;
  882         m->m_next = NULL;
  883         return (n);
  884 }
  885 
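/*
 * Usage sketch (hypothetical caller): detach everything past the first
 * "hdrlen" bytes, e.g. to process header and payload separately.  The
 * names are assumptions for illustration; on failure the original chain
 * is left intact.
 */
static struct mbuf *
example_split_tail(struct mbuf *m, int hdrlen, int wait)
{
        struct mbuf *tail;

        tail = m_split(m, hdrlen, wait);
        if (tail == NULL)
                return (NULL);  /* "m" still holds the whole packet */
        /* "m" now holds the first hdrlen bytes, "tail" the remainder. */
        return (tail);
}
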
  886 /*
  887  * Routine to copy from device local memory into mbufs.
  888  */
  889 struct mbuf *
  890 m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
  891     void (*copy)(const void *, void *, size_t))
  892 {
  893         struct mbuf *m;
  894         struct mbuf *top = NULL, **mp = &top;
  895         int len;
  896         char *cp;
  897         char *epkt;
  898 
  899         cp = buf;
  900         epkt = cp + totlen;
  901         if (off) {
  902                 /*
   903                  * If 'off' is non-zero, the packet is trailer-encapsulated,
  904                  * so we have to skip the type and length fields.
  905                  */
  906                 cp += off + 2 * sizeof(u_int16_t);
  907                 totlen -= 2 * sizeof(u_int16_t);
  908         }
  909         MGETHDR(m, M_DONTWAIT, MT_DATA);
  910         if (m == NULL)
  911                 return (NULL);
  912         m->m_pkthdr.rcvif = ifp;
  913         m->m_pkthdr.len = totlen;
  914         m->m_len = MHLEN;
  915 
  916         while (totlen > 0) {
  917                 if (top != NULL) {
  918                         MGET(m, M_DONTWAIT, MT_DATA);
  919                         if (m == NULL) {
  920                                 m_freem(top);
  921                                 return (NULL);
  922                         }
  923                         m->m_len = MLEN;
  924                 }
  925                 len = min(totlen, epkt - cp);
  926                 if (len >= MINCLSIZE) {
  927                         MCLGET(m, M_DONTWAIT);
  928                         if (m->m_flags & M_EXT)
  929                                 m->m_len = len = min(len, MCLBYTES);
  930                         else
  931                                 len = m->m_len;
  932                 } else {
  933                         /*
  934                          * Place initial small packet/header at end of mbuf.
  935                          */
  936                         if (len < m->m_len) {
  937                                 if (top == NULL &&
  938                                     len + max_linkhdr <= m->m_len)
  939                                         m->m_data += max_linkhdr;
  940                                 m->m_len = len;
  941                         } else
  942                                 len = m->m_len;
  943                 }
  944                 if (copy)
  945                         copy(cp, mtod(m, caddr_t), (size_t)len);
  946                 else
  947                         bcopy(cp, mtod(m, caddr_t), (size_t)len);
  948                 cp += len;
  949                 *mp = m;
  950                 mp = &m->m_next;
  951                 totlen -= len;
  952                 if (cp == epkt)
  953                         cp = buf;
  954         }
  955         return (top);
  956 }
  957 
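/*
 * Usage sketch (hypothetical caller): a receive path copying a frame out
 * of device memory into a fresh chain.  "rxbuf", "framelen" and "ifp" are
 * assumptions for illustration; passing a NULL copy routine makes
 * m_devget() fall back to bcopy().
 */
static struct mbuf *
example_rx_copy(char *rxbuf, int framelen, struct ifnet *ifp)
{
        return (m_devget(rxbuf, framelen, 0, ifp, NULL));
}
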
  958 void
  959 m_zero(struct mbuf *m)
  960 {
  961         while (m) {
  962 #ifdef DIAGNOSTIC
  963                 if (M_READONLY(m))
  964                         panic("m_zero: M_READONLY");
  965 #endif /* DIAGNOSTIC */
  966                 if (m->m_flags & M_EXT)
  967                         memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
  968                 else {
  969                         if (m->m_flags & M_PKTHDR)
  970                                 memset(m->m_pktdat, 0, MHLEN);
  971                         else
  972                                 memset(m->m_dat, 0, MLEN);
  973                 }
  974                 m = m->m_next;
  975         }
  976 }
  977 
  978 /*
  979  * Apply function f to the data in an mbuf chain starting "off" bytes from the
  980  * beginning, continuing for "len" bytes.
  981  */
  982 int
  983 m_apply(struct mbuf *m, int off, int len,
  984     int (*f)(caddr_t, caddr_t, unsigned int), caddr_t fstate)
  985 {
  986         int rval;
  987         unsigned int count;
  988 
  989         if (len < 0)
  990                 panic("m_apply: len %d < 0", len);
  991         if (off < 0)
  992                 panic("m_apply: off %d < 0", off);
  993         while (off > 0) {
  994                 if (m == NULL)
  995                         panic("m_apply: null mbuf in skip");
  996                 if (off < m->m_len)
  997                         break;
  998                 off -= m->m_len;
  999                 m = m->m_next;
 1000         }
 1001         while (len > 0) {
 1002                 if (m == NULL)
 1003                         panic("m_apply: null mbuf");
 1004                 count = min(m->m_len - off, len);
 1005 
 1006                 rval = f(fstate, mtod(m, caddr_t) + off, count);
 1007                 if (rval)
 1008                         return (rval);
 1009 
 1010                 len -= count;
 1011                 off = 0;
 1012                 m = m->m_next;
 1013         }
 1014 
 1015         return (0);
 1016 }
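
/*
 * Usage sketch (hypothetical caller): walk "len" bytes of a chain without
 * linearizing it, handing each contiguous piece to a callback.  The
 * callback and its state are assumptions for illustration.
 */
static int
example_count_cb(caddr_t fstate, caddr_t data, unsigned int len)
{
        *(size_t *)fstate += len;       /* "data" is not examined here */
        return (0);                     /* non-zero would abort the walk */
}

static size_t
example_chain_bytes(struct mbuf *m, int off, int len)
{
        size_t total = 0;

        m_apply(m, off, len, example_count_cb, (caddr_t)&total);
        return (total);
}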
