2 * Copyright (c) 1982, 1986, 1988, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the University nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 #include "opt_param.h"
36 #include "opt_mbuf_stress_test.h"
37 #include "opt_mbuf_profiling.h"
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/limits.h>
44 #include <sys/malloc.h>
46 #include <sys/sysctl.h>
47 #include <sys/domain.h>
48 #include <sys/protosw.h>
52 SDT_PROBE_DEFINE5_XLATE(sdt, , , m__init,
53 "struct mbuf *", "mbufinfo_t *",
54 "uint32_t", "uint32_t",
55 "uint16_t", "uint16_t",
56 "uint32_t", "uint32_t",
57 "uint32_t", "uint32_t");
59 SDT_PROBE_DEFINE3_XLATE(sdt, , , m__gethdr,
60 "uint32_t", "uint32_t",
61 "uint16_t", "uint16_t",
62 "struct mbuf *", "mbufinfo_t *");
64 SDT_PROBE_DEFINE3_XLATE(sdt, , , m__get,
65 "uint32_t", "uint32_t",
66 "uint16_t", "uint16_t",
67 "struct mbuf *", "mbufinfo_t *");
69 SDT_PROBE_DEFINE4_XLATE(sdt, , , m__getcl,
70 "uint32_t", "uint32_t",
71 "uint16_t", "uint16_t",
72 "uint32_t", "uint32_t",
73 "struct mbuf *", "mbufinfo_t *");
75 SDT_PROBE_DEFINE3_XLATE(sdt, , , m__clget,
76 "struct mbuf *", "mbufinfo_t *",
77 "uint32_t", "uint32_t",
78 "uint32_t", "uint32_t");
80 SDT_PROBE_DEFINE4_XLATE(sdt, , , m__cljget,
81 "struct mbuf *", "mbufinfo_t *",
82 "uint32_t", "uint32_t",
83 "uint32_t", "uint32_t",
86 SDT_PROBE_DEFINE(sdt, , , m__cljset);
88 SDT_PROBE_DEFINE1_XLATE(sdt, , , m__free,
89 "struct mbuf *", "mbufinfo_t *");
91 SDT_PROBE_DEFINE1_XLATE(sdt, , , m__freem,
92 "struct mbuf *", "mbufinfo_t *");
94 #include <security/mac/mac_framework.h>
100 #ifdef MBUF_STRESS_TEST
105 int m_defragrandomfailures;
109 * sysctl(8) exported objects
111 SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RD,
112 &max_linkhdr, 0, "Size of largest link layer header");
113 SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RD,
114 &max_protohdr, 0, "Size of largest protocol layer header");
115 SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RD,
116 &max_hdr, 0, "Size of largest link plus protocol header");
117 SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RD,
118 &max_datalen, 0, "Minimum space left in mbuf after max_hdr");
119 #ifdef MBUF_STRESS_TEST
120 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
121 &m_defragpackets, 0, "");
122 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
123 &m_defragbytes, 0, "");
124 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
125 &m_defraguseless, 0, "");
126 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
127 &m_defragfailure, 0, "");
128 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
129 &m_defragrandomfailures, 0, "");
133 * Ensure the correct size of various mbuf parameters. It could be off due
134 * to compiler-induced padding and alignment artifacts.
136 CTASSERT(MSIZE - offsetof(struct mbuf, m_dat) == MLEN);
137 CTASSERT(MSIZE - offsetof(struct mbuf, m_pktdat) == MHLEN);
140 * mbuf data storage should be 64-bit aligned regardless of architectural
141 * pointer size; check this is the case with and without a packet header.
143 CTASSERT(offsetof(struct mbuf, m_dat) % 8 == 0);
144 CTASSERT(offsetof(struct mbuf, m_pktdat) % 8 == 0);
147 * While the specific values here don't matter too much (i.e., +/- a few
148 * words), we do want to ensure that changes to these values are carefully
149 * reasoned about and properly documented. This is especially the case as
150 * network-protocol and device-driver modules encode these layouts, and must
151 * be recompiled if the structures change. Check these values at compile time
152 * against the ones documented in comments in mbuf.h.
154 * NB: Possibly they should be documented there via #define's and not just comments.
157 #if defined(__LP64__)
158 CTASSERT(offsetof(struct mbuf, m_dat) == 32);
159 CTASSERT(sizeof(struct pkthdr) == 56);
160 CTASSERT(sizeof(struct m_ext) == 48);
162 CTASSERT(offsetof(struct mbuf, m_dat) == 24);
163 CTASSERT(sizeof(struct pkthdr) == 48);
164 CTASSERT(sizeof(struct m_ext) == 28);
168 * Assert that the queue(3) macros produce code of the same size as an old
169 * plain pointer does.
172 static struct mbuf m_assertbuf;
173 CTASSERT(sizeof(m_assertbuf.m_slist) == sizeof(m_assertbuf.m_next));
174 CTASSERT(sizeof(m_assertbuf.m_stailq) == sizeof(m_assertbuf.m_next));
175 CTASSERT(sizeof(m_assertbuf.m_slistpkt) == sizeof(m_assertbuf.m_nextpkt));
176 CTASSERT(sizeof(m_assertbuf.m_stailqpkt) == sizeof(m_assertbuf.m_nextpkt));
180 * Attach the cluster from *m to *n, set up m_ext in *n
181 * and bump the refcount of the cluster.
184 mb_dupcl(struct mbuf *n, struct mbuf *m)
186 volatile u_int *refcnt;
188 KASSERT(m->m_flags & M_EXT, ("%s: M_EXT not set on %p", __func__, m));
189 KASSERT(!(n->m_flags & M_EXT), ("%s: M_EXT set on %p", __func__, n));
193 n->m_flags |= m->m_flags & M_RDONLY;
195 /* See if this is the mbuf that holds the embedded refcount. */
196 if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
197 refcnt = n->m_ext.ext_cnt = &m->m_ext.ext_count;
198 n->m_ext.ext_flags &= ~EXT_FLAG_EMBREF;
200 KASSERT(m->m_ext.ext_cnt != NULL,
201 ("%s: no refcounting pointer on %p", __func__, m));
202 refcnt = m->m_ext.ext_cnt;
208 atomic_add_int(refcnt, 1);
212 m_demote_pkthdr(struct mbuf *m)
217 m_tag_delete_chain(m, NULL);
218 m->m_flags &= ~M_PKTHDR;
219 bzero(&m->m_pkthdr, sizeof(struct pkthdr));
223 * Clean up mbuf (chain) from any tags and packet headers.
224 * If "all" is set then the first mbuf in the chain will be cleaned too.
228 m_demote(struct mbuf *m0, int all, int flags)
232 for (m = all ? m0 : m0->m_next; m != NULL; m = m->m_next) {
233 KASSERT(m->m_nextpkt == NULL, ("%s: m_nextpkt in m %p, m0 %p",
235 if (m->m_flags & M_PKTHDR)
237 m->m_flags = m->m_flags & (M_EXT | M_RDONLY | M_NOFREE | flags);
242 * Sanity checks on mbuf (chain) for use in KASSERT() and general
244 * Returns 1 when all tests pass; returns 0 or panics when a check fails.
245 * Sanitize: 0 to run M_SANITY_ACTION, 1 to garble things so they blow up later.
249 m_sanity(struct mbuf *m0, int sanitize)
256 #define M_SANITY_ACTION(s) panic("mbuf %p: " s, m)
258 #define M_SANITY_ACTION(s) printf("mbuf %p: " s, m)
261 for (m = m0; m != NULL; m = m->m_next) {
263 * Basic pointer checks. If any of these fails then some
264 * unrelated kernel memory before or after us is trashed.
265 * No way to recover from that.
269 if ((caddr_t)m->m_data < a)
270 M_SANITY_ACTION("m_data outside mbuf data range left");
271 if ((caddr_t)m->m_data > b)
272 M_SANITY_ACTION("m_data outside mbuf data range right");
273 if ((caddr_t)m->m_data + m->m_len > b)
274 M_SANITY_ACTION("m_data + m_len exceeds mbuf space");
276 /* m->m_nextpkt may only be set on first mbuf in chain. */
277 if (m != m0 && m->m_nextpkt != NULL) {
279 m_freem(m->m_nextpkt);
280 m->m_nextpkt = (struct mbuf *)0xDEADC0DE;
282 M_SANITY_ACTION("m->m_nextpkt on in-chain mbuf");
285 /* packet length (not mbuf length!) calculation */
286 if (m0->m_flags & M_PKTHDR)
289 /* m_tags may only be attached to first mbuf in chain. */
290 if (m != m0 && m->m_flags & M_PKTHDR &&
291 !SLIST_EMPTY(&m->m_pkthdr.tags)) {
293 m_tag_delete_chain(m, NULL);
294 /* put in 0xDEADC0DE perhaps? */
296 M_SANITY_ACTION("m_tags on in-chain mbuf");
299 /* M_PKTHDR may only be set on first mbuf in chain */
300 if (m != m0 && m->m_flags & M_PKTHDR) {
302 bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
303 m->m_flags &= ~M_PKTHDR;
304 /* put in 0xDEADC0DE and leave hdr flag in */
306 M_SANITY_ACTION("M_PKTHDR on in-chain mbuf");
310 if (pktlen && pktlen != m->m_pkthdr.len) {
314 M_SANITY_ACTION("m_pkthdr.len != mbuf chain length");
318 #undef M_SANITY_ACTION
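/*
 * A sketch of typical use during development (the message is
 * illustrative): wrap the check in a KASSERT so that it compiles away
 * entirely in non-INVARIANTS kernels.
 *
 *	KASSERT(m_sanity(m, 0), ("%s: bad mbuf chain", __func__));
 *
 * With sanitize == 0 a failing check panics (or prints) immediately via
 * M_SANITY_ACTION above.
 */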
322 * Non-inlined part of m_init().
325 m_pkthdr_init(struct mbuf *m, int how)
330 m->m_data = m->m_pktdat;
331 bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
333 /* If the label init fails, fail the alloc */
334 error = mac_mbuf_init(m, how);
343 * "Move" mbuf pkthdr from "from" to "to".
344 * "from" must have M_PKTHDR set, and "to" must be empty.
347 m_move_pkthdr(struct mbuf *to, struct mbuf *from)
351 /* see below for why these are not enabled */
353 /* Note: with MAC, this may not be a good assertion. */
354 KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags),
355 ("m_move_pkthdr: to has tags"));
359 * XXXMAC: It could be this should also occur for non-MAC?
361 if (to->m_flags & M_PKTHDR)
362 m_tag_delete_chain(to, NULL);
364 to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
365 if ((to->m_flags & M_EXT) == 0)
366 to->m_data = to->m_pktdat;
367 to->m_pkthdr = from->m_pkthdr; /* especially tags */
368 SLIST_INIT(&from->m_pkthdr.tags); /* purge tags from src */
369 from->m_flags &= ~M_PKTHDR;
373 * Duplicate "from"'s mbuf pkthdr in "to".
374 * "from" must have M_PKTHDR set, and "to" must be empty.
375 * In particular, this does a deep copy of the packet tags.
378 m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
383 * The mbuf allocator only initializes the pkthdr
384 * when the mbuf is allocated with m_gethdr(). Many users
385 * (e.g. m_copy*, m_prepend) use m_get() and then
386 * smash the pkthdr as needed causing these
387 * assertions to trip. For now just disable them.
390 /* Note: with MAC, this may not be a good assertion. */
391 KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags), ("m_dup_pkthdr: to has tags"));
393 MBUF_CHECKSLEEP(how);
395 if (to->m_flags & M_PKTHDR)
396 m_tag_delete_chain(to, NULL);
398 to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
399 if ((to->m_flags & M_EXT) == 0)
400 to->m_data = to->m_pktdat;
401 to->m_pkthdr = from->m_pkthdr;
402 SLIST_INIT(&to->m_pkthdr.tags);
403 return (m_tag_copy_chain(to, from, how));
407 * Lesser-used path for M_PREPEND:
408 * allocate new mbuf to prepend to chain, copy junk along.
412 m_prepend(struct mbuf *m, int len, int how)
416 if (m->m_flags & M_PKTHDR)
417 mn = m_gethdr(how, m->m_type);
419 mn = m_get(how, m->m_type);
424 if (m->m_flags & M_PKTHDR)
425 m_move_pkthdr(mn, m);
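/*
 * Most callers reach this function through the M_PREPEND() macro, which
 * only falls back to m_prepend() when the existing mbuf has no leading
 * space.  A sketch (struct ether_header is purely an illustration):
 *
 *	M_PREPEND(m, sizeof(struct ether_header), M_NOWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);
 */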
435 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
436 * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
437 * The wait parameter is a choice of M_WAITOK/M_NOWAIT from caller.
438 * Note that the copy is read-only, because clusters are not copied,
439 * only their reference counts are incremented.
442 m_copym(struct mbuf *m, int off0, int len, int wait)
444 struct mbuf *n, **np;
449 KASSERT(off >= 0, ("m_copym, negative off %d", off));
450 KASSERT(len >= 0, ("m_copym, negative len %d", len));
451 MBUF_CHECKSLEEP(wait);
452 if (off == 0 && m->m_flags & M_PKTHDR)
455 KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
465 KASSERT(len == M_COPYALL,
466 ("m_copym, length > size of mbuf chain"));
470 n = m_gethdr(wait, m->m_type);
472 n = m_get(wait, m->m_type);
477 if (!m_dup_pkthdr(n, m, wait))
479 if (len == M_COPYALL)
480 n->m_pkthdr.len -= off0;
482 n->m_pkthdr.len = len;
485 n->m_len = min(len, m->m_len - off);
486 if (m->m_flags & M_EXT) {
487 n->m_data = m->m_data + off;
490 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
492 if (len != M_COPYALL)
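/*
 * A sketch of typical m_copym() use: take a reference-counted snapshot
 * of the first "hlen" bytes before handing the original chain down the
 * stack ("hlen" and the error handling are illustrative).
 *
 *	struct mbuf *n;
 *
 *	n = m_copym(m, 0, hlen, M_NOWAIT);
 *	if (n == NULL)
 *		return (ENOBUFS);
 *
 * Because clusters are shared rather than copied, the snapshot must be
 * treated as read-only; use m_dup() when a writable copy is needed.
 */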
506 * Copy an entire packet, including header (which must be present).
507 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
508 * Note that the copy is read-only, because clusters are not copied,
509 * only their reference counts are incremented.
510 * Preserve alignment of the first mbuf so if the creator has left
511 * some room at the beginning (e.g. for inserting protocol headers)
512 * the copies still have the room available.
515 m_copypacket(struct mbuf *m, int how)
517 struct mbuf *top, *n, *o;
519 MBUF_CHECKSLEEP(how);
520 n = m_get(how, m->m_type);
525 if (!m_dup_pkthdr(n, m, how))
528 if (m->m_flags & M_EXT) {
529 n->m_data = m->m_data;
532 n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
533 bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
538 o = m_get(how, m->m_type);
546 if (m->m_flags & M_EXT) {
547 n->m_data = m->m_data;
550 bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
562 * Copy data from an mbuf chain starting "off" bytes from the beginning,
563 * continuing for "len" bytes, into the indicated buffer.
566 m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
570 KASSERT(off >= 0, ("m_copydata, negative off %d", off));
571 KASSERT(len >= 0, ("m_copydata, negative len %d", len));
573 KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
580 KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
581 count = min(m->m_len - off, len);
582 bcopy(mtod(m, caddr_t) + off, cp, count);
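/*
 * A sketch of pulling a fixed-size region out of a chain into flat
 * storage with m_copydata(), regardless of how the bytes are split
 * across mbufs ("off" and the buffer size are illustrative):
 *
 *	char buf[64];
 *
 *	if (m_length(m, NULL) >= off + sizeof(buf))
 *		m_copydata(m, off, sizeof(buf), buf);
 */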
591 * Copy a packet header mbuf chain into a completely new chain, including
592 * copying any mbuf clusters. Use this instead of m_copypacket() when
593 * you need a writable copy of an mbuf chain.
596 m_dup(const struct mbuf *m, int how)
598 struct mbuf **p, *top = NULL;
599 int remain, moff, nsize;
601 MBUF_CHECKSLEEP(how);
607 /* While there's more data, get a new mbuf, tack it on, and fill it */
608 remain = m->m_pkthdr.len;
611 while (remain > 0 || top == NULL) { /* allow m->m_pkthdr.len == 0 */
614 /* Get the next new mbuf */
615 if (remain >= MINCLSIZE) {
616 n = m_getcl(how, m->m_type, 0);
619 n = m_get(how, m->m_type);
625 if (top == NULL) { /* First one, must be PKTHDR */
626 if (!m_dup_pkthdr(n, m, how)) {
630 if ((n->m_flags & M_EXT) == 0)
632 n->m_flags &= ~M_RDONLY;
636 /* Link it into the new chain */
640 /* Copy data from original mbuf(s) into new mbuf */
641 while (n->m_len < nsize && m != NULL) {
642 int chunk = min(nsize - n->m_len, m->m_len - moff);
644 bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
648 if (moff == m->m_len) {
654 /* Check correct total mbuf length */
655 KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
656 ("%s: bogus m_pkthdr.len", __func__));
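/*
 * A sketch of the usual m_dup() pattern: replace a possibly shared chain
 * with a private, writable one (the error handling is illustrative).
 *
 *	struct mbuf *w;
 *
 *	w = m_dup(m, M_NOWAIT);
 *	if (w == NULL)
 *		return (ENOBUFS);
 *	m_freem(m);
 *	m = w;
 *
 * after which "m" may be modified freely, since none of its clusters are
 * shared with the original.
 */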
666 * Concatenate mbuf chain n to m.
667 * Both chains must be of the same type (e.g. MT_DATA).
668 * Any m_pkthdr is not updated.
671 m_cat(struct mbuf *m, struct mbuf *n)
676 if (!M_WRITABLE(m) ||
677 M_TRAILINGSPACE(m) < n->m_len) {
678 /* just join the two chains */
682 /* splat the data from one into the other */
683 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
685 m->m_len += n->m_len;
691 * Concatenate two pkthdr mbuf chains.
694 m_catpkt(struct mbuf *m, struct mbuf *n)
700 m->m_pkthdr.len += n->m_pkthdr.len;
707 m_adj(struct mbuf *mp, int req_len)
713 if ((m = mp) == NULL)
719 while (m != NULL && len > 0) {
720 if (m->m_len <= len) {
730 if (mp->m_flags & M_PKTHDR)
731 mp->m_pkthdr.len -= (req_len - len);
734 * Trim from tail. Scan the mbuf chain,
735 * calculating its length and finding the last mbuf.
736 * If the adjustment only affects this mbuf, then just
737 * adjust and return. Otherwise, rescan and truncate
738 * after the remaining size.
744 if (m->m_next == (struct mbuf *)0)
748 if (m->m_len >= len) {
750 if (mp->m_flags & M_PKTHDR)
751 mp->m_pkthdr.len -= len;
758 * Correct length for chain is "count".
759 * Find the mbuf with last data, adjust its length,
760 * and toss data from remaining mbufs on chain.
763 if (m->m_flags & M_PKTHDR)
764 m->m_pkthdr.len = count;
765 for (; m; m = m->m_next) {
766 if (m->m_len >= count) {
768 if (m->m_next != NULL) {
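/*
 * A sketch of common m_adj() use, e.g. stripping an encapsulation header
 * from the front and a trailer from the end of a packet ("hdrlen" and
 * "crclen" are illustrative):
 *
 *	m_adj(m, hdrlen);
 *	m_adj(m, -crclen);
 *
 * A positive length trims from the head, a negative length from the
 * tail; m_pkthdr.len is updated when the first mbuf carries a packet
 * header.
 */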
780 * Rearrange an mbuf chain so that len bytes are contiguous
781 * and in the data area of an mbuf (so that mtod will work
782 * for a structure of size len). Returns the resulting
783 * mbuf chain on success, frees it and returns null on failure.
784 * If there is room, it will add up to max_protohdr-len extra bytes to the
785 * contiguous region in an attempt to avoid being called next time.
788 m_pullup(struct mbuf *n, int len)
795 * If first mbuf has no cluster, and has room for len bytes
796 * without shifting current data, pullup into it,
797 * otherwise allocate a new mbuf to prepend to the chain.
799 if ((n->m_flags & M_EXT) == 0 &&
800 n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
809 m = m_get(M_NOWAIT, n->m_type);
812 if (n->m_flags & M_PKTHDR)
815 space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
817 count = min(min(max(len, max_protohdr), space), n->m_len);
818 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
828 } while (len > 0 && n);
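/*
 * The classic m_pullup() idiom makes a protocol header contiguous before
 * casting m_data to a structure pointer; a sketch using struct ip purely
 * as an illustration:
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;
 *	ip = mtod(m, struct ip *);
 *
 * On failure the chain has already been freed, so the caller must not
 * reference it again.
 */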
841 * Like m_pullup(), except a new mbuf is always allocated, and we allow
842 * the amount of empty space before the data in the new mbuf to be specified
843 * (in the event that the caller expects to prepend later).
846 m_copyup(struct mbuf *n, int len, int dstoff)
851 if (len > (MHLEN - dstoff))
853 m = m_get(M_NOWAIT, n->m_type);
856 if (n->m_flags & M_PKTHDR)
859 space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
861 count = min(min(max(len, max_protohdr), space), n->m_len);
862 memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
872 } while (len > 0 && n);
885 * Partition an mbuf chain in two pieces, returning the tail --
886 * all but the first len0 bytes. In case of failure, it returns NULL and
887 * attempts to restore the chain to its original state.
889 * Note that the resulting mbufs might be read-only, because the new
890 * mbuf can end up sharing an mbuf cluster with the original mbuf if
891 * the "breaking point" happens to lie within a cluster mbuf. Use the
892 * M_WRITABLE() macro to check for this case.
895 m_split(struct mbuf *m0, int len0, int wait)
898 u_int len = len0, remain;
900 MBUF_CHECKSLEEP(wait);
901 for (m = m0; m && len > m->m_len; m = m->m_next)
905 remain = m->m_len - len;
906 if (m0->m_flags & M_PKTHDR && remain == 0) {
907 n = m_gethdr(wait, m0->m_type);
910 n->m_next = m->m_next;
912 n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
913 n->m_pkthdr.len = m0->m_pkthdr.len - len0;
914 m0->m_pkthdr.len = len0;
916 } else if (m0->m_flags & M_PKTHDR) {
917 n = m_gethdr(wait, m0->m_type);
920 n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
921 n->m_pkthdr.len = m0->m_pkthdr.len - len0;
922 m0->m_pkthdr.len = len0;
923 if (m->m_flags & M_EXT)
925 if (remain > MHLEN) {
926 /* m can't be the lead packet */
928 n->m_next = m_split(m, len, wait);
929 if (n->m_next == NULL) {
938 } else if (remain == 0) {
943 n = m_get(wait, m->m_type);
949 if (m->m_flags & M_EXT) {
950 n->m_data = m->m_data + len;
953 bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
957 n->m_next = m->m_next;
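/*
 * A sketch of splitting a chain at an arbitrary byte boundary with
 * m_split() ("mtu" is illustrative):
 *
 *	struct mbuf *tail;
 *
 *	tail = m_split(m, mtu, M_NOWAIT);
 *	if (tail == NULL)
 *		return (ENOBUFS);
 *
 * "m" then holds the first mtu bytes and "tail" the remainder; the two
 * pieces may share a cluster, so check M_WRITABLE() before modifying
 * either one.
 */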
962 * Routine to copy from device local memory into mbufs.
963 * Note that the `off' argument is the offset into the first mbuf of the
964 * target chain at which to begin placing the copied data.
967 m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
968 void (*copy)(char *from, caddr_t to, u_int len))
971 struct mbuf *top = NULL, **mp = &top;
974 if (off < 0 || off > MHLEN)
978 if (top == NULL) { /* First one, must be PKTHDR */
979 if (totlen + off >= MINCLSIZE) {
980 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
983 m = m_gethdr(M_NOWAIT, MT_DATA);
986 /* Place initial small packet/header at end of mbuf */
987 if (m && totlen + off + max_linkhdr <= MHLEN) {
988 m->m_data += max_linkhdr;
994 m->m_pkthdr.rcvif = ifp;
995 m->m_pkthdr.len = totlen;
997 if (totlen + off >= MINCLSIZE) {
998 m = m_getcl(M_NOWAIT, MT_DATA, 0);
1001 m = m_get(M_NOWAIT, MT_DATA);
1014 m->m_len = len = min(totlen, len);
1016 copy(buf, mtod(m, caddr_t), (u_int)len);
1018 bcopy(buf, mtod(m, caddr_t), (u_int)len);
1028 * Copy data from a buffer back into the indicated mbuf chain,
1029 * starting "off" bytes from the beginning, extending the mbuf
1030 * chain if necessary.
1033 m_copyback(struct mbuf *m0, int off, int len, c_caddr_t cp)
1036 struct mbuf *m = m0, *n;
1041 while (off > (mlen = m->m_len)) {
1044 if (m->m_next == NULL) {
1045 n = m_get(M_NOWAIT, m->m_type);
1048 bzero(mtod(n, caddr_t), MLEN);
1049 n->m_len = min(MLEN, len + off);
1055 if (m->m_next == NULL && (len > m->m_len - off)) {
1056 m->m_len += min(len - (m->m_len - off),
1057 M_TRAILINGSPACE(m));
1059 mlen = min(m->m_len - off, len);
1060 bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen);
1068 if (m->m_next == NULL) {
1069 n = m_get(M_NOWAIT, m->m_type);
1072 n->m_len = min(MLEN, len);
1077 out: if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
1078 m->m_pkthdr.len = totlen;
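/*
 * A sketch of the store-side counterpart of m_copydata(): writing a
 * small flat value into the chain at a given offset ("ckoff" and "sum"
 * are illustrative, e.g. patching a checksum field):
 *
 *	uint16_t sum;
 *
 *	m_copyback(m, ckoff, sizeof(sum), (c_caddr_t)&sum);
 *
 * Note that any mbufs needed to extend the chain are allocated with
 * M_NOWAIT, so the chain can still fall short of off + len on return.
 */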
1082 * Append the specified data to the indicated mbuf chain,
1083 * extending the mbuf chain if the new data does not fit in existing space.
1086 * Return 1 if able to complete the job; otherwise 0.
1089 m_append(struct mbuf *m0, int len, c_caddr_t cp)
1092 int remainder, space;
1094 for (m = m0; m->m_next != NULL; m = m->m_next)
1097 space = M_TRAILINGSPACE(m);
1100 * Copy into available space.
1102 if (space > remainder)
1104 bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
1106 cp += space, remainder -= space;
1108 while (remainder > 0) {
1110 * Allocate a new mbuf; could check space
1111 * and allocate a cluster instead.
1113 n = m_get(M_NOWAIT, m->m_type);
1116 n->m_len = min(MLEN, remainder);
1117 bcopy(cp, mtod(n, caddr_t), n->m_len);
1118 cp += n->m_len, remainder -= n->m_len;
1122 if (m0->m_flags & M_PKTHDR)
1123 m0->m_pkthdr.len += len - remainder;
1124 return (remainder == 0);
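/*
 * A sketch of appending trailing data with m_append() ("trailer" is an
 * illustrative structure held by the caller):
 *
 *	if (!m_append(m, sizeof(trailer), (c_caddr_t)&trailer)) {
 *		m_freem(m);
 *		return (ENOBUFS);
 *	}
 *
 * On success m_pkthdr.len has been grown by the amount appended.
 */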
1128 * Apply function f to the data in an mbuf chain starting "off" bytes from
1129 * the beginning, continuing for "len" bytes.
1132 m_apply(struct mbuf *m, int off, int len,
1133 int (*f)(void *, void *, u_int), void *arg)
1138 KASSERT(off >= 0, ("m_apply, negative off %d", off));
1139 KASSERT(len >= 0, ("m_apply, negative len %d", len));
1141 KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
1148 KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
1149 count = min(m->m_len - off, len);
1150 rval = (*f)(arg, mtod(m, caddr_t) + off, count);
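/*
 * A sketch of m_apply() use: run a callback such as a checksum or hash
 * update over a byte range without writing another chain walk ("sumfn"
 * and "ctx" are illustrative; the callback has the signature
 * int (*)(void *arg, void *data, u_int len)):
 *
 *	error = m_apply(m, 0, m->m_pkthdr.len, sumfn, &ctx);
 */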
1161 * Return a pointer to mbuf/offset of location in mbuf chain.
1164 m_getptr(struct mbuf *m, int loc, int *off)
1168 /* Normal end of search. */
1169 if (m->m_len > loc) {
1174 if (m->m_next == NULL) {
1176 /* Point at the end of valid data. */
1189 m_print(const struct mbuf *m, int maxlen)
1193 const struct mbuf *m2;
1196 printf("mbuf: %p\n", m);
1200 if (m->m_flags & M_PKTHDR)
1201 len = m->m_pkthdr.len;
1205 while (m2 != NULL && (len == -1 || len)) {
1207 if (maxlen != -1 && pdata > maxlen)
1209 printf("mbuf: %p len: %d, next: %p, %b%s", m2, m2->m_len,
1210 m2->m_next, m2->m_flags, "\20\20freelist\17skipfw"
1211 "\11proto5\10proto4\7proto3\6proto2\5proto1\4rdonly"
1212 "\3eor\2pkthdr\1ext", pdata ? "" : "\n");
1214 printf(", %*D\n", pdata, (u_char *)m2->m_data, "-");
1220 printf("%d bytes unaccounted for.\n", len);
1225 m_fixhdr(struct mbuf *m0)
1229 len = m_length(m0, NULL);
1230 m0->m_pkthdr.len = len;
1235 m_length(struct mbuf *m0, struct mbuf **last)
1241 for (m = m0; m != NULL; m = m->m_next) {
1243 if (m->m_next == NULL)
1252 * Defragment an mbuf chain, returning the shortest possible
1253 * chain of mbufs and clusters. If allocation fails and
1254 * this cannot be completed, NULL will be returned, but
1255 * the passed in chain will be unchanged. Upon success,
1256 * the original chain will be freed, and the new chain
1259 * If a non-packet header is passed in, the original
1260 * mbuf (chain?) will be returned unharmed.
1263 m_defrag(struct mbuf *m0, int how)
1265 struct mbuf *m_new = NULL, *m_final = NULL;
1266 int progress = 0, length;
1268 MBUF_CHECKSLEEP(how);
1269 if (!(m0->m_flags & M_PKTHDR))
1272 m_fixhdr(m0); /* Needed sanity check */
1274 #ifdef MBUF_STRESS_TEST
1275 if (m_defragrandomfailures) {
1276 int temp = arc4random() & 0xff;
1282 if (m0->m_pkthdr.len > MHLEN)
1283 m_final = m_getcl(how, MT_DATA, M_PKTHDR);
1285 m_final = m_gethdr(how, MT_DATA);
1287 if (m_final == NULL)
1290 if (m_dup_pkthdr(m_final, m0, how) == 0)
1295 while (progress < m0->m_pkthdr.len) {
1296 length = m0->m_pkthdr.len - progress;
1297 if (length > MCLBYTES)
1300 if (m_new == NULL) {
1302 m_new = m_getcl(how, MT_DATA, 0);
1304 m_new = m_get(how, MT_DATA);
1309 m_copydata(m0, progress, length, mtod(m_new, caddr_t));
1311 m_new->m_len = length;
1312 if (m_new != m_final)
1313 m_cat(m_final, m_new);
1316 #ifdef MBUF_STRESS_TEST
1317 if (m0->m_next == NULL)
1322 #ifdef MBUF_STRESS_TEST
1324 m_defragbytes += m0->m_pkthdr.len;
1328 #ifdef MBUF_STRESS_TEST
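/*
 * A sketch of the most common m_defrag() caller, a transmit path whose
 * DMA engine limits the number of segments ("error" coming back EFBIG
 * from a bus_dma load is illustrative):
 *
 *	if (error == EFBIG) {
 *		struct mbuf *d;
 *
 *		d = m_defrag(m, M_NOWAIT);
 *		if (d == NULL) {
 *			m_freem(m);
 *			return (ENOBUFS);
 *		}
 *		m = d;
 *	}
 *
 * As noted above, the original chain is left unchanged when m_defrag()
 * fails, so it is safe to free it and report the error.
 */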
1337 * Defragment an mbuf chain, returning at most maxfrags separate
1338 * mbufs+clusters. If this is not possible NULL is returned and
1339 * the original mbuf chain is left in its present (potentially
1340 * modified) state. We use two techniques: collapsing consecutive
1341 * mbufs and replacing consecutive mbufs by a cluster.
1343 * NB: this should really be named m_defrag but that name is taken
1346 m_collapse(struct mbuf *m0, int how, int maxfrags)
1348 struct mbuf *m, *n, *n2, **prev;
1352 * Calculate the current number of frags.
1355 for (m = m0; m != NULL; m = m->m_next)
1358 * First, try to collapse mbufs. Note that we always collapse
1359 * towards the front so we don't need to deal with moving the
1360 * pkthdr. This may be suboptimal if the first mbuf has much
1361 * less data than the following.
1369 if (M_WRITABLE(m) &&
1370 n->m_len < M_TRAILINGSPACE(m)) {
1371 bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
1373 m->m_len += n->m_len;
1374 m->m_next = n->m_next;
1376 if (--curfrags <= maxfrags)
1381 KASSERT(maxfrags > 1,
1382 ("maxfrags %u, but normal collapse failed", maxfrags));
1384 * Collapse consecutive mbufs to a cluster.
1386 prev = &m0->m_next; /* NB: not the first mbuf */
1387 while ((n = *prev) != NULL) {
1388 if ((n2 = n->m_next) != NULL &&
1389 n->m_len + n2->m_len < MCLBYTES) {
1390 m = m_getcl(how, MT_DATA, 0);
1393 bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
1394 bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
1396 m->m_len = n->m_len + n2->m_len;
1397 m->m_next = n2->m_next;
1401 if (--curfrags <= maxfrags) /* +1 cl -2 mbufs */
1404 * Still not there, try the normal collapse
1405 * again before we allocate another cluster.
1412 * No place where we can collapse to a cluster; punt.
1413 * This can occur if, for example, you request 2 frags
1414 * but the packet requires that both be clusters (we
1415 * never reallocate the first mbuf to avoid moving the packet header).
1422 #ifdef MBUF_STRESS_TEST
1425 * Fragment an mbuf chain. There's no reason you'd ever want to do
1426 * this in normal usage, but it's great for stress testing various mbuf consumers.
1429 * If fragmentation is not possible, the original chain will be returned.
1432 * Possible length values:
1433 * 0 no fragmentation will occur
1434 * > 0 each fragment will be of the specified length
1435 * -1 each fragment will be the same random value in length
1436 * -2 each fragment's length will be entirely random
1437 * (Random values range from 1 to 256)
1440 m_fragment(struct mbuf *m0, int how, int length)
1442 struct mbuf *m_new = NULL, *m_final = NULL;
1445 if (!(m0->m_flags & M_PKTHDR))
1448 if ((length == 0) || (length < -2))
1451 m_fixhdr(m0); /* Needed sanity check */
1453 m_final = m_getcl(how, MT_DATA, M_PKTHDR);
1455 if (m_final == NULL)
1458 if (m_dup_pkthdr(m_final, m0, how) == 0)
1464 length = 1 + (arc4random() & 255);
1466 while (progress < m0->m_pkthdr.len) {
1472 fraglen = 1 + (arc4random() & 255);
1473 if (fraglen > m0->m_pkthdr.len - progress)
1474 fraglen = m0->m_pkthdr.len - progress;
1476 if (fraglen > MCLBYTES)
1479 if (m_new == NULL) {
1480 m_new = m_getcl(how, MT_DATA, 0);
1485 m_copydata(m0, progress, fraglen, mtod(m_new, caddr_t));
1486 progress += fraglen;
1487 m_new->m_len = fraglen;
1488 if (m_new != m_final)
1489 m_cat(m_final, m_new);
1498 /* Return the original chain on failure */
1505 * Copy the contents of uio into a properly sized mbuf chain.
1508 m_uiotombuf(struct uio *uio, int how, int len, int align, int flags)
1510 struct mbuf *m, *mb;
1516 * len can be zero or an arbitrarily large value bound by
1517 * the total data supplied by the uio.
1520 total = min(uio->uio_resid, len);
1522 total = uio->uio_resid;
1525 * The smallest unit returned by m_getm2() is a single mbuf
1526 * with pkthdr. We can't align past it.
1532 * Give us the full allocation or nothing.
1533 * If len is zero return the smallest empty mbuf.
1535 m = m_getm2(NULL, max(total + align, 1), how, MT_DATA, flags);
1540 /* Fill all mbufs with uio data and update header information. */
1541 for (mb = m; mb != NULL; mb = mb->m_next) {
1542 length = min(M_TRAILINGSPACE(mb), total - progress);
1544 error = uiomove(mtod(mb, void *), length, uio);
1552 if (flags & M_PKTHDR)
1553 m->m_pkthdr.len += length;
1555 KASSERT(progress == total, ("%s: progress != total", __func__));
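/*
 * A sketch of a typical caller, e.g. a write path that needs the user's
 * data in an mbuf chain (the alignment argument of 0 asks for none):
 *
 *	struct mbuf *m;
 *
 *	m = m_uiotombuf(uio, M_NOWAIT, uio->uio_resid, 0, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 */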
1561 * Copy an mbuf chain into a uio limited by len if set.
1564 m_mbuftouio(struct uio *uio, struct mbuf *m, int len)
1566 int error, length, total;
1570 total = min(uio->uio_resid, len);
1572 total = uio->uio_resid;
1574 /* Fill the uio with data from the mbufs. */
1575 for (; m != NULL; m = m->m_next) {
1576 length = min(m->m_len, total - progress);
1578 error = uiomove(mtod(m, void *), length, uio);
1589 * Create a writable copy of the mbuf chain. While doing this
1590 * we compact the chain with a goal of producing a chain with
1591 * at most two mbufs. The second mbuf in this chain is likely
1592 * to be a cluster. The primary purpose of this work is to create
1593 * a writable packet for encryption, compression, etc. The
1594 * secondary goal is to linearize the data so the data can be
1595 * passed to crypto hardware in the most efficient manner possible.
1598 m_unshare(struct mbuf *m0, int how)
1600 struct mbuf *m, *mprev;
1601 struct mbuf *n, *mfirst, *mlast;
1605 for (m = m0; m != NULL; m = mprev->m_next) {
1607 * Regular mbufs are ignored unless there's a cluster
1608 * in front of it that we can use to coalesce. We do
1609 * the latter mainly so later clusters can be coalesced
1610 * also w/o having to handle them specially (i.e. convert
1611 * mbuf+cluster -> cluster). This optimization is heavily
1612 * influenced by the assumption that we're running over
1613 * Ethernet where MCLBYTES is large enough that the max
1614 * packet size will permit lots of coalescing into a
1615 * single cluster. This in turn permits efficient
1616 * crypto operations, especially when using hardware.
1618 if ((m->m_flags & M_EXT) == 0) {
1619 if (mprev && (mprev->m_flags & M_EXT) &&
1620 m->m_len <= M_TRAILINGSPACE(mprev)) {
1621 /* XXX: this ignores mbuf types */
1622 memcpy(mtod(mprev, caddr_t) + mprev->m_len,
1623 mtod(m, caddr_t), m->m_len);
1624 mprev->m_len += m->m_len;
1625 mprev->m_next = m->m_next; /* unlink from chain */
1626 m_free(m); /* reclaim mbuf */
1628 newipsecstat.ips_mbcoalesced++;
1636 * Writable mbufs are left alone (for now).
1638 if (M_WRITABLE(m)) {
1644 * Not writable, replace with a copy or coalesce with
1645 * the previous mbuf if possible (since we have to copy
1646 * it anyway, we try to reduce the number of mbufs and
1647 * clusters so that future work is easier).
1649 KASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags));
1650 /* NB: we only coalesce into a cluster or larger */
1651 if (mprev != NULL && (mprev->m_flags & M_EXT) &&
1652 m->m_len <= M_TRAILINGSPACE(mprev)) {
1653 /* XXX: this ignores mbuf types */
1654 memcpy(mtod(mprev, caddr_t) + mprev->m_len,
1655 mtod(m, caddr_t), m->m_len);
1656 mprev->m_len += m->m_len;
1657 mprev->m_next = m->m_next; /* unlink from chain */
1658 m_free(m); /* reclaim mbuf */
1660 newipsecstat.ips_clcoalesced++;
1666 * Allocate new space to hold the copy and copy the data.
1667 * We deal with jumbo mbufs (i.e. m_len > MCLBYTES) by
1668 * splitting them into clusters. We could just malloc a
1669 * buffer and make it external but too many device drivers
1670 * don't know how to break up the non-contiguous memory when doing DMA.
1673 n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS);
1678 if (m->m_flags & M_PKTHDR) {
1679 KASSERT(mprev == NULL, ("%s: m0 %p, m %p has M_PKTHDR",
1681 m_move_pkthdr(n, m);
1688 int cc = min(len, MCLBYTES);
1689 memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc);
1695 newipsecstat.ips_clcopied++;
1703 n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS);
1710 n->m_next = m->m_next;
1712 m0 = mfirst; /* new head of chain */
1714 mprev->m_next = mfirst; /* replace old mbuf */
1715 m_free(m); /* release old mbuf */
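/*
 * A sketch of the intended caller, e.g. an IPsec output path about to
 * modify the packet in place:
 *
 *	m = m_unshare(m, M_NOWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *
 * If an allocation fails the original chain is freed before NULL is
 * returned, so the caller must not reference it afterwards.
 */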
1721 #ifdef MBUF_PROFILING
1723 #define MP_BUCKETS 32 /* don't just change this as things may overflow. */
1724 struct mbufprofile {
1725 uintmax_t wasted[MP_BUCKETS];
1726 uintmax_t used[MP_BUCKETS];
1727 uintmax_t segments[MP_BUCKETS];
1730 #define MP_MAXDIGITS 21 /* enough digits for a uintmax_t in decimal, plus a NUL */
1731 #define MP_NUMLINES 6
1732 #define MP_NUMSPERLINE 16
1733 #define MP_EXTRABYTES 64 /* > strlen("used:\nwasted:\nsegments:\n") */
1734 /* work out max space needed and add a bit of spare space too */
1735 #define MP_MAXLINE ((MP_MAXDIGITS+1) * MP_NUMSPERLINE)
1736 #define MP_BUFSIZE ((MP_MAXLINE * MP_NUMLINES) + 1 + MP_EXTRABYTES)
1738 char mbprofbuf[MP_BUFSIZE];
1741 m_profile(struct mbuf *m)
1750 if (m->m_flags & M_EXT) {
1751 wasted += MHLEN - sizeof(m->m_ext) +
1752 m->m_ext.ext_size - m->m_len;
1754 if (m->m_flags & M_PKTHDR)
1755 wasted += MHLEN - m->m_len;
1757 wasted += MLEN - m->m_len;
1761 /* be paranoid.. it helps */
1762 if (segments > MP_BUCKETS - 1)
1763 segments = MP_BUCKETS - 1;
1766 if (wasted > 100000)
1768 /* store in the appropriate bucket */
1769 /* don't bother locking. if it's slightly off, so what? */
1770 mbprof.segments[segments]++;
1771 mbprof.used[fls(used)]++;
1772 mbprof.wasted[fls(wasted)]++;
1776 mbprof_textify(void)
1782 p = &mbprof.wasted[0];
1784 offset = snprintf(c, MP_MAXLINE + 10,
1786 "%ju %ju %ju %ju %ju %ju %ju %ju "
1787 "%ju %ju %ju %ju %ju %ju %ju %ju\n",
1788 p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
1789 p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
1791 p = &mbprof.wasted[16];
1793 offset = snprintf(c, MP_MAXLINE,
1794 "%ju %ju %ju %ju %ju %ju %ju %ju "
1795 "%ju %ju %ju %ju %ju %ju %ju %ju\n",
1796 p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
1797 p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
1799 p = &mbprof.used[0];
1801 offset = snprintf(c, MP_MAXLINE + 10,
1803 "%ju %ju %ju %ju %ju %ju %ju %ju "
1804 "%ju %ju %ju %ju %ju %ju %ju %ju\n",
1805 p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
1806 p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
1808 p = &mbprof.used[16];
1810 offset = snprintf(c, MP_MAXLINE,
1811 "%ju %ju %ju %ju %ju %ju %ju %ju "
1812 "%ju %ju %ju %ju %ju %ju %ju %ju\n",
1813 p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
1814 p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
1816 p = &mbprof.segments[0];
1818 offset = snprintf(c, MP_MAXLINE + 10,
1820 "%ju %ju %ju %ju %ju %ju %ju %ju "
1821 "%ju %ju %ju %ju %ju %ju %ju %ju\n",
1822 p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
1823 p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
1825 p = &mbprof.segments[16];
1827 offset = snprintf(c, MP_MAXLINE,
1828 "%ju %ju %ju %ju %ju %ju %ju %ju "
1829 "%ju %ju %ju %ju %ju %ju %ju %ju",
1830 p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
1831 p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
1836 mbprof_handler(SYSCTL_HANDLER_ARGS)
1841 error = SYSCTL_OUT(req, mbprofbuf, strlen(mbprofbuf) + 1);
1846 mbprof_clr_handler(SYSCTL_HANDLER_ARGS)
1851 error = sysctl_handle_int(oidp, &clear, 0, req);
1852 if (error || !req->newptr)
1856 bzero(&mbprof, sizeof(mbprof));
1863 SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofile, CTLTYPE_STRING|CTLFLAG_RD,
1864 NULL, 0, mbprof_handler, "A", "mbuf profiling statistics");
1866 SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofileclr, CTLTYPE_INT|CTLFLAG_RW,
1867 NULL, 0, mbprof_clr_handler, "I", "clear mbuf profiling statistics");