/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
 * $DragonFly: src/sys/kern/uipc_mbuf.c,v 1.3 2003/06/22 17:39:42 dillon Exp $
 */

#include "opt_param.h"
#include "opt_mbuf_stress_test.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>

#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>

static void mbinit __P((void *));
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)

u_long	mbtypes[MT_NTYPES];
union mcluster *mclfree;

#ifdef MBUF_STRESS_TEST
int	m_defragrandomfailures;
#endif

u_int m_mballoc_wid = 0;
u_int m_clalloc_wid = 0;

SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
    &max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
    &max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
    &max_datalen, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
    &mbuf_wait, 0, "");
SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RW, &mbstat, mbstat, "");
SYSCTL_OPAQUE(_kern_ipc, OID_AUTO, mbtypes, CTLFLAG_RD, mbtypes,
    sizeof(mbtypes), "LU", "");
SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
    &nmbclusters, 0, "Maximum number of mbuf clusters available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
    "Maximum number of mbufs available");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
    &m_defragpackets, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
    &m_defragbytes, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
    &m_defraguseless, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
    &m_defragfailure, 0, "");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
    &m_defragrandomfailures, 0, "");
#endif

static void	m_reclaim __P((void));

#define NMBCLUSTERS	(512 + maxusers * 16)
#define NMBUFS		(nmbclusters * 4)

/*
 * Perform sanity checks of tunables declared above.
 */
static void
tunable_mbinit(void *dummy)
{
        /*
         * This has to be done before VM init.
         */
        nmbclusters = NMBCLUSTERS;
        TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);

        nmbufs = NMBUFS;
        TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);

        if (nmbufs < nmbclusters * 2)
                nmbufs = nmbclusters * 2;
}
SYSINIT(tunable_mbinit, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_mbinit, NULL);
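
/*
 * Illustrative note (not part of the original file): these limits are
 * normally set at boot time through loader tunables, e.g. in
 * /boot/loader.conf:
 *
 *	kern.ipc.nmbclusters="32768"
 *	kern.ipc.nmbufs="65536"
 *
 * The check above silently raises nmbufs to at least 2 * nmbclusters.
 */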
144 /* "number of clusters of pages" */
156 mmbfree = NULL; mclfree = NULL;
157 mbstat.m_msize = MSIZE;
158 mbstat.m_mclbytes = MCLBYTES;
159 mbstat.m_minclsize = MINCLSIZE;
160 mbstat.m_mlen = MLEN;
161 mbstat.m_mhlen = MHLEN;
164 if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
166 #if MCLBYTES <= PAGE_SIZE
167 if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
170 /* It's OK to call contigmalloc in this context. */
171 if (m_clalloc(16, M_WAIT) == 0)

/*
 * Allocate at least nmb mbufs and place on mbuf free list.
 * Must be called at splimp.
 */
        /*
         * If we've hit the mbuf limit, stop allocating from mb_map
         * (or trying to) in order to avoid dipping into the section of
         * mb_map which we've "reserved" for clusters.
         */
        if ((nmb + mbstat.m_mbufs) > nmbufs)
                return (0);

        /*
         * Once we run out of map space, it will be impossible to get
         * any more (nothing is ever freed back to the map) -- however,
         * all is not lost, as m_reclaim might still be able to free a
         * substantial amount of space.
         *
         * XXX Furthermore, we can also work with "recycled" mbufs: when
         * calling with M_WAIT, the sleeping process will be woken up
         * when an mbuf is freed.  See m_mballoc_wait().
         */

        nbytes = round_page(nmb * MSIZE);
        p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT);
        if (p == 0 && how == M_WAIT) {
                mbstat.m_wait++;
                p = (caddr_t)kmem_malloc(mb_map, nbytes, M_WAITOK);
        }

        /*
         * Either the map is now full, or `how' is M_NOWAIT and there
         * are no pages left.
         */
        if (p == NULL)
                return (0);

        nmb = nbytes / MSIZE;
        for (i = 0; i < nmb; i++) {
                ((struct mbuf *)p)->m_next = mmbfree;
                mmbfree = (struct mbuf *)p;
                p += MSIZE;
        }
        mbstat.m_mbufs += nmb;
        mbtypes[MT_FREE] += nmb;
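
/*
 * Usage sketch (illustrative, not part of the original file): callers do
 * not invoke m_mballoc() directly; they use the MGET/MGETHDR macros,
 * which fall back on m_retry()/m_retryhdr() and, with M_WAIT, may block
 * in m_mballoc_wait().  The function name below is hypothetical.
 */
#if 0
static struct mbuf *
example_get_mbuf(void)
{
        struct mbuf *m;

        MGET(m, M_DONTWAIT, MT_DATA);   /* never sleeps, may return NULL */
        if (m == NULL)
                MGET(m, M_WAIT, MT_DATA); /* may sleep, may still fail */
        return (m);                     /* caller must check for NULL */
}
#endif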

/*
 * Once mb_map has been exhausted, and if the call to the allocation macros
 * (or, in some cases, functions) is with M_WAIT, it is necessary to rely
 * solely on reclaimed mbufs.  Here we wait for an mbuf to be freed for a
 * designated (mbuf_wait) time.
 */
struct mbuf *
m_mballoc_wait(int caller, int type)
{
        struct mbuf *p;

        if ((tsleep(&m_mballoc_wid, PVM, "mballc", mbuf_wait)) == EWOULDBLOCK)
                m_mballoc_wid--;

        /*
         * Now that we (think) we've got something, we redo an MGET, but
         * avoid getting into another instance of m_mballoc_wait().
         * XXX: We retry the fetch _even_ if the sleep timed out.  This is
         * deliberate, to cover the [unlikely] case that an mbuf was freed
         * but the sleeper was not awakened in time.
         */
                MGET(p, M_DONTWAIT, type);

                MGETHDR(p, M_DONTWAIT, type);

                panic("m_mballoc_wait: invalid caller (%d)", caller);

        if (p != NULL) {                /* We waited and got something... */
                /* Wake up another if we have more free. */

#if MCLBYTES > PAGE_SIZE
/*
 * When MCLBYTES > PAGE_SIZE, clusters must come from contigmalloc(),
 * which may sleep, so a kernel thread performs those allocations.
 */
static int i_want_my_mcl;

                tsleep(&i_want_my_mcl, PVM, "mclalloc", 0);
                for (; i_want_my_mcl; i_want_my_mcl--) {
                        if (m_clalloc(1, M_WAIT) == 0)
                                printf("m_clalloc failed even in process context!\n");
                }

static struct thread *mclallocthread;
static struct kproc_desc mclalloc_kp = {
        "mclalloc",
        kproc_mclalloc,
        &mclallocthread
};
SYSINIT(mclallocthread, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start,
    &mclalloc_kp);
#endif

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Must be called at splimp.
 */
        /*
         * If we've hit the mcluster number limit, stop allocating from
         * mb_map (or trying to) in order to avoid dipping into the section
         * of mb_map which we've "reserved" for mbufs.
         */
        if ((ncl + mbstat.m_clusters) > nmbclusters)
                return (0);

        /*
         * Once we run out of map space, it will be impossible
         * to get any more (nothing is ever freed back to the
         * map).  From this point on, we rely solely on freed
         * mclusters.
         */
#if MCLBYTES > PAGE_SIZE
                i_want_my_mcl += ncl;
                wakeup(&i_want_my_mcl);

                p = contigmalloc1(MCLBYTES * ncl, M_DEVBUF, M_WAITOK, 0ul,
                    ~0ul, PAGE_SIZE, 0, mb_map);
#else
        p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
            how != M_WAIT ? M_NOWAIT : M_WAITOK);
        ncl = ncl * PAGE_SIZE / MCLBYTES;
#endif

        /*
         * Either the map is now full, or `how' is M_NOWAIT and there
         * are no pages left.
         */
        if (p == NULL) {
                static int last_report; /* when we did that (in ticks) */

                mbstat.m_drops++;
                if (ticks < last_report || (ticks - last_report) >= hz) {
                        last_report = ticks;
                        printf("All mbuf clusters exhausted, please see tuning(7).\n");
                }
                return (0);
        }

        for (i = 0; i < ncl; i++) {
                ((union mcluster *)p)->mcl_next = mclfree;
                mclfree = (union mcluster *)p;
                p += MCLBYTES;
                mbstat.m_clfree++;
        }
        mbstat.m_clusters += ncl;

/*
 * Once the mb_map submap has been exhausted and the allocation is called with
 * M_WAIT, we rely on the mclfree union pointers.  If nothing is free, we will
 * sleep for a designated amount of time (mbuf_wait) or until we're woken up
 * due to sudden mcluster availability.
 */
        /* If in interrupt context, and INVARIANTS, maintain sanity and die. */
        KASSERT(intr_nesting_level == 0, ("CLALLOC: CANNOT WAIT IN INTERRUPT"));

        /* Sleep until something's available or until we expire. */
        if ((tsleep(&m_clalloc_wid, PVM, "mclalc", mbuf_wait)) == EWOULDBLOCK)
                m_clalloc_wid--;

        /*
         * Now that we (think) we've got something, we redo an MCLALLOC,
         * but avoid getting into another instance of m_clalloc_wait().
         */
        MCLALLOC(p, M_DONTWAIT);

        if (p != NULL) {                /* We waited and got something... */
                /* Wake up another if we have more free. */

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
        register struct mbuf *m;

        /*
         * Must only do the reclaim if not in an interrupt context.
         */
        KASSERT(intr_nesting_level == 0,
            ("MBALLOC: CANNOT WAIT IN INTERRUPT"));

        /*
         * Both m_mballoc_wait and m_retry must be nulled because
         * when the MGET macro is run from here, we definitely do _not_
         * want to enter an instance of m_mballoc_wait() or m_retry() (again!)
         */
#define m_mballoc_wait(caller,type)	(struct mbuf *)0
#define m_retry(i, t)	(struct mbuf *)0
        MGET(m, i, t);
#undef m_retry
#undef m_mballoc_wait

        if (m != NULL)
                mbstat.m_wait++;
        else {
                static int last_report; /* when we did that (in ticks) */

                mbstat.m_drops++;
                if (ticks < last_report || (ticks - last_report) >= hz) {
                        last_report = ticks;
                        printf("All mbufs exhausted, please see tuning(7).\n");
                }
        }

/*
 * As above; retry an MGETHDR.
 */
        register struct mbuf *m;

        /*
         * Must only do the reclaim if not in an interrupt context.
         */
        KASSERT(intr_nesting_level == 0,
            ("MBALLOC: CANNOT WAIT IN INTERRUPT"));

#define m_mballoc_wait(caller,type)	(struct mbuf *)0
#define m_retryhdr(i, t)	(struct mbuf *)0
        MGETHDR(m, i, t);
#undef m_retryhdr
#undef m_mballoc_wait

        if (m != NULL)
                mbstat.m_wait++;
        else {
                static int last_report; /* when we did that (in ticks) */

                mbstat.m_drops++;
                if (ticks < last_report || (ticks - last_report) >= hz) {
                        last_report = ticks;
                        printf("All mbufs exhausted, please see tuning(7).\n");
                }
        }

        register struct domain *dp;
        register struct protosw *pr;

        for (dp = domains; dp; dp = dp->dom_next)
                for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
                        if (pr->pr_drain)
                                (*pr->pr_drain)();

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
        register struct mbuf *m;

        register struct mbuf *m;

        MGETHDR(m, how, type);

        register struct mbuf *m;

        bzero(mtod(m, caddr_t), MLEN);

/*
 * m_getcl() returns an mbuf with an attached cluster.
 * Because many network drivers use this kind of buffer heavily, it is
 * convenient to keep a small pool of free buffers of this kind.
 * Even a small size such as 10 gives about 10% improvement in the
 * forwarding rate in a bridge or router.
 * The size of this free list is controlled by the sysctl variable
 * mcl_pool_max.  The list is populated on m_freem(), and used in
 * m_getcl() if elements are available.
 */
static struct mbuf *mcl_pool;
static int mcl_pool_now;
static int mcl_pool_max = 0;

SYSCTL_INT(_kern_ipc, OID_AUTO, mcl_pool_max, CTLFLAG_RW, &mcl_pool_max, 0,
    "Maximum number of mbuf+cluster pairs in the free list");
SYSCTL_INT(_kern_ipc, OID_AUTO, mcl_pool_now, CTLFLAG_RD, &mcl_pool_now, 0,
    "Current number of mbuf+cluster pairs in the free list");

struct mbuf *
m_getcl(int how, short type, int flags)
{
        struct mbuf *mp;

        if (flags & M_PKTHDR) {
                if (type == MT_DATA && mcl_pool) {
                        mp = mcl_pool;
                        mcl_pool = mp->m_nextpkt;
                        mcl_pool_now--;
                        mp->m_nextpkt = NULL;
                        mp->m_data = mp->m_ext.ext_buf;
                        mp->m_flags = M_PKTHDR|M_EXT;
                        mp->m_pkthdr.rcvif = NULL;
                        mp->m_pkthdr.csum_flags = 0;
                } else
                        MGETHDR(mp, how, type);

        if ((mp->m_flags & M_EXT) == 0) {
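
/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * receive path can allocate a packet header mbuf with a cluster in one
 * call; "example_rx_alloc" and "ifp" are hypothetical.
 */
#if 0
static struct mbuf *
example_rx_alloc(struct ifnet *ifp)
{
        struct mbuf *m;

        m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
        if (m == NULL)
                return (NULL);
        m->m_pkthdr.rcvif = ifp;
        m->m_len = m->m_pkthdr.len = MCLBYTES;
        return (m);
}
#endif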

/*
 * m_getm(m, len, how, type)
 *
 * This will allocate len-worth of mbufs and/or mbuf clusters (whatever fits
 * best) and return a pointer to the top of the allocated chain.  If m is
 * non-null, then we assume that it is a single mbuf or an mbuf chain to
 * which we want len bytes worth of mbufs and/or clusters attached, and so
 * if we succeed in allocating it, we will just return a pointer to m.
 *
 * If we happen to fail at any point during the allocation, we will free
 * up everything we have already allocated and return NULL.
 */
struct mbuf *
m_getm(struct mbuf *m, int len, int how, int type)
{
        struct mbuf *top, *tail, *mp, *mtail = NULL;

        KASSERT(len >= 0, ("len is < 0 in m_getm"));

        else if (len > MINCLSIZE) {
                MCLGET(mp, how);
                if ((mp->m_flags & M_EXT) == 0) {
                        m_free(mp);
                        return (NULL);
                }
        }
        len -= M_TRAILINGSPACE(mp);

        if (m != NULL)
                for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next)
                        ;

        if (len > MINCLSIZE) {
                MCLGET(mp, how);
                if ((mp->m_flags & M_EXT) == 0)
                        goto failed;
        }
        len -= M_TRAILINGSPACE(mp);
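
/*
 * Usage sketch (illustrative, not part of the original file): grow an
 * existing chain "m" by at least "len" bytes of storage.  On failure
 * nothing needs to be freed here, since m_getm() already released
 * whatever it had allocated and left "m" itself untouched.
 */
#if 0
        if (m_getm(m, len, M_WAIT, MT_DATA) == NULL)
                return (ENOBUFS);
#endif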

/*
 * MFREE(struct mbuf *m, struct mbuf *n)
 * Free a single mbuf and associated external storage.
 * Place the successor, if any, in n.
 *
 * We do need to check even non-first mbufs for m_aux, since some existing
 * code does not call M_PREPEND properly.
 * (example: call to bpf_mtap from drivers)
 */
#define MFREE(m, n) MBUFLOCK(					\
	struct mbuf *_mm = (m);					\
								\
	KASSERT(_mm->m_type != MT_FREE, ("freeing free mbuf"));\
	mbtypes[_mm->m_type]--;					\
	if ((_mm->m_flags & M_PKTHDR) != 0)			\
		m_tag_delete_chain(_mm, NULL);			\
	if (_mm->m_flags & M_EXT)				\
		MEXTFREE1(m);					\
	(n) = _mm->m_next;					\
	_mm->m_type = MT_FREE;					\
	mbtypes[MT_FREE]++;					\
	_mm->m_next = mmbfree;					\

        register struct mbuf *n;

        /*
         * Try to keep a small pool of mbuf+cluster pairs for quick use in
         * device drivers.  A good candidate is an M_PKTHDR buffer with
         * only one cluster attached.  Other mbufs, or those exceeding
         * the pool size, are just m_free'd in the usual way.
         * The following code makes sure that m_next, m_type,
         * m_pkthdr.aux and m_ext.* are properly initialized.
         * Other fields in the mbuf are initialized in m_getcl()
         * from the saved copy.
         */
        if (mcl_pool_now < mcl_pool_max && m && m->m_next == NULL &&
            (m->m_flags & (M_PKTHDR|M_EXT)) == (M_PKTHDR|M_EXT) &&
            m->m_type == MT_DATA && M_EXT_WRITABLE(m)) {
                m_tag_delete_chain(m, NULL);
                m->m_nextpkt = mcl_pool;
                mcl_pool = m;
                mcl_pool_now++;
                return;
        }

/*
 * Mbuf utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, how)
        register struct mbuf *m;
        int len, how;
{
        struct mbuf *mn;

        MGET(mn, how, m->m_type);
        if (mn == (struct mbuf *)NULL) {
                m_freem(m);
                return ((struct mbuf *)NULL);
        }
        if (m->m_flags & M_PKTHDR)
                M_MOVE_PKTHDR(mn, m);
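
/*
 * Usage sketch (illustrative, not part of the original file): callers
 * normally use the M_PREPEND() macro, which only drops into m_prepend()
 * when the first mbuf has no leading space; "struct example_hdr" is a
 * hypothetical header type.
 */
#if 0
        M_PREPEND(m, sizeof(struct example_hdr), M_DONTWAIT);
        if (m == NULL)
                return (ENOBUFS);       /* chain was freed on failure */
#endif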

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
#define MCFail	(mbstat.m_mcfail)

struct mbuf *
m_copym(m, off0, len, wait)
        register struct mbuf *m;
        int off0, wait;
        register int len;
{
        register struct mbuf *n, **np;
        register int off = off0;

        KASSERT(off >= 0, ("m_copym, negative off %d", off));
        KASSERT(len >= 0, ("m_copym, negative len %d", len));
        if (off == 0 && m->m_flags & M_PKTHDR)
                copyhdr = 1;

                KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));

                        KASSERT(len == M_COPYALL,
                            ("m_copym, length > size of mbuf chain"));

                MGET(n, wait, m->m_type);

                        if (!m_dup_pkthdr(n, m, wait))
                                goto nospace;
                        if (len == M_COPYALL)
                                n->m_pkthdr.len -= off0;
                        else
                                n->m_pkthdr.len = len;

                n->m_len = min(len, m->m_len - off);
                if (m->m_flags & M_EXT) {
                        n->m_data = m->m_data + off;
                        if (m->m_ext.ext_ref == NULL) {
                                atomic_add_char(
                                    &mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);

                        (*m->m_ext.ext_ref)(m->m_ext.ext_buf,
                            m->m_ext.ext_size);

                } else
                        bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
                            (unsigned)n->m_len);
                if (len != M_COPYALL)
                        len -= n->m_len;
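
/*
 * Usage sketch (illustrative, not part of the original file): take a
 * read-only reference copy of an entire packet, e.g. to keep for
 * retransmission while the original is handed down the stack.
 */
#if 0
        struct mbuf *n;

        n = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
        if (n == NULL)
                return (ENOBUFS);       /* clusters are shared, not copied */
#endif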

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies also have the room available.
 */
        struct mbuf *top, *n, *o;

        MGET(n, how, m->m_type);

        if (!m_dup_pkthdr(n, m, how))
                goto nospace;

        if (m->m_flags & M_EXT) {
                n->m_data = m->m_data;
                if (m->m_ext.ext_ref == NULL)
                        atomic_add_char(&mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);

                        (*m->m_ext.ext_ref)(m->m_ext.ext_buf,
                            m->m_ext.ext_size);

        } else {
                n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
                bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
        }

                MGET(o, how, m->m_type);

                if (m->m_flags & M_EXT) {
                        n->m_data = m->m_data;
                        if (m->m_ext.ext_ref == NULL) {
                                atomic_add_char(
                                    &mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);

                        (*m->m_ext.ext_ref)(m->m_ext.ext_buf,
                            m->m_ext.ext_size);

                bcopy(mtod(m, char *), mtod(n, char *), n->m_len);

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(m, off, len, cp)
        register struct mbuf *m;
        register int off;
        register int len;
        caddr_t cp;
{
        register unsigned count;

        KASSERT(off >= 0, ("m_copydata, negative off %d", off));
        KASSERT(len >= 0, ("m_copydata, negative len %d", len));

                KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));

                KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
                count = min(m->m_len - off, len);
                bcopy(mtod(m, caddr_t) + off, cp, count);
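
/*
 * Usage sketch (illustrative, not part of the original file): gather a
 * possibly-scattered header into contiguous local storage instead of
 * pulling up the chain; "struct example_hdr" is hypothetical.
 */
#if 0
        struct example_hdr eh;

        if (m->m_pkthdr.len < sizeof(eh))
                return (EINVAL);
        m_copydata(m, 0, sizeof(eh), (caddr_t)&eh);
#endif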

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
        struct mbuf **p, *top = NULL;
        int remain, moff, nsize;

        KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __FUNCTION__));

        /* While there's more data, get a new mbuf, tack it on, and fill it */
        remain = m->m_pkthdr.len;
        while (remain > 0 || top == NULL) {     /* allow m->m_pkthdr.len == 0 */
                struct mbuf *n;

                /* Get the next new mbuf */
                MGET(n, how, m->m_type);

                if (top == NULL) {              /* first one, must be PKTHDR */
                        if (!m_dup_pkthdr(n, m, how))
                                goto nomem;
                } else                          /* not the first one */

                if (remain >= MINCLSIZE) {
                        MCLGET(n, how);
                        if ((n->m_flags & M_EXT) == 0) {

                /* Link it into the new chain */

                /* Copy data from original mbuf(s) into new mbuf */
                while (n->m_len < nsize && m != NULL) {
                        int chunk = min(nsize - n->m_len, m->m_len - moff);

                        bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);

                        if (moff == m->m_len) {

        /* Check correct total mbuf length */
        KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
            ("%s: bogus m_pkthdr.len", __FUNCTION__));

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
        register struct mbuf *m, *n;

        while (n) {
                if (m->m_flags & M_EXT ||
                    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
                        /* just join the two chains */
                        m->m_next = n;
                        return;
                }
                /* splat the data from one into the other */
                bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
                    (u_int)n->m_len);
                m->m_len += n->m_len;
                n = m_free(n);
        }

        register int len = req_len;
        register struct mbuf *m;

        if ((m = mp) == NULL)
                return;

        while (m != NULL && len > 0) {
                if (m->m_len <= len) {

        if (mp->m_flags & M_PKTHDR)
                m->m_pkthdr.len -= (req_len - len);

        /*
         * Trim from tail.  Scan the mbuf chain,
         * calculating its length and finding the last mbuf.
         * If the adjustment only affects this mbuf, then just
         * adjust and return.  Otherwise, rescan and truncate
         * after the remaining size.
         */
                if (m->m_next == (struct mbuf *)0)
                        break;

        if (m->m_len >= len) {
                m->m_len -= len;
                if (mp->m_flags & M_PKTHDR)
                        mp->m_pkthdr.len -= len;
                return;
        }

        /*
         * Correct length for chain is "count".
         * Find the mbuf with last data, adjust its length,
         * and toss data from remaining mbufs on chain.
         */
        if (m->m_flags & M_PKTHDR)
                m->m_pkthdr.len = count;
        for (; m; m = m->m_next) {
                if (m->m_len >= count) {
                        m->m_len = count;
                        break;
                }
                count -= m->m_len;
        }
        while (m->m_next)
                (m = m->m_next)->m_len = 0;
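
/*
 * Usage sketch (illustrative, not part of the original file): strip the
 * link header from the front of a received frame and the trailing CRC
 * from its tail (constants assumed from <net/ethernet.h>).
 */
#if 0
        m_adj(m, ETHER_HDR_LEN);        /* positive len: trim from head */
        m_adj(m, -ETHER_CRC_LEN);       /* negative len: trim from tail */
#endif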

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
#define MPFail	(mbstat.m_mpfail)

struct mbuf *
m_pullup(n, len)
        register struct mbuf *n;
        int len;
{
        register struct mbuf *m;
        int count;
        int space;

        /*
         * If first mbuf has no cluster, and has room for len bytes
         * without shifting current data, pullup into it,
         * otherwise allocate a new mbuf to prepend to the chain.
         */
        if ((n->m_flags & M_EXT) == 0 &&
            n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
                if (n->m_len >= len)
                        return (n);

                MGET(m, M_DONTWAIT, n->m_type);

                if (n->m_flags & M_PKTHDR)
                        M_MOVE_PKTHDR(m, n);

        space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
        do {
                count = min(min(max(len, max_protohdr), space), n->m_len);
                bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
                    (unsigned)count);

        } while (len > 0 && n);
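
/*
 * Usage sketch (illustrative, not part of the original file): the
 * classical protocol-input pattern, making the IP header contiguous
 * before casting (assumes <netinet/ip.h>).
 */
#if 0
        struct ip *ip;

        if (m->m_len < sizeof(struct ip) &&
            (m = m_pullup(m, sizeof(struct ip))) == NULL)
                return;                 /* m_pullup() freed the chain */
        ip = mtod(m, struct ip *);
#endif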

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf.  Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(m0, len0, wait)
        register struct mbuf *m0;
        int len0, wait;
{
        register struct mbuf *m, *n;
        unsigned len = len0, remain;

        for (m = m0; m && len > m->m_len; m = m->m_next)
                len -= m->m_len;

        remain = m->m_len - len;
        if (m0->m_flags & M_PKTHDR) {
                MGETHDR(n, wait, m0->m_type);

                n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
                n->m_pkthdr.len = m0->m_pkthdr.len - len0;
                m0->m_pkthdr.len = len0;
                if (m->m_flags & M_EXT)
                        goto extpacket;
                if (remain > MHLEN) {
                        /* m can't be the lead packet */
                        MH_ALIGN(n, 0);
                        n->m_next = m_split(m, len, wait);
                        if (n->m_next == 0) {

                MH_ALIGN(n, remain);
        } else if (remain == 0) {

                MGET(n, wait, m->m_type);

        if (m->m_flags & M_EXT) {
                n->m_flags |= M_EXT;
                n->m_ext = m->m_ext;
                if (m->m_ext.ext_ref == NULL)
                        atomic_add_char(&mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);

                        (*m->m_ext.ext_ref)(m->m_ext.ext_buf,
                            m->m_ext.ext_size);

                n->m_data = m->m_data + len;

                bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);

        n->m_next = m->m_next;
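
/*
 * Usage sketch (illustrative, not part of the original file): split off
 * everything past a fixed-size record; "reclen" is hypothetical.
 */
#if 0
        struct mbuf *tail;

        tail = m_split(m, reclen, M_DONTWAIT);
        if (tail == NULL)
                return (ENOBUFS);       /* "m" is left unchanged */
        /* "m" now holds the first reclen bytes, "tail" the rest. */
#endif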

/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
        char *buf;
        int totlen, off0;
        struct ifnet *ifp;
        void (*copy) __P((char *from, caddr_t to, u_int len));
{
        register struct mbuf *m;
        struct mbuf *top = 0, **mp = &top;
        register int off = off0, len;

                cp += off + 2 * sizeof(u_short);
                totlen -= 2 * sizeof(u_short);

        MGETHDR(m, M_DONTWAIT, MT_DATA);

        m->m_pkthdr.rcvif = ifp;
        m->m_pkthdr.len = totlen;

        while (totlen > 0) {

                        MGET(m, M_DONTWAIT, MT_DATA);

                len = min(totlen, epkt - cp);
                if (len >= MINCLSIZE) {
                        MCLGET(m, M_DONTWAIT);
                        if (m->m_flags & M_EXT)
                                m->m_len = len = min(len, MCLBYTES);

                        /*
                         * Place initial small packet/header at end of mbuf.
                         */
                        if (len < m->m_len) {
                                if (top == 0 && len + max_linkhdr <= m->m_len)
                                        m->m_data += max_linkhdr;

                if (copy)
                        copy(cp, mtod(m, caddr_t), (unsigned)len);
                else
                        bcopy(cp, mtod(m, caddr_t), (unsigned)len);
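
/*
 * Usage sketch (illustrative, not part of the original file): copy a
 * received frame out of board memory in a driver interrupt handler;
 * "sc_rxbuf", "pktlen" and "ifp" are hypothetical.  A NULL "copy"
 * function makes m_devget() fall back on bcopy().
 */
#if 0
        struct mbuf *m;

        m = m_devget(sc_rxbuf, pktlen, 0, ifp, NULL);
        if (m == NULL)
                return;                 /* out of mbufs; drop the frame */
#endif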

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(m0, off, len, cp)
        struct mbuf *m0;
        register int off;
        register int len;
        caddr_t cp;
{
        register struct mbuf *m = m0, *n;

        while (off > (mlen = m->m_len)) {
                off -= mlen;
                totlen += mlen;
                if (m->m_next == 0) {
                        n = m_getclr(M_DONTWAIT, m->m_type);
                        if (n == 0)
                                goto out;
                        n->m_len = min(MLEN, len + off);

        while (len > 0) {
                mlen = min(m->m_len - off, len);
                bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);

                if (m->m_next == 0) {
                        n = m_get(M_DONTWAIT, m->m_type);
                        if (n == 0)
                                break;
                        n->m_len = min(MLEN, len);

out:    if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
                m->m_pkthdr.len = totlen;
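
/*
 * Usage sketch (illustrative, not part of the original file): overwrite
 * a two-byte field at offset "ckoff" (hypothetical) in an existing
 * chain; the chain is extended with zeroed mbufs if the offset lies
 * beyond its end.
 */
#if 0
        u_short sum = 0;

        m_copyback(m, ckoff, sizeof(sum), (caddr_t)&sum);
#endif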

void
m_print(const struct mbuf *m)
{
        int len;
        const struct mbuf *m2;

        len = m->m_pkthdr.len;

                printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
1441 * "Move" mbuf pkthdr from "from" to "to".
1442 * "from" must have M_PKTHDR set, and "to" must be empty.
1445 m_move_pkthdr(struct mbuf *to, struct mbuf *from)
1447 KASSERT((to->m_flags & M_EXT) == 0, ("m_move_pkthdr: to has cluster"));
1449 to->m_flags = from->m_flags & M_COPYFLAGS;
1450 to->m_data = to->m_pktdat;
1451 to->m_pkthdr = from->m_pkthdr; /* especially tags */
1452 SLIST_INIT(&from->m_pkthdr.tags); /* purge tags from src */
1453 from->m_flags &= ~M_PKTHDR;

/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
{
        to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
        if ((to->m_flags & M_EXT) == 0)
                to->m_data = to->m_pktdat;
        to->m_pkthdr = from->m_pkthdr;
        SLIST_INIT(&to->m_pkthdr.tags);
        return (m_tag_copy_chain(to, from, how));
}

/*
 * Defragment an mbuf chain, returning the shortest possible
 * chain of mbufs and clusters.  If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed-in chain will be unchanged.  Upon success,
 * the original chain will be freed, and the new chain
 * will be returned.
 *
 * If a non-packet header is passed in, the original
 * mbuf (chain?) will be returned unharmed.
 */
struct mbuf *
m_defrag(struct mbuf *m0, int how)
{
        struct mbuf *m_new = NULL, *m_final = NULL;
        int progress = 0, length;

        if (!(m0->m_flags & M_PKTHDR))
                return (m0);

#ifdef MBUF_STRESS_TEST
        if (m_defragrandomfailures) {
                int temp = arc4random() & 0xff;

        if (m0->m_pkthdr.len > MHLEN)
                m_final = m_getcl(how, MT_DATA, M_PKTHDR);
        else
                m_final = m_gethdr(how, MT_DATA);

        if (m_final == NULL)
                goto nospace;

        if (m_dup_pkthdr(m_final, m0, how) == 0)
                goto nospace;

        while (progress < m0->m_pkthdr.len) {
                length = m0->m_pkthdr.len - progress;
                if (length > MCLBYTES)
                        length = MCLBYTES;

                if (m_new == NULL) {
                        if (length > MLEN)
                                m_new = m_getcl(how, MT_DATA, 0);
                        else
                                m_new = m_get(how, MT_DATA);
                        if (m_new == NULL)
                                goto nospace;
                }

                m_copydata(m0, progress, length, mtod(m_new, caddr_t));
                progress += length;
                m_new->m_len = length;
                if (m_new != m_final)
                        m_cat(m_final, m_new);

                if (m0->m_next == NULL)
                        break;
        }

        m_defragbytes += m0->m_pkthdr.len;
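
/*
 * Usage sketch (illustrative, not part of the original file): a transmit
 * path that produced too many DMA segments can compact the chain and
 * retry.
 */
#if 0
        struct mbuf *d;

        d = m_defrag(m, M_DONTWAIT);
        if (d == NULL)
                return (ENOBUFS);       /* "m" is unchanged and still valid */
        m = d;                          /* original chain was freed */
#endif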