/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 *
 * License terms: all terms for the DragonFly license above plus the following:
 *
 * 4. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *
 *	This product includes software developed by Jeffrey M. Hsu
 *	for the DragonFly Project.
 *
 *    This requirement may be waived with permission from Jeffrey Hsu.
 *    This requirement will sunset and may be removed on July 8 2005,
 *    after which the standard DragonFly license (as shown above) will
 *    apply.
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
 * $DragonFly: src/sys/kern/uipc_mbuf.c,v 1.21 2004/07/08 22:07:34 hsu Exp $
 */
#include "opt_param.h"
#include "opt_mbuf_stress_test.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/uio.h>
#include <sys/thread.h>
#include <sys/globaldata.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#ifdef INVARIANTS
#include <machine/cpu.h>
#endif
static void mbinit (void *);
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)
struct mbstat mbstat;
u_long	mbtypes[MT_NTYPES];
struct mbuf *mmbfree;
union mcluster *mclfree;
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;
int	m_defragpackets;
int	m_defragbytes;
int	m_defraguseless;
int	m_defragfailure;
#ifdef MBUF_STRESS_TEST
int	m_defragrandomfailures;
#endif

int	nmbclusters;
int	nmbufs;
int	mbuf_wait = 32;		/* mbuf sleep time in ticks */
u_int	m_mballoc_wid = 0;
u_int	m_clalloc_wid = 0;
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
	&max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
	&max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
	&max_datalen, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
	&mbuf_wait, 0, "");
SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RW, &mbstat, mbstat, "");
SYSCTL_OPAQUE(_kern_ipc, OID_AUTO, mbtypes, CTLFLAG_RD, mbtypes,
	sizeof(mbtypes), "LU", "");
SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
	&nmbclusters, 0, "Maximum number of mbuf clusters available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
	"Maximum number of mbufs available");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
	&m_defragpackets, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
	&m_defragbytes, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
	&m_defraguseless, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
	&m_defragfailure, 0, "");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
	&m_defragrandomfailures, 0, "");
#endif
static void m_reclaim (void);

#ifndef NMBCLUSTERS
#define NMBCLUSTERS	(512 + maxusers * 16)
#endif
#ifndef NMBUFS
#define NMBUFS		(nmbclusters * 4)
#endif
/*
 * Perform sanity checks of tunables declared above.
 */
static void
tunable_mbinit(void *dummy)
{
	/*
	 * This has to be done before VM init.
	 */
	nmbclusters = NMBCLUSTERS;
	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	nmbufs = NMBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
	/* Sanity checks */
	if (nmbufs < nmbclusters * 2)
		nmbufs = nmbclusters * 2;
}
SYSINIT(tunable_mbinit, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_mbinit, NULL);
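
/*
 * Both limits are normally set from the boot loader.  A sketch of the
 * corresponding /boot/loader.conf entries (the values are illustrative
 * only, not recommendations):
 *
 *	kern.ipc.nmbclusters="32768"
 *	kern.ipc.nmbufs="65536"
 *
 * tunable_mbinit() above still enforces nmbufs >= 2 * nmbclusters.
 */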
197 /* "number of clusters of pages" */
208 mmbfree = NULL; mclfree = NULL;
209 mbstat.m_msize = MSIZE;
210 mbstat.m_mclbytes = MCLBYTES;
211 mbstat.m_minclsize = MINCLSIZE;
212 mbstat.m_mlen = MLEN;
213 mbstat.m_mhlen = MHLEN;
216 if (m_mballoc(NMB_INIT, MB_DONTWAIT) == 0)
218 #if MCLBYTES <= PAGE_SIZE
219 if (m_clalloc(NCL_INIT, MB_DONTWAIT) == 0)
222 /* It's OK to call contigmalloc in this context. */
223 if (m_clalloc(16, MB_WAIT) == 0)
/*
 * Allocate at least nmb mbufs and place on mbuf free list.
 * Must be called at splimp.
 */
int
m_mballoc(int nmb, int how)
{
	caddr_t p;
	int i;
	int nbytes;

	/*
	 * If we've hit the mbuf limit, stop allocating from mb_map,
	 * (or trying to) in order to avoid dipping into the section of
	 * mb_map which we've "reserved" for clusters.
	 */
	if ((nmb + mbstat.m_mbufs) > nmbufs)
		return (0);

	/*
	 * Once we run out of map space, it will be impossible to get
	 * any more (nothing is ever freed back to the map)
	 * -- however you are not dead as m_reclaim might
	 * still be able to free a substantial amount of space.
	 *
	 * XXX Furthermore, we can also work with "recycled" mbufs (when
	 * we're calling with MB_WAIT the sleep procedure will be woken
	 * up when an mbuf is freed. See m_mballoc_wait()).
	 */
	if (mb_map_full)
		return (0);

	nbytes = round_page(nmb * MSIZE);
	p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT);
	if (p == NULL && how == MB_WAIT) {
		mbstat.m_wait++;
		p = (caddr_t)kmem_malloc(mb_map, nbytes, M_WAITOK);
	}

	/*
	 * Either the map is now full, or `how' is MB_DONTWAIT and there
	 * are no pages left.
	 */
	if (p == NULL)
		return (0);

	nmb = nbytes / MSIZE;
	for (i = 0; i < nmb; i++) {
		((struct mbuf *)p)->m_next = mmbfree;
		mmbfree = (struct mbuf *)p;
		p += MSIZE;
	}
	mbstat.m_mbufs += nmb;
	mbtypes[MT_FREE] += nmb;
	return (1);
}
/*
 * Once the mb_map has been exhausted and if the call to the allocation macros
 * (or, in some cases, functions) is with MB_WAIT, then it is necessary to rely
 * solely on reclaimed mbufs.  Here we wait for an mbuf to be freed for a
 * designated (mbuf_wait) time.
 */
struct mbuf *
m_mballoc_wait(int caller, int type)
{
	struct mbuf *p;
	int s;

	s = splimp();
	m_mballoc_wid++;
	if ((tsleep(&m_mballoc_wid, 0, "mballc", mbuf_wait)) == EWOULDBLOCK)
		m_mballoc_wid--;
	splx(s);

	/*
	 * Now that we (think) we've got something, we will redo an
	 * MGET, but avoid getting into another instance of m_mballoc_wait().
	 * XXX: We retry to fetch _even_ if the sleep timed out. This is left
	 * this way, purposely, in the [unlikely] case that an mbuf was
	 * freed but the sleep was not awakened in time.
	 */
	p = NULL;
	switch (caller) {
	case MGET_C:
		MGET(p, MB_DONTWAIT, type);
		break;
	case MGETHDR_C:
		MGETHDR(p, MB_DONTWAIT, type);
		break;
	default:
		panic("m_mballoc_wait: invalid caller (%d)", caller);
	}

	s = splimp();
	if (p != NULL) {		/* We waited and got something... */
		mbstat.m_wait++;
		/* Wake up another if we have more free. */
		if (mmbfree != NULL)
			MMBWAKEUP();
	}
	splx(s);
	return (p);
}
#if MCLBYTES > PAGE_SIZE
static int i_want_my_mcl;

static void
kproc_mclalloc(void)
{
	while (1) {
		tsleep(&i_want_my_mcl, 0, "mclalloc", 0);

		for (; i_want_my_mcl; i_want_my_mcl--) {
			if (m_clalloc(1, MB_WAIT) == 0)
				printf("m_clalloc failed even in process context!\n");
		}
	}
}

static struct thread *mclallocthread;
static struct kproc_desc mclalloc_kp = {
	"mclalloc",
	kproc_mclalloc,
	&mclallocthread
};
SYSINIT(mclallocthread, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start,
	&mclalloc_kp);
#endif
/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Must be called at splimp.
 */
int
m_clalloc(int ncl, int how)
{
	caddr_t p;
	int i;
	int npg;

	/*
	 * If we've hit the mcluster number limit, stop allocating from
	 * mb_map, (or trying to) in order to avoid dipping into the section
	 * of mb_map which we've "reserved" for mbufs.
	 */
	if ((ncl + mbstat.m_clusters) > nmbclusters)
		goto m_clalloc_fail;

	/*
	 * Once we run out of map space, it will be impossible
	 * to get any more (nothing is ever freed back to the
	 * map). From this point on, we solely rely on freed
	 * mclusters.
	 */
	if (mb_map_full)
		goto m_clalloc_fail;

#if MCLBYTES > PAGE_SIZE
	if (how != MB_WAIT) {
		i_want_my_mcl += ncl;
		wakeup(&i_want_my_mcl);
		mbstat.m_wait++;
		p = NULL;
	} else {
		p = contigmalloc_map(MCLBYTES * ncl, M_DEVBUF, M_WAITOK, 0ul,
		    ~0ul, PAGE_SIZE, 0, mb_map);
	}
#else
	npg = ncl;
	p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
	    how != MB_WAIT ? M_NOWAIT : M_WAITOK);
	ncl = ncl * PAGE_SIZE / MCLBYTES;
#endif

	/*
	 * Either the map is now full, or `how' is MB_DONTWAIT and there
	 * are no pages left.
	 */
	if (p == NULL) {
		static int last_report;	/* when we did that (in ticks) */

m_clalloc_fail:
		mbstat.m_drops++;
		if (ticks < last_report || (ticks - last_report) >= hz) {
			last_report = ticks;
			printf("All mbuf clusters exhausted, please see tuning(7).\n");
		}
		return (0);
	}

	for (i = 0; i < ncl; i++) {
		((union mcluster *)p)->mcl_next = mclfree;
		mclfree = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}
/*
 * Once the mb_map submap has been exhausted and the allocation is called with
 * MB_WAIT, we rely on the mclfree union pointers.  If nothing is free, we will
 * sleep for a designated amount of time (mbuf_wait) or until we're woken up
 * due to sudden mcluster availability.
 */
caddr_t
m_clalloc_wait(void)
{
	caddr_t p;
	int s;

	/* If in interrupt context, and INVARIANTS, maintain sanity and die. */
	KASSERT(mycpu->gd_intr_nesting_level == 0,
	    ("CLALLOC: CANNOT WAIT IN INTERRUPT"));

	/* Sleep until something's available or until we expire. */
	m_clalloc_wid++;
	if ((tsleep(&m_clalloc_wid, 0, "mclalc", mbuf_wait)) == EWOULDBLOCK)
		m_clalloc_wid--;

	/*
	 * Now that we (think) we've got something, we will redo the
	 * allocation, but avoid getting into another instance of
	 * m_clalloc_wait().
	 */
	p = m_mclalloc(MB_DONTWAIT);

	s = splimp();
	if (p != NULL) {		/* We waited and got something... */
		mbstat.m_wait++;
		/* Wake up another if we have more free. */
		if (mclfree != NULL)
			MCLWAKEUP();
	}
	splx(s);
	return (p);
}
/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(int i, int t)
{
	struct mbuf *m;
	int ms;

	/*
	 * Must only do the reclaim if not in an interrupt context.
	 */
	if (i == MB_WAIT) {
		KASSERT(mycpu->gd_intr_nesting_level == 0,
		    ("MBALLOC: CANNOT WAIT IN INTERRUPT"));
		m_reclaim();
	}

	ms = splimp();
	if (mmbfree == NULL)
		(void)m_mballoc(1, i);
	m = mmbfree;
	if (m != NULL) {
		mmbfree = m->m_next;
		mbtypes[MT_FREE]--;
		m->m_type = t;
		mbtypes[t]++;
		m->m_next = NULL;
		m->m_nextpkt = NULL;
		m->m_data = m->m_dat;
		m->m_flags = 0;
		splx(ms);
		mbstat.m_wait++;
	} else {
		static int last_report;	/* when we did that (in ticks) */

		splx(ms);
		mbstat.m_drops++;
		if (ticks < last_report || (ticks - last_report) >= hz) {
			last_report = ticks;
			printf("All mbufs exhausted, please see tuning(7).\n");
		}
	}

	return (m);
}
/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(int i, int t)
{
	struct mbuf *m;
	int ms;

	/*
	 * Must only do the reclaim if not in an interrupt context.
	 */
	if (i == MB_WAIT) {
		KASSERT(mycpu->gd_intr_nesting_level == 0,
		    ("MBALLOC: CANNOT WAIT IN INTERRUPT"));
		m_reclaim();
	}

	ms = splimp();
	if (mmbfree == NULL)
		(void)m_mballoc(1, i);
	m = mmbfree;
	if (m != NULL) {
		mmbfree = m->m_next;
		mbtypes[MT_FREE]--;
		m->m_type = t;
		mbtypes[t]++;
		m->m_next = NULL;
		m->m_nextpkt = NULL;
		m->m_data = m->m_pktdat;
		m->m_flags = M_PKTHDR;
		m->m_pkthdr.rcvif = NULL;
		SLIST_INIT(&m->m_pkthdr.tags);
		m->m_pkthdr.csum_flags = 0;
		splx(ms);
		mbstat.m_wait++;
	} else {
		static int last_report;	/* when we did that (in ticks) */

		splx(ms);
		mbstat.m_drops++;
		if (ticks < last_report || (ticks - last_report) >= hz) {
			last_report = ticks;
			printf("All mbufs exhausted, please see tuning(7).\n");
		}
	}

	return (m);
}
static void
m_reclaim(void)
{
	struct domain *dp;
	struct protosw *pr;
	int s = splimp();

	for (dp = domains; dp; dp = dp->dom_next) {
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
			if (pr->pr_drain)
				(*pr->pr_drain)();
		}
	}
	splx(s);
	mbstat.m_drain++;
}
/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(int how, int type)
{
	struct mbuf *m;
	int ms;

	ms = splimp();
	if (mmbfree == NULL)
		(void)m_mballoc(1, how);
	m = mmbfree;
	if (m != NULL) {
		mmbfree = m->m_next;
		mbtypes[MT_FREE]--;
		m->m_type = type;
		mbtypes[type]++;
		m->m_next = NULL;
		m->m_nextpkt = NULL;
		m->m_data = m->m_dat;
		m->m_flags = 0;
		splx(ms);
	} else {
		splx(ms);
		m = m_retry(how, type);
		if (m == NULL && how == MB_WAIT)
			m = m_mballoc_wait(MGET_C, type);
	}
	return (m);
}
struct mbuf *
m_gethdr(int how, int type)
{
	struct mbuf *m;
	int ms;

	ms = splimp();
	if (mmbfree == NULL)
		(void)m_mballoc(1, how);
	m = mmbfree;
	if (m != NULL) {
		mmbfree = m->m_next;
		mbtypes[MT_FREE]--;
		m->m_type = type;
		mbtypes[type]++;
		m->m_next = NULL;
		m->m_nextpkt = NULL;
		m->m_data = m->m_pktdat;
		m->m_flags = M_PKTHDR;
		m->m_pkthdr.rcvif = NULL;
		SLIST_INIT(&m->m_pkthdr.tags);
		m->m_pkthdr.csum_flags = 0;
		splx(ms);
	} else {
		splx(ms);
		m = m_retryhdr(how, type);
		if (m == NULL && how == MB_WAIT)
			m = m_mballoc_wait(MGETHDR_C, type);
	}
	return (m);
}
struct mbuf *
m_getclr(int how, int type)
{
	struct mbuf *m;

	MGET(m, how, type);
	if (m == NULL)
		return (NULL);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}
/*
 * m_getcl() returns an mbuf with an attached cluster.
 * Because many network drivers use this kind of buffer a lot, it is
 * convenient to keep a small pool of free buffers of this kind.
 * Even a small size such as 10 gives about 10% improvement in the
 * forwarding rate in a bridge or router.
 * The size of this free list is controlled by the sysctl variable
 * mcl_pool_max.  The list is populated on m_freem(), and used in
 * m_getcl() if elements are available.
 */
static struct mbuf *mcl_pool;
static int mcl_pool_now;
static int mcl_pool_max = 10;

SYSCTL_INT(_kern_ipc, OID_AUTO, mcl_pool_max, CTLFLAG_RW, &mcl_pool_max, 0,
	"Maximum number of mbufs+cluster in free list");
SYSCTL_INT(_kern_ipc, OID_AUTO, mcl_pool_now, CTLFLAG_RD, &mcl_pool_now, 0,
	"Current number of mbufs+cluster in free list");
struct mbuf *
m_getcl(int how, short type, int flags)
{
	struct mbuf *mp;
	int s;

	s = splimp();
	if (flags & M_PKTHDR) {
		if (type == MT_DATA && mcl_pool) {
			mp = mcl_pool;
			mcl_pool = mp->m_nextpkt;
			mcl_pool_now--;
			splx(s);
			mp->m_nextpkt = NULL;
			mp->m_data = mp->m_ext.ext_buf;
			mp->m_flags = M_PKTHDR|M_EXT;
			mp->m_pkthdr.rcvif = NULL;
			mp->m_pkthdr.csum_flags = 0;
			return (mp);
		}
		MGETHDR(mp, how, type);
	} else {
		MGET(mp, how, type);
	}
	if (mp) {
		if ((mp->m_flags & M_EXT) == 0) {
			MCLGET(mp, how);
			if ((mp->m_flags & M_EXT) == 0) {
				m_free(mp);
				mp = NULL;
			}
		}
	}
	splx(s);
	return (mp);
}
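
/*
 * Typical m_getcl() usage in a driver receive path, as a sketch (the
 * error handling shown is illustrative):
 *
 *	struct mbuf *m;
 *
 *	m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_len = m->m_pkthdr.len = MCLBYTES;
 *
 * An MT_DATA/M_PKTHDR request is exactly the case the mcl_pool fast
 * path above can satisfy without entering the general allocator.
 */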
/*
 * m_getm(m, len, how, type)
 *
 * This will allocate len-worth of mbufs and/or mbuf clusters (whatever fits
 * best) and return a pointer to the top of the allocated chain.  If m is
 * non-null, then we assume that it is a single mbuf or an mbuf chain to
 * which we want len bytes worth of mbufs and/or clusters attached, and so
 * if we succeed in allocating it, we will just return a pointer to m.
 *
 * If we happen to fail at any point during the allocation, we will free
 * up everything we have already allocated and return NULL.
 */
struct mbuf *
m_getm(struct mbuf *m, int len, int how, int type)
{
	struct mbuf *top, *tail, *mp, *mtail = NULL;

	KASSERT(len >= 0, ("len is < 0 in m_getm"));

	MGET(mp, how, type);
	if (mp == NULL) {
		return (NULL);
	} else if (len > MINCLSIZE) {
		MCLGET(mp, how);
		if ((mp->m_flags & M_EXT) == 0) {
			m_free(mp);
			return (NULL);
		}
	}
	mp->m_len = 0;
	len -= M_TRAILINGSPACE(mp);

	if (m != NULL) {
		for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next)
			;
	} else {
		m = mp;
	}

	top = tail = mp;
	while (len > 0) {
		MGET(mp, how, type);
		if (mp == NULL)
			goto failed;

		tail->m_next = mp;
		tail = mp;
		if (len > MINCLSIZE) {
			MCLGET(mp, how);
			if ((mp->m_flags & M_EXT) == 0)
				goto failed;
		}

		mp->m_len = 0;
		len -= M_TRAILINGSPACE(mp);
	}

	if (mtail != NULL)
		mtail->m_next = top;
	return (m);

failed:
	m_freem(top);
	return (NULL);
}
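
/*
 * Sketch of an m_getm() call: extend an existing chain "m" so it can
 * take "size" more bytes, giving up cleanly on failure:
 *
 *	if (m_getm(m, size, MB_DONTWAIT, MT_DATA) == NULL)
 *		return (ENOBUFS);
 *
 * On failure only the newly allocated mbufs are freed; the chain
 * passed in remains valid.
 */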
/*
 * m_mclalloc() - Allocates an mbuf cluster.
 */
caddr_t
m_mclalloc(int how)
{
	caddr_t mp;
	int s;

	s = splimp();

	if (mclfree == NULL)
		m_clalloc(1, how);

	mp = (caddr_t)mclfree;
	if (mp != NULL) {
		KKASSERT((struct mbuf *)mp >= mbutl &&
			 (struct mbuf *)mp < mbute);
		mclrefcnt[mtocl(mp)]++;
		mbstat.m_clfree--;
		mclfree = ((union mcluster *)mp)->mcl_next;
		splx(s);
		return (mp);
	}
	splx(s);
	if (how == MB_WAIT)
		return (m_clalloc_wait());
	return (NULL);
}
/*
 * m_mclget() - Adds a cluster to a normal mbuf, M_EXT is set on success.
 */
void
m_mclget(struct mbuf *m, int how)
{
	m->m_ext.ext_buf = m_mclalloc(how);
	if (m->m_ext.ext_buf != NULL) {
		m->m_data = m->m_ext.ext_buf;
		m->m_flags |= M_EXT;
		m->m_ext.ext_free = NULL;
		m->m_ext.ext_ref = NULL;
		m->m_ext.ext_size = MCLBYTES;
	}
}
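
/*
 * m_mclget() is normally reached through the MCLGET() macro.  The
 * common pattern when a full-sized buffer is needed (a sketch):
 *
 *	MGETHDR(m, MB_DONTWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	MCLGET(m, MB_DONTWAIT);
 *	if ((m->m_flags & M_EXT) == 0) {
 *		m_free(m);
 *		return (ENOBUFS);
 *	}
 *
 * Since M_EXT is only set on success, it must be tested before the
 * cluster is used.
 */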
static __inline void
_m_mclfree(caddr_t data)
{
	union mcluster *mp = (union mcluster *)data;

	KASSERT(mclrefcnt[mtocl(mp)] > 0, ("freeing free cluster"));
	KKASSERT((struct mbuf *)mp >= mbutl &&
		 (struct mbuf *)mp < mbute);
	if (--mclrefcnt[mtocl(mp)] == 0) {
		mp->mcl_next = mclfree;
		mclfree = mp;
		mbstat.m_clfree++;
		MCLWAKEUP();
	}
}

void
m_mclfree(caddr_t mp)
{
	int s = splimp();

	_m_mclfree(mp);
	splx(s);
}
/*
 * Free a single mbuf and any associated external storage.  The successor,
 * if any, is returned.
 *
 * We do need to check non-first mbuf for m_aux, since some existing
 * code does not call M_PREPEND properly.
 * (example: call to bpf_mtap from drivers)
 */
struct mbuf *
m_free(struct mbuf *m)
{
	struct mbuf *n;
	int s = splimp();

	KASSERT(m->m_type != MT_FREE, ("freeing free mbuf"));
	mbtypes[m->m_type]--;
	if ((m->m_flags & M_PKTHDR) != 0)
		m_tag_delete_chain(m, NULL);
	if (m->m_flags & M_EXT) {
		if (m->m_ext.ext_free != NULL) {
			m->m_ext.ext_free(m->m_ext.ext_buf, m->m_ext.ext_size);
		} else {
			_m_mclfree(m->m_ext.ext_buf);	/* inlined */
		}
	}
	n = m->m_next;
	m->m_type = MT_FREE;
	mbtypes[MT_FREE]++;
	m->m_next = mmbfree;
	mmbfree = m;
	MMBWAKEUP();
	splx(s);

	return (n);
}
void
m_freem(struct mbuf *m)
{
	int s = splimp();

	/*
	 * Try to keep a small pool of mbuf+cluster for quick use in
	 * device drivers. A good candidate is a M_PKTHDR buffer with
	 * only one cluster attached. Other mbufs, or those exceeding
	 * the pool size, are just m_free'd in the usual way.
	 * The following code makes sure that m_next, m_type,
	 * m_pkthdr.aux and m_ext.* are properly initialized.
	 * Other fields in the mbuf are initialized in m_getcl()
	 * upon allocation.
	 */
	if (mcl_pool_now < mcl_pool_max && m && m->m_next == NULL &&
	    (m->m_flags & (M_PKTHDR|M_EXT)) == (M_PKTHDR|M_EXT) &&
	    m->m_type == MT_DATA && M_EXT_WRITABLE(m)) {
		m_tag_delete_chain(m, NULL);
		m->m_nextpkt = mcl_pool;
		mcl_pool = m;
		mcl_pool_now++;
	} else {
		while (m)
			m = m_free(m);
	}
	splx(s);
}
/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR)
		M_MOVE_PKTHDR(mn, m);
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
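
/*
 * Callers normally reach this routine through the M_PREPEND() macro,
 * which uses leading space in the first mbuf when it is available and
 * only falls back to m_prepend().  A minimal sketch ("hlen" is
 * illustrative):
 *
 *	M_PREPEND(m, hlen, MB_DONTWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *
 * On failure the old chain has already been freed.
 */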
/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of MB_WAIT/MB_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
#define MCFail (mbstat.m_mcfail)

struct mbuf *
m_copym(const struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = NULL;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, wait))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			if (m->m_ext.ext_ref == NULL) {
				atomic_add_char(
				    &mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);
			} else {
				int s = splimp();

				(*m->m_ext.ext_ref)(m->m_ext.ext_buf,
				    m->m_ext.ext_size);
				splx(s);
			}
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else {
			bcopy(mtod(m, caddr_t) + off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		}
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == NULL)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (NULL);
}
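
/*
 * Example: take a read-only reference to the first "len" bytes of a
 * packet, e.g. to keep a copy around for retransmission (a sketch):
 *
 *	n = m_copym(m, 0, len, MB_DONTWAIT);
 *	if (n == NULL)
 *		return (ENOBUFS);
 *
 * Because cluster data is shared rather than copied, "n" must not be
 * written to unless M_WRITABLE() approves.
 */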
/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies also have the room available.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (n == NULL)
		goto nospace;

	if (!m_dup_pkthdr(n, m, how))
		goto nospace;
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		if (m->m_ext.ext_ref == NULL) {
			atomic_add_char(&mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);
		} else {
			int s = splimp();

			(*m->m_ext.ext_ref)(m->m_ext.ext_buf,
			    m->m_ext.ext_size);
			splx(s);
		}
		n->m_ext = m->m_ext;
		n->m_flags |= M_EXT;
	} else {
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (o == NULL)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			if (m->m_ext.ext_ref == NULL) {
				atomic_add_char(
				    &mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);
			} else {
				int s = splimp();

				(*m->m_ext.ext_ref)(m->m_ext.ext_buf,
				    m->m_ext.ext_size);
				splx(s);
			}
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (NULL);
}
/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
	unsigned count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
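
/*
 * Example: extract a header into private storage without disturbing
 * the chain (a sketch):
 *
 *	struct ip iphdr;
 *
 *	m_copydata(m, 0, sizeof(iphdr), (caddr_t)&iphdr);
 */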
/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	/* Sanity check */
	if (m == NULL)
		return (NULL);
	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __FUNCTION__));

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		MGET(n, how, m->m_type);
		if (n == NULL)
			goto nospace;
		if (top == NULL) {		/* first one, must be PKTHDR */
			if (!m_dup_pkthdr(n, m, how))
				goto nospace;
			nsize = MHLEN;
		} else {			/* not the first one */
			nsize = MLEN;
		}
		if (remain >= MINCLSIZE) {
			MCLGET(n, how);
			if ((n->m_flags & M_EXT) == 0) {
				(void)m_free(n);
				goto nospace;
			}
			nsize = MCLBYTES;
		}
		n->m_len = 0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __FUNCTION__));
	}
	return (top);

nospace:
	m_freem(top);
	MCFail++;
	return (NULL);
}
/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}
void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == NULL)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}
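
/*
 * m_adj() trims from the front for a positive count and from the back
 * for a negative one.  Sketch: strip a link-level header and then a
 * "pad"-byte trailer:
 *
 *	m_adj(m, sizeof(struct ether_header));
 *	m_adj(m, -pad);
 */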
/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod will work for a structure
 * of size len).  Returns the resulting mbuf chain on success, frees it and
 * returns null on failure.  If there is room, it will add up to
 * max_protohdr-len extra bytes to the contiguous region in an attempt to
 * avoid being called next time.
 */
#define MPFail (mbstat.m_mpfail)

struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, MB_DONTWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR)
			M_MOVE_PKTHDR(m, n);
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (NULL);
}
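
/*
 * The canonical m_pullup() pattern before dereferencing a header with
 * mtod() (a sketch):
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;
 *	ip = mtod(m, struct ip *);
 *
 * When m_pullup() fails it has already freed the chain.
 */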
/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf.  Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				(void) m_free(n);
				return (NULL);
			} else {
				n->m_len = 0;
				return (n);
			}
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		if (m->m_ext.ext_ref == NULL) {
			atomic_add_char(&mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);
		} else {
			int s = splimp();

			(*m->m_ext.ext_ref)(m->m_ext.ext_buf,
			    m->m_ext.ext_size);
			splx(s);
		}
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}
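
/*
 * Example: split a record of "reclen" bytes off the head of a chain
 * (a sketch):
 *
 *	tail = m_split(m, reclen, MB_DONTWAIT);
 *	if (tail == NULL)
 *		return (ENOBUFS);
 *
 * "m" now holds the first reclen bytes and "tail" the remainder.
 */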
/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(char *buf, int totlen, int off0, struct ifnet *ifp,
	void (*copy) (char *from, caddr_t to, u_int len))
{
	struct mbuf *m;
	struct mbuf *top = NULL, **mp = &top;
	int off = off0, len;
	char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		cp += off + 2 * sizeof(u_short);
		totlen -= 2 * sizeof(u_short);
	}
	MGETHDR(m, MB_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, MB_DONTWAIT, MT_DATA);
			if (m == NULL) {
				m_freem(top);
				return (NULL);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, MB_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == NULL && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}
/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == NULL)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			n = m_getclr(MB_DONTWAIT, m->m_type);
			if (n == NULL)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			n = m_get(MB_DONTWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}
void
m_print(const struct mbuf *m)
{
	int len;
	const struct mbuf *m2;

	len = m->m_pkthdr.len;
	m2 = m;
	while (len) {
		printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
		len -= m2->m_len;
		m2 = m2->m_next;
	}
}
1584 * "Move" mbuf pkthdr from "from" to "to".
1585 * "from" must have M_PKTHDR set, and "to" must be empty.
1588 m_move_pkthdr(struct mbuf *to, struct mbuf *from)
1590 KASSERT((to->m_flags & M_EXT) == 0, ("m_move_pkthdr: to has cluster"));
1592 to->m_flags = from->m_flags & M_COPYFLAGS;
1593 to->m_data = to->m_pktdat;
1594 to->m_pkthdr = from->m_pkthdr; /* especially tags */
1595 SLIST_INIT(&from->m_pkthdr.tags); /* purge tags from src */
1596 from->m_flags &= ~M_PKTHDR;
/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
{
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, how));
}
/*
 * Defragment a mbuf chain, returning the shortest possible
 * chain of mbufs and clusters.  If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed in chain will be unchanged.  Upon success,
 * the original chain will be freed, and the new chain
 * will be returned.
 *
 * If a non-packet header is passed in, the original
 * mbuf (chain?) will be returned unharmed.
 */
struct mbuf *
m_defrag(struct mbuf *m0, int how)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, length;

	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

#ifdef MBUF_STRESS_TEST
	if (m_defragrandomfailures) {
		int temp = arc4random() & 0xff;

		if (temp == 0xba)
			goto nospace;
	}
#endif

	if (m0->m_pkthdr.len > MHLEN)
		m_final = m_getcl(how, MT_DATA, M_PKTHDR);
	else
		m_final = m_gethdr(how, MT_DATA);

	if (m_final == NULL)
		goto nospace;

	if (m_dup_pkthdr(m_final, m0, how) == NULL)
		goto nospace;

	m_new = m_final;

	while (progress < m0->m_pkthdr.len) {
		length = m0->m_pkthdr.len - progress;
		if (length > MCLBYTES)
			length = MCLBYTES;

		if (m_new == NULL) {
			if (length > MLEN)
				m_new = m_getcl(how, MT_DATA, 0);
			else
				m_new = m_get(how, MT_DATA);
			if (m_new == NULL)
				goto nospace;
		}

		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
		progress += length;
		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
	if (m0->m_next == NULL)
		m_defraguseless++;
	m_freem(m0);
	m0 = m_final;
	m_defragpackets++;
	m_defragbytes += m0->m_pkthdr.len;
	return (m0);
nospace:
	m_defragfailure++;
	if (m_new)
		m_free(m_new);
	if (m_final)
		m_freem(m_final);
	return (NULL);
}
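
/*
 * Example: a transmit path that produced too many DMA segments can
 * compact the chain and retry.  A sketch; the EFBIG convention shown
 * is the usual driver idiom, not something this file mandates:
 *
 *	if (error == EFBIG) {
 *		struct mbuf *m2 = m_defrag(m, MB_DONTWAIT);
 *
 *		if (m2 == NULL)
 *			return (ENOBUFS);
 *		m = m2;
 *	}
 *
 * On failure the original chain is left intact, so the caller can
 * still free it or retry later.
 */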
/*
 * Move data from uio into mbufs.
 * A length of zero means copy the whole uio.
 */
struct mbuf *
m_uiomove(struct uio *uio, int wait, int len0)
{
	struct mbuf *head;		/* result mbuf chain */
	struct mbuf *m;			/* current working mbuf */
	struct mbuf **mp;
	int resid, datalen, error;

	resid = (len0 == 0) ? uio->uio_resid : min(len0, uio->uio_resid);

	head = NULL;
	mp = &head;
	do {
		if (resid > MHLEN) {
			m = m_getcl(wait, MT_DATA, head == NULL ? M_PKTHDR : 0);
			if (m == NULL)
				goto failed;
			if (m->m_flags & M_PKTHDR)
				m->m_pkthdr.len = 0;
		} else {
			if (head == NULL) {
				MGETHDR(m, wait, MT_DATA);
				if (m == NULL)
					goto failed;
				m->m_pkthdr.len = 0;
				/* Leave room for protocol headers. */
				MH_ALIGN(m, resid);
			} else {
				MGET(m, wait, MT_DATA);
				if (m == NULL)
					goto failed;
			}
		}

		datalen = min(MCLBYTES, resid);
		error = uiomove(mtod(m, caddr_t), datalen, uio);
		if (error) {
			m_free(m);
			goto failed;
		}

		m->m_len = datalen;
		*mp = m;
		mp = &m->m_next;
		head->m_pkthdr.len += datalen;
		resid -= datalen;
	} while (resid > 0);

	return (head);

failed:
	if (head)
		m_freem(head);
	return (NULL);
}
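
/*
 * Example: build a packet header chain from the data described by a
 * uio, as done on the socket send path (a sketch):
 *
 *	m = m_uiomove(uio, MB_WAIT, 0);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *
 * A len0 of zero means the whole uio is consumed.
 */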