 * Copyright (c) 2004 Jeffrey M. Hsu. All rights reserved.
 * Copyright (c) 2004 The DragonFly Project. All rights reserved.
 * This code is derived from software contributed to The DragonFly Project
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * Copyright (c) 2004 Jeffrey M. Hsu. All rights reserved.
 * License terms: all terms for the DragonFly license above plus the following:
 * 4. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Jeffrey M. Hsu
 *	for the DragonFly Project.
 *    This requirement may be waived with permission from Jeffrey Hsu.
 *    This requirement will sunset and may be removed on July 8 2005,
 *    after which the standard DragonFly license (as shown above) will
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California. All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * @(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
 * $DragonFly: src/sys/kern/uipc_mbuf.c,v 1.30 2005/02/04 01:14:27 dillon Exp $
#include "opt_param.h"
#include "opt_mbuf_stress_test.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/thread2.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <machine/cpu.h>
 * mbuf cluster meta-data
typedef struct mbcluster {
	struct mbcluster *mcl_next;
typedef struct mbuf *mbuf_t;
#define MCL_MAGIC	0x6d62636c
static void mbinit (void *);
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)
static u_long	mbtypes[MT_NTYPES];
struct mbstat mbstat;
#ifdef MBUF_STRESS_TEST
int	m_defragrandomfailures;
u_int	m_mballoc_wid = 0;
u_int	m_clalloc_wid = 0;
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
	&max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
	&max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
	&max_datalen, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RW, &mbstat, mbstat, "");
SYSCTL_OPAQUE(_kern_ipc, OID_AUTO, mbtypes, CTLFLAG_RD, mbtypes,
	sizeof(mbtypes), "LU", "");
SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RW,
	&nmbclusters, 0, "Maximum number of mbuf clusters available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RW, &nmbufs, 0,
	"Maximum number of mbufs available");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
	&m_defragpackets, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
	&m_defragbytes, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
	&m_defraguseless, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
	&m_defragfailure, 0, "");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
	&m_defragrandomfailures, 0, "");
static int mcl_pool_count;
static int mcl_pool_max = 20;
static int mcl_free_max = 1000;
static int mbuf_free_max = 5000;
SYSCTL_INT(_kern_ipc, OID_AUTO, mcl_pool_max, CTLFLAG_RW, &mcl_pool_max, 0,
	"Maximum number of mbuf+cluster pairs in the free list");
SYSCTL_INT(_kern_ipc, OID_AUTO, mcl_pool_count, CTLFLAG_RD, &mcl_pool_count, 0,
	"Current number of mbuf+cluster pairs in the free list");
SYSCTL_INT(_kern_ipc, OID_AUTO, mcl_free_max, CTLFLAG_RW, &mcl_free_max, 0,
	"Maximum number of clusters on the free list");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_free_max, CTLFLAG_RW, &mbuf_free_max, 0,
	"Maximum number of mbufs on the free list");
static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");
static MALLOC_DEFINE(M_MBUFCL, "mbufcl", "mbufcl");
static mbuf_t mmbfree;
static mbcluster_t mclfree;
static struct mbuf *mcl_pool;
static void m_reclaim (void);
static int m_mballoc(int nmb, int how);
static int m_clalloc(int ncl, int how);
static struct mbuf *m_mballoc_wait(int caller, int type);
static void m_mclref(void *arg);
static void m_mclfree(void *arg);
#define NMBCLUSTERS	(512 + maxusers * 16)
#define NMBUFS		(nmbclusters * 4)
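/*
 * A worked example of the defaults above (a sketch only, assuming
 * maxusers = 64 and no tunable overrides):
 *
 *	NMBCLUSTERS = 512 + 64 * 16 = 1536 clusters
 *	NMBUFS      = 1536 * 4     = 6144 mbufs
 *
 * which also satisfies the nmbufs >= nmbclusters * 2 floor enforced by
 * tunable_mbinit() below.
 */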
 * Perform sanity checks of tunables declared above.
tunable_mbinit(void *dummy)
	 * This has to be done before VM init.
	nmbclusters = NMBCLUSTERS;
	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
	if (nmbufs < nmbclusters * 2)
		nmbufs = nmbclusters * 2;
SYSINIT(tunable_mbinit, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_mbinit, NULL);
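/*
 * A configuration sketch (values illustrative only): the tunables fetched
 * above come from the kernel environment, so they can be set from the boot
 * loader, e.g. in /boot/loader.conf:
 *
 *	kern.ipc.nmbclusters="65536"
 *	kern.ipc.nmbufs="131072"
 *
 * tunable_mbinit() will still raise nmbufs to at least 2 * nmbclusters.
 */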
/* "number of clusters of pages" */
	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;
	if (m_mballoc(NMB_INIT, MB_DONTWAIT) == 0)
#if MCLBYTES <= PAGE_SIZE
	if (m_clalloc(NCL_INIT, MB_DONTWAIT) == 0)
	/* It's OK to call contigmalloc in this context. */
	if (m_clalloc(16, MB_WAIT) == 0)
 * Allocate at least nmb mbufs and place on mbuf free list.
 * Returns the number of mbufs successfully allocated, 0 if none.
 * Must be called while in a critical section.
m_mballoc(int nmb, int how)
	 * If we've hit the mbuf limit, stop allocating (or trying to)
	 * in order to avoid exhausting kernel memory entirely.
	if ((nmb + mbstat.m_mbufs) > nmbufs)
	 * Attempt to allocate the requested number of mbufs; terminate when
	 * the allocation fails, but if blocking is allowed allocate at
	 * least one.
	for (i = 0; i < nmb; ++i) {
		m = malloc(MSIZE, M_MBUF, M_NOWAIT|M_NULLOK|M_ZERO);
		if (how == MB_WAIT) {
			m = malloc(MSIZE, M_MBUF,
				   M_WAITOK|M_NULLOK|M_ZERO);
 * Once mbuf memory has been exhausted, and if the allocation macros (or,
 * in some cases, functions) were called with MB_WAIT, it is necessary to
 * rely solely on reclaimed mbufs. Here we wait for an mbuf to be freed
 * for up to a designated (mbuf_wait) time.
m_mballoc_wait(int caller, int type)
	if ((tsleep(&m_mballoc_wid, 0, "mballc", mbuf_wait)) == EWOULDBLOCK)
	 * Now that we (think) we've got something, we will redo an
	 * MGET, but avoid getting into another instance of m_mballoc_wait().
	 * XXX: We retry to fetch _even_ if the sleep timed out. This is left
	 * this way, purposely, in the [unlikely] case that an mbuf was
	 * freed but the sleep was not awakened in time.
		MGET(m, MB_DONTWAIT, type);
		MGETHDR(m, MB_DONTWAIT, type);
		panic("m_mballoc_wait: invalid caller (%d)", caller);
	if (m != NULL) {		/* We waited and got something... */
		/* Wake up another if we have more free. */
#if MCLBYTES > PAGE_SIZE
static int i_want_my_mcl;
		tsleep(&i_want_my_mcl, 0, "mclalloc", 0);
		while (i_want_my_mcl > 0) {
			if (m_clalloc(1, MB_WAIT) == 0)
				printf("m_clalloc failed even in thread context!\n");
static struct thread *mclallocthread;
static struct kproc_desc mclalloc_kp = {
SYSINIT(mclallocthread, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start,
 * Allocate at least nmb mbuf clusters and place on mbuf free list.
 * Returns the number of mbuf clusters successfully allocated, 0 if none.
 * Must be called while in a critical section.
m_clalloc(int ncl, int how)
	static int last_report;
	 * If we've hit the mbuf cluster limit, stop allocating (or trying to).
	if ((ncl + mbstat.m_clusters) > nmbclusters)
	 * Attempt to allocate the requested number of mbuf clusters;
	 * terminate when the allocation fails, but if blocking is allowed
	 * allocate at least one.
	 * We need to allocate two structures for each cluster... a
	 * ref counting / governing structure and the actual data. MCLBYTES
	 * should be a power of 2, which means that the slab allocator will
	 * return a buffer that does not cross a page boundary.
	for (i = 0; i < ncl; ++i) {
		mcl = malloc(sizeof(*mcl), M_MBUFCL, M_NOWAIT|M_NULLOK|M_ZERO);
		if (mcl == NULL && how == MB_WAIT) {
			mcl = malloc(sizeof(*mcl),
				     M_MBUFCL, M_WAITOK|M_NULLOK|M_ZERO);
		 * Physically contiguous data buffer.
#if MCLBYTES > PAGE_SIZE
		if (how != MB_WAIT) {
			i_want_my_mcl += ncl - i;
			wakeup(&i_want_my_mcl);
		data = contigmalloc_map(MCLBYTES, M_MBUFCL,
		    M_WAITOK, 0ul, ~0ul, PAGE_SIZE, 0, kernel_map);
		data = malloc(MCLBYTES, M_MBUFCL, M_NOWAIT|M_NULLOK);
		if (how == MB_WAIT) {
			data = malloc(MCLBYTES, M_MBUFCL,
				      M_WAITOK|M_NULLOK);
		mcl->mcl_next = mclfree;
		mcl->mcl_data = data;
		mcl->mcl_magic = MCL_MAGIC;
	 * If we could not allocate any, report failure no more often than
	 * once a second.
	if (ticks < last_report || (ticks - last_report) >= hz) {
		printf("All mbuf clusters exhausted, please see tuning(7).\n");
 * Once cluster memory has been exhausted and the allocation is called with
 * MB_WAIT, we rely on the mclfree pointers. If nothing is free, we will
 * sleep for a designated amount of time (mbuf_wait) or until we're woken up
 * due to sudden mcluster availability.
 * Must be called while in a critical section.
	/* If in interrupt context, and INVARIANTS, maintain sanity and die. */
	KASSERT(mycpu->gd_intr_nesting_level == 0,
	    ("CLALLOC: CANNOT WAIT IN INTERRUPT"));
	 * Sleep until something's available or until we expire.
	if ((tsleep(&m_clalloc_wid, 0, "mclalc", mbuf_wait)) == EWOULDBLOCK)
	 * Try the allocation once more, and if we see more than two
	 * free entries, wake up others as well.
	m_clalloc(1, MB_WAIT);
	if (mclfree && mclfree->mcl_next) {
 * Return the number of references to this mbuf's data. 0 is returned
 * if the mbuf is not M_EXT, a reference count is returned if it is
 * M_EXT|M_EXT_CLUSTER, and 99 is returned if it is a special M_EXT.
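 *
 * A minimal usage sketch (hypothetical caller): before writing into
 * cluster-backed data, check that nobody else holds a reference,
 * otherwise take a writable copy first, e.g. with m_dup() for a
 * packet-header chain:
 *
 *	if (m_sharecount(m) > 1)
 *		m = m_dup(m, MB_DONTWAIT);	-- deep, writable copy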
m_sharecount(struct mbuf *m)
	switch(m->m_flags & (M_EXT|M_EXT_CLUSTER)) {
	case M_EXT|M_EXT_CLUSTER:
		count = ((mbcluster_t)m->m_ext.ext_arg)->mcl_refs;
		panic("bad mbuf flags: %p", m);
 * change mbuf to new type
m_chtype(struct mbuf *m, int type)
	--mbtypes[m->m_type];
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
m_retry(int how, int t)
	 * Must only do the reclaim if not in an interrupt context.
	if (how == MB_WAIT) {
		KASSERT(mycpu->gd_intr_nesting_level == 0,
		    ("MBALLOC: CANNOT WAIT IN INTERRUPT"));
	 * Try to pull a new mbuf out of the cache; if the cache is empty,
	 * try to allocate a new one, and if that doesn't work we give up.
	if ((m = mmbfree) == NULL) {
		if ((m = mmbfree) == NULL) {
			static int last_report;
			if (ticks < last_report ||
			    (ticks - last_report) >= hz) {
				printf("All mbufs exhausted, please see tuning(7).\n");
	 * Cache case, adjust globals before leaving the critical section
	m->m_data = m->m_dat;
 * As above; retry an MGETHDR.
m_retryhdr(int how, int t)
	 * Must only do the reclaim if not in an interrupt context.
	if (how == MB_WAIT) {
		KASSERT(mycpu->gd_intr_nesting_level == 0,
		    ("MBALLOC: CANNOT WAIT IN INTERRUPT"));
	 * Try to pull a new mbuf out of the cache; if the cache is empty,
	 * try to allocate a new one, and if that doesn't work we give up.
	if ((m = mmbfree) == NULL) {
		if ((m = mmbfree) == NULL) {
			static int last_report;
			if (ticks < last_report ||
			    (ticks - last_report) >= hz) {
				printf("All mbufs exhausted, please see tuning(7).\n");
	 * Cache case, adjust globals before leaving the critical section
	m->m_data = m->m_pktdat;
	m->m_flags = M_PKTHDR;
	m->m_pkthdr.rcvif = NULL;
	SLIST_INIT(&m->m_pkthdr.tags);
	m->m_pkthdr.csum_flags = 0;
	for (dp = domains; dp; dp = dp->dom_next) {
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
 * Allocate an mbuf. If no mbufs are immediately available try to
 * bring a bunch more into our cache (mmbfree list). A critical
 * section is required to protect the mmbfree list and counters
 * against interrupts.
m_get(int how, int type)
	 * Try to pull a new mbuf out of the cache; if the cache is empty,
	 * try to allocate a new one, and if that doesn't work, try even
	 * harder by calling m_retry().
	if ((m = mmbfree) == NULL) {
		if ((m = mmbfree) == NULL) {
			m = m_retry(how, type);
			if (m == NULL && how == MB_WAIT)
				m = m_mballoc_wait(MGET_C, type);
	 * Cache case, adjust globals before leaving the critical section
	m->m_data = m->m_dat;
m_gethdr(int how, int type)
	 * Try to pull a new mbuf out of the cache; if the cache is empty,
	 * try to allocate a new one, and if that doesn't work, try even
	 * harder by calling m_retryhdr().
	if ((m = mmbfree) == NULL) {
		if ((m = mmbfree) == NULL) {
			m = m_retryhdr(how, type);
			if (m == NULL && how == MB_WAIT)
				m = m_mballoc_wait(MGETHDR_C, type);
	 * Cache case, adjust globals before leaving the critical section
	m->m_data = m->m_pktdat;
	m->m_flags = M_PKTHDR;
	m->m_pkthdr.rcvif = NULL;
	SLIST_INIT(&m->m_pkthdr.tags);
	m->m_pkthdr.csum_flags = 0;
	m->m_pkthdr.pf_flags = 0;
m_getclr(int how, int type)
	if ((m = m_get(how, type)) != NULL) {
		bzero(mtod(m, caddr_t), MLEN);
 * m_getcl() returns an mbuf with an attached cluster.
 * Because many network drivers use this kind of buffer a lot, it is
 * convenient to keep a small pool of free buffers of this kind.
 * Even a small size such as 10 gives about 10% improvement in the
 * forwarding rate in a bridge or router.
 * The size of this free list is controlled by the sysctl variable
 * mcl_pool_max. The list is populated on m_freem(), and used in
 * m_getcl() if elements are available.
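 *
 * A minimal usage sketch (hypothetical receive path; frame_len is an
 * assumed length that fits in one cluster):
 *
 *	m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
 *	if (m == NULL)
 *		return;			-- no buffers, drop the frame
 *	m->m_len = m->m_pkthdr.len = frame_len;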
m_getcl(int how, short type, int flags)
	if (flags & M_PKTHDR) {
		if (type == MT_DATA && mcl_pool) {
			mcl_pool = mp->m_nextpkt;
			mp->m_nextpkt = NULL;
			mp->m_data = mp->m_ext.ext_buf;
			mp->m_flags = M_PKTHDR|M_EXT|M_EXT_CLUSTER;
			mp->m_pkthdr.rcvif = NULL;
			mp->m_pkthdr.csum_flags = 0;
		MGETHDR(mp, how, type);
	if ((mp->m_flags & M_EXT) == 0) {
 * m_getm(m, len, how, type)
 * This will allocate len-worth of mbufs and/or mbuf clusters (whatever fits
 * best) and return a pointer to the top of the allocated chain. If m is
 * non-null, then we assume that it is a single mbuf or an mbuf chain to
 * which we want len bytes worth of mbufs and/or clusters attached, and so
 * if we succeed in allocating it, we will just return a pointer to m.
 * If we happen to fail at any point during the allocation, we will free
 * up everything we have already allocated and return NULL.
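 *
 * A minimal usage sketch (hypothetical caller): extend an existing chain m
 * with room for 'need' more bytes:
 *
 *	if (m_getm(m, need, MB_DONTWAIT, MT_DATA) == NULL)
 *		return (ENOBUFS);	-- nothing new was attached to m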
m_getm(struct mbuf *m, int len, int how, int type)
	struct mbuf *top, *tail, *mp, *mtail = NULL;
	KASSERT(len >= 0, ("len is < 0 in m_getm"));
	mp = m_get(how, type);
	} else if (len > MINCLSIZE) {
		if ((mp->m_flags & M_EXT) == 0) {
	len -= M_TRAILINGSPACE(mp);
	for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next)
		mp = m_get(how, type);
		if (len > MINCLSIZE) {
			if ((mp->m_flags & M_EXT) == 0)
		len -= M_TRAILINGSPACE(mp);
 * m_mclget() - Adds a cluster to a normal mbuf, M_EXT is set on success.
m_mclget(struct mbuf *m, int how)
	KKASSERT((m->m_flags & M_EXT_OLD) == 0);
	 * Allocate a cluster, return if we can't get one.
	if ((mcl = mclfree) == NULL) {
		if ((mcl = mclfree) == NULL) {
			if (how == MB_WAIT) {
	 * We have a cluster, unlink it from the free list and set the ref
	 * count.
	KKASSERT(mcl->mcl_refs == 0);
	mclfree = mcl->mcl_next;
	 * Add the cluster to the mbuf. The caller will detect that the
	 * mbuf now has an attached cluster.
	m->m_ext.ext_arg = mcl;
	m->m_ext.ext_buf = mcl->mcl_data;
	m->m_ext.ext_nref.new = m_mclref;
	m->m_ext.ext_nfree.new = m_mclfree;
	m->m_ext.ext_size = MCLBYTES;
	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT | M_EXT_CLUSTER;
	mbcluster_t mcl = arg;
	KKASSERT(mcl->mcl_magic == MCL_MAGIC);
	KKASSERT(mcl->mcl_refs > 0);
	if (--mcl->mcl_refs == 0) {
		if (mbstat.m_clfree < mcl_free_max) {
			mcl->mcl_next = mclfree;
			free(mcl->mcl_data, M_MBUFCL);
	mbcluster_t mcl = arg;
	KKASSERT(mcl->mcl_magic == MCL_MAGIC);
 * Helper routines for M_EXT reference/free
m_extref(const struct mbuf *m)
	KKASSERT(m->m_ext.ext_nfree.any != NULL);
	if (m->m_flags & M_EXT_OLD)
		m->m_ext.ext_nref.old(m->m_ext.ext_buf, m->m_ext.ext_size);
		m->m_ext.ext_nref.new(m->m_ext.ext_arg);
 * Free a single mbuf and any associated external storage. The successor,
 * if any, is returned.
 * We do need to check non-first mbufs for m_aux, since some existing code
 * does not call M_PREPEND properly (for example, calls to bpf_mtap from
 * drivers).
m_free(struct mbuf *m)
	KASSERT(m->m_type != MT_FREE, ("freeing free mbuf %p", m));
	 * Adjust our type count and delete any attached chains if the
	 * mbuf is a packet header.
	if ((m->m_flags & M_PKTHDR) != 0)
		m_tag_delete_chain(m, NULL);
	 * Place the mbuf on the appropriate free list. Try to maintain a
	 * small cache of mbuf+cluster pairs.
	if (m->m_flags & M_EXT) {
		KKASSERT(m->m_ext.ext_nfree.any != NULL);
		if (mcl_pool_count < mcl_pool_max && m && m->m_next == NULL &&
		    (m->m_flags & (M_PKTHDR|M_EXT_CLUSTER)) ==
		    (M_PKTHDR|M_EXT_CLUSTER) &&
		    m->m_type == MT_DATA && M_EXT_WRITABLE(m)) {
			KKASSERT(((mbcluster_t)m->m_ext.ext_arg)->mcl_magic ==
			    MCL_MAGIC);
			m->m_nextpkt = mcl_pool;
		if (m->m_flags & M_EXT_OLD)
			m->m_ext.ext_nfree.old(m->m_ext.ext_buf, m->m_ext.ext_size);
			m->m_ext.ext_nfree.new(m->m_ext.ext_arg);
		m->m_ext.ext_arg = NULL;
		m->m_ext.ext_nref.new = NULL;
		m->m_ext.ext_nfree.new = NULL;
	--mbtypes[m->m_type];
	if (mbtypes[MT_FREE] < mbuf_free_max) {
		m->m_type = MT_FREE;
		m->m_next = mmbfree;
m_freem(struct mbuf *m)
 * Mbuffer utility routines.
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
m_prepend(struct mbuf *m, int len, int how)
	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		return ((struct mbuf *)NULL);
	if (m->m_flags & M_PKTHDR)
		M_MOVE_PKTHDR(mn, m);
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of MB_WAIT/MB_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
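 *
 * A minimal usage sketch (hypothetical caller): take a read-only copy of
 * the first hdrlen bytes of a packet, e.g. to hand to a tap:
 *
 *	n = m_copym(m, 0, hdrlen, MB_DONTWAIT);
 *	if (n == NULL)
 *		return;			-- copy failed, m is untouched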
#define MCFail (mbstat.m_mcfail)
m_copym(const struct mbuf *m, int off0, int len, int wait)
	struct mbuf *n, **np;
	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	if (off == 0 && m->m_flags & M_PKTHDR)
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
		MGET(n, wait, m->m_type);
			if (!m_dup_pkthdr(n, m, wait))
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
				n->m_pkthdr.len = len;
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			n->m_ext = m->m_ext;
			n->m_flags |= m->m_flags &
			    (M_EXT | M_EXT_OLD | M_EXT_CLUSTER);
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies also have the room available.
m_copypacket(struct mbuf *m, int how)
	struct mbuf *top, *n, *o;
	MGET(n, how, m->m_type);
	if (!m_dup_pkthdr(n, m, how))
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		n->m_ext = m->m_ext;
		n->m_flags |= m->m_flags & (M_EXT | M_EXT_OLD | M_EXT_CLUSTER);
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		MGET(o, how, m->m_type);
		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			n->m_ext = m->m_ext;
			n->m_flags |= m->m_flags &
			    (M_EXT | M_EXT_OLD | M_EXT_CLUSTER);
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters. Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
m_dup(struct mbuf *m, int how)
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;
	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __FUNCTION__));
	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		/* Get the next new mbuf */
		MGET(n, how, m->m_type);
		if (top == NULL) {		/* first one, must be PKTHDR */
			if (!m_dup_pkthdr(n, m, how))
		} else				/* not the first one */
		if (remain >= MINCLSIZE) {
			if ((n->m_flags & M_EXT) == 0) {
		/* Link it into the new chain */
		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);
			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			if (moff == m->m_len) {
	/* Check correct total mbuf length */
	KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
	    ("%s: bogus m_pkthdr.len", __FUNCTION__));
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
m_cat(struct mbuf *m, struct mbuf *n)
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
m_adj(struct mbuf *mp, int req_len)
	if ((m = mp) == NULL)
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
		 * Trim from tail. Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return. Otherwise, rescan and truncate
		 * after the remaining size.
			if (m->m_next == (struct mbuf *)0)
		if (m->m_len >= len) {
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
			(m = m->m_next)->m_len = 0;
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod will work for a structure
 * of size len). Returns the resulting mbuf chain on success, frees it and
 * returns null on failure. If there is room, it will add up to
 * max_protohdr-len extra bytes to the contiguous region in an attempt to
 * avoid being called next time.
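 *
 * A typical usage sketch (the classic header-access idiom; struct ip is
 * only an example):
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;			-- chain was freed for us
 *	ip = mtod(m, struct ip *);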
#define MPFail (mbstat.m_mpfail)
m_pullup(struct mbuf *n, int len)
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
		MGET(m, MB_DONTWAIT, n->m_type);
		if (n->m_flags & M_PKTHDR)
			M_MOVE_PKTHDR(m, n);
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
	} while (len > 0 && n);
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes. In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf. Use the
 * M_WRITABLE() macro to check for this case.
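 *
 * A minimal usage sketch (hypothetical caller): split a record of
 * reclen bytes off the front of chain m0:
 *
 *	tail = m_split(m0, reclen, MB_DONTWAIT);
 *	if (tail == NULL)
 *		return (ENOBUFS);	-- m0 was left intact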
m_split(struct mbuf *m0, int len0, int wait)
	unsigned len = len0, remain;
	for (m = m0; m && len > m->m_len; m = m->m_next)
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		MGET(n, wait, m->m_type);
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data + len;
		n->m_ext = m->m_ext;
		n->m_flags |= m->m_flags & (M_EXT | M_EXT_OLD | M_EXT_CLUSTER);
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	n->m_next = m->m_next;
 * Routine to copy from device local memory into mbufs.
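 *
 * A minimal usage sketch (hypothetical driver; dev_buf and frame_len
 * describe an already-received frame; a NULL copy function means bcopy):
 *
 *	m = m_devget(dev_buf, frame_len, 0, ifp, NULL);
 *	if (m == NULL)
 *		return;			-- out of mbufs, drop the frame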
m_devget(char *buf, int totlen, int off0, struct ifnet *ifp,
	void (*copy) (char *from, caddr_t to, u_int len))
	struct mbuf *top = 0, **mp = &top;
	int off = off0, len;
		cp += off + 2 * sizeof(u_short);
		totlen -= 2 * sizeof(u_short);
	MGETHDR(m, MB_DONTWAIT, MT_DATA);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	while (totlen > 0) {
			MGET(m, MB_DONTWAIT, MT_DATA);
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, MB_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			 * Place initial small packet/header at end of mbuf.
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
			copy(cp, mtod(m, caddr_t), (unsigned)len);
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
	struct mbuf *m = m0, *n;
	while (off > (mlen = m->m_len)) {
		if (m->m_next == 0) {
			n = m_getclr(MB_DONTWAIT, m->m_type);
			n->m_len = min(MLEN, len + off);
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		if (m->m_next == 0) {
			n = m_get(MB_DONTWAIT, m->m_type);
			n->m_len = min(MLEN, len);
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
m_print(const struct mbuf *m)
	const struct mbuf *m2;
	len = m->m_pkthdr.len;
		printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
	KASSERT((to->m_flags & M_EXT) == 0, ("m_move_pkthdr: to has cluster"));
	to->m_flags = from->m_flags & M_COPYFLAGS;
	to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
	from->m_flags &= ~M_PKTHDR;
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, how));
 * Defragment an mbuf chain, returning the shortest possible
 * chain of mbufs and clusters. If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed in chain will be unchanged. Upon success,
 * the original chain will be freed, and the new chain
 * will be returned.
 * If a non-packet header is passed in, the original
 * mbuf (chain?) will be returned unharmed.
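 *
 * A minimal usage sketch (hypothetical transmit path, e.g. when a DMA
 * engine limits the number of segments):
 *
 *	m2 = m_defrag(m, MB_DONTWAIT);
 *	if (m2 == NULL)
 *		return (ENOBUFS);	-- m is still valid and unchanged
 *	m = m2;				-- the original chain was freed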
m_defrag(struct mbuf *m0, int how)
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, length;
	if (!(m0->m_flags & M_PKTHDR))
#ifdef MBUF_STRESS_TEST
	if (m_defragrandomfailures) {
		int temp = arc4random() & 0xff;
	if (m0->m_pkthdr.len > MHLEN)
		m_final = m_getcl(how, MT_DATA, M_PKTHDR);
		m_final = m_gethdr(how, MT_DATA);
	if (m_final == NULL)
	if (m_dup_pkthdr(m_final, m0, how) == 0)
	while (progress < m0->m_pkthdr.len) {
		length = m0->m_pkthdr.len - progress;
		if (length > MCLBYTES)
		if (m_new == NULL) {
				m_new = m_getcl(how, MT_DATA, 0);
				m_new = m_get(how, MT_DATA);
		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		if (m0->m_next == NULL)
	m_defragbytes += m0->m_pkthdr.len;
 * Move data from uio into mbufs.
 * A length of zero means copy the whole uio.
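 *
 * A minimal usage sketch (hypothetical write path; uio supplied by the
 * caller):
 *
 *	m = m_uiomove(uio, MB_WAIT, 0);	-- 0 means copy the whole uio
 *	if (m == NULL)
 *		return (ENOBUFS);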
m_uiomove(struct uio *uio, int wait, int len0)
	struct mbuf *head;		/* result mbuf chain */
	struct mbuf *m;			/* current working mbuf */
	int resid, datalen, error;
	resid = (len0 == 0) ? uio->uio_resid : min(len0, uio->uio_resid);
		if (resid > MHLEN) {
			m = m_getcl(wait, MT_DATA, head == NULL ? M_PKTHDR : 0);
			if (m->m_flags & M_PKTHDR)
				m->m_pkthdr.len = 0;
				MGETHDR(m, wait, MT_DATA);
				m->m_pkthdr.len = 0;
				/* Leave room for protocol headers. */
				MGET(m, wait, MT_DATA);
		datalen = min(MCLBYTES, resid);
		error = uiomove(mtod(m, caddr_t), datalen, uio);
			head->m_pkthdr.len += datalen;
	} while (resid > 0);