/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
 * $DragonFly: src/sys/kern/uipc_mbuf.c,v 1.60 2007/03/04 18:51:59 swildner Exp $
 */
#include "opt_param.h"
#include "opt_mbuf_stress_test.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/objcache.h>
#include <sys/protosw.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/serialize.h>
#include <sys/thread2.h>

#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>
/*
 * mbuf cluster meta-data
 */
struct mbcluster {
	int32_t	mcl_refs;
	void	*mcl_data;
	struct lwkt_serialize mcl_serializer;
};
static void mbinit(void *);
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL);

static u_long mbtypes[MT_NTYPES];

struct mbstat mbstat;
int nmbclusters;
int nmbufs;
int max_linkhdr;
int max_protohdr;
int max_hdr;
int max_datalen;
int m_defragpackets;
int m_defragbytes;
int m_defraguseless;
int m_defragfailure;
#ifdef MBUF_STRESS_TEST
int m_defragrandomfailures;
#endif

struct objcache *mbuf_cache, *mbufphdr_cache;
struct objcache *mclmeta_cache;
struct objcache *mbufcluster_cache, *mbufphdrcluster_cache;
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
	&max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
	&max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
	&max_datalen, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RW, &mbstat, mbstat, "");
SYSCTL_OPAQUE(_kern_ipc, OID_AUTO, mbtypes, CTLFLAG_RD, mbtypes,
	sizeof(mbtypes), "LU", "");
SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RW,
	&nmbclusters, 0, "Maximum number of mbuf clusters available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RW, &nmbufs, 0,
	"Maximum number of mbufs available");

SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
	&m_defragpackets, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
	&m_defragbytes, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
	&m_defraguseless, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
	&m_defragfailure, 0, "");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
	&m_defragrandomfailures, 0, "");
#endif
static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");
static MALLOC_DEFINE(M_MBUFCL, "mbufcl", "mbufcl");
static MALLOC_DEFINE(M_MCLMETA, "mclmeta", "mclmeta");

static void m_reclaim(void);
static void m_mclref(void *arg);
static void m_mclfree(void *arg);

#ifndef NMBCLUSTERS
#define NMBCLUSTERS	(512 + maxusers * 16)
#endif
#ifndef NMBUFS
#define NMBUFS		(nmbclusters * 2)
#endif
/*
 * Perform sanity checks of tunables declared above.
 */
static void
tunable_mbinit(void *dummy)
{
	/*
	 * This has to be done before VM init.
	 */
	nmbclusters = NMBCLUSTERS;
	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	nmbufs = NMBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);

	/* Sanity checks */
	if (nmbufs < nmbclusters * 2)
		nmbufs = nmbclusters * 2;
}

SYSINIT(tunable_mbinit, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_mbinit, NULL);
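
/*
 * Illustrative note (not from the original source): both limits above are
 * boot-time tunables, so they can be raised from the loader before the
 * kernel initializes the VM system, e.g. in /boot/loader.conf:
 *
 *	kern.ipc.nmbclusters="32768"
 *	kern.ipc.nmbufs="65536"
 *
 * tunable_mbinit() then re-applies the nmbufs >= nmbclusters * 2 invariant.
 */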
198 /* "number of clusters of pages" */
204 * The mbuf object cache only guarantees that m_next and m_nextpkt are
205 * NULL and that m_data points to the beginning of the data area. In
206 * particular, m_len and m_pkthdr.len are uninitialized. It is the
207 * responsibility of the caller to initialize those fields before use.
210 static boolean_t __inline
211 mbuf_ctor(void *obj, void *private, int ocflags)
213 struct mbuf *m = obj;
217 m->m_data = m->m_dat;
/*
 * Initialize the mbuf and the packet header fields.
 */
static boolean_t __inline
mbufphdr_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;

	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_data = m->m_pktdat;
	m->m_flags = M_PKTHDR | M_PHCACHE;

	m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
	SLIST_INIT(&m->m_pkthdr.tags);
	m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
	m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */

	return (TRUE);
}
/*
 * An mbcluster object consists of a 2K (MCLBYTES) cluster and a refcount.
 */
static boolean_t __inline
mclmeta_ctor(void *obj, void *private, int ocflags)
{
	struct mbcluster *cl = obj;
	void *buf;

	if (ocflags & M_NOWAIT)
		buf = kmalloc(MCLBYTES, M_MBUFCL, M_NOWAIT | M_ZERO);
	else
		buf = kmalloc(MCLBYTES, M_MBUFCL, M_INTWAIT | M_ZERO);
	if (buf == NULL)
		return (FALSE);
	cl->mcl_refs = 0;
	cl->mcl_data = buf;
	lwkt_serialize_init(&cl->mcl_serializer);
	return (TRUE);
}
static void
mclmeta_dtor(void *obj, void *private)
{
	struct mbcluster *mcl = obj;

	KKASSERT(mcl->mcl_refs == 0);
	kfree(mcl->mcl_data, M_MBUFCL);
}
static void
linkcluster(struct mbuf *m, struct mbcluster *cl)
{
	/*
	 * Add the cluster to the mbuf.  The caller will detect that the
	 * mbuf now has an attached cluster.
	 */
	m->m_ext.ext_arg = cl;
	m->m_ext.ext_buf = cl->mcl_data;
	m->m_ext.ext_ref = m_mclref;
	m->m_ext.ext_free = m_mclfree;
	m->m_ext.ext_size = MCLBYTES;
	atomic_add_int(&cl->mcl_refs, 1);

	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT | M_EXT_CLUSTER;
}
static boolean_t __inline
mbufphdrcluster_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbufphdr_ctor(obj, private, ocflags);
	cl = objcache_get(mclmeta_cache, ocflags);
	if (cl == NULL)
		return (FALSE);
	m->m_flags |= M_CLCACHE;
	linkcluster(m, cl);
	return (TRUE);
}

static boolean_t __inline
mbufcluster_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbuf_ctor(obj, private, ocflags);
	cl = objcache_get(mclmeta_cache, ocflags);
	if (cl == NULL)
		return (FALSE);
	m->m_flags |= M_CLCACHE;
	linkcluster(m, cl);
	return (TRUE);
}
/*
 * Used for both the cluster and cluster PHDR caches.
 *
 * The mbuf may have lost its cluster due to sharing, deal
 * with the situation by checking M_EXT.
 */
static void
mbufcluster_dtor(void *obj, void *private)
{
	struct mbuf *m = obj;
	struct mbcluster *mcl;

	if (m->m_flags & M_EXT) {
		KKASSERT((m->m_flags & M_EXT_CLUSTER) != 0);
		mcl = m->m_ext.ext_arg;
		KKASSERT(mcl->mcl_refs == 1);
		mcl->mcl_refs = 0;
		objcache_put(mclmeta_cache, mcl);
	}
}
struct objcache_malloc_args mbuf_malloc_args = { MSIZE, M_MBUF };
struct objcache_malloc_args mclmeta_malloc_args =
	{ sizeof(struct mbcluster), M_MCLMETA };

static void
mbinit(void *dummy)
{
	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;

	mbuf_cache = objcache_create("mbuf", nmbufs, 0,
	    mbuf_ctor, NULL, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mbufphdr_cache = objcache_create("mbuf pkt hdr", nmbufs, 64,
	    mbufphdr_ctor, NULL, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mclmeta_cache = objcache_create("cluster mbuf", nmbclusters, 0,
	    mclmeta_ctor, mclmeta_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args);
	mbufcluster_cache = objcache_create("mbuf + cluster", nmbclusters, 0,
	    mbufcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mbufphdrcluster_cache = objcache_create("mbuf pkt hdr + cluster",
	    nmbclusters, 64, mbufphdrcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
}
/*
 * Return the number of references to this mbuf's data.  0 is returned
 * if the mbuf is not M_EXT, a reference count is returned if it is
 * M_EXT | M_EXT_CLUSTER, and 99 is returned if it is a special M_EXT.
 */
int
m_sharecount(struct mbuf *m)
{
	switch (m->m_flags & (M_EXT | M_EXT_CLUSTER)) {
	case 0:
		return (0);
	case M_EXT:
		return (99);
	case M_EXT | M_EXT_CLUSTER:
		return (((struct mbcluster *)m->m_ext.ext_arg)->mcl_refs);
	}
	/* NOTREACHED */
	return (0);		/* to shut up compiler */
}
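
/*
 * Example (illustrative sketch, not part of the original file): code that
 * wants to scribble on cluster data can use the share count to detect
 * aliasing before modifying anything in place.
 */
#if 0
static int
example_is_unshared(struct mbuf *m)
{
	/* 0 = no ext storage, 1 = sole owner; 99 (special M_EXT) is shared */
	return (m_sharecount(m) <= 1);
}
#endif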
/*
 * Change the mbuf to a new type.
 */
void
m_chtype(struct mbuf *m, int type)
{
	++mbtypes[type];
	--mbtypes[m->m_type];
	m->m_type = type;
}

static void
m_reclaim(void)
{
	struct domain *dp;
	struct protosw *pr;

	SLIST_FOREACH(dp, &domains, dom_next) {
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
			if (pr->pr_drain)
				(*pr->pr_drain)();
		}
	}
	++mbstat.m_drain;
}

static void __inline
updatestats(struct mbuf *m, int type)
{
	m->m_type = type;

	++mbtypes[type];
	++mbstat.m_mbufs;
}
struct mbuf *
m_get(int how, int type)
{
	struct mbuf *m;
	int ntries = 0;
	int ocf = MBTOM(how);

retryonce:

	m = objcache_get(mbuf_cache, ocf);

	if (m == NULL) {
		if ((how & MB_TRYWAIT) && ntries++ == 0) {
			struct objcache *reclaimlist[] = {
				mbufphdr_cache,
				mbufcluster_cache, mbufphdrcluster_cache
			};
			const int nreclaims = __arysize(reclaimlist);

			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
				m_reclaim();
			goto retryonce;
		}
		return (NULL);
	}

	updatestats(m, type);
	return (m);
}
struct mbuf *
m_gethdr(int how, int type)
{
	struct mbuf *m;
	int ntries = 0;
	int ocf = MBTOM(how);

retryonce:

	m = objcache_get(mbufphdr_cache, ocf);

	if (m == NULL) {
		if ((how & MB_TRYWAIT) && ntries++ == 0) {
			struct objcache *reclaimlist[] = {
				mbuf_cache,
				mbufcluster_cache, mbufphdrcluster_cache
			};
			const int nreclaims = __arysize(reclaimlist);

			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
				m_reclaim();
			goto retryonce;
		}
		return (NULL);
	}

	updatestats(m, type);
	return (m);
}
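
/*
 * Example (illustrative sketch, not part of the original file): per the
 * object-cache contract documented above, m_len and m_pkthdr.len come
 * back uninitialized and the caller must set them before use.
 */
#if 0
static struct mbuf *
example_gethdr_zeroed(void)
{
	struct mbuf *m;

	m = m_gethdr(MB_DONTWAIT, MT_DATA);
	if (m != NULL) {
		m->m_len = 0;
		m->m_pkthdr.len = 0;
	}
	return (m);
}
#endif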
/*
 * Get an mbuf (not an mbuf cluster!) and zero it.
 */
struct mbuf *
m_getclr(int how, int type)
{
	struct mbuf *m;

	m = m_get(how, type);
	if (m != NULL)
		bzero(m->m_data, MLEN);
	return (m);
}
/*
 * Returns an mbuf with an attached cluster.
 * Because many network drivers use this kind of buffer a lot, it is
 * convenient to keep a small pool of free buffers of this kind.
 * Even a small size such as 10 gives about 10% improvement in the
 * forwarding rate in a bridge or router.
 */
struct mbuf *
m_getcl(int how, short type, int flags)
{
	struct mbuf *m;
	int ocflags = MBTOM(how);
	int ntries = 0;

retryonce:

	if (flags & M_PKTHDR)
		m = objcache_get(mbufphdrcluster_cache, ocflags);
	else
		m = objcache_get(mbufcluster_cache, ocflags);

	if (m == NULL) {
		if ((how & MB_TRYWAIT) && ntries++ == 0) {
			struct objcache *reclaimlist[1];

			if (flags & M_PKTHDR)
				reclaimlist[0] = mbufcluster_cache;
			else
				reclaimlist[0] = mbufphdrcluster_cache;
			if (!objcache_reclaimlist(reclaimlist, 1, ocflags))
				m_reclaim();
			goto retryonce;
		}
		return (NULL);
	}

	m->m_type = type;
	return (m);
}
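
/*
 * Example (illustrative sketch, not part of the original file): the classic
 * consumer of m_getcl() is a driver receive-ring refill path that wants an
 * mbuf and its cluster in a single allocation.
 */
#if 0
static struct mbuf *
example_rxbuf_alloc(void)
{
	struct mbuf *m;

	m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (NULL);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	return (m);
}
#endif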
/*
 * Allocate a chain of mbufs and/or clusters of the requested length.
 */
struct mbuf *
m_getc(int len, int how, int type)
{
	struct mbuf *n, *nfirst = NULL, **ntail = &nfirst;
	int nsize;

	while (len > 0) {
		n = m_getl(len, how, type, 0, &nsize);
		if (n == NULL) {
			m_freem(nfirst);
			return (NULL);
		}
		n->m_len = 0;
		*ntail = n;
		ntail = &n->m_next;
		len -= nsize;
	}
	return (nfirst);
}
/*
 * Allocate len-worth of mbufs and/or mbuf clusters (whatever fits best)
 * and return a pointer to the head of the allocated chain.  If m0 is
 * non-null, then we assume that it is a single mbuf or an mbuf chain to
 * which we want len bytes worth of mbufs and/or clusters attached, and so
 * if we succeed in allocating it, we will just return a pointer to m0.
 *
 * If we happen to fail at any point during the allocation, we will free
 * up everything we have already allocated and return NULL.
 *
 * Deprecated.  Use m_getc() and m_cat() instead.
 */
struct mbuf *
m_getm(struct mbuf *m0, int len, int type, int how)
{
	struct mbuf *nfirst;

	nfirst = m_getc(len, how, type);

	if (m0 != NULL) {
		m_last(m0)->m_next = nfirst;
		return (m0);
	}

	return (nfirst);
}
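
/*
 * Example (illustrative sketch, not part of the original file): the
 * recommended replacement for m_getm() is an explicit m_getc() followed
 * by m_cat().
 */
#if 0
static int
example_extend_chain(struct mbuf *m0, int len)
{
	struct mbuf *n;

	n = m_getc(len, MB_DONTWAIT, MT_DATA);
	if (n == NULL)
		return (ENOBUFS);
	m_cat(m0, n);
	return (0);
}
#endif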
/*
 * Adds a cluster to a normal mbuf, M_EXT is set on success.
 * Deprecated.  Use m_getcl() instead.
 */
void
m_mclget(struct mbuf *m, int how)
{
	struct mbcluster *mcl;

	KKASSERT((m->m_flags & M_EXT) == 0);
	mcl = objcache_get(mclmeta_cache, MBTOM(how));
	if (mcl != NULL) {
		linkcluster(m, mcl);
		/* leave the m_mbufs count intact for original mbuf */
	}
}
/*
 * Updates to mbcluster must be MPSAFE.  Only an entity which already has
 * a reference to the cluster can ref it, so we are in no danger of
 * racing an add with a subtract.  But the operation must still be atomic
 * since multiple entities may have a reference on the cluster.
 *
 * m_mclfree() is almost the same but it must contend with two entities
 * freeing the cluster at the same time.  If there is only one reference
 * count we are the only entity referencing the cluster and no further
 * locking is required.  Otherwise we must protect against a race to 0
 * with the serializer.
 */
static void
m_mclref(void *arg)
{
	struct mbcluster *mcl = arg;

	atomic_add_int(&mcl->mcl_refs, 1);
}

static void
m_mclfree(void *arg)
{
	struct mbcluster *mcl = arg;

	if (mcl->mcl_refs == 1) {
		mcl->mcl_refs = 0;
		objcache_put(mclmeta_cache, mcl);
	} else {
		lwkt_serialize_enter(&mcl->mcl_serializer);
		if (mcl->mcl_refs > 1) {
			atomic_subtract_int(&mcl->mcl_refs, 1);
			lwkt_serialize_exit(&mcl->mcl_serializer);
		} else {
			lwkt_serialize_exit(&mcl->mcl_serializer);
			KKASSERT(mcl->mcl_refs == 1);
			mcl->mcl_refs = 0;
			objcache_put(mclmeta_cache, mcl);
		}
	}
}
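
/*
 * Example (illustrative sketch, not part of the original file): the
 * refcount protocol above is what makes read-only sharing safe.  m_copym()
 * bumps mcl_refs through ext_ref; each m_free() later drops one reference,
 * and only the final drop returns the cluster to its cache.
 */
#if 0
static void
example_share_and_release(struct mbuf *m)
{
	struct mbuf *n;

	n = m_copym(m, 0, M_COPYALL, MB_DONTWAIT);	/* shares clusters */
	if (n != NULL)
		m_freem(n);		/* only drops the extra references */
}
#endif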
extern void db_print_backtrace(void);

/*
 * Free a single mbuf and any associated external storage.  The successor,
 * if any, is returned.
 *
 * We do need to check non-first mbufs for m_aux, since some existing
 * code does not call M_PREPEND properly.
 * (example: call to bpf_mtap from drivers)
 */
struct mbuf *
m_free(struct mbuf *m)
{
	struct mbuf *n;

	KASSERT(m->m_type != MT_FREE, ("freeing free mbuf %p", m));
	--mbtypes[m->m_type];

	n = m->m_next;

	/*
	 * Make sure the mbuf is in constructed state before returning it
	 * to the objcache.
	 */
#ifdef notyet
	KKASSERT(m->m_nextpkt == NULL);
#else
	if (m->m_nextpkt != NULL) {
		static int afewtimes = 10;

		if (afewtimes-- > 0) {
			kprintf("mfree: m->m_nextpkt != NULL\n");
			db_print_backtrace();
		}
		m->m_nextpkt = NULL;
	}
#endif
713 if (m->m_flags & M_PKTHDR) {
714 m_tag_delete_chain(m); /* eliminate XXX JH */
717 m->m_flags &= (M_EXT | M_EXT_CLUSTER | M_CLCACHE | M_PHCACHE);
720 * Clean the M_PKTHDR state so we can return the mbuf to its original
721 * cache. This is based on the PHCACHE flag which tells us whether
722 * the mbuf was originally allocated out of a packet-header cache
723 * or a non-packet-header cache.
725 if (m->m_flags & M_PHCACHE) {
726 m->m_flags |= M_PKTHDR;
727 m->m_pkthdr.rcvif = NULL; /* eliminate XXX JH */
728 m->m_pkthdr.csum_flags = 0; /* eliminate XXX JH */
729 m->m_pkthdr.fw_flags = 0; /* eliminate XXX JH */
730 SLIST_INIT(&m->m_pkthdr.tags);
734 * Handle remaining flags combinations. M_CLCACHE tells us whether
735 * the mbuf was originally allocated from a cluster cache or not,
736 * and is totally separate from whether the mbuf is currently
737 * associated with a cluster.
740 switch(m->m_flags & (M_CLCACHE | M_EXT | M_EXT_CLUSTER)) {
741 case M_CLCACHE | M_EXT | M_EXT_CLUSTER:
743 * mbuf+cluster cache case. The mbuf was allocated from the
744 * combined mbuf_cluster cache and can be returned to the
745 * cache if the cluster hasn't been shared.
747 if (m_sharecount(m) == 1) {
749 * The cluster has not been shared, we can just
750 * reset the data pointer and return the mbuf
751 * to the cluster cache. Note that the reference
752 * count is left intact (it is still associated with
755 m->m_data = m->m_ext.ext_buf;
756 if (m->m_flags & M_PHCACHE)
757 objcache_put(mbufphdrcluster_cache, m);
759 objcache_put(mbufcluster_cache, m);
763 * Hell. Someone else has a ref on this cluster,
764 * we have to disconnect it which means we can't
765 * put it back into the mbufcluster_cache, we
766 * have to destroy the mbuf.
768 * Other mbuf references to the cluster will typically
769 * be M_EXT | M_EXT_CLUSTER but without M_CLCACHE.
771 * XXX we could try to connect another cluster to
774 m->m_ext.ext_free(m->m_ext.ext_arg);
775 m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
776 if (m->m_flags & M_PHCACHE)
777 objcache_dtor(mbufphdrcluster_cache, m);
779 objcache_dtor(mbufcluster_cache, m);
782 case M_EXT | M_EXT_CLUSTER:
784 * Normal cluster associated with an mbuf that was allocated
785 * from the normal mbuf pool rather then the cluster pool.
786 * The cluster has to be independantly disassociated from the
789 if (m_sharecount(m) == 1)
794 * Normal cluster association case, disconnect the cluster from
795 * the mbuf. The cluster may or may not be custom.
797 m->m_ext.ext_free(m->m_ext.ext_arg);
798 m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
802 * return the mbuf to the mbuf cache.
804 if (m->m_flags & M_PHCACHE) {
805 m->m_data = m->m_pktdat;
806 objcache_put(mbufphdr_cache, m);
808 m->m_data = m->m_dat;
809 objcache_put(mbuf_cache, m);
815 panic("bad mbuf flags %p %08x\n", m, m->m_flags);
/*
 * Free an entire mbuf chain.
 */
void
m_freem(struct mbuf *m)
{
	while (m)
		m = m_free(m);
}
/*
 * mbuf utility routines
 */

/*
 * Lesser-used path for M_PREPEND: allocate new mbuf to prepend to chain and
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	if (m->m_flags & M_PKTHDR)
		mn = m_gethdr(how, m->m_type);
	else
		mn = m_get(how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR)
		M_MOVE_PKTHDR(mn, m);
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
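
/*
 * Example (illustrative sketch, not part of the original file): callers
 * normally go through the M_PREPEND() macro, which uses leading space in
 * the first mbuf when possible and falls back to m_prepend() otherwise.
 * On failure the chain has been freed and the pointer is NULL.
 */
#if 0
static struct mbuf *
example_add_header(struct mbuf *m, int hdrlen)
{
	M_PREPEND(m, hdrlen, MB_DONTWAIT);
	return (m);	/* NULL if the prepend failed */
}
#endif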
/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of MB_WAIT/MB_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copym(const struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = NULL;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		/*
		 * Because we are sharing any cluster attachment below,
		 * be sure to get an mbuf that does not have a cluster
		 * associated with it.
		 */
		if (copyhdr)
			n = m_gethdr(wait, m->m_type);
		else
			n = m_get(wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, wait))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			KKASSERT((n->m_flags & M_EXT) == 0);
			n->m_data = m->m_data + off;
			m->m_ext.ext_ref(m->m_ext.ext_arg);
			n->m_ext = m->m_ext;
			n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
		} else {
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		}
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == NULL)
		mbstat.m_mcfail++;
	return (top);
nospace:
	m_freem(top);
	mbstat.m_mcfail++;
	return (NULL);
}
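
/*
 * Example (illustrative sketch, not part of the original file):
 * retransmission-style code depends on exactly this read-only semantic,
 * copying a window of a queued chain without duplicating cluster data.
 */
#if 0
static struct mbuf *
example_copy_window(struct mbuf *head, int off, int len)
{
	return (m_copym(head, off, len, MB_DONTWAIT));
}
#endif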
/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies also have the room available.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	n = m_gethdr(how, m->m_type);
	top = n;
	if (n == NULL)
		goto nospace;

	if (!m_dup_pkthdr(n, m, how))
		goto nospace;
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		KKASSERT((n->m_flags & M_EXT) == 0);
		n->m_data = m->m_data;
		m->m_ext.ext_ref(m->m_ext.ext_arg);
		n->m_ext = m->m_ext;
		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
	} else {
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		o = m_get(how, m->m_type);
		if (o == NULL)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			KKASSERT((n->m_flags & M_EXT) == 0);
			n->m_data = m->m_data;
			m->m_ext.ext_ref(m->m_ext.ext_arg);
			n->m_ext = m->m_ext;
			n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	mbstat.m_mcfail++;
	return (NULL);
}
/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
	unsigned count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
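
/*
 * Example (illustrative sketch, not part of the original file):
 * m_copydata() is the safe way to read a field that may straddle mbuf
 * boundaries into local storage.
 */
#if 0
static uint32_t
example_peek32(const struct mbuf *m, int off)
{
	uint32_t v;

	m_copydata(m, off, sizeof(v), (caddr_t)&v);
	return (v);
}
#endif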
/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	/* Sanity check */
	if (m == NULL)
		return (NULL);
	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __func__));

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		n = m_getl(remain, how, m->m_type, top == NULL ? M_PKTHDR : 0,
		    &nsize);
		if (n == NULL)
			goto nospace;
		if (top == NULL) {
			if (!m_dup_pkthdr(n, m, how))
				goto nospace0;
		}

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		n->m_len = 0;
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __func__));
	}
	return (top);

nospace:
	m_freem(top);
nospace0:
	mbstat.m_mcfail++;
	return (NULL);
}
/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	m = m_last(m);
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}
void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (req_len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == NULL)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}
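
/*
 * Example (illustrative sketch, not part of the original file): positive
 * lengths trim from the head of the chain, negative lengths from the tail,
 * e.g. stripping a 14-byte Ethernet header and a 4-byte trailing CRC.
 */
#if 0
static void
example_strip_frame(struct mbuf *m)
{
	m_adj(m, 14);	/* drop the link-level header */
	m_adj(m, -4);	/* drop the trailing CRC */
}
#endif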
/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod will work for a structure
 * of size len).  Returns the resulting mbuf chain on success, frees it and
 * returns null on failure.  If there is room, it will add up to
 * max_protohdr-len extra bytes to the contiguous region in an attempt to
 * avoid being called next time.
 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if (!(n->m_flags & M_EXT) &&
	    n->m_data + len < &n->m_dat[MLEN] &&
	    n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		if (n->m_flags & M_PKTHDR)
			m = m_gethdr(MB_DONTWAIT, n->m_type);
		else
			m = m_get(MB_DONTWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR)
			M_MOVE_PKTHDR(m, n);
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	mbstat.m_mcfail++;
	return (NULL);
}
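
/*
 * Example (illustrative sketch, not part of the original file): the
 * canonical m_pullup() idiom in a protocol input path; on failure the
 * chain has already been freed.
 */
#if 0
static struct mbuf *
example_input(struct mbuf *m, int hdrlen)
{
	if (m->m_len < hdrlen && (m = m_pullup(m, hdrlen)) == NULL)
		return (NULL);	/* chain was freed by m_pullup() */
	return (m);
}
#endif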
/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf.  Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		n = m_gethdr(wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				m_free(n);
				return (NULL);
			} else {
				n->m_len = 0;
				return (n);
			}
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		n = m_get(wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		KKASSERT((n->m_flags & M_EXT) == 0);
		n->m_data = m->m_data + len;
		m->m_ext.ext_ref(m->m_ext.ext_arg);
		n->m_ext = m->m_ext;
		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}
/*
 * Routine to copy from device local memory into mbufs.
 * Note: "offset" is ill-defined and always called as 0, so ignore it.
 */
struct mbuf *
m_devget(char *buf, int len, int offset, struct ifnet *ifp,
    void (*copy)(volatile const void *from, volatile void *to, size_t length))
{
	struct mbuf *m, *mfirst = NULL, **mtail;
	int nsize, flags;

	mtail = &mfirst;
	flags = M_PKTHDR;

	while (len > 0) {
		m = m_getl(len, MB_DONTWAIT, MT_DATA, flags, &nsize);
		if (m == NULL) {
			m_freem(mfirst);
			return (NULL);
		}
		m->m_len = min(len, nsize);

		if (flags & M_PKTHDR) {
			if (len + max_linkhdr <= nsize)
				m->m_data += max_linkhdr;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = len;
			flags = 0;
		}

		copy(buf, m->m_data, (unsigned)m->m_len);
		buf += m->m_len;
		len -= m->m_len;
		*mtail = m;
		mtail = &m->m_next;
	}

	return (mfirst);
}
/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == NULL)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			n = m_getclr(MB_DONTWAIT, m->m_type);
			if (n == NULL)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			n = m_get(MB_DONTWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:
	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}
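
/*
 * Example (illustrative sketch, not part of the original file): overwrite
 * a small field at a given offset, letting m_copyback() extend the chain
 * if it is too short.
 */
#if 0
static void
example_patch16(struct mbuf *m, int off, uint16_t val)
{
	m_copyback(m, off, sizeof(val), (caddr_t)&val);
}
#endif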
void
m_print(const struct mbuf *m)
{
	int len;
	const struct mbuf *m2;

	len = m->m_pkthdr.len;
	m2 = m;
	while (len) {
		kprintf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
		len -= m2->m_len;
		m2 = m2->m_next;
	}
}
1439 * "Move" mbuf pkthdr from "from" to "to".
1440 * "from" must have M_PKTHDR set, and "to" must be empty.
1443 m_move_pkthdr(struct mbuf *to, struct mbuf *from)
1445 KASSERT((to->m_flags & M_PKTHDR), ("m_move_pkthdr: not packet header"));
1447 to->m_flags |= from->m_flags & M_COPYFLAGS;
1448 to->m_pkthdr = from->m_pkthdr; /* especially tags */
1449 SLIST_INIT(&from->m_pkthdr.tags); /* purge tags from src */
/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
{
	KASSERT((to->m_flags & M_PKTHDR), ("m_dup_pkthdr: not packet header"));

	to->m_flags = (from->m_flags & M_COPYFLAGS) |
	    (to->m_flags & ~M_COPYFLAGS);
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, how));
}
/*
 * Defragment an mbuf chain, returning the shortest possible
 * chain of mbufs and clusters.  If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed in chain will be unchanged.  Upon success,
 * the original chain will be freed, and the new chain
 * will be returned.
 *
 * If a non-packet header is passed in, the original
 * mbuf (chain?) will be returned unharmed.
 *
 * m_defrag_nofree doesn't free the passed in mbuf.
 */
struct mbuf *
m_defrag(struct mbuf *m0, int how)
{
	struct mbuf *m_new;

	if ((m_new = m_defrag_nofree(m0, how)) == NULL)
		return (NULL);
	if (m_new != m0)
		m_freem(m0);
	return (m_new);
}

struct mbuf *
m_defrag_nofree(struct mbuf *m0, int how)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, length, nsize;

	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

#ifdef MBUF_STRESS_TEST
	if (m_defragrandomfailures) {
		int temp = karc4random() & 0xff;

		if (temp == 0xba)
			goto nospace;
	}
#endif

	m_final = m_getl(m0->m_pkthdr.len, how, MT_DATA, M_PKTHDR, &nsize);
	if (m_final == NULL)
		goto nospace;
	m_final->m_len = 0;	/* in case m0->m_pkthdr.len is zero */

	if (m_dup_pkthdr(m_final, m0, how) == 0)
		goto nospace;

	m_new = m_final;

	while (progress < m0->m_pkthdr.len) {
		length = m0->m_pkthdr.len - progress;
		if (length > MCLBYTES)
			length = MCLBYTES;

		if (m_new == NULL) {
			m_new = m_getl(length, how, MT_DATA, 0, &nsize);
			if (m_new == NULL)
				goto nospace;
		}

		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
		progress += length;
		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
	if (m0->m_next == NULL)
		m_defraguseless++;
	m_defragpackets++;
	m_defragbytes += m_final->m_pkthdr.len;
	return (m_final);

nospace:
	m_defragfailure++;
	if (m_new)
		m_free(m_new);
	m_freem(m_final);
	return (NULL);
}
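
/*
 * Example (illustrative sketch, not part of the original file): a transmit
 * path whose DMA engine only handles a few segments can use m_defrag() to
 * compact a long chain before giving up.
 */
#if 0
static struct mbuf *
example_tx_compact(struct mbuf *m)
{
	struct mbuf *n;

	n = m_defrag(m, MB_DONTWAIT);
	if (n == NULL)
		return (m);	/* failed: original chain is untouched */
	return (n);		/* success: original chain was freed */
}
#endif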
/*
 * Move data from uio into mbufs.
 */
struct mbuf *
m_uiomove(struct uio *uio)
{
	struct mbuf *m;			/* current working mbuf */
	struct mbuf *head = NULL;	/* result mbuf chain */
	struct mbuf **mp = &head;
	int resid = uio->uio_resid, nsize, flags = M_PKTHDR, error;

	do {
		m = m_getl(resid, MB_WAIT, MT_DATA, flags, &nsize);
		if (flags) {
			m->m_pkthdr.len = 0;
			/* Leave room for protocol headers. */
			if (resid < MHLEN)
				MH_ALIGN(m, resid);
			flags = 0;
		}
		m->m_len = min(nsize, resid);
		error = uiomove(mtod(m, caddr_t), m->m_len, uio);
		if (error) {
			m_free(m);
			goto failed;
		}
		*mp = m;
		mp = &m->m_next;
		head->m_pkthdr.len += m->m_len;
		resid -= m->m_len;
	} while (resid > 0);

	return (head);

failed:
	m_freem(head);
	return (NULL);
}
struct mbuf *
m_last(struct mbuf *m)
{
	while (m->m_next)
		m = m->m_next;
	return (m);
}
/*
 * Return the number of bytes in an mbuf chain.
 * If lastm is not NULL, also return the last mbuf.
 */
u_int
m_lengthm(struct mbuf *m, struct mbuf **lastm)
{
	u_int len = 0;
	struct mbuf *prev = m;

	while (m) {
		len += m->m_len;
		prev = m;
		m = m->m_next;
	}
	if (lastm != NULL)
		*lastm = prev;
	return (len);
}
/*
 * Like m_lengthm(), except also keep track of mbuf usage.
 */
u_int
m_countm(struct mbuf *m, struct mbuf **lastm, u_int *pmbcnt)
{
	u_int len = 0, mbcnt = 0;
	struct mbuf *prev = m;

	while (m) {
		len += m->m_len;
		mbcnt += MSIZE;
		if (m->m_flags & M_EXT)
			mbcnt += m->m_ext.ext_size;
		prev = m;
		m = m->m_next;
	}
	if (lastm != NULL)
		*lastm = prev;
	*pmbcnt = mbcnt;
	return (len);
}
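
/*
 * Example (illustrative sketch, not part of the original file):
 * m_lengthm() returns a chain's byte count and, optionally, its last
 * mbuf, which makes appending another chain cheap.
 */
#if 0
static u_int
example_append(struct mbuf *m, struct mbuf *n)
{
	struct mbuf *last;
	u_int len;

	len = m_lengthm(m, &last);
	last->m_next = n;
	return (len + m_lengthm(n, NULL));
}
#endif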