 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
 */

#include "opt_param.h"
#include "opt_mbuf_stress_test.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/objcache.h>
#include <sys/protosw.h>
#include <sys/thread.h>
#include <sys/globaldata.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

#include <machine/atomic.h>
#include <machine/limits.h>

#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>

/*
 * mbuf cluster meta-data
 */
struct mbcluster {
	int32_t	mcl_refs;
	void	*mcl_data;
};

/*
 * mbuf tracking for debugging purposes
 */
#ifdef MBUF_DEBUG

static MALLOC_DEFINE(M_MTRACK, "mtrack", "mtrack");

RB_HEAD(mbuf_rb_tree, mbtrack);
RB_PROTOTYPE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *);

struct mbtrack {
	RB_ENTRY(mbtrack) rb_node;
	int trackid;
	struct mbuf *m;
};

static int
mbtrack_cmp(struct mbtrack *mb1, struct mbtrack *mb2)
{
	if (mb1->m < mb2->m)
		return(-1);
	if (mb1->m > mb2->m)
		return(1);
	return(0);
}

RB_GENERATE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *, m);

struct mbuf_rb_tree	mbuf_track_root;
static struct spinlock	mbuf_track_spin = SPINLOCK_INITIALIZER(mbuf_track_spin);

static void
mbuftrack(struct mbuf *m)
{
	struct mbtrack *mbt;

	mbt = kmalloc(sizeof(*mbt), M_MTRACK, M_INTWAIT|M_ZERO);
	spin_lock(&mbuf_track_spin);
	mbt->m = m;
	if (mbuf_rb_tree_RB_INSERT(&mbuf_track_root, mbt)) {
		spin_unlock(&mbuf_track_spin);
		panic("mbuftrack: mbuf %p already being tracked", m);
	}
	spin_unlock(&mbuf_track_spin);
}

static void
mbufuntrack(struct mbuf *m)
{
	struct mbtrack *mbt;

	spin_lock(&mbuf_track_spin);
	mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
	if (mbt == NULL) {
		spin_unlock(&mbuf_track_spin);
		panic("mbufuntrack: mbuf %p was not tracked", m);
	} else {
		mbuf_rb_tree_RB_REMOVE(&mbuf_track_root, mbt);
		spin_unlock(&mbuf_track_spin);
		kfree(mbt, M_MTRACK);
	}
}

static void
mbuftrackid(struct mbuf *m, int trackid)
{
	struct mbtrack *mbt;
	struct mbuf *n;

	spin_lock(&mbuf_track_spin);
	while (m) {
		n = m->m_nextpkt;
		while (m) {
			mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
			if (mbt == NULL) {
				spin_unlock(&mbuf_track_spin);
				panic("mbuftrackid: mbuf %p not tracked", m);
			}
			mbt->trackid = trackid;
			m = m->m_next;
		}
		m = n;
	}
	spin_unlock(&mbuf_track_spin);
}

static int
mbuftrack_callback(struct mbtrack *mbt, void *arg)
{
	struct sysctl_req *req = arg;
	char buf[64];
	int error;

	ksnprintf(buf, sizeof(buf), "mbuf %p track %d\n", mbt->m, mbt->trackid);

	spin_unlock(&mbuf_track_spin);
	error = SYSCTL_OUT(req, buf, strlen(buf));
	spin_lock(&mbuf_track_spin);
	if (error)
		return(-error);
	return(0);
}

static int
mbuftrack_show(SYSCTL_HANDLER_ARGS)
{
	int error;

	spin_lock(&mbuf_track_spin);
	error = mbuf_rb_tree_RB_SCAN(&mbuf_track_root, NULL,
				     mbuftrack_callback, req);
	spin_unlock(&mbuf_track_spin);
	return (-error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, showmbufs, CTLFLAG_RD|CTLTYPE_STRING,
	    0, 0, mbuftrack_show, "A", "Show all in-use mbufs");
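/*
 * Illustrative usage note (not part of the original source): with mbuf
 * tracking compiled in, the set of in-use mbufs exported by the sysctl
 * above can be dumped from userland, e.g.:
 *
 *	sysctl kern.ipc.showmbufs
 */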
#else

#define mbuftrack(m)
#define mbufuntrack(m)
#define mbuftrackid(m, id)

#endif

static void mbinit(void *);
SYSINIT(mbuf, SI_BOOT2_MACHDEP, SI_ORDER_FIRST, mbinit, NULL)

static u_long	mbtypes[SMP_MAXCPU][MT_NTYPES];

static struct mbstat mbstat[SMP_MAXCPU];

int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;
int	m_defragpackets;
int	m_defragbytes;
int	m_defraguseless;
int	m_defragfailure;
#ifdef MBUF_STRESS_TEST
int	m_defragrandomfailures;
#endif

struct objcache *mbuf_cache, *mbufphdr_cache;
struct objcache *mclmeta_cache, *mjclmeta_cache;
struct objcache *mbufcluster_cache, *mbufphdrcluster_cache;
struct objcache *mbufjcluster_cache, *mbufphdrjcluster_cache;

static int nmbclusters;
static int nmbjclusters;
static int nmbufs;
static int mbuf_wait = 32;

SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
	   &max_linkhdr, 0, "Max size of a link-level header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
	   &max_protohdr, 0, "Max size of a protocol header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0,
	   "Max size of link+protocol headers");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
	   &max_datalen, 0, "Max data payload size without headers");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
	   &mbuf_wait, 0, "Time in ticks to sleep after failed mbuf allocations");

static int do_mbstat(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_kern_ipc, KIPC_MBSTAT, mbstat, CTLTYPE_STRUCT|CTLFLAG_RD,
	    0, 0, do_mbstat, "S,mbstat", "mbuf usage statistics");

static int do_mbtypes(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_kern_ipc, OID_AUTO, mbtypes, CTLTYPE_ULONG|CTLFLAG_RD,
	    0, 0, do_mbtypes, "LU", "");

static int
do_mbstat(SYSCTL_HANDLER_ARGS)
{
	struct mbstat mbstat_total;
	struct mbstat *mbstat_totalp;
	int i;

	bzero(&mbstat_total, sizeof(mbstat_total));
	mbstat_totalp = &mbstat_total;

	for (i = 0; i < ncpus; i++)
	{
		mbstat_total.m_mbufs += mbstat[i].m_mbufs;
		mbstat_total.m_clusters += mbstat[i].m_clusters;
		mbstat_total.m_spare += mbstat[i].m_spare;
		mbstat_total.m_clfree += mbstat[i].m_clfree;
		mbstat_total.m_drops += mbstat[i].m_drops;
		mbstat_total.m_wait += mbstat[i].m_wait;
		mbstat_total.m_drain += mbstat[i].m_drain;
		mbstat_total.m_mcfail += mbstat[i].m_mcfail;
		mbstat_total.m_mpfail += mbstat[i].m_mpfail;
	}

	/*
	 * The following fields are not cumulative fields so just
	 * get their values once.
	 */
	mbstat_total.m_msize = mbstat[0].m_msize;
	mbstat_total.m_mclbytes = mbstat[0].m_mclbytes;
	mbstat_total.m_minclsize = mbstat[0].m_minclsize;
	mbstat_total.m_mlen = mbstat[0].m_mlen;
	mbstat_total.m_mhlen = mbstat[0].m_mhlen;

	return(sysctl_handle_opaque(oidp, mbstat_totalp, sizeof(mbstat_total), req));
}

static int
do_mbtypes(SYSCTL_HANDLER_ARGS)
{
	u_long totals[MT_NTYPES];
	int i, j;

	for (i = 0; i < MT_NTYPES; i++)
		totals[i] = 0;

	for (i = 0; i < ncpus; i++)
	{
		for (j = 0; j < MT_NTYPES; j++)
			totals[j] += mbtypes[i][j];
	}

	return(sysctl_handle_opaque(oidp, totals, sizeof(totals), req));
}

/*
 * These are read-only because we do not currently have any code
 * to adjust the objcache limits after the fact.  The variables
 * may only be set as boot-time tunables.
 */
SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
	   &nmbclusters, 0, "Maximum number of mbuf clusters available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
	   "Maximum number of mbufs available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbjclusters, CTLFLAG_RD, &nmbjclusters, 0,
	   "Maximum number of mbuf jclusters available");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
	   &m_defragpackets, 0, "Number of defragment packets");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
	   &m_defragbytes, 0, "Number of defragment bytes");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
	   &m_defraguseless, 0, "Number of useless defragment mbuf chain operations");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
	   &m_defragfailure, 0, "Number of failed defragment mbuf chain operations");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
	   &m_defragrandomfailures, 0, "");
#endif

static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");
static MALLOC_DEFINE(M_MBUFCL, "mbufcl", "mbufcl");
static MALLOC_DEFINE(M_MCLMETA, "mclmeta", "mclmeta");

static void m_reclaim (void);
static void m_mclref(void *arg);
static void m_mclfree(void *arg);

/*
 * NOTE: Default NMBUFS must take into account a possible DOS attack
 *	 using fd passing on unix domain sockets.
 */
#ifndef NMBCLUSTERS
#define NMBCLUSTERS	(512 + maxusers * 16)
#endif
#ifndef NMBJCLUSTERS
#define NMBJCLUSTERS	2048
#endif
#ifndef NMBUFS
#define NMBUFS		(nmbclusters * 2 + maxfiles)
#endif
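/*
 * Worked example of the defaults above (maxusers = 64 and
 * maxfiles = 12328 are purely illustrative values):
 *
 *	NMBCLUSTERS = 512 + 64 * 16    = 1536 clusters (~3MB at 2KB each)
 *	NMBUFS      = 1536 * 2 + 12328 = 15400 mbufs
 *
 * The nmbufs >= nmbclusters * 2 floor enforced in tunable_mbinit()
 * below preserves this 2:1 ratio even when only nmbclusters is tuned.
 */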
/*
 * Perform sanity checks of tunables declared above.
 */
static void
tunable_mbinit(void *dummy)
{
	/*
	 * This has to be done before VM init.
	 */
	nmbclusters = NMBCLUSTERS;
	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	nmbjclusters = NMBJCLUSTERS;
	TUNABLE_INT_FETCH("kern.ipc.nmbjclusters", &nmbjclusters);
	nmbufs = NMBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);

	/* Sanity checks */
	if (nmbufs < nmbclusters * 2)
		nmbufs = nmbclusters * 2;
}

SYSINIT(tunable_mbinit, SI_BOOT1_TUNABLES, SI_ORDER_ANY,
	tunable_mbinit, NULL);

/* "number of clusters of pages" */

/*
 * The mbuf object cache only guarantees that m_next and m_nextpkt are
 * NULL and that m_data points to the beginning of the data area.  In
 * particular, m_len and m_pkthdr.len are uninitialized.  It is the
 * responsibility of the caller to initialize those fields before use.
 */
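/*
 * Minimal caller-side sketch of that contract (illustrative only):
 * the cache hands back an mbuf with m_next/m_nextpkt NULL and m_data
 * reset, but the caller must set the length fields itself.
 *
 *	struct mbuf *m;
 *
 *	m = m_get(MB_DONTWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_len = 0;
 */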
static __inline boolean_t
mbuf_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;

	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_data = m->m_dat;
	m->m_flags = 0;

	return (TRUE);
}

/*
 * Initialize the mbuf and the packet header fields.
 */
static boolean_t
mbufphdr_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;

	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_data = m->m_pktdat;
	m->m_flags = M_PKTHDR | M_PHCACHE;

	m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
	SLIST_INIT(&m->m_pkthdr.tags);
	m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
	m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */

	return (TRUE);
}

/*
 * An mbcluster object consists of a 2K (MCLBYTES) cluster and a refcount.
 */
static boolean_t
mclmeta_ctor(void *obj, void *private, int ocflags)
{
	struct mbcluster *cl = obj;
	void *buf;

	if (ocflags & M_NOWAIT)
		buf = kmalloc(MCLBYTES, M_MBUFCL, M_NOWAIT | M_ZERO);
	else
		buf = kmalloc(MCLBYTES, M_MBUFCL, M_INTWAIT | M_ZERO);
	if (buf == NULL)
		return (FALSE);
	cl->mcl_refs = 0;
	cl->mcl_data = buf;
	return (TRUE);
}

static boolean_t
mjclmeta_ctor(void *obj, void *private, int ocflags)
{
	struct mbcluster *cl = obj;
	void *buf;

	if (ocflags & M_NOWAIT)
		buf = kmalloc(MJUMPAGESIZE, M_MBUFCL, M_NOWAIT | M_ZERO);
	else
		buf = kmalloc(MJUMPAGESIZE, M_MBUFCL, M_INTWAIT | M_ZERO);
	if (buf == NULL)
		return (FALSE);
	cl->mcl_refs = 0;
	cl->mcl_data = buf;
	return (TRUE);
}

static void
mclmeta_dtor(void *obj, void *private)
{
	struct mbcluster *mcl = obj;

	KKASSERT(mcl->mcl_refs == 0);
	kfree(mcl->mcl_data, M_MBUFCL);
}

static void
linkjcluster(struct mbuf *m, struct mbcluster *cl, uint size)
{
	/*
	 * Add the cluster to the mbuf.  The caller will detect that the
	 * mbuf now has an attached cluster.
	 */
	m->m_ext.ext_arg = cl;
	m->m_ext.ext_buf = cl->mcl_data;
	m->m_ext.ext_ref = m_mclref;
	m->m_ext.ext_free = m_mclfree;
	m->m_ext.ext_size = size;
	atomic_add_int(&cl->mcl_refs, 1);

	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT | M_EXT_CLUSTER;
}

static void
linkcluster(struct mbuf *m, struct mbcluster *cl)
{
	linkjcluster(m, cl, MCLBYTES);
}

static boolean_t
mbufphdrcluster_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbufphdr_ctor(obj, private, ocflags);
	cl = objcache_get(mclmeta_cache, ocflags);
	if (cl == NULL) {
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (FALSE);
	}
	m->m_flags |= M_CLCACHE;
	linkcluster(m, cl);
	return (TRUE);
}

static boolean_t
mbufphdrjcluster_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbufphdr_ctor(obj, private, ocflags);
	cl = objcache_get(mjclmeta_cache, ocflags);
	if (cl == NULL) {
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (FALSE);
	}
	m->m_flags |= M_CLCACHE;
	linkjcluster(m, cl, MJUMPAGESIZE);
	return (TRUE);
}

static boolean_t
mbufcluster_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbuf_ctor(obj, private, ocflags);
	cl = objcache_get(mclmeta_cache, ocflags);
	if (cl == NULL) {
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (FALSE);
	}
	m->m_flags |= M_CLCACHE;
	linkcluster(m, cl);
	return (TRUE);
}

static boolean_t
mbufjcluster_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbuf_ctor(obj, private, ocflags);
	cl = objcache_get(mjclmeta_cache, ocflags);
	if (cl == NULL) {
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (FALSE);
	}
	m->m_flags |= M_CLCACHE;
	linkjcluster(m, cl, MJUMPAGESIZE);
	return (TRUE);
}

/*
 * Used for both the cluster and cluster PHDR caches.
 *
 * The mbuf may have lost its cluster due to sharing, deal
 * with the situation by checking M_EXT.
 */
static void
mbufcluster_dtor(void *obj, void *private)
{
	struct mbuf *m = obj;
	struct mbcluster *mcl;

	if (m->m_flags & M_EXT) {
		KKASSERT((m->m_flags & M_EXT_CLUSTER) != 0);
		mcl = m->m_ext.ext_arg;
		KKASSERT(mcl->mcl_refs == 1);
		mcl->mcl_refs = 0;
		if (m->m_flags & M_EXT && m->m_ext.ext_size != MCLBYTES)
			objcache_put(mjclmeta_cache, mcl);
		else
			objcache_put(mclmeta_cache, mcl);
	}
}

struct objcache_malloc_args mbuf_malloc_args = { MSIZE, M_MBUF };
struct objcache_malloc_args mclmeta_malloc_args =
	{ sizeof(struct mbcluster), M_MCLMETA };

static void
mbinit(void *dummy)
{
	int mb_limit, cl_limit, ncl_limit, jcl_limit;
	int limit;
	int i;

	/*
	 * Initialize statistics
	 */
	for (i = 0; i < ncpus; i++) {
		mbstat[i].m_msize = MSIZE;
		mbstat[i].m_mclbytes = MCLBYTES;
		mbstat[i].m_mjumpagesize = MJUMPAGESIZE;
		mbstat[i].m_minclsize = MINCLSIZE;
		mbstat[i].m_mlen = MLEN;
		mbstat[i].m_mhlen = MHLEN;
	}

	/*
	 * Create object caches and save cluster limits, which will
	 * be used to adjust backing kmalloc pools' limit later.
	 */
	mb_limit = cl_limit = 0;

	limit = nmbufs;
	mbuf_cache = objcache_create("mbuf",
	    mbuf_ctor, NULL, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	limit = nmbufs;
	mbufphdr_cache = objcache_create("mbuf pkt hdr",
	    mbufphdr_ctor, NULL, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	ncl_limit = nmbclusters;
	mclmeta_cache = objcache_create("cluster mbuf",
	    mclmeta_ctor, mclmeta_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args);
	cl_limit += ncl_limit;

	jcl_limit = nmbjclusters;
	mjclmeta_cache = objcache_create("jcluster mbuf",
	    mjclmeta_ctor, mclmeta_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args);
	cl_limit += jcl_limit;

	limit = nmbclusters;
	mbufcluster_cache = objcache_create("mbuf + cluster",
	    mbufcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	limit = nmbclusters;
	mbufphdrcluster_cache = objcache_create("mbuf pkt hdr + cluster",
	    &limit, nmbclusters / 16,
	    mbufphdrcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	limit = nmbjclusters / 4; /* XXX really rarely used */
	mbufjcluster_cache = objcache_create("mbuf + jcluster",
	    mbufjcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	limit = nmbjclusters;
	mbufphdrjcluster_cache = objcache_create("mbuf pkt hdr + jcluster",
	    &limit, nmbjclusters / 16,
	    mbufphdrjcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	/*
	 * Adjust backing kmalloc pools' limit
	 *
	 * NOTE: We raise the limit by another 1/8 to take the effect
	 * of loosememuse into account.
	 */
	cl_limit += cl_limit / 8;
	kmalloc_raise_limit(mclmeta_malloc_args.mtype,
			    mclmeta_malloc_args.objsize * (size_t)cl_limit);
	kmalloc_raise_limit(M_MBUFCL,
			    (MCLBYTES * (size_t)ncl_limit) +
			    (MJUMPAGESIZE * (size_t)jcl_limit));

	mb_limit += mb_limit / 8;
	kmalloc_raise_limit(mbuf_malloc_args.mtype,
			    mbuf_malloc_args.objsize * (size_t)mb_limit);
}

/*
 * Return the number of references to this mbuf's data.  0 is returned
 * if the mbuf is not M_EXT, a reference count is returned if it is
 * M_EXT | M_EXT_CLUSTER, and 99 is returned if it is a special M_EXT.
 */
static int
m_sharecount(struct mbuf *m)
{
	switch (m->m_flags & (M_EXT | M_EXT_CLUSTER)) {
	case 0:
		return (0);
	case M_EXT:
		return (99);
	case M_EXT | M_EXT_CLUSTER:
		return (((struct mbcluster *)m->m_ext.ext_arg)->mcl_refs);
	}
	/* NOTREACHED */
	return (0);		/* to shut up compiler */
}

/*
 * change mbuf to new type
 */
void
m_chtype(struct mbuf *m, int type)
{
	struct globaldata *gd = mycpu;

	++mbtypes[gd->gd_cpuid][type];
	--mbtypes[gd->gd_cpuid][m->m_type];
	m->m_type = type;
}

static void
m_reclaim(void)
{
	struct domain *dp;
	struct protosw *pr;

	kprintf("Debug: m_reclaim() called\n");

	SLIST_FOREACH(dp, &domains, dom_next) {
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
			if (pr->pr_drain)
				(*pr->pr_drain)();
		}
	}
	++mbstat[mycpu->gd_cpuid].m_drain;
}

static void
updatestats(struct mbuf *m, int type)
{
	struct globaldata *gd = mycpu;

	m->m_type = type;
	mbuftrack(m);
#ifdef MBUF_DEBUG
	KASSERT(m->m_next == NULL, ("mbuf %p: bad m_next in get", m));
	KASSERT(m->m_nextpkt == NULL, ("mbuf %p: bad m_nextpkt in get", m));
#endif

	++mbtypes[gd->gd_cpuid][type];
	++mbstat[gd->gd_cpuid].m_mbufs;
}

/*
 * Allocate an mbuf.
 */
struct mbuf *
m_get(int how, int type)
{
	struct mbuf *m;
	int ntries = 0;
	int ocf = MBTOM(how);

retryonce:

	m = objcache_get(mbuf_cache, ocf);

	if (m == NULL) {
		if ((how & MB_TRYWAIT) && ntries++ == 0) {
			struct objcache *reclaimlist[] = {
				mbufphdr_cache,
				mbufcluster_cache,
				mbufphdrcluster_cache,
				mbufjcluster_cache,
				mbufphdrjcluster_cache
			};
			const int nreclaims = NELEM(reclaimlist);

			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
				m_reclaim();
			goto retryonce;
		}
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (NULL);
	}
#ifdef MBUF_DEBUG
	KASSERT(m->m_data == m->m_dat, ("mbuf %p: bad m_data in get", m));
#endif
	m->m_len = 0;

	updatestats(m, type);
	return (m);
}

struct mbuf *
m_gethdr(int how, int type)
{
	struct mbuf *m;
	int ocf = MBTOM(how);
	int ntries = 0;

retryonce:

	m = objcache_get(mbufphdr_cache, ocf);

	if (m == NULL) {
		if ((how & MB_TRYWAIT) && ntries++ == 0) {
			struct objcache *reclaimlist[] = {
				mbuf_cache,
				mbufcluster_cache, mbufphdrcluster_cache,
				mbufjcluster_cache, mbufphdrjcluster_cache
			};
			const int nreclaims = NELEM(reclaimlist);

			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
				m_reclaim();
			goto retryonce;
		}
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (NULL);
	}
#ifdef MBUF_DEBUG
	KASSERT(m->m_data == m->m_pktdat, ("mbuf %p: bad m_data in get", m));
#endif
	m->m_len = 0;
	m->m_pkthdr.len = 0;

	updatestats(m, type);
	return (m);
}

/*
 * Get an mbuf (not an mbuf cluster!) and zero it.
 */
struct mbuf *
m_getclr(int how, int type)
{
	struct mbuf *m;

	m = m_get(how, type);
	if (m != NULL)
		bzero(m->m_data, MLEN);
	return (m);
}

static struct mbuf *
m_getcl_cache(int how, short type, int flags, struct objcache *mbclc,
	      struct objcache *mbphclc)
{
	struct mbuf *m = NULL;
	int ocflags = MBTOM(how);
	int ntries = 0;

retryonce:

	if (flags & M_PKTHDR)
		m = objcache_get(mbphclc, ocflags);
	else
		m = objcache_get(mbclc, ocflags);

	if (m == NULL) {
		if ((how & MB_TRYWAIT) && ntries++ == 0) {
			struct objcache *reclaimlist[1];

			if (flags & M_PKTHDR)
				reclaimlist[0] = mbclc;
			else
				reclaimlist[0] = mbphclc;
			if (!objcache_reclaimlist(reclaimlist, 1, ocflags))
				m_reclaim();
			goto retryonce;
		}
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (NULL);
	}
#ifdef MBUF_DEBUG
	KASSERT(m->m_data == m->m_ext.ext_buf,
		("mbuf %p: bad m_data in get", m));
#endif
	m->m_type = type;
	m->m_len = 0;
	m->m_pkthdr.len = 0;	/* just do it unconditionally */

	mbuftrack(m);

	++mbtypes[mycpu->gd_cpuid][type];
	++mbstat[mycpu->gd_cpuid].m_clusters;
	return (m);
}

struct mbuf *
m_getjcl(int how, short type, int flags, size_t size)
{
	struct objcache *mbclc, *mbphclc;

	switch (size) {
	case MCLBYTES:
		mbclc = mbufcluster_cache;
		mbphclc = mbufphdrcluster_cache;
		break;
	default:
		mbclc = mbufjcluster_cache;
		mbphclc = mbufphdrjcluster_cache;
		break;
	}
	return m_getcl_cache(how, type, flags, mbclc, mbphclc);
}

/*
 * Returns an mbuf with an attached cluster.
 * Because many network drivers use this kind of buffer a lot, it is
 * convenient to keep a small pool of free buffers of this kind.
 * Even a small size such as 10 gives about 10% improvement in the
 * forwarding rate in a bridge or router.
 */
struct mbuf *
m_getcl(int how, short type, int flags)
{
	return m_getcl_cache(how, type, flags,
			     mbufcluster_cache, mbufphdrcluster_cache);
}
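/*
 * Example usage (illustrative, in the style of a driver receive path):
 *
 *	struct mbuf *m;
 *
 *	m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_len = m->m_pkthdr.len = 0;
 */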
/*
 * Allocate a chain of the requested length.
 */
struct mbuf *
m_getc(int len, int how, int type)
{
	struct mbuf *n, *nfirst = NULL, **ntail = &nfirst;
	int nsize;

	while (len > 0) {
		n = m_getl(len, how, type, 0, &nsize);
		if (n == NULL)
			goto failed;
		n->m_len = 0;
		*ntail = n;
		ntail = &n->m_next;
		len -= nsize;
	}
	return (nfirst);

failed:
	m_freem(nfirst);
	return (NULL);
}

/*
 * Allocate len-worth of mbufs and/or mbuf clusters (whatever fits best)
 * and return a pointer to the head of the allocated chain.  If m0 is
 * non-null, then we assume that it is a single mbuf or an mbuf chain to
 * which we want len bytes worth of mbufs and/or clusters attached, and so
 * if we succeed in allocating it, we will just return a pointer to m0.
 *
 * If we happen to fail at any point during the allocation, we will free
 * up everything we have already allocated and return NULL.
 *
 * Deprecated.  Use m_getc() and m_cat() instead.
 */
struct mbuf *
m_getm(struct mbuf *m0, int len, int type, int how)
{
	struct mbuf *nfirst;

	nfirst = m_getc(len, how, type);

	if (m0 != NULL) {
		m_last(m0)->m_next = nfirst;
		return (m0);
	}

	return (nfirst);
}
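/*
 * Sketch of the preferred replacement named in the comment above
 * (illustrative): allocate the extra chain with m_getc() and splice
 * it on with m_cat() instead of calling the deprecated m_getm().
 *
 *	struct mbuf *tail;
 *
 *	tail = m_getc(len, MB_DONTWAIT, MT_DATA);
 *	if (tail == NULL)
 *		return (ENOBUFS);
 *	m_cat(m0, tail);
 */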
/*
 * Adds a cluster to a normal mbuf, M_EXT is set on success.
 * Deprecated.  Use m_getcl() instead.
 */
void
m_mclget(struct mbuf *m, int how)
{
	struct mbcluster *mcl;

	KKASSERT((m->m_flags & M_EXT) == 0);
	mcl = objcache_get(mclmeta_cache, MBTOM(how));
	if (mcl != NULL) {
		linkcluster(m, mcl);
		++mbstat[mycpu->gd_cpuid].m_clusters;
	} else {
		++mbstat[mycpu->gd_cpuid].m_drops;
	}
}

/*
 * Updates to mbcluster must be MPSAFE.  Only an entity which already has
 * a reference to the cluster can ref it, so we are in no danger of
 * racing an add with a subtract.  But the operation must still be atomic
 * since multiple entities may have a reference on the cluster.
 *
 * m_mclfree() is almost the same but it must contend with two entities
 * freeing the cluster at the same time.
 */
static void
m_mclref(void *arg)
{
	struct mbcluster *mcl = arg;

	atomic_add_int(&mcl->mcl_refs, 1);
}

/*
 * When dereferencing a cluster we have to deal with a N->0 race, where
 * N entities free their references simultaneously.  To do this we use
 * atomic_fetchadd_int().
 */
static void
m_mclfree(void *arg)
{
	struct mbcluster *mcl = arg;

	if (atomic_fetchadd_int(&mcl->mcl_refs, -1) == 1) {
		--mbstat[mycpu->gd_cpuid].m_clusters;
		objcache_put(mclmeta_cache, mcl);
	}
}

/*
 * Free a single mbuf and any associated external storage.  The successor,
 * if any, is returned.
 *
 * We do need to check non-first mbuf for m_aux, since some existing
 * code does not call M_PREPEND properly.
 * (example: call to bpf_mtap from drivers)
 */
#ifdef MBUF_DEBUG
struct mbuf *
_m_free(struct mbuf *m, const char *func)
#else
struct mbuf *
m_free(struct mbuf *m)
#endif
{
	struct mbuf *n;
	struct globaldata *gd = mycpu;

	KASSERT(m->m_type != MT_FREE, ("freeing free mbuf %p", m));
	KASSERT(M_TRAILINGSPACE(m) >= 0, ("overflowed mbuf %p", m));
	--mbtypes[gd->gd_cpuid][m->m_type];

	n = m->m_next;

	/*
	 * Make sure the mbuf is in constructed state before returning it
	 * to the objcache.
	 */
	m->m_type = MT_FREE;
	mbufuntrack(m);
#ifdef MBUF_DEBUG
	m->m_hdr.mh_lastfunc = func;
#endif
#ifdef notyet
	KKASSERT(m->m_nextpkt == NULL);
#else
	if (m->m_nextpkt != NULL) {
		static int afewtimes = 10;

		if (afewtimes-- > 0) {
			kprintf("mfree: m->m_nextpkt != NULL\n");
			print_backtrace(-1);
		}
		m->m_nextpkt = NULL;
	}
#endif
	if (m->m_flags & M_PKTHDR) {
		m_tag_delete_chain(m);		/* eliminate XXX JH */
	}

	m->m_flags &= (M_EXT | M_EXT_CLUSTER | M_CLCACHE | M_PHCACHE);

	/*
	 * Clean the M_PKTHDR state so we can return the mbuf to its original
	 * cache.  This is based on the PHCACHE flag which tells us whether
	 * the mbuf was originally allocated out of a packet-header cache
	 * or a non-packet-header cache.
	 */
	if (m->m_flags & M_PHCACHE) {
		m->m_flags |= M_PKTHDR;
		m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
		m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
		m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */
		SLIST_INIT(&m->m_pkthdr.tags);
	}

	/*
	 * Handle remaining flags combinations.  M_CLCACHE tells us whether
	 * the mbuf was originally allocated from a cluster cache or not,
	 * and is totally separate from whether the mbuf is currently
	 * associated with a cluster.
	 */
	switch(m->m_flags & (M_CLCACHE | M_EXT | M_EXT_CLUSTER)) {
	case M_CLCACHE | M_EXT | M_EXT_CLUSTER:
		/*
		 * mbuf+cluster cache case.  The mbuf was allocated from the
		 * combined mbuf_cluster cache and can be returned to the
		 * cache if the cluster hasn't been shared.
		 */
		if (m_sharecount(m) == 1) {
			/*
			 * The cluster has not been shared, we can just
			 * reset the data pointer and return the mbuf
			 * to the cluster cache.  Note that the reference
			 * count is left intact (it is still associated with
			 * the cluster).
			 */
			m->m_data = m->m_ext.ext_buf;
			if (m->m_flags & M_EXT && m->m_ext.ext_size != MCLBYTES) {
				if (m->m_flags & M_PHCACHE)
					objcache_put(mbufphdrjcluster_cache, m);
				else
					objcache_put(mbufjcluster_cache, m);
			} else {
				if (m->m_flags & M_PHCACHE)
					objcache_put(mbufphdrcluster_cache, m);
				else
					objcache_put(mbufcluster_cache, m);
			}
			--mbstat[mycpu->gd_cpuid].m_clusters;
		} else {
			/*
			 * Hell.  Someone else has a ref on this cluster,
			 * we have to disconnect it which means we can't
			 * put it back into the mbufcluster_cache, we
			 * have to destroy the mbuf.
			 *
			 * Other mbuf references to the cluster will typically
			 * be M_EXT | M_EXT_CLUSTER but without M_CLCACHE.
			 *
			 * XXX we could try to connect another cluster to
			 * it.
			 */
			m->m_ext.ext_free(m->m_ext.ext_arg);
			m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
			if (m->m_ext.ext_size == MCLBYTES) {
				if (m->m_flags & M_PHCACHE)
					objcache_dtor(mbufphdrcluster_cache, m);
				else
					objcache_dtor(mbufcluster_cache, m);
			} else {
				if (m->m_flags & M_PHCACHE)
					objcache_dtor(mbufphdrjcluster_cache, m);
				else
					objcache_dtor(mbufjcluster_cache, m);
			}
		}
		break;
	case M_EXT | M_EXT_CLUSTER:
	case M_EXT:
		/*
		 * Normal cluster association case, disconnect the cluster from
		 * the mbuf.  The cluster may or may not be custom.
		 */
		m->m_ext.ext_free(m->m_ext.ext_arg);
		m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
	case 0:
		/*
		 * return the mbuf to the mbuf cache.
		 */
		if (m->m_flags & M_PHCACHE) {
			m->m_data = m->m_pktdat;
			objcache_put(mbufphdr_cache, m);
		} else {
			m->m_data = m->m_dat;
			objcache_put(mbuf_cache, m);
		}
		--mbstat[mycpu->gd_cpuid].m_mbufs;
		break;

	default:
		panic("bad mbuf flags %p %08x", m, m->m_flags);
		break;
	}
	return (n);
}

#ifdef MBUF_DEBUG
void
_m_freem(struct mbuf *m, const char *func)
{
	while (m)
		m = _m_free(m, func);
}
#else
void
m_freem(struct mbuf *m)
{
	while (m)
		m = m_free(m);
}
#endif

void
m_extadd(struct mbuf *m, caddr_t buf, u_int size, void (*reff)(void *),
	 void (*freef)(void *), void *arg)
{
	m->m_ext.ext_arg = arg;
	m->m_ext.ext_buf = buf;
	m->m_ext.ext_ref = reff;
	m->m_ext.ext_free = freef;
	m->m_ext.ext_size = size;
	reff(arg);
	m->m_data = buf;
	m->m_flags |= M_EXT;
}

/*
 * mbuf utility routines
 */

/*
 * Lesser-used path for M_PREPEND: allocate new mbuf to prepend to chain and
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	if (m->m_flags & M_PKTHDR)
		mn = m_gethdr(how, m->m_type);
	else
		mn = m_get(how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR)
		M_MOVE_PKTHDR(mn, m);
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of MB_WAIT/MB_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copym(const struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	if (off == 0 && (m->m_flags & M_PKTHDR))
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = NULL;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		/*
		 * Because we are sharing any cluster attachment below,
		 * be sure to get an mbuf that does not have a cluster
		 * associated with it.
		 */
		if (copyhdr)
			n = m_gethdr(wait, m->m_type);
		else
			n = m_get(wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, wait))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			KKASSERT((n->m_flags & M_EXT) == 0);
			n->m_data = m->m_data + off;
			m->m_ext.ext_ref(m->m_ext.ext_arg);
			n->m_ext = m->m_ext;
			n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
		} else {
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		}
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == NULL)
		++mbstat[mycpu->gd_cpuid].m_mcfail;
	return (top);
nospace:
	m_freem(top);
	++mbstat[mycpu->gd_cpuid].m_mcfail;
	return (NULL);
}
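/*
 * Example (illustrative): take a read-only reference to the first
 * hdrlen bytes of a packet, e.g. for retransmission, without copying
 * cluster data; hdrlen is a hypothetical length.
 *
 *	struct mbuf *n;
 *
 *	n = m_copym(m, 0, hdrlen, MB_DONTWAIT);
 *	if (n == NULL)
 *		return (ENOBUFS);
 */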
/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies also have the room available.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	n = m_gethdr(how, m->m_type);
	top = n;
	if (n == NULL)
		goto nospace;

	if (!m_dup_pkthdr(n, m, how))
		goto nospace;
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		KKASSERT((n->m_flags & M_EXT) == 0);
		n->m_data = m->m_data;
		m->m_ext.ext_ref(m->m_ext.ext_arg);
		n->m_ext = m->m_ext;
		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
	} else {
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		o = m_get(how, m->m_type);
		if (o == NULL)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			KKASSERT((n->m_flags & M_EXT) == 0);
			n->m_data = m->m_data;
			m->m_ext.ext_ref(m->m_ext.ext_arg);
			n->m_ext = m->m_ext;
			n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	++mbstat[mycpu->gd_cpuid].m_mcfail;
	return (NULL);
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
	unsigned count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	/* Sanity check */
	if (m == NULL)
		return (NULL);
	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __func__));

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		n = m_getl(remain, how, m->m_type, top == NULL ? M_PKTHDR : 0,
			   &nsize);
		if (n == NULL)
			goto nospace;
		if (top == NULL)
			if (!m_dup_pkthdr(n, m, how))
				goto nospace0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		n->m_len = 0;
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
			("%s: bogus m_pkthdr.len", __func__));
	}
	return (top);

nospace:
	m_freem(top);
nospace0:
	++mbstat[mycpu->gd_cpuid].m_mcfail;
	return (NULL);
}
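/*
 * Example (illustrative): deep-copy a packet before modifying it in
 * place, since m_copym()/m_copypacket() copies share clusters and are
 * therefore read-only.
 *
 *	struct mbuf *w;
 *
 *	w = m_dup(m, MB_DONTWAIT);
 *	if (w == NULL)
 *		return (ENOBUFS);
 */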
/*
 * Copy the non-packet mbuf data chain into a new set of mbufs, including
 * copying any mbuf clusters.  This is typically used to realign a data
 * chain by nfs_realign().
 *
 * The original chain is left intact.  how should be MB_WAIT or MB_DONTWAIT
 * and NULL can be returned if MB_DONTWAIT is passed.
 *
 * Be careful to use cluster mbufs; a large mbuf chain converted to
 * non-cluster mbufs can exhaust our supply of mbufs.
 */
struct mbuf *
m_dup_data(struct mbuf *m, int how)
{
	struct mbuf **p, *n, *top = NULL;
	int mlen, moff, chunk, gsize, nsize;

	/*
	 * Degenerate case
	 */
	if (m == NULL)
		return (NULL);

	/*
	 * Optimize the mbuf allocation but do not get too carried away.
	 */
	if (m->m_next || m->m_len > MLEN)
		if (m->m_flags & M_EXT && m->m_ext.ext_size == MCLBYTES)
			gsize = MCLBYTES;
		else
			gsize = MJUMPAGESIZE;
	else
		gsize = MLEN;

	/* Chain control */
	p = &top;
	n = NULL;
	nsize = 0;

	/*
	 * Scan the mbuf chain until nothing is left, the new mbuf chain
	 * will be allocated on the fly as needed.
	 */
	while (m) {
		mlen = m->m_len;
		moff = 0;
		KKASSERT(m->m_type == MT_DATA);
		while (mlen) {
			if (n == NULL) {
				n = m_getl(gsize, how, MT_DATA, 0, &nsize);
				if (n == NULL)
					goto nospace;
				n->m_len = 0;
				*p = n;
				p = &n->m_next;
			}
			chunk = imin(mlen, nsize);
			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			mlen -= chunk;
			moff += chunk;
			n->m_len += chunk;
			nsize -= chunk;
			if (nsize == 0)
				n = NULL;
		}
		m = m->m_next;
	}
	*p = NULL;
	return (top);
nospace:
	*p = NULL;
	m_freem(top);
	++mbstat[mycpu->gd_cpuid].m_mcfail;
	return (NULL);
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	m = m_last(m);
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == NULL)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}

/*
 * Set the m_data pointer of a newly-allocated mbuf
 * to place an object of the specified size at the
 * end of the mbuf, longword aligned.
 */
void
m_align(struct mbuf *m, int len)
{
	int adjust;

	if (m->m_flags & M_EXT)
		adjust = m->m_ext.ext_size - len;
	else if (m->m_flags & M_PKTHDR)
		adjust = MHLEN - len;
	else
		adjust = MLEN - len;
	m->m_data += adjust &~ (sizeof(long)-1);
}
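/*
 * Worked example (illustrative; assumes MLEN is 224, which depends on
 * MSIZE and the header layout): for a plain mbuf and len = 14,
 * adjust = 224 - 14 = 210, which the &~ mask rounds down to 208 on a
 * 64-bit platform (sizeof(long) == 8), leaving m_data long-aligned
 * with at least 14 bytes to the end of the mbuf.
 */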
/*
 * Create a writable copy of the mbuf chain.  While doing this
 * we compact the chain with a goal of producing a chain with
 * at most two mbufs.  The second mbuf in this chain is likely
 * to be a cluster.  The primary purpose of this work is to create
 * a writable packet for encryption, compression, etc.  The
 * secondary goal is to linearize the data so the data can be
 * passed to crypto hardware in the most efficient manner possible.
 */
struct mbuf *
m_unshare(struct mbuf *m0, int how)
{
	struct mbuf *m, *mprev;
	struct mbuf *n, *mfirst, *mlast;
	int len, off;

	mprev = NULL;
	for (m = m0; m != NULL; m = mprev->m_next) {
		/*
		 * Regular mbufs are ignored unless there's a cluster
		 * in front of it that we can use to coalesce.  We do
		 * the latter mainly so later clusters can be coalesced
		 * also w/o having to handle them specially (i.e. convert
		 * mbuf+cluster -> cluster).  This optimization is heavily
		 * influenced by the assumption that we're running over
		 * Ethernet where MCLBYTES is large enough that the max
		 * packet size will permit lots of coalescing into a
		 * single cluster.  This in turn permits efficient
		 * crypto operations, especially when using hardware.
		 */
		if ((m->m_flags & M_EXT) == 0) {
			if (mprev && (mprev->m_flags & M_EXT) &&
			    m->m_len <= M_TRAILINGSPACE(mprev)) {
				/* XXX: this ignores mbuf types */
				memcpy(mtod(mprev, caddr_t) + mprev->m_len,
				       mtod(m, caddr_t), m->m_len);
				mprev->m_len += m->m_len;
				mprev->m_next = m->m_next;	/* unlink from chain */
				m_free(m);			/* reclaim mbuf */
			} else {
				mprev = m;
			}
			continue;
		}
		/*
		 * Writable mbufs are left alone (for now).
		 */
		if (M_WRITABLE(m)) {
			mprev = m;
			continue;
		}

		/*
		 * Not writable, replace with a copy or coalesce with
		 * the previous mbuf if possible (since we have to copy
		 * it anyway, we try to reduce the number of mbufs and
		 * clusters so that future work is easier).
		 */
		KASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags));
		/* NB: we only coalesce into a cluster or larger */
		if (mprev != NULL && (mprev->m_flags & M_EXT) &&
		    m->m_len <= M_TRAILINGSPACE(mprev)) {
			/* XXX: this ignores mbuf types */
			memcpy(mtod(mprev, caddr_t) + mprev->m_len,
			       mtod(m, caddr_t), m->m_len);
			mprev->m_len += m->m_len;
			mprev->m_next = m->m_next;	/* unlink from chain */
			m_free(m);			/* reclaim mbuf */
			continue;
		}

		/*
		 * Allocate new space to hold the copy...
		 */
		/* XXX why can M_PKTHDR be set past the first mbuf? */
		if (mprev == NULL && (m->m_flags & M_PKTHDR)) {
			/*
			 * NB: if a packet header is present we must
			 * allocate the mbuf separately from any cluster
			 * because M_MOVE_PKTHDR will smash the data
			 * pointer and drop the M_EXT marker.
			 */
			MGETHDR(n, how, m->m_type);
			if (n == NULL) {
				m_freem(m0);
				return (NULL);
			}
			M_MOVE_PKTHDR(n, m);
			MCLGET(n, how);
			if ((n->m_flags & M_EXT) == 0) {
				m_free(n);
				m_freem(m0);
				return (NULL);
			}
		} else {
			n = m_getcl(how, m->m_type, m->m_flags);
			if (n == NULL) {
				m_freem(m0);
				return (NULL);
			}
		}
		/*
		 * ... and copy the data.  We deal with jumbo mbufs
		 * (i.e. m_len > MCLBYTES) by splitting them into
		 * clusters.  We could just malloc a buffer and make
		 * it external but too many device drivers don't know
		 * how to break up the non-contiguous memory when
		 * copying from device local memory.
		 */
		len = m->m_len;
		off = 0;
		mfirst = n;
		mlast = NULL;
		for (;;) {
			int cc = min(len, MCLBYTES);
			memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc);
			n->m_len = cc;
			if (mlast != NULL)
				mlast->m_next = n;
			mlast = n;

			len -= cc;
			if (len <= 0)
				break;
			off += cc;

			n = m_getcl(how, m->m_type, m->m_flags);
			if (n == NULL) {
				m_freem(mfirst);
				m_freem(m0);
				return (NULL);
			}
		}
		n->m_next = m->m_next;
		if (mprev == NULL)
			m0 = mfirst;		/* new head of chain */
		else
			mprev->m_next = mfirst;	/* replace old mbuf */
		m_free(m);			/* release old mbuf */
		mprev = mfirst;
	}
	return (m0);
}
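/*
 * Example (illustrative): IPsec-style use, making a packet safe to
 * modify before encryption; the chain is compacted into as few
 * clusters as possible along the way.
 *
 *	m = m_unshare(m, MB_DONTWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);
 */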
/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod will work for a structure
 * of size len).  Returns the resulting mbuf chain on success, frees it and
 * returns null on failure.  If there is room, it will add up to
 * max_protohdr-len extra bytes to the contiguous region in an attempt to
 * avoid being called next time.
 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if (!(n->m_flags & M_EXT) &&
	    n->m_data + len < &n->m_dat[MLEN] &&
	    n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		if (n->m_flags & M_PKTHDR)
			m = m_gethdr(MB_DONTWAIT, n->m_type);
		else
			m = m_get(MB_DONTWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR)
			M_MOVE_PKTHDR(m, n);
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	++mbstat[mycpu->gd_cpuid].m_mcfail;
	return (NULL);
}
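/*
 * Classic usage sketch (illustrative): make the IP header contiguous
 * before dereferencing it; note that on failure m_pullup() has already
 * freed the chain.
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;
 *	ip = mtod(m, struct ip *);
 */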
/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf.  Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		n = m_gethdr(wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				m_free(n);
				return (NULL);
			} else {
				n->m_len = 0;
				return (n);
			}
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		n = m_get(wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		KKASSERT((n->m_flags & M_EXT) == 0);
		n->m_data = m->m_data + len;
		m->m_ext.ext_ref(m->m_ext.ext_arg);
		n->m_ext = m->m_ext;
		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}
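/*
 * Example (illustrative): split a packet after a hypothetical hdrlen
 * bytes; m0 keeps the first hdrlen bytes, tail gets the rest.  Per the
 * comment above, tail may share a cluster with m0, so check
 * M_WRITABLE() before modifying it.
 *
 *	struct mbuf *tail;
 *
 *	tail = m_split(m0, hdrlen, MB_DONTWAIT);
 *	if (tail == NULL)
 *		return (ENOBUFS);
 */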
/*
 * Routine to copy from device local memory into mbufs.
 * Note: "offset" is ill-defined and always called as 0, so ignore it.
 */
struct mbuf *
m_devget(char *buf, int len, int offset, struct ifnet *ifp,
    void (*copy)(volatile const void *from, volatile void *to, size_t length))
{
	struct mbuf *m, *mfirst = NULL, **mtail;
	int nsize, flags;

	if (copy == NULL)
		copy = bcopy;
	mtail = &mfirst;
	flags = M_PKTHDR;

	while (len > 0) {
		m = m_getl(len, MB_DONTWAIT, MT_DATA, flags, &nsize);
		if (m == NULL) {
			m_freem(mfirst);
			return (NULL);
		}
		m->m_len = min(len, nsize);

		if (flags & M_PKTHDR) {
			if (len + max_linkhdr <= nsize)
				m->m_data += max_linkhdr;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = len;
			flags = 0;
		}

		copy(buf, m->m_data, (unsigned)m->m_len);
		buf += m->m_len;
		len -= m->m_len;
		*mtail = m;
		mtail = &m->m_next;
	}

	return (mfirst);
}

/*
 * Routine to pad mbuf to the specified length 'padto'.
 */
int
m_devpad(struct mbuf *m, int padto)
{
	struct mbuf *last = NULL;
	int padlen;

	if (padto <= m->m_pkthdr.len)
		return 0;

	padlen = padto - m->m_pkthdr.len;

	/* if there's only the packet-header and we can pad there, use it. */
	if (m->m_pkthdr.len == m->m_len && M_TRAILINGSPACE(m) >= padlen) {
		last = m;
	} else {
		/*
		 * Walk packet chain to find last mbuf.  We will either
		 * pad there, or append a new mbuf and pad it
		 */
		for (last = m; last->m_next != NULL; last = last->m_next)
			; /* EMPTY */

		/* `last' now points to last in chain. */
		if (M_TRAILINGSPACE(last) < padlen) {
			struct mbuf *n;

			/* Allocate new empty mbuf, pad it.  Compact later. */
			MGET(n, MB_DONTWAIT, MT_DATA);
			if (n == NULL)
				return ENOBUFS;
			n->m_len = 0;
			last->m_next = n;
			last = n;
		}
	}
	KKASSERT(M_TRAILINGSPACE(last) >= padlen);
	KKASSERT(M_WRITABLE(last));

	/* Now zero the pad area */
	bzero(mtod(last, char *) + last->m_len, padlen);
	last->m_len += padlen;
	m->m_pkthdr.len += padlen;
	return 0;
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == NULL)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			n = m_getclr(MB_DONTWAIT, m->m_type);
			if (n == NULL)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			n = m_get(MB_DONTWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}

/*
 * Append the specified data to the indicated mbuf chain,
 * extending the mbuf chain if the new data does not fit in
 * existing space.
 *
 * Return 1 if able to complete the job; otherwise 0.
 */
int
m_append(struct mbuf *m0, int len, c_caddr_t cp)
{
	struct mbuf *m, *n;
	int remainder, space;

	for (m = m0; m->m_next != NULL; m = m->m_next)
		;
	remainder = len;
	space = M_TRAILINGSPACE(m);
	if (space > 0) {
		/*
		 * Copy into available space.
		 */
		if (space > remainder)
			space = remainder;
		bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
		m->m_len += space;
		cp += space, remainder -= space;
	}
	while (remainder > 0) {
		/*
		 * Allocate a new mbuf; could check space
		 * and allocate a cluster instead.
		 */
		n = m_get(MB_DONTWAIT, m->m_type);
		if (n == NULL)
			break;
		n->m_len = min(MLEN, remainder);
		bcopy(cp, mtod(n, caddr_t), n->m_len);
		cp += n->m_len, remainder -= n->m_len;
		m->m_next = n;
		m = n;
	}
	if (m0->m_flags & M_PKTHDR)
		m0->m_pkthdr.len += len - remainder;
	return (remainder == 0);
}

/*
 * Apply function f to the data in an mbuf chain starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
int
m_apply(struct mbuf *m, int off, int len,
	int (*f)(void *, void *, u_int), void *arg)
{
	u_int count;
	int rval;

	KASSERT(off >= 0, ("m_apply, negative off %d", off));
	KASSERT(len >= 0, ("m_apply, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
		count = min(m->m_len - off, len);
		rval = (*f)(arg, mtod(m, caddr_t) + off, count);
		if (rval)
			return (rval);
		len -= count;
		off = 0;
		m = m->m_next;
	}
	return (0);
}
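/*
 * Example (illustrative): feed every byte in a region to a callback,
 * here a hypothetical byte-summing step; returning non-zero from the
 * callback aborts the walk.
 *
 *	static int
 *	sum_bytes(void *arg, void *data, u_int len)
 *	{
 *		uint32_t *sum = arg;
 *		u_char *p = data;
 *
 *		while (len--)
 *			*sum += *p++;
 *		return (0);
 *	}
 *
 *	uint32_t sum = 0;
 *	m_apply(m, 0, m->m_pkthdr.len, sum_bytes, &sum);
 */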
/*
 * Return a pointer to mbuf/offset of location in mbuf chain.
 */
struct mbuf *
m_getptr(struct mbuf *m, int loc, int *off)
{
	while (loc >= 0) {
		/* Normal end of search. */
		if (m->m_len > loc) {
			*off = loc;
			return (m);
		} else {
			loc -= m->m_len;
			if (m->m_next == NULL) {
				if (m->m_len == 0) {
					/* Point at the end of valid data. */
					*off = m->m_len;
					return (m);
				}
				return (NULL);
			}
			m = m->m_next;
		}
	}
	return (NULL);
}

void
m_print(const struct mbuf *m)
{
	int len;
	const struct mbuf *m2;

	len = m->m_pkthdr.len;
	m2 = m;
	while (len) {
		kprintf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
		len -= m2->m_len;
		m2 = m2->m_next;
	}
}

/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{
	KASSERT((to->m_flags & M_PKTHDR), ("m_move_pkthdr: not packet header"));

	to->m_flags |= from->m_flags & M_COPYFLAGS;
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
	from->m_flags &= ~M_PKTHDR;
}

/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
{
	KASSERT((to->m_flags & M_PKTHDR), ("m_dup_pkthdr: not packet header"));

	to->m_flags = (from->m_flags & M_COPYFLAGS) |
		      (to->m_flags & ~M_COPYFLAGS);
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, how));
}

/*
 * Defragment an mbuf chain, returning the shortest possible
 * chain of mbufs and clusters.  If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed in chain will be unchanged.  Upon success,
 * the original chain will be freed, and the new chain
 * will be returned.
 *
 * If a non-packet header is passed in, the original
 * mbuf (chain?) will be returned unharmed.
 *
 * m_defrag_nofree doesn't free the passed in mbuf.
 */
struct mbuf *
m_defrag(struct mbuf *m0, int how)
{
	struct mbuf *m_new;

	if ((m_new = m_defrag_nofree(m0, how)) == NULL)
		return (NULL);
	if (m_new != m0)
		m_freem(m0);
	return (m_new);
}
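/*
 * Example (illustrative): drivers with a limited number of
 * scatter/gather segments commonly retry a failed DMA mapping after
 * defragmenting the chain.  On success m_defrag() has freed the old
 * chain; on failure the old chain is still intact.
 *
 *	struct mbuf *d;
 *
 *	d = m_defrag(m, MB_DONTWAIT);
 *	if (d == NULL)
 *		return (ENOBUFS);
 *	m = d;
 */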
struct mbuf *
m_defrag_nofree(struct mbuf *m0, int how)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, length, nsize;

	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

#ifdef MBUF_STRESS_TEST
	if (m_defragrandomfailures) {
		int temp = karc4random() & 0xff;
		if (temp == 0xba)
			goto nospace;
	}
#endif

	m_final = m_getl(m0->m_pkthdr.len, how, MT_DATA, M_PKTHDR, &nsize);
	if (m_final == NULL)
		goto nospace;
	m_final->m_len = 0;	/* in case m0->m_pkthdr.len is zero */

	if (m_dup_pkthdr(m_final, m0, how) == 0)
		goto nospace;

	m_new = m_final;

	while (progress < m0->m_pkthdr.len) {
		length = m0->m_pkthdr.len - progress;
		if (length > MCLBYTES)
			length = MCLBYTES;

		if (m_new == NULL) {
			m_new = m_getl(length, how, MT_DATA, 0, &nsize);
			if (m_new == NULL)
				goto nospace;
		}

		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
		progress += length;
		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
	if (m0->m_next == NULL)
		m_defraguseless++;
	m_defragpackets++;
	m_defragbytes += m_final->m_pkthdr.len;
	return (m_final);
nospace:
	m_defragfailure++;
	if (m_new)
		m_free(m_new);
	return (NULL);
}

/*
 * Move data from uio into mbufs.
 */
struct mbuf *
m_uiomove(struct uio *uio)
{
	struct mbuf *m;			/* current working mbuf */
	struct mbuf *head = NULL;	/* result mbuf chain */
	struct mbuf **mp = &head;
	int flags = M_PKTHDR;
	int nsize;
	int error;
	int resid;

	do {
		if (uio->uio_resid > INT_MAX)
			resid = INT_MAX;
		else
			resid = (int)uio->uio_resid;
		m = m_getl(resid, MB_WAIT, MT_DATA, flags, &nsize);
		if (flags & M_PKTHDR)
			m->m_pkthdr.len = 0;
		/* Leave room for protocol headers. */
		if (resid < nsize)
			m->m_data += (nsize - resid);

		m->m_len = imin(nsize, resid);
		error = uiomove(mtod(m, caddr_t), m->m_len, uio);
		if (error) {
			m_free(m);
			goto failed;
		}
		*mp = m;
		mp = &m->m_next;
		flags = 0;
		head->m_pkthdr.len += m->m_len;
	} while (uio->uio_resid > 0);

	return (head);

failed:
	m_freem(head);
	return (NULL);
}

struct mbuf *
m_last(struct mbuf *m)
{
	while (m->m_next)
		m = m->m_next;
	return (m);
}

/*
 * Return the number of bytes in an mbuf chain.
 * If lastm is not NULL, also return the last mbuf.
 */
u_int
m_lengthm(struct mbuf *m, struct mbuf **lastm)
{
	u_int len = 0;
	struct mbuf *prev = m;

	while (m) {
		len += m->m_len;
		prev = m;
		m = m->m_next;
	}
	if (lastm != NULL)
		*lastm = prev;
	return (len);
}

/*
 * Like m_lengthm(), except also keep track of mbuf usage.
 */
u_int
m_countm(struct mbuf *m, struct mbuf **lastm, u_int *pmbcnt)
{
	u_int len = 0, mbcnt = 0;
	struct mbuf *prev = m;

	while (m) {
		len += m->m_len;
		mbcnt += MSIZE;
		if (m->m_flags & M_EXT)
			mbcnt += m->m_ext.ext_size;
		prev = m;
		m = m->m_next;
	}
	if (lastm != NULL)
		*lastm = prev;
	*pmbcnt = mbcnt;
	return (len);
}