/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
 */
#include "opt_param.h"
#include "opt_mbuf_stress_test.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/objcache.h>
#include <sys/tree.h>
#include <sys/protosw.h>
#include <sys/uio.h>
#include <sys/thread.h>
#include <sys/globaldata.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

#include <machine/atomic.h>
#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#ifdef INVARIANTS
#include <machine/cpu.h>
#endif
/*
 * mbuf cluster meta-data
 */
struct mbcluster {
	int	mcl_refs;
	void	*mcl_data;
};

/*
 * mbuf tracking for debugging purposes
 */
#ifdef MBUF_DEBUG

static MALLOC_DEFINE(M_MTRACK, "mtrack", "mtrack");

RB_HEAD(mbuf_rb_tree, mbtrack);
RB_PROTOTYPE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *);

struct mbtrack {
	RB_ENTRY(mbtrack) rb_node;
	int	trackid;
	struct mbuf *m;
};

static int
mbtrack_cmp(struct mbtrack *mb1, struct mbtrack *mb2)
{
	if (mb1->m < mb2->m)
		return (-1);
	if (mb1->m > mb2->m)
		return (1);
	return (0);
}

RB_GENERATE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *, m);

struct mbuf_rb_tree mbuf_track_root;
static struct spinlock mbuf_track_spin =
	SPINLOCK_INITIALIZER(mbuf_track_spin, "mbuf_track_spin");
static void
mbuftrack(struct mbuf *m)
{
	struct mbtrack *mbt;

	mbt = kmalloc(sizeof(*mbt), M_MTRACK, M_INTWAIT|M_ZERO);
	spin_lock(&mbuf_track_spin);
	mbt->m = m;
	if (mbuf_rb_tree_RB_INSERT(&mbuf_track_root, mbt)) {
		spin_unlock(&mbuf_track_spin);
		panic("mbuftrack: mbuf %p already being tracked", m);
	}
	spin_unlock(&mbuf_track_spin);
}

static void
mbufuntrack(struct mbuf *m)
{
	struct mbtrack *mbt;

	spin_lock(&mbuf_track_spin);
	mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
	if (mbt == NULL) {
		spin_unlock(&mbuf_track_spin);
		panic("mbufuntrack: mbuf %p was not tracked", m);
	} else {
		mbuf_rb_tree_RB_REMOVE(&mbuf_track_root, mbt);
		spin_unlock(&mbuf_track_spin);
		kfree(mbt, M_MTRACK);
	}
}
void
mbuftrackid(struct mbuf *m, int trackid)
{
	struct mbtrack *mbt;
	struct mbuf *n;

	spin_lock(&mbuf_track_spin);
	while (m) {
		n = m->m_nextpkt;
		while (m) {
			mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
			if (mbt == NULL) {
				spin_unlock(&mbuf_track_spin);
				panic("mbuftrackid: mbuf %p not tracked", m);
			}
			mbt->trackid = trackid;
			m = m->m_next;
		}
		m = n;
	}
	spin_unlock(&mbuf_track_spin);
}

static int
mbuftrack_callback(struct mbtrack *mbt, void *arg)
{
	struct sysctl_req *req = arg;
	char buf[64];
	int error;

	ksnprintf(buf, sizeof(buf), "mbuf %p track %d\n", mbt->m, mbt->trackid);

	spin_unlock(&mbuf_track_spin);
	error = SYSCTL_OUT(req, buf, strlen(buf));
	spin_lock(&mbuf_track_spin);
	if (error)
		return (-error);
	return (0);
}

static int
mbuftrack_show(SYSCTL_HANDLER_ARGS)
{
	int error;

	spin_lock(&mbuf_track_spin);
	error = mbuf_rb_tree_RB_SCAN(&mbuf_track_root, NULL,
				     mbuftrack_callback, req);
	spin_unlock(&mbuf_track_spin);
	return (-error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, showmbufs, CTLFLAG_RD|CTLTYPE_STRING,
	    0, 0, mbuftrack_show, "A", "Show all in-use mbufs");
#else

#define mbuftrack(m)
#define mbufuntrack(m)
#define mbuftrackid(m, id)

#endif
static void mbinit(void *);
SYSINIT(mbuf, SI_BOOT2_MACHDEP, SI_ORDER_FIRST, mbinit, NULL);

struct mbtypes_stat {
	u_long	stats[MT_NTYPES];
} __cachealign;

static struct mbtypes_stat	mbtypes[SMP_MAXCPU];

static struct mbstat mbstat[SMP_MAXCPU] __cachealign;
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;
int	m_defragpackets;
int	m_defragbytes;
int	m_defraguseless;
int	m_defragfailure;
#ifdef MBUF_STRESS_TEST
int	m_defragrandomfailures;
#endif

struct objcache *mbuf_cache, *mbufphdr_cache;
struct objcache *mclmeta_cache, *mjclmeta_cache;
struct objcache *mbufcluster_cache, *mbufphdrcluster_cache;
struct objcache *mbufjcluster_cache, *mbufphdrjcluster_cache;

struct lock	mbupdate_lk = LOCK_INITIALIZER("mbupdate", 0, 0);

int		nmbclusters;
static int	nmbjclusters;
int		nmbufs;
static int	mjclph_cachefrac;
static int	mjcl_cachefrac;
static int	mclph_cachefrac;
static int	mcl_cachefrac;
static int	mbuf_wait = 32;		/* in ticks */
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
	   &max_linkhdr, 0, "Max size of a link-level header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
	   &max_protohdr, 0, "Max size of a protocol header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0,
	   "Max size of link+protocol headers");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
	   &max_datalen, 0, "Max data payload size without headers");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
	   &mbuf_wait, 0, "Time in ticks to sleep after failed mbuf allocations");

static int do_mbstat(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_kern_ipc, KIPC_MBSTAT, mbstat, CTLTYPE_STRUCT|CTLFLAG_RD,
	    0, 0, do_mbstat, "S,mbstat", "mbuf usage statistics");

static int do_mbtypes(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_kern_ipc, OID_AUTO, mbtypes, CTLTYPE_ULONG|CTLFLAG_RD,
	    0, 0, do_mbtypes, "LU", "");
static int
do_mbstat(SYSCTL_HANDLER_ARGS)
{
	struct mbstat mbstat_total;
	struct mbstat *mbstat_totalp;
	int i;

	bzero(&mbstat_total, sizeof(mbstat_total));
	mbstat_totalp = &mbstat_total;

	for (i = 0; i < ncpus; i++) {
		mbstat_total.m_mbufs += mbstat[i].m_mbufs;
		mbstat_total.m_clusters += mbstat[i].m_clusters;
		mbstat_total.m_jclusters += mbstat[i].m_jclusters;
		mbstat_total.m_clfree += mbstat[i].m_clfree;
		mbstat_total.m_drops += mbstat[i].m_drops;
		mbstat_total.m_wait += mbstat[i].m_wait;
		mbstat_total.m_drain += mbstat[i].m_drain;
		mbstat_total.m_mcfail += mbstat[i].m_mcfail;
		mbstat_total.m_mpfail += mbstat[i].m_mpfail;
	}

	/*
	 * The following fields are not cumulative fields so just
	 * get their values once.
	 */
	mbstat_total.m_msize = mbstat[0].m_msize;
	mbstat_total.m_mclbytes = mbstat[0].m_mclbytes;
	mbstat_total.m_minclsize = mbstat[0].m_minclsize;
	mbstat_total.m_mlen = mbstat[0].m_mlen;
	mbstat_total.m_mhlen = mbstat[0].m_mhlen;

	return (sysctl_handle_opaque(oidp, mbstat_totalp,
	    sizeof(mbstat_total), req));
}
static int
do_mbtypes(SYSCTL_HANDLER_ARGS)
{
	u_long totals[MT_NTYPES];
	int i, j;

	for (i = 0; i < MT_NTYPES; i++)
		totals[i] = 0;

	for (i = 0; i < ncpus; i++) {
		for (j = 0; j < MT_NTYPES; j++)
			totals[j] += mbtypes[i].stats[j];
	}

	return (sysctl_handle_opaque(oidp, totals, sizeof(totals), req));
}
/*
 * The variables may be set as boot-time tunables or live.  Setting these
 * values too low can deadlock your network.  Network interfaces may also
 * adjust nmbclusters and/or nmbjclusters to account for preloading the
 * receive ring.
 */
static int sysctl_nmbclusters(SYSCTL_HANDLER_ARGS);
static int sysctl_nmbjclusters(SYSCTL_HANDLER_ARGS);
static int sysctl_nmbufs(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLTYPE_INT | CTLFLAG_RW,
	    0, 0, sysctl_nmbclusters, "I",
	    "Maximum number of mbuf clusters available");
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjclusters, CTLTYPE_INT | CTLFLAG_RW,
	    0, 0, sysctl_nmbjclusters, "I",
	    "Maximum number of mbuf jclusters available");
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbufs, CTLTYPE_INT | CTLFLAG_RW,
	    0, 0, sysctl_nmbufs, "I",
	    "Maximum number of mbufs available");

SYSCTL_INT(_kern_ipc, OID_AUTO, mjclph_cachefrac, CTLFLAG_RD,
	   &mjclph_cachefrac, 0,
	   "Fraction of cacheable mbuf jclusters w/ pkthdr");
SYSCTL_INT(_kern_ipc, OID_AUTO, mjcl_cachefrac, CTLFLAG_RD,
	   &mjcl_cachefrac, 0,
	   "Fraction of cacheable mbuf jclusters");
SYSCTL_INT(_kern_ipc, OID_AUTO, mclph_cachefrac, CTLFLAG_RD,
	   &mclph_cachefrac, 0,
	   "Fraction of cacheable mbuf clusters w/ pkthdr");
SYSCTL_INT(_kern_ipc, OID_AUTO, mcl_cachefrac, CTLFLAG_RD,
	   &mcl_cachefrac, 0, "Fraction of cacheable mbuf clusters");

SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
	   &m_defragpackets, 0, "Number of defragment packets");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
	   &m_defragbytes, 0, "Number of defragment bytes");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
	   &m_defraguseless, 0,
	   "Number of useless defragment mbuf chain operations");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
	   &m_defragfailure, 0,
	   "Number of failed defragment mbuf chain operations");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
	   &m_defragrandomfailures, 0, "");
#endif
static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");
static MALLOC_DEFINE(M_MBUFCL, "mbufcl", "mbufcl");
static MALLOC_DEFINE(M_MCLMETA, "mclmeta", "mclmeta");

static void m_reclaim(void);
static void m_mclref(void *arg);
static void m_mclfree(void *arg);
static void m_mjclfree(void *arg);

static void mbupdatelimits(void);
/*
 * NOTE: Default NMBUFS must take into account a possible DOS attack
 *	 using fd passing on unix domain sockets.
 */
#ifndef NMBCLUSTERS
#define NMBCLUSTERS	(512 + maxusers * 16)
#endif
#ifndef MJCLPH_CACHEFRAC
#define MJCLPH_CACHEFRAC 16
#endif
#ifndef MJCL_CACHEFRAC
#define MJCL_CACHEFRAC	4
#endif
#ifndef MCLPH_CACHEFRAC
#define MCLPH_CACHEFRAC	16
#endif
#ifndef MCL_CACHEFRAC
#define MCL_CACHEFRAC	4
#endif
#ifndef NMBJCLUSTERS
#define NMBJCLUSTERS	(NMBCLUSTERS / 2)
#endif
#ifndef NMBUFS
#define NMBUFS		(nmbclusters * 2 + maxfiles)
#endif

#define NMBCLUSTERS_MIN		(NMBCLUSTERS / 2)
#define NMBJCLUSTERS_MIN	(NMBJCLUSTERS / 2)
#define NMBUFS_MIN		((NMBCLUSTERS * 2 + maxfiles) / 2)
/*
 * Perform sanity checks of tunables declared above.
 */
static void
tunable_mbinit(void *dummy)
{
	/*
	 * This has to be done before VM init.
	 */
	nmbclusters = NMBCLUSTERS;
	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	mjclph_cachefrac = MJCLPH_CACHEFRAC;
	TUNABLE_INT_FETCH("kern.ipc.mjclph_cachefrac", &mjclph_cachefrac);
	mjcl_cachefrac = MJCL_CACHEFRAC;
	TUNABLE_INT_FETCH("kern.ipc.mjcl_cachefrac", &mjcl_cachefrac);
	mclph_cachefrac = MCLPH_CACHEFRAC;
	TUNABLE_INT_FETCH("kern.ipc.mclph_cachefrac", &mclph_cachefrac);
	mcl_cachefrac = MCL_CACHEFRAC;
	TUNABLE_INT_FETCH("kern.ipc.mcl_cachefrac", &mcl_cachefrac);

	/*
	 * WARNING! each mcl cache feeds two mbuf caches, so the minimum
	 *	    cachefrac is 2.  For safety, use 3.
	 */
	if (mjclph_cachefrac < 3)
		mjclph_cachefrac = 3;
	if (mjcl_cachefrac < 3)
		mjcl_cachefrac = 3;
	if (mclph_cachefrac < 3)
		mclph_cachefrac = 3;
	if (mcl_cachefrac < 3)
		mcl_cachefrac = 3;

	nmbjclusters = NMBJCLUSTERS;
	TUNABLE_INT_FETCH("kern.ipc.nmbjclusters", &nmbjclusters);

	nmbufs = NMBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);

	/* Sanity checks */
	if (nmbufs < nmbclusters * 2)
		nmbufs = nmbclusters * 2;
}
SYSINIT(tunable_mbinit, SI_BOOT1_TUNABLES, SI_ORDER_ANY,
	tunable_mbinit, NULL);
/*
 * Sysctl support to update nmbclusters, nmbjclusters, and nmbufs.
 */
static int
sysctl_nmbclusters(SYSCTL_HANDLER_ARGS)
{
	int error;
	int value;

	value = nmbclusters;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || req->newptr == NULL)
		return (error);

	if (value < NMBCLUSTERS_MIN)
		return (EINVAL);

	lockmgr(&mbupdate_lk, LK_EXCLUSIVE);
	if (nmbclusters != value) {
		nmbclusters = value;
		mbupdatelimits();
	}
	lockmgr(&mbupdate_lk, LK_RELEASE);
	return (0);
}

static int
sysctl_nmbjclusters(SYSCTL_HANDLER_ARGS)
{
	int error;
	int value;

	value = nmbjclusters;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || req->newptr == NULL)
		return (error);

	if (value < NMBJCLUSTERS_MIN)
		return (EINVAL);

	lockmgr(&mbupdate_lk, LK_EXCLUSIVE);
	if (nmbjclusters != value) {
		nmbjclusters = value;
		mbupdatelimits();
	}
	lockmgr(&mbupdate_lk, LK_RELEASE);
	return (0);
}

static int
sysctl_nmbufs(SYSCTL_HANDLER_ARGS)
{
	int error;
	int value;

	value = nmbufs;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || req->newptr == NULL)
		return (error);

	if (value < NMBUFS_MIN)
		return (EINVAL);

	lockmgr(&mbupdate_lk, LK_EXCLUSIVE);
	if (nmbufs != value) {
		nmbufs = value;
		mbupdatelimits();
	}
	lockmgr(&mbupdate_lk, LK_RELEASE);
	return (0);
}
547 /* "number of clusters of pages" */
/*
 * The mbuf object cache only guarantees that m_next and m_nextpkt are
 * NULL and that m_data points to the beginning of the data area.  In
 * particular, m_len and m_pkthdr.len are uninitialized.  It is the
 * responsibility of the caller to initialize those fields before use.
 */
static __inline boolean_t
mbuf_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;

	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_data = m->m_dat;
	m->m_flags = 0;

	return (TRUE);
}
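
/*
 * Usage sketch (illustrative only, not part of the original file; the
 * example_* name is hypothetical).  A caller pulling mbufs straight from
 * the object cache must finish the initialization itself, per the ctor
 * contract above.  Normal code should simply use m_get(), which does
 * this via updatestats().
 */
#if 0
static struct mbuf *
example_raw_mbuf_get(int ocflags)
{
	struct mbuf *m;

	m = objcache_get(mbuf_cache, ocflags);
	if (m == NULL)
		return (NULL);
	/* m_next/m_nextpkt are NULL and m_data == m_dat, per mbuf_ctor() */
	m->m_len = 0;		/* caller's responsibility */
	m->m_type = MT_DATA;	/* caller's responsibility */
	return (m);
}
#endif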
/*
 * Initialize the mbuf and the packet header fields.
 */
static boolean_t
mbufphdr_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;

	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_data = m->m_pktdat;
	m->m_flags = M_PKTHDR | M_PHCACHE;

	m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
	SLIST_INIT(&m->m_pkthdr.tags);
	m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
	m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */

	return (TRUE);
}
/*
 * A mbcluster object consists of 2K (MCLBYTES) cluster and a refcount.
 */
static boolean_t
mclmeta_ctor(void *obj, void *private, int ocflags)
{
	struct mbcluster *cl = obj;
	void *buf;

	if (ocflags & M_NOWAIT)
		buf = kmalloc(MCLBYTES, M_MBUFCL, M_NOWAIT | M_ZERO);
	else
		buf = kmalloc(MCLBYTES, M_MBUFCL, M_INTWAIT | M_ZERO);
	if (buf == NULL)
		return (FALSE);
	cl->mcl_refs = 0;
	cl->mcl_data = buf;
	return (TRUE);
}

static boolean_t
mjclmeta_ctor(void *obj, void *private, int ocflags)
{
	struct mbcluster *cl = obj;
	void *buf;

	if (ocflags & M_NOWAIT)
		buf = kmalloc(MJUMPAGESIZE, M_MBUFCL, M_NOWAIT | M_ZERO);
	else
		buf = kmalloc(MJUMPAGESIZE, M_MBUFCL, M_INTWAIT | M_ZERO);
	if (buf == NULL)
		return (FALSE);
	cl->mcl_refs = 0;
	cl->mcl_data = buf;
	return (TRUE);
}

static void
mclmeta_dtor(void *obj, void *private)
{
	struct mbcluster *mcl = obj;

	KKASSERT(mcl->mcl_refs == 0);
	kfree(mcl->mcl_data, M_MBUFCL);
}
static void
linkjcluster(struct mbuf *m, struct mbcluster *cl, uint size)
{
	/*
	 * Add the cluster to the mbuf.  The caller will detect that the
	 * mbuf now has an attached cluster.
	 */
	m->m_ext.ext_arg = cl;
	m->m_ext.ext_buf = cl->mcl_data;
	m->m_ext.ext_ref = m_mclref;
	if (size != MCLBYTES)
		m->m_ext.ext_free = m_mjclfree;
	else
		m->m_ext.ext_free = m_mclfree;
	m->m_ext.ext_size = size;
	atomic_add_int(&cl->mcl_refs, 1);

	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT | M_EXT_CLUSTER;
}

static void
linkcluster(struct mbuf *m, struct mbcluster *cl)
{
	linkjcluster(m, cl, MCLBYTES);
}
static boolean_t
mbufphdrcluster_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbufphdr_ctor(obj, private, ocflags);
	cl = objcache_get(mclmeta_cache, ocflags);
	if (cl == NULL) {
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (FALSE);
	}
	m->m_flags |= M_CLCACHE;
	linkcluster(m, cl);
	return (TRUE);
}

static boolean_t
mbufphdrjcluster_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbufphdr_ctor(obj, private, ocflags);
	cl = objcache_get(mjclmeta_cache, ocflags);
	if (cl == NULL) {
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (FALSE);
	}
	m->m_flags |= M_CLCACHE;
	linkjcluster(m, cl, MJUMPAGESIZE);
	return (TRUE);
}

static boolean_t
mbufcluster_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbuf_ctor(obj, private, ocflags);
	cl = objcache_get(mclmeta_cache, ocflags);
	if (cl == NULL) {
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (FALSE);
	}
	m->m_flags |= M_CLCACHE;
	linkcluster(m, cl);
	return (TRUE);
}

static boolean_t
mbufjcluster_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbuf_ctor(obj, private, ocflags);
	cl = objcache_get(mjclmeta_cache, ocflags);
	if (cl == NULL) {
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (FALSE);
	}
	m->m_flags |= M_CLCACHE;
	linkjcluster(m, cl, MJUMPAGESIZE);
	return (TRUE);
}
/*
 * Used for both the cluster and cluster PHDR caches.
 *
 * The mbuf may have lost its cluster due to sharing, deal
 * with the situation by checking M_EXT.
 */
static void
mbufcluster_dtor(void *obj, void *private)
{
	struct mbuf *m = obj;
	struct mbcluster *mcl;

	if (m->m_flags & M_EXT) {
		KKASSERT((m->m_flags & M_EXT_CLUSTER) != 0);
		mcl = m->m_ext.ext_arg;
		KKASSERT(mcl->mcl_refs == 1);
		mcl->mcl_refs = 0;
		if (m->m_flags & M_EXT && m->m_ext.ext_size != MCLBYTES)
			objcache_put(mjclmeta_cache, mcl);
		else
			objcache_put(mclmeta_cache, mcl);
	}
}
struct objcache_malloc_args mbuf_malloc_args = { MSIZE, M_MBUF };
struct objcache_malloc_args mclmeta_malloc_args =
	{ sizeof(struct mbcluster), M_MCLMETA };
static void
mbinit(void *dummy)
{
	int mb_limit, cl_limit, ncl_limit, jcl_limit;
	int limit;
	int i;

	/*
	 * Initialize statistics
	 */
	for (i = 0; i < ncpus; i++) {
		mbstat[i].m_msize = MSIZE;
		mbstat[i].m_mclbytes = MCLBYTES;
		mbstat[i].m_mjumpagesize = MJUMPAGESIZE;
		mbstat[i].m_minclsize = MINCLSIZE;
		mbstat[i].m_mlen = MLEN;
		mbstat[i].m_mhlen = MHLEN;
	}

	/*
	 * Create object caches and save cluster limits, which will
	 * be used to adjust backing kmalloc pools' limit later.
	 */

	mb_limit = cl_limit = 0;

	limit = nmbufs;
	mbuf_cache = objcache_create("mbuf",
	    limit, nmbufs / 4,
	    mbuf_ctor, NULL, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	limit = nmbufs;
	mbufphdr_cache = objcache_create("mbuf pkt hdr",
	    limit, nmbufs / 4,
	    mbufphdr_ctor, NULL, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	ncl_limit = nmbclusters;
	mclmeta_cache = objcache_create("cluster mbuf",
	    ncl_limit, nmbclusters / 4,
	    mclmeta_ctor, mclmeta_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args);
	cl_limit += ncl_limit;

	jcl_limit = nmbjclusters;
	mjclmeta_cache = objcache_create("jcluster mbuf",
	    jcl_limit, nmbjclusters / 4,
	    mjclmeta_ctor, mclmeta_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args);
	cl_limit += jcl_limit;

	limit = nmbclusters;
	mbufcluster_cache = objcache_create("mbuf + cluster",
	    limit, nmbclusters / mcl_cachefrac,
	    mbufcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	limit = nmbclusters;
	mbufphdrcluster_cache = objcache_create("mbuf pkt hdr + cluster",
	    limit, nmbclusters / mclph_cachefrac,
	    mbufphdrcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	limit = nmbjclusters;
	mbufjcluster_cache = objcache_create("mbuf + jcluster",
	    limit, nmbjclusters / mjcl_cachefrac,
	    mbufjcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	limit = nmbjclusters;
	mbufphdrjcluster_cache = objcache_create("mbuf pkt hdr + jcluster",
	    limit, nmbjclusters / mjclph_cachefrac,
	    mbufphdrjcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	/*
	 * Adjust backing kmalloc pools' limit
	 *
	 * NOTE: We raise the limit by another 1/8 to take the effect
	 * of loosememuse into account.
	 */
	cl_limit += cl_limit / 8;
	kmalloc_raise_limit(mclmeta_malloc_args.mtype,
			    mclmeta_malloc_args.objsize * (size_t)cl_limit);
	kmalloc_raise_limit(M_MBUFCL,
			    (MCLBYTES * (size_t)ncl_limit) +
			    (MJUMPAGESIZE * (size_t)jcl_limit));

	mb_limit += mb_limit / 8;
	kmalloc_raise_limit(mbuf_malloc_args.mtype,
			    mbuf_malloc_args.objsize * (size_t)mb_limit);
}
/*
 * Adjust mbuf limits after changes have been made
 *
 * Caller must hold mbupdate_lk
 */
static void
mbupdatelimits(void)
{
	int mb_limit, cl_limit, ncl_limit, jcl_limit;
	int limit;

	KASSERT(lockstatus(&mbupdate_lk, curthread) != 0,
	    ("mbupdate_lk is not held"));

	/*
	 * Figure out adjustments to object caches after nmbufs, nmbclusters,
	 * or nmbjclusters has been modified.
	 */
	mb_limit = cl_limit = 0;

	limit = nmbufs;
	objcache_set_cluster_limit(mbuf_cache, limit);
	mb_limit += limit;

	limit = nmbufs;
	objcache_set_cluster_limit(mbufphdr_cache, limit);
	mb_limit += limit;

	ncl_limit = nmbclusters;
	objcache_set_cluster_limit(mclmeta_cache, ncl_limit);
	cl_limit += ncl_limit;

	jcl_limit = nmbjclusters;
	objcache_set_cluster_limit(mjclmeta_cache, jcl_limit);
	cl_limit += jcl_limit;

	limit = nmbclusters;
	objcache_set_cluster_limit(mbufcluster_cache, limit);
	mb_limit += limit;

	limit = nmbclusters;
	objcache_set_cluster_limit(mbufphdrcluster_cache, limit);
	mb_limit += limit;

	limit = nmbjclusters;
	objcache_set_cluster_limit(mbufjcluster_cache, limit);
	mb_limit += limit;

	limit = nmbjclusters;
	objcache_set_cluster_limit(mbufphdrjcluster_cache, limit);
	mb_limit += limit;

	/*
	 * Adjust backing kmalloc pools' limit
	 *
	 * NOTE: We raise the limit by another 1/8 to take the effect
	 * of loosememuse into account.
	 */
	cl_limit += cl_limit / 8;
	kmalloc_raise_limit(mclmeta_malloc_args.mtype,
			    mclmeta_malloc_args.objsize * (size_t)cl_limit);
	kmalloc_raise_limit(M_MBUFCL,
			    (MCLBYTES * (size_t)ncl_limit) +
			    (MJUMPAGESIZE * (size_t)jcl_limit));
	mb_limit += mb_limit / 8;
	kmalloc_raise_limit(mbuf_malloc_args.mtype,
			    mbuf_malloc_args.objsize * (size_t)mb_limit);
}
/*
 * Return the number of references to this mbuf's data.  0 is returned
 * if the mbuf is not M_EXT, a reference count is returned if it is
 * M_EXT | M_EXT_CLUSTER, and 99 is returned if it is a special M_EXT.
 */
int
m_sharecount(struct mbuf *m)
{
	switch (m->m_flags & (M_EXT | M_EXT_CLUSTER)) {
	case 0:
		return (0);
	case M_EXT:
		return (99);
	case M_EXT | M_EXT_CLUSTER:
		return (((struct mbcluster *)m->m_ext.ext_arg)->mcl_refs);
	}
	return (0);		/* to shut up compiler */
}
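
/*
 * Usage sketch (illustrative only, not from the original file; example_*
 * is hypothetical).  The share count gates in-place modification of
 * cluster data, exactly as m_free() below uses it to decide whether a
 * cluster may be returned to the combined cache.
 */
#if 0
static int
example_cluster_is_private(struct mbuf *m)
{
	/* 0: no cluster attached; 1: sole owner; >1 or 99: shared */
	return (m_sharecount(m) <= 1);
}
#endif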
/*
 * change mbuf to new type
 */
void
m_chtype(struct mbuf *m, int type)
{
	struct globaldata *gd = mycpu;

	++mbtypes[gd->gd_cpuid].stats[type];
	--mbtypes[gd->gd_cpuid].stats[m->m_type];
	m->m_type = type;
}
static void
m_reclaim(void)
{
	struct domain *dp;
	struct protosw *pr;

	kprintf("Debug: m_reclaim() called\n");

	SLIST_FOREACH(dp, &domains, dom_next) {
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
			if (pr->pr_drain)
				(*pr->pr_drain)();
		}
	}
	++mbstat[mycpu->gd_cpuid].m_drain;
}
static __inline void
updatestats(struct mbuf *m, int type)
{
	struct globaldata *gd = mycpu;

	m->m_type = type;
	mbuftrack(m);
#ifdef MBUF_DEBUG
	KASSERT(m->m_next == NULL, ("mbuf %p: bad m_next in get", m));
	KASSERT(m->m_nextpkt == NULL, ("mbuf %p: bad m_nextpkt in get", m));
#endif

	++mbtypes[gd->gd_cpuid].stats[type];
	++mbstat[gd->gd_cpuid].m_mbufs;
}
struct mbuf *
m_get(int how, int type)
{
	struct mbuf *m;
	int ntries = 0;
	int ocf = MB_OCFLAG(how);

retryonce:

	m = objcache_get(mbuf_cache, ocf);

	if (m == NULL) {
		if ((ocf & M_WAITOK) && ntries++ == 0) {
			struct objcache *reclaimlist[] = {
				mbufphdr_cache,
				mbufcluster_cache,
				mbufphdrcluster_cache,
				mbufjcluster_cache,
				mbufphdrjcluster_cache
			};
			const int nreclaims = NELEM(reclaimlist);

			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
				m_reclaim();
			goto retryonce;
		}
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (NULL);
	}
#ifdef MBUF_DEBUG
	KASSERT(m->m_data == m->m_dat, ("mbuf %p: bad m_data in get", m));
#endif
	m->m_len = 0;

	updatestats(m, type);
	return (m);
}
struct mbuf *
m_gethdr(int how, int type)
{
	struct mbuf *m;
	int ocf = MB_OCFLAG(how);
	int ntries = 0;

retryonce:

	m = objcache_get(mbufphdr_cache, ocf);

	if (m == NULL) {
		if ((ocf & M_WAITOK) && ntries++ == 0) {
			struct objcache *reclaimlist[] = {
				mbuf_cache,
				mbufcluster_cache, mbufphdrcluster_cache,
				mbufjcluster_cache, mbufphdrjcluster_cache
			};
			const int nreclaims = NELEM(reclaimlist);

			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
				m_reclaim();
			goto retryonce;
		}
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (NULL);
	}
#ifdef MBUF_DEBUG
	KASSERT(m->m_data == m->m_pktdat, ("mbuf %p: bad m_data in get", m));
#endif
	m->m_len = 0;
	m->m_pkthdr.len = 0;

	updatestats(m, type);
	return (m);
}
/*
 * Get a mbuf (not a mbuf cluster!) and zero it.
 */
struct mbuf *
m_getclr(int how, int type)
{
	struct mbuf *m;

	m = m_get(how, type);
	if (m != NULL)
		bzero(m->m_data, MLEN);
	return (m);
}
static struct mbuf *
m_getcl_cache(int how, short type, int flags, struct objcache *mbclc,
	      struct objcache *mbphclc, u_long *cl_stats)
{
	struct mbuf *m = NULL;
	int ocflags = MB_OCFLAG(how);
	int ntries = 0;

retryonce:

	if (flags & M_PKTHDR)
		m = objcache_get(mbphclc, ocflags);
	else
		m = objcache_get(mbclc, ocflags);

	if (m == NULL) {
		if ((ocflags & M_WAITOK) && ntries++ == 0) {
			struct objcache *reclaimlist[1];

			if (flags & M_PKTHDR)
				reclaimlist[0] = mbclc;
			else
				reclaimlist[0] = mbphclc;
			if (!objcache_reclaimlist(reclaimlist, 1, ocflags))
				m_reclaim();
			goto retryonce;
		}
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (NULL);
	}
#ifdef MBUF_DEBUG
	KASSERT(m->m_data == m->m_ext.ext_buf,
		("mbuf %p: bad m_data in get", m));
#endif
	m->m_type = type;
	m->m_len = 0;
	m->m_pkthdr.len = 0;	/* just do it unconditionally */

	mbuftrack(m);

	++mbtypes[mycpu->gd_cpuid].stats[type];
	++(*cl_stats);
	return (m);
}
struct mbuf *
m_getjcl(int how, short type, int flags, size_t size)
{
	struct objcache *mbclc, *mbphclc;
	u_long *cl_stats;

	switch (size) {
	case MCLBYTES:
		mbclc = mbufcluster_cache;
		mbphclc = mbufphdrcluster_cache;
		cl_stats = &mbstat[mycpu->gd_cpuid].m_clusters;
		break;
	default:
		mbclc = mbufjcluster_cache;
		mbphclc = mbufphdrjcluster_cache;
		cl_stats = &mbstat[mycpu->gd_cpuid].m_jclusters;
		break;
	}
	return m_getcl_cache(how, type, flags, mbclc, mbphclc, cl_stats);
}
/*
 * Returns an mbuf with an attached cluster.
 * Because many network drivers use this kind of buffers a lot, it is
 * convenient to keep a small pool of free buffers of this kind.
 * Even a small size such as 10 gives about 10% improvement in the
 * forwarding rate in a bridge or router.
 */
struct mbuf *
m_getcl(int how, short type, int flags)
{
	return m_getcl_cache(how, type, flags,
			     mbufcluster_cache, mbufphdrcluster_cache,
			     &mbstat[mycpu->gd_cpuid].m_clusters);
}
/*
 * Allocate chain of requested length.
 */
struct mbuf *
m_getc(int len, int how, int type)
{
	struct mbuf *n, *nfirst = NULL, **ntail = &nfirst;
	int nsize;

	while (len > 0) {
		n = m_getl(len, how, type, 0, &nsize);
		if (n == NULL)
			goto failed;
		n->m_len = 0;
		*ntail = n;
		ntail = &n->m_next;
		len -= nsize;
	}
	return (nfirst);

failed:
	m_freem(nfirst);
	return (NULL);
}
/*
 * Allocate len-worth of mbufs and/or mbuf clusters (whatever fits best)
 * and return a pointer to the head of the allocated chain.  If m0 is
 * non-null, then we assume that it is a single mbuf or an mbuf chain to
 * which we want len bytes worth of mbufs and/or clusters attached, and so
 * if we succeed in allocating it, we will just return a pointer to m0.
 *
 * If we happen to fail at any point during the allocation, we will free
 * up everything we have already allocated and return NULL.
 *
 * Deprecated.  Use m_getc() and m_cat() instead.
 */
struct mbuf *
m_getm(struct mbuf *m0, int len, int type, int how)
{
	struct mbuf *nfirst;

	nfirst = m_getc(len, how, type);

	if (m0 != NULL) {
		m_last(m0)->m_next = nfirst;
		return (m0);
	}

	return (nfirst);
}
/*
 * Adds a cluster to a normal mbuf, M_EXT is set on success.
 * Deprecated.  Use m_getcl() instead.
 */
void
m_mclget(struct mbuf *m, int how)
{
	struct mbcluster *mcl;

	KKASSERT((m->m_flags & M_EXT) == 0);
	mcl = objcache_get(mclmeta_cache, MB_OCFLAG(how));
	if (mcl != NULL) {
		linkcluster(m, mcl);
		++mbstat[mycpu->gd_cpuid].m_clusters;
	} else {
		++mbstat[mycpu->gd_cpuid].m_drops;
	}
}
/*
 * Updates to mbcluster must be MPSAFE.  Only an entity which already has
 * a reference to the cluster can ref it, so we are in no danger of
 * racing an add with a subtract.  But the operation must still be atomic
 * since multiple entities may have a reference on the cluster.
 *
 * m_mclfree() is almost the same but it must contend with two entities
 * freeing the cluster at the same time.
 */
static void
m_mclref(void *arg)
{
	struct mbcluster *mcl = arg;

	atomic_add_int(&mcl->mcl_refs, 1);
}

/*
 * When dereferencing a cluster we have to deal with a N->0 race, where
 * N entities free their references simultaneously.  To do this we use
 * atomic_fetchadd_int().
 */
static void
m_mclfree(void *arg)
{
	struct mbcluster *mcl = arg;

	if (atomic_fetchadd_int(&mcl->mcl_refs, -1) == 1) {
		--mbstat[mycpu->gd_cpuid].m_clusters;
		objcache_put(mclmeta_cache, mcl);
	}
}

static void
m_mjclfree(void *arg)
{
	struct mbcluster *mcl = arg;

	if (atomic_fetchadd_int(&mcl->mcl_refs, -1) == 1) {
		--mbstat[mycpu->gd_cpuid].m_jclusters;
		objcache_put(mjclmeta_cache, mcl);
	}
}
/*
 * Free a single mbuf and any associated external storage.  The successor,
 * if any, is returned.
 *
 * We do need to check non-first mbuf for m_aux, since some of existing
 * code does not call M_PREPEND properly.
 * (example: call to bpf_mtap from drivers)
 */

#ifdef MBUF_DEBUG

struct mbuf *
_m_free(struct mbuf *m, const char *func)

#else

struct mbuf *
m_free(struct mbuf *m)

#endif
{
	struct mbuf *n;
	struct globaldata *gd = mycpu;

	KASSERT(m->m_type != MT_FREE, ("freeing free mbuf %p", m));
	KASSERT(M_TRAILINGSPACE(m) >= 0, ("overflowed mbuf %p", m));
	--mbtypes[gd->gd_cpuid].stats[m->m_type];

	n = m->m_next;

	/*
	 * Make sure the mbuf is in constructed state before returning it
	 * to the objcache.
	 */
	m->m_next = NULL;
	mbufuntrack(m);
#ifdef MBUF_DEBUG
	m->m_hdr.mh_lastfunc = func;
#endif
#ifdef notyet
	KKASSERT(m->m_nextpkt == NULL);
#else
	if (m->m_nextpkt != NULL) {
		static int afewtimes = 10;

		if (afewtimes-- > 0) {
			kprintf("mfree: m->m_nextpkt != NULL\n");
			print_backtrace(-1);
		}
		m->m_nextpkt = NULL;
	}
#endif
	if (m->m_flags & M_PKTHDR) {
		m_tag_delete_chain(m);		/* eliminate XXX JH */
	}

	m->m_flags &= (M_EXT | M_EXT_CLUSTER | M_CLCACHE | M_PHCACHE);
	/*
	 * Clean the M_PKTHDR state so we can return the mbuf to its original
	 * cache.  This is based on the PHCACHE flag which tells us whether
	 * the mbuf was originally allocated out of a packet-header cache
	 * or a non-packet-header cache.
	 */
	if (m->m_flags & M_PHCACHE) {
		m->m_flags |= M_PKTHDR;
		m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
		m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
		m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */
		SLIST_INIT(&m->m_pkthdr.tags);
	}

	/*
	 * Handle remaining flags combinations.  M_CLCACHE tells us whether
	 * the mbuf was originally allocated from a cluster cache or not,
	 * and is totally separate from whether the mbuf is currently
	 * associated with a cluster.
	 */
	switch (m->m_flags & (M_CLCACHE | M_EXT | M_EXT_CLUSTER)) {
	case M_CLCACHE | M_EXT | M_EXT_CLUSTER:
		/*
		 * mbuf+cluster cache case.  The mbuf was allocated from the
		 * combined mbuf_cluster cache and can be returned to the
		 * cache if the cluster hasn't been shared.
		 */
		if (m_sharecount(m) == 1) {
			/*
			 * The cluster has not been shared, we can just
			 * reset the data pointer and return the mbuf
			 * to the cluster cache.  Note that the reference
			 * count is left intact (it is still associated with
			 * an mbuf).
			 */
			m->m_data = m->m_ext.ext_buf;
			if (m->m_flags & M_EXT &&
			    m->m_ext.ext_size != MCLBYTES) {
				if (m->m_flags & M_PHCACHE)
					objcache_put(mbufphdrjcluster_cache, m);
				else
					objcache_put(mbufjcluster_cache, m);
				--mbstat[mycpu->gd_cpuid].m_jclusters;
			} else {
				if (m->m_flags & M_PHCACHE)
					objcache_put(mbufphdrcluster_cache, m);
				else
					objcache_put(mbufcluster_cache, m);
				--mbstat[mycpu->gd_cpuid].m_clusters;
			}
		} else {
			/*
			 * Hell.  Someone else has a ref on this cluster,
			 * we have to disconnect it which means we can't
			 * put it back into the mbufcluster_cache, we
			 * have to destroy the mbuf.
			 *
			 * Other mbuf references to the cluster will typically
			 * be M_EXT | M_EXT_CLUSTER but without M_CLCACHE.
			 *
			 * XXX we could try to connect another cluster to
			 * it.
			 */
			m->m_ext.ext_free(m->m_ext.ext_arg);
			m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
			if (m->m_ext.ext_size == MCLBYTES) {
				if (m->m_flags & M_PHCACHE)
					objcache_dtor(mbufphdrcluster_cache, m);
				else
					objcache_dtor(mbufcluster_cache, m);
			} else {
				if (m->m_flags & M_PHCACHE)
					objcache_dtor(mbufphdrjcluster_cache, m);
				else
					objcache_dtor(mbufjcluster_cache, m);
			}
		}
		break;
	case M_EXT | M_EXT_CLUSTER:
	case M_EXT:
		/*
		 * Normal cluster association case, disconnect the cluster from
		 * the mbuf.  The cluster may or may not be custom.
		 */
		m->m_ext.ext_free(m->m_ext.ext_arg);
		m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
		/* FALLTHROUGH */
	case 0:
		/*
		 * return the mbuf to the mbuf cache.
		 */
		if (m->m_flags & M_PHCACHE) {
			m->m_data = m->m_pktdat;
			objcache_put(mbufphdr_cache, m);
		} else {
			m->m_data = m->m_dat;
			objcache_put(mbuf_cache, m);
		}
		--mbstat[mycpu->gd_cpuid].m_mbufs;
		break;
	default:
		panic("bad mbuf flags %p %08x", m, m->m_flags);
		break;
	}
	return (n);
}
#ifdef MBUF_DEBUG

void
_m_freem(struct mbuf *m, const char *func)
{
	while (m)
		m = _m_free(m, func);
}

#else

void
m_freem(struct mbuf *m)
{
	while (m)
		m = m_free(m);
}

#endif
void
m_extadd(struct mbuf *m, caddr_t buf, u_int size, void (*reff)(void *),
	 void (*freef)(void *), void *arg)
{
	m->m_ext.ext_arg = arg;
	m->m_ext.ext_buf = buf;
	m->m_ext.ext_ref = reff;
	m->m_ext.ext_free = freef;
	m->m_ext.ext_size = size;
	reff(arg);
	m->m_data = buf;
	m->m_flags |= M_EXT;
}
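
/*
 * Usage sketch (illustrative only, not from the original file; the
 * example_* names and the softc argument are hypothetical): wrapping
 * caller-owned storage, e.g. a driver DMA buffer, in an mbuf with
 * custom ref/free callbacks.
 */
#if 0
static void example_extref(void *arg)  { /* gain a driver reference */ }
static void example_extfree(void *arg) { /* drop a driver reference */ }

static void
example_attach_ext(struct mbuf *m, caddr_t dmabuf, u_int size, void *softc)
{
	m_extadd(m, dmabuf, size, example_extref, example_extfree, softc);
	m->m_len = size;
}
#endif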
/*
 * mbuf utility routines
 */

/*
 * Lesser-used path for M_PREPEND: allocate new mbuf to prepend to chain and
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	if (m->m_flags & M_PKTHDR)
		mn = m_gethdr(how, m->m_type);
	else
		mn = m_get(how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR)
		M_MOVE_PKTHDR(mn, m);
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAITOK/M_NOWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copym(const struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	if (off == 0 && (m->m_flags & M_PKTHDR))
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = NULL;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		/*
		 * Because we are sharing any cluster attachment below,
		 * be sure to get an mbuf that does not have a cluster
		 * associated with it.
		 */
		if (copyhdr)
			n = m_gethdr(wait, m->m_type);
		else
			n = m_get(wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, wait))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			KKASSERT((n->m_flags & M_EXT) == 0);
			n->m_data = m->m_data + off;
			m->m_ext.ext_ref(m->m_ext.ext_arg);
			n->m_ext = m->m_ext;
			n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
		} else {
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		}
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == NULL)
		++mbstat[mycpu->gd_cpuid].m_mcfail;
	return (top);
nospace:
	m_freem(top);
	++mbstat[mycpu->gd_cpuid].m_mcfail;
	return (NULL);
}
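
/*
 * Usage sketch (illustrative only, not from the original file; example_*
 * is hypothetical): keep a reference-counted copy of the first hdrlen
 * bytes.  The result shares any clusters with the original and must
 * therefore be treated read-only.
 */
#if 0
static struct mbuf *
example_copy_header(const struct mbuf *m, int hdrlen)
{
	return (m_copym(m, 0, hdrlen, M_NOWAIT));
}
#endif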
/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies also have the room available.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	n = m_gethdr(how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	if (!m_dup_pkthdr(n, m, how))
		goto nospace;
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		KKASSERT((n->m_flags & M_EXT) == 0);
		n->m_data = m->m_data;
		m->m_ext.ext_ref(m->m_ext.ext_arg);
		n->m_ext = m->m_ext;
		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
	} else {
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		o = m_get(how, m->m_type);
		if (!o)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			KKASSERT((n->m_flags & M_EXT) == 0);
			n->m_data = m->m_data;
			m->m_ext.ext_ref(m->m_ext.ext_arg);
			n->m_ext = m->m_ext;
			n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	++mbstat[mycpu->gd_cpuid].m_mcfail;
	return (NULL);
}
/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
	unsigned count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
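
/*
 * Usage sketch (illustrative only, not from the original file; example_*
 * is hypothetical): copy a fixed-size header out of a chain by value,
 * so the caller need not care how the bytes are split across mbufs.
 * Assumes m has a valid packet header (M_PKTHDR).
 */
#if 0
static int
example_peek(const struct mbuf *m, int off, void *hdr, int hdrlen)
{
	if (m->m_pkthdr.len < off + hdrlen)
		return (0);
	m_copydata(m, off, hdrlen, (caddr_t)hdr);
	return (1);
}
#endif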
/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	/* Sanity check */
	if (m == NULL)
		return (NULL);
	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __func__));

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		n = m_getl(remain, how, m->m_type, top == NULL ? M_PKTHDR : 0,
			   &nsize);
		if (n == NULL)
			goto nospace;
		if (top == NULL)
			if (!m_dup_pkthdr(n, m, how))
				goto nospace0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		n->m_len = 0;
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
			("%s: bogus m_pkthdr.len", __func__));
	}
	return (top);

nospace:
	m_freem(top);
nospace0:
	++mbstat[mycpu->gd_cpuid].m_mcfail;
	return (NULL);
}
/*
 * Copy the non-packet mbuf data chain into a new set of mbufs, including
 * copying any mbuf clusters.  This is typically used to realign a data
 * chain by nfs_realign().
 *
 * The original chain is left intact.  how should be M_WAITOK or M_NOWAIT
 * and NULL can be returned if M_NOWAIT is passed.
 *
 * Be careful to use cluster mbufs, a large mbuf chain converted to non
 * cluster mbufs can exhaust our supply of mbufs.
 */
struct mbuf *
m_dup_data(struct mbuf *m, int how)
{
	struct mbuf **p, *n, *top = NULL;
	int mlen, moff, chunk, gsize, nsize;

	/* Degenerate case */
	if (m == NULL)
		return (NULL);

	/*
	 * Optimize the mbuf allocation but do not get too carried away.
	 */
	if (m->m_next || m->m_len > MLEN) {
		if (m->m_flags & M_EXT && m->m_ext.ext_size == MCLBYTES)
			gsize = MCLBYTES;
		else
			gsize = MJUMPAGESIZE;
	} else {
		gsize = MLEN;
	}

	/* Chain control */
	p = &top;
	n = NULL;
	nsize = 0;

	/*
	 * Scan the mbuf chain until nothing is left, the new mbuf chain
	 * will be allocated on the fly as needed.
	 */
	while (m) {
		mlen = m->m_len;
		moff = 0;

		while (mlen) {
			KKASSERT(m->m_type == MT_DATA);
			if (n == NULL) {
				n = m_getl(gsize, how, MT_DATA, 0, &nsize);
				if (n == NULL)
					goto nospace;
				n->m_len = 0;
				*p = n;
				p = &n->m_next;
			}
			chunk = imin(mlen, nsize);
			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			mlen -= chunk;
			moff += chunk;
			n->m_len += chunk;
			nsize -= chunk;
			if (nsize == 0)
				n = NULL;
		}
		m = m->m_next;
	}
	*p = NULL;
	return (top);

nospace:
	*p = NULL;
	m_freem(top);
	++mbstat[mycpu->gd_cpuid].m_mcfail;
	return (NULL);
}
/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	m = m_last(m);
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}
void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == NULL)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}
/*
 * Set the m_data pointer of a newly-allocated mbuf
 * to place an object of the specified size at the
 * end of the mbuf, longword aligned.
 */
void
m_align(struct mbuf *m, int len)
{
	int adjust;

	if (m->m_flags & M_EXT)
		adjust = m->m_ext.ext_size - len;
	else if (m->m_flags & M_PKTHDR)
		adjust = MHLEN - len;
	else
		adjust = MLEN - len;
	m->m_data += adjust & ~(sizeof(long) - 1);
}
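
/*
 * Usage sketch (illustrative only, not from the original file; example_*
 * is hypothetical): reserve an aligned payload at the end of a freshly
 * allocated mbuf.  Only valid on a new mbuf before any data is stored,
 * and paylen must fit (paylen <= MHLEN here).
 */
#if 0
static struct mbuf *
example_build_reply(int paylen)
{
	struct mbuf *m;

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m_align(m, paylen);
	m->m_len = m->m_pkthdr.len = paylen;
	return (m);
}
#endif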
/*
 * Create a writable copy of the mbuf chain.  While doing this
 * we compact the chain with a goal of producing a chain with
 * at most two mbufs.  The second mbuf in this chain is likely
 * to be a cluster.  The primary purpose of this work is to create
 * a writable packet for encryption, compression, etc.  The
 * secondary goal is to linearize the data so the data can be
 * passed to crypto hardware in the most efficient manner possible.
 */
struct mbuf *
m_unshare(struct mbuf *m0, int how)
{
	struct mbuf *m, *mprev;
	struct mbuf *n, *mfirst, *mlast;
	int len, off;

	mprev = NULL;
	for (m = m0; m != NULL; m = mprev->m_next) {
		/*
		 * Regular mbufs are ignored unless there's a cluster
		 * in front of it that we can use to coalesce.  We do
		 * the latter mainly so later clusters can be coalesced
		 * also w/o having to handle them specially (i.e. convert
		 * mbuf+cluster -> cluster).  This optimization is heavily
		 * influenced by the assumption that we're running over
		 * Ethernet where MCLBYTES is large enough that the max
		 * packet size will permit lots of coalescing into a
		 * single cluster.  This in turn permits efficient
		 * crypto operations, especially when using hardware.
		 */
		if ((m->m_flags & M_EXT) == 0) {
			if (mprev && (mprev->m_flags & M_EXT) &&
			    m->m_len <= M_TRAILINGSPACE(mprev)) {
				/* XXX: this ignores mbuf types */
				memcpy(mtod(mprev, caddr_t) + mprev->m_len,
				       mtod(m, caddr_t), m->m_len);
				mprev->m_len += m->m_len;
				mprev->m_next = m->m_next;	/* unlink from chain */
				m_free(m);			/* reclaim mbuf */
			} else {
				mprev = m;
			}
			continue;
		}
		/*
		 * Writable mbufs are left alone (for now).
		 */
		if (M_WRITABLE(m)) {
			mprev = m;
			continue;
		}

		/*
		 * Not writable, replace with a copy or coalesce with
		 * the previous mbuf if possible (since we have to copy
		 * it anyway, we try to reduce the number of mbufs and
		 * clusters so that future work is easier).
		 */
		KASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags));
		/* NB: we only coalesce into a cluster or larger */
		if (mprev != NULL && (mprev->m_flags & M_EXT) &&
		    m->m_len <= M_TRAILINGSPACE(mprev)) {
			/* XXX: this ignores mbuf types */
			memcpy(mtod(mprev, caddr_t) + mprev->m_len,
			       mtod(m, caddr_t), m->m_len);
			mprev->m_len += m->m_len;
			mprev->m_next = m->m_next;	/* unlink from chain */
			m_free(m);			/* reclaim mbuf */
			continue;
		}

		/*
		 * Allocate new space to hold the copy...
		 */
		/* XXX why can M_PKTHDR be set past the first mbuf? */
		if (mprev == NULL && (m->m_flags & M_PKTHDR)) {
			/*
			 * NB: if a packet header is present we must
			 * allocate the mbuf separately from any cluster
			 * because M_MOVE_PKTHDR will smash the data
			 * pointer and drop the M_EXT marker.
			 */
			MGETHDR(n, how, m->m_type);
			if (n == NULL) {
				m_freem(m0);
				return (NULL);
			}
			M_MOVE_PKTHDR(n, m);
			MCLGET(n, how);
			if ((n->m_flags & M_EXT) == 0) {
				m_free(n);
				m_freem(m0);
				return (NULL);
			}
		} else {
			n = m_getcl(how, m->m_type, m->m_flags);
			if (n == NULL) {
				m_freem(m0);
				return (NULL);
			}
		}
		/*
		 * ... and copy the data.  We deal with jumbo mbufs
		 * (i.e. m_len > MCLBYTES) by splitting them into
		 * clusters.  We could just malloc a buffer and make
		 * it external but too many device drivers don't know
		 * how to break up the non-contiguous memory when
		 * doing DMA.
		 */
		len = m->m_len;
		off = 0;
		mfirst = n;
		mlast = NULL;
		for (;;) {
			int cc = min(len, MCLBYTES);
			memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc);
			n->m_len = cc;
			if (mlast != NULL)
				mlast->m_next = n;
			mlast = n;

			len -= cc;
			if (len <= 0)
				break;
			off += cc;

			n = m_getcl(how, m->m_type, m->m_flags);
			if (n == NULL) {
				m_freem(mfirst);
				m_freem(m0);
				return (NULL);
			}
		}
		n->m_next = m->m_next;
		if (mprev == NULL)
			m0 = mfirst;		/* new head of chain */
		else
			mprev->m_next = mfirst;	/* replace old mbuf */
		m_free(m);			/* release old mbuf */
		mprev = mfirst;
	}
	return (m0);
}
/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod will work for a structure
 * of size len).  Returns the resulting mbuf chain on success, frees it and
 * returns null on failure.  If there is room, it will add up to
 * max_protohdr-len extra bytes to the contiguous region in an attempt to
 * avoid being called next time.
 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if (!(n->m_flags & M_EXT) &&
	    n->m_data + len < &n->m_dat[MLEN] &&
	    n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		if (n->m_flags & M_PKTHDR)
			m = m_gethdr(M_NOWAIT, n->m_type);
		else
			m = m_get(M_NOWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR)
			M_MOVE_PKTHDR(m, n);
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	++mbstat[mycpu->gd_cpuid].m_mcfail;
	return (NULL);
}
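
/*
 * Usage sketch (illustrative only, not from the original file; example_*
 * is hypothetical): the canonical protocol-input pattern, making hdrlen
 * bytes contiguous before dereferencing them via mtod().  On failure
 * the chain has already been freed, so the caller must not touch it.
 */
#if 0
static struct mbuf *
example_pullup_header(struct mbuf *m, int hdrlen)
{
	if (m->m_len < hdrlen && (m = m_pullup(m, hdrlen)) == NULL)
		return (NULL);
	return (m);
}
#endif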
/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf.  Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		n = m_gethdr(wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				m_free(n);
				return (NULL);
			} else {
				n->m_len = 0;
				return (n);
			}
		} else {
			MH_ALIGN(n, remain);
		}
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		n = m_get(wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		KKASSERT((n->m_flags & M_EXT) == 0);
		n->m_data = m->m_data + len;
		m->m_ext.ext_ref(m->m_ext.ext_arg);
		n->m_ext = m->m_ext;
		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}
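
/*
 * Usage sketch (illustrative only, not from the original file; example_*
 * is hypothetical): chop a packet down to its first len0 bytes.  On
 * failure m0 is left intact; on success the detached tail is freed.
 * Note the tail may share clusters with m0, hence the M_WRITABLE()
 * caveat above.
 */
#if 0
static struct mbuf *
example_truncate(struct mbuf *m0, int len0)
{
	struct mbuf *tail;

	tail = m_split(m0, len0, M_NOWAIT);
	if (tail == NULL)
		return (NULL);
	m_freem(tail);
	return (m0);
}
#endif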
/*
 * Routine to copy from device local memory into mbufs.
 * Note: "offset" is ill-defined and always called as 0, so ignore it.
 */
struct mbuf *
m_devget(char *buf, int len, int offset, struct ifnet *ifp,
	 void (*copy)(volatile const void *from, volatile void *to,
		      size_t length))
{
	struct mbuf *m, *mfirst = NULL, **mtail;
	int nsize, flags;

	mtail = &mfirst;
	flags = M_PKTHDR;

	while (len > 0) {
		m = m_getl(len, M_NOWAIT, MT_DATA, flags, &nsize);
		if (m == NULL) {
			m_freem(mfirst);
			return (NULL);
		}
		m->m_len = min(len, nsize);

		if (flags & M_PKTHDR) {
			if (len + max_linkhdr <= nsize)
				m->m_data += max_linkhdr;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = len;
			flags = 0;
		}

		copy(buf, m->m_data, (unsigned)m->m_len);
		buf += m->m_len;
		len -= m->m_len;
		*mtail = m;
		mtail = &m->m_next;
	}

	return (mfirst);
}
/*
 * Routine to pad mbuf to the specified length 'padto'.
 */
int
m_devpad(struct mbuf *m, int padto)
{
	struct mbuf *last = NULL;
	int padlen;

	if (padto <= m->m_pkthdr.len)
		return 0;

	padlen = padto - m->m_pkthdr.len;

	/* if there's only the packet-header and we can pad there, use it. */
	if (m->m_pkthdr.len == m->m_len && M_TRAILINGSPACE(m) >= padlen) {
		last = m;
	} else {
		/*
		 * Walk packet chain to find last mbuf. We will either
		 * pad there, or append a new mbuf and pad it
		 */
		for (last = m; last->m_next != NULL; last = last->m_next)
			; /* EMPTY */

		/* `last' now points to last in chain. */
		if (M_TRAILINGSPACE(last) < padlen) {
			struct mbuf *n;

			/* Allocate new empty mbuf, pad it.  Compact later. */
			MGET(n, M_NOWAIT, MT_DATA);
			if (n == NULL)
				return ENOBUFS;
			n->m_len = 0;
			last->m_next = n;
			last = n;
		}
	}
	KKASSERT(M_TRAILINGSPACE(last) >= padlen);
	KKASSERT(M_WRITABLE(last));

	/* Now zero the pad area */
	bzero(mtod(last, char *) + last->m_len, padlen);
	last->m_len += padlen;
	m->m_pkthdr.len += padlen;
	return 0;
}
/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == NULL)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			n = m_getclr(M_NOWAIT, m->m_type);
			if (n == NULL)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			n = m_get(M_NOWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}
/*
 * Append the specified data to the indicated mbuf chain,
 * Extend the mbuf chain if the new data does not fit in
 * existing space.
 *
 * Return 1 if able to complete the job; otherwise 0.
 */
int
m_append(struct mbuf *m0, int len, c_caddr_t cp)
{
	struct mbuf *m, *n;
	int remainder, space;

	for (m = m0; m->m_next != NULL; m = m->m_next)
		;
	remainder = len;
	space = M_TRAILINGSPACE(m);
	if (space > 0) {
		/*
		 * Copy into available space.
		 */
		if (space > remainder)
			space = remainder;
		bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
		m->m_len += space;
		cp += space, remainder -= space;
	}
	while (remainder > 0) {
		/*
		 * Allocate a new mbuf; could check space
		 * and allocate a cluster instead.
		 */
		n = m_get(M_NOWAIT, m->m_type);
		if (n == NULL)
			break;
		n->m_len = min(MLEN, remainder);
		bcopy(cp, mtod(n, caddr_t), n->m_len);
		cp += n->m_len, remainder -= n->m_len;
		m->m_next = n;
		m = n;
	}
	if (m0->m_flags & M_PKTHDR)
		m0->m_pkthdr.len += len - remainder;
	return (remainder == 0);
}
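
/*
 * Usage sketch (illustrative only, not from the original file; example_*
 * is hypothetical): append a trailer to a packet, letting m_append()
 * grow the chain and fix up m_pkthdr.len.  Returns 1 on success, 0 if
 * allocation failed partway.
 */
#if 0
static int
example_add_trailer(struct mbuf *m0, const void *buf, int len)
{
	return (m_append(m0, len, (c_caddr_t)buf));
}
#endif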
/*
 * Apply function f to the data in an mbuf chain starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
int
m_apply(struct mbuf *m, int off, int len,
	int (*f)(void *, void *, u_int), void *arg)
{
	u_int count;
	int rval;

	KASSERT(off >= 0, ("m_apply, negative off %d", off));
	KASSERT(len >= 0, ("m_apply, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
		count = min(m->m_len - off, len);
		rval = (*f)(arg, mtod(m, caddr_t) + off, count);
		if (rval)
			return (rval);
		len -= count;
		off = 0;
		m = m->m_next;
	}
	return (0);
}
/*
 * Return a pointer to mbuf/offset of location in mbuf chain.
 */
struct mbuf *
m_getptr(struct mbuf *m, int loc, int *off)
{
	while (loc >= 0) {
		/* Normal end of search. */
		if (m->m_len > loc) {
			*off = loc;
			return (m);
		} else {
			loc -= m->m_len;
			if (m->m_next == NULL) {
				if (loc == 0) {
					/* Point at the end of valid data. */
					*off = m->m_len;
					return (m);
				}
				return (NULL);
			}
			m = m->m_next;
		}
	}
	return (NULL);
}
void
m_print(const struct mbuf *m)
{
	int len;
	const struct mbuf *m2;
	char *hexstr;

	len = m->m_pkthdr.len;
	m2 = m;
	hexstr = kmalloc(HEX_NCPYLEN(len), M_TEMP, M_ZERO | M_WAITOK);
	while (len) {
		kprintf("%p %s\n", m2, hexncpy(m2->m_data, m2->m_len, hexstr,
			HEX_NCPYLEN(m2->m_len), "-"));
		len -= m2->m_len;
		m2 = m2->m_next;
	}
	kfree(hexstr, M_TEMP);
}
2495 * "Move" mbuf pkthdr from "from" to "to".
2496 * "from" must have M_PKTHDR set, and "to" must be empty.
2499 m_move_pkthdr(struct mbuf *to, struct mbuf *from)
2501 KASSERT((to->m_flags & M_PKTHDR), ("m_move_pkthdr: not packet header"));
2503 to->m_flags |= from->m_flags & M_COPYFLAGS;
2504 to->m_pkthdr = from->m_pkthdr; /* especially tags */
2505 SLIST_INIT(&from->m_pkthdr.tags); /* purge tags from src */
/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
{
	KASSERT((to->m_flags & M_PKTHDR), ("m_dup_pkthdr: not packet header"));

	to->m_flags = (from->m_flags & M_COPYFLAGS) |
		      (to->m_flags & ~M_COPYFLAGS);
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, how));
}
/*
 * Defragment a mbuf chain, returning the shortest possible
 * chain of mbufs and clusters.  If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed in chain will be unchanged.  Upon success,
 * the original chain will be freed, and the new chain
 * will be returned.
 *
 * If a non-packet header is passed in, the original
 * mbuf (chain?) will be returned unharmed.
 *
 * m_defrag_nofree doesn't free the passed in mbuf.
 */
struct mbuf *
m_defrag(struct mbuf *m0, int how)
{
	struct mbuf *m_new;

	if ((m_new = m_defrag_nofree(m0, how)) == NULL)
		return (NULL);
	if (m_new != m0)
		m_freem(m0);
	return (m_new);
}

struct mbuf *
m_defrag_nofree(struct mbuf *m0, int how)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, length, nsize;

	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

#ifdef MBUF_STRESS_TEST
	if (m_defragrandomfailures) {
		int temp = karc4random() & 0xff;

		if (temp == 0xba)
			goto nospace;
	}
#endif

	m_final = m_getl(m0->m_pkthdr.len, how, MT_DATA, M_PKTHDR, &nsize);
	if (m_final == NULL)
		goto nospace;
	m_final->m_len = 0;	/* in case m0->m_pkthdr.len is zero */

	if (m_dup_pkthdr(m_final, m0, how) == 0)
		goto nospace;

	m_new = m_final;

	while (progress < m0->m_pkthdr.len) {
		length = m0->m_pkthdr.len - progress;
		if (length > MCLBYTES)
			length = MCLBYTES;

		if (m_new == NULL) {
			m_new = m_getl(length, how, MT_DATA, 0, &nsize);
			if (m_new == NULL)
				goto nospace;
		}

		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
		progress += length;
		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
	if (m0->m_next == NULL)
		m_defraguseless++;
	m_defragpackets++;
	m_defragbytes += m_final->m_pkthdr.len;
	return (m_final);

nospace:
	m_defragfailure++;
	if (m_new)
		m_free(m_new);
	m_freem(m_final);
	return (NULL);
}
/*
 * Move data from uio into mbufs.
 */
struct mbuf *
m_uiomove(struct uio *uio)
{
	struct mbuf *m;			/* current working mbuf */
	struct mbuf *head = NULL;	/* result mbuf chain */
	struct mbuf **mp = &head;
	int flags = M_PKTHDR;
	int nsize;
	int error;
	int resid;

	do {
		if (uio->uio_resid > INT_MAX)
			resid = INT_MAX;
		else
			resid = (int)uio->uio_resid;
		m = m_getl(resid, M_WAITOK, MT_DATA, flags, &nsize);
		if (flags) {
			m->m_pkthdr.len = 0;
			/* Leave room for protocol headers. */
			if (resid < MHLEN)
				MH_ALIGN(m, resid);
			flags = 0;
		}
		m->m_len = imin(nsize, resid);
		error = uiomove(mtod(m, caddr_t), m->m_len, uio);
		if (error) {
			m_free(m);
			goto failed;
		}
		*mp = m;
		mp = &m->m_next;
		head->m_pkthdr.len += m->m_len;
	} while (uio->uio_resid > 0);

	return (head);

failed:
	m_freem(head);
	return (NULL);
}
struct mbuf *
m_last(struct mbuf *m)
{
	while (m->m_next)
		m = m->m_next;
	return (m);
}
/*
 * Return the number of bytes in an mbuf chain.
 * If lastm is not NULL, also return the last mbuf.
 */
u_int
m_lengthm(struct mbuf *m, struct mbuf **lastm)
{
	u_int len = 0;
	struct mbuf *prev = m;

	while (m) {
		len += m->m_len;
		prev = m;
		m = m->m_next;
	}
	if (lastm != NULL)
		*lastm = prev;
	return (len);
}
/*
 * Like m_lengthm(), except also keep track of mbuf usage.
 */
u_int
m_countm(struct mbuf *m, struct mbuf **lastm, u_int *pmbcnt)
{
	u_int len = 0, mbcnt = 0;
	struct mbuf *prev = m;

	while (m) {
		len += m->m_len;
		mbcnt += MSIZE;
		if (m->m_flags & M_EXT)
			mbcnt += m->m_ext.ext_size;
		prev = m;
		m = m->m_next;
	}
	if (lastm != NULL)
		*lastm = prev;
	*pmbcnt = mbcnt;
	return (len);
}