/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
 * $DragonFly: src/sys/kern/uipc_mbuf.c,v 1.70 2008/11/20 14:21:01 sephe Exp $
 */
#include "opt_param.h"
#include "opt_mbuf_stress_test.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/objcache.h>
#include <sys/protosw.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/thread2.h>
#include <machine/atomic.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <machine/cpu.h>
/*
 * mbuf cluster meta-data
 */

/*
 * mbuf tracking for debugging purposes
 */
static MALLOC_DEFINE(M_MTRACK, "mtrack", "mtrack");

RB_HEAD(mbuf_rb_tree, mbtrack);
RB_PROTOTYPE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *);
	RB_ENTRY(mbtrack) rb_node;

mbtrack_cmp(struct mbtrack *mb1, struct mbtrack *mb2)

RB_GENERATE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *, m);

struct mbuf_rb_tree	mbuf_track_root;

mbuftrack(struct mbuf *m)
	mbt = kmalloc(sizeof(*mbt), M_MTRACK, M_INTWAIT|M_ZERO);
	if (mbuf_rb_tree_RB_INSERT(&mbuf_track_root, mbt))
		panic("mbuftrack: mbuf %p already being tracked\n", m);

mbufuntrack(struct mbuf *m)
	mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
	kprintf("mbufuntrack: mbuf %p was not tracked\n", m);
	mbuf_rb_tree_RB_REMOVE(&mbuf_track_root, mbt);
	kfree(mbt, M_MTRACK);

mbuftrackid(struct mbuf *m, int trackid)
	mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
	mbt->trackid = trackid;

mbuftrack_callback(struct mbtrack *mbt, void *arg)
	struct sysctl_req *req = arg;
	ksnprintf(buf, sizeof(buf), "mbuf %p track %d\n", mbt->m, mbt->trackid);
	error = SYSCTL_OUT(req, buf, strlen(buf));

mbuftrack_show(SYSCTL_HANDLER_ARGS)
	error = mbuf_rb_tree_RB_SCAN(&mbuf_track_root, NULL,
				     mbuftrack_callback, req);

SYSCTL_PROC(_kern_ipc, OID_AUTO, showmbufs, CTLFLAG_RD|CTLTYPE_STRING,
	    0, 0, mbuftrack_show, "A", "Show all in-use mbufs");

#define mbufuntrack(m)
static void mbinit(void *);
SYSINIT(mbuf, SI_BOOT2_MACHDEP, SI_ORDER_FIRST, mbinit, NULL)

static u_long	mbtypes[SMP_MAXCPU][MT_NTYPES];
static struct mbstat mbstat[SMP_MAXCPU];

#ifdef MBUF_STRESS_TEST
int	m_defragrandomfailures;

struct objcache *mbuf_cache, *mbufphdr_cache;
struct objcache *mclmeta_cache;
struct objcache *mbufcluster_cache, *mbufphdrcluster_cache;

SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
	   &max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
	   &max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
	   &max_datalen, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
static int do_mbstat(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern_ipc, KIPC_MBSTAT, mbstat, CTLTYPE_STRUCT|CTLFLAG_RD,
	    0, 0, do_mbstat, "S,mbstat", "");
static int do_mbtypes(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern_ipc, OID_AUTO, mbtypes, CTLTYPE_ULONG|CTLFLAG_RD,
	    0, 0, do_mbtypes, "LU", "");
do_mbstat(SYSCTL_HANDLER_ARGS)
	struct mbstat mbstat_total;
	struct mbstat *mbstat_totalp;

	bzero(&mbstat_total, sizeof(mbstat_total));
	mbstat_totalp = &mbstat_total;

	for (i = 0; i < ncpus; i++)
		mbstat_total.m_mbufs += mbstat[i].m_mbufs;
		mbstat_total.m_clusters += mbstat[i].m_clusters;
		mbstat_total.m_spare += mbstat[i].m_spare;
		mbstat_total.m_clfree += mbstat[i].m_clfree;
		mbstat_total.m_drops += mbstat[i].m_drops;
		mbstat_total.m_wait += mbstat[i].m_wait;
		mbstat_total.m_drain += mbstat[i].m_drain;
		mbstat_total.m_mcfail += mbstat[i].m_mcfail;
		mbstat_total.m_mpfail += mbstat[i].m_mpfail;

	/*
	 * The following fields are not cumulative, so just
	 * get their values once.
	 */
	mbstat_total.m_msize = mbstat[0].m_msize;
	mbstat_total.m_mclbytes = mbstat[0].m_mclbytes;
	mbstat_total.m_minclsize = mbstat[0].m_minclsize;
	mbstat_total.m_mlen = mbstat[0].m_mlen;
	mbstat_total.m_mhlen = mbstat[0].m_mhlen;

	return (sysctl_handle_opaque(oidp, mbstat_totalp, sizeof(mbstat_total), req));
do_mbtypes(SYSCTL_HANDLER_ARGS)
	u_long totals[MT_NTYPES];

	for (i = 0; i < MT_NTYPES; i++)

	for (i = 0; i < ncpus; i++)
		for (j = 0; j < MT_NTYPES; j++)
			totals[j] += mbtypes[i][j];

	return (sysctl_handle_opaque(oidp, totals, sizeof(totals), req));

/*
 * These are read-only because we do not currently have any code
 * to adjust the objcache limits after the fact.  The variables
 * may only be set as boot-time tunables.
 */
SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
	   &nmbclusters, 0, "Maximum number of mbuf clusters available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
	   "Maximum number of mbufs available");

SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
	   &m_defragpackets, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
	   &m_defragbytes, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
	   &m_defraguseless, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
	   &m_defragfailure, 0, "");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
	   &m_defragrandomfailures, 0, "");

static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");
static MALLOC_DEFINE(M_MBUFCL, "mbufcl", "mbufcl");
static MALLOC_DEFINE(M_MCLMETA, "mclmeta", "mclmeta");

static void m_reclaim(void);
static void m_mclref(void *arg);
static void m_mclfree(void *arg);

#define NMBCLUSTERS	(512 + maxusers * 16)
#define NMBUFS		(nmbclusters * 2)

/*
 * Perform sanity checks of tunables declared above.
 */
tunable_mbinit(void *dummy)
	/*
	 * This has to be done before VM init.
	 */
	nmbclusters = NMBCLUSTERS;
	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
	if (nmbufs < nmbclusters * 2)
		nmbufs = nmbclusters * 2;

SYSINIT(tunable_mbinit, SI_BOOT1_TUNABLES, SI_ORDER_ANY,
	tunable_mbinit, NULL);

/* "number of clusters of pages" */

/*
 * The mbuf object cache only guarantees that m_next and m_nextpkt are
 * NULL and that m_data points to the beginning of the data area.  In
 * particular, m_len and m_pkthdr.len are uninitialized.  It is the
 * responsibility of the caller to initialize those fields before use.
 */
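/*
 * Illustrative sketch (added for exposition, not part of the original
 * file): per the note above, a consumer of the cache must initialize the
 * length fields itself before using the mbuf, e.g.:
 *
 *	struct mbuf *m;
 *
 *	m = m_gethdr(MB_WAIT, MT_DATA);
 *	if (m != NULL) {
 *		m->m_len = 0;
 *		m->m_pkthdr.len = 0;
 *	}
 */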
static boolean_t __inline
mbuf_ctor(void *obj, void *private, int ocflags)
	struct mbuf *m = obj;

	m->m_data = m->m_dat;

/*
 * Initialize the mbuf and the packet header fields.
 */
mbufphdr_ctor(void *obj, void *private, int ocflags)
	struct mbuf *m = obj;

	m->m_data = m->m_pktdat;
	m->m_flags = M_PKTHDR | M_PHCACHE;
	m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
	SLIST_INIT(&m->m_pkthdr.tags);
	m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
	m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */
/*
 * An mbcluster object consists of a 2K (MCLBYTES) cluster and a refcount.
 */
mclmeta_ctor(void *obj, void *private, int ocflags)
	struct mbcluster *cl = obj;

	if (ocflags & M_NOWAIT)
		buf = kmalloc(MCLBYTES, M_MBUFCL, M_NOWAIT | M_ZERO);
	else
		buf = kmalloc(MCLBYTES, M_MBUFCL, M_INTWAIT | M_ZERO);

mclmeta_dtor(void *obj, void *private)
	struct mbcluster *mcl = obj;

	KKASSERT(mcl->mcl_refs == 0);
	kfree(mcl->mcl_data, M_MBUFCL);

linkcluster(struct mbuf *m, struct mbcluster *cl)
	/*
	 * Add the cluster to the mbuf.  The caller will detect that the
	 * mbuf now has an attached cluster.
	 */
	m->m_ext.ext_arg = cl;
	m->m_ext.ext_buf = cl->mcl_data;
	m->m_ext.ext_ref = m_mclref;
	m->m_ext.ext_free = m_mclfree;
	m->m_ext.ext_size = MCLBYTES;
	atomic_add_int(&cl->mcl_refs, 1);

	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT | M_EXT_CLUSTER;

mbufphdrcluster_ctor(void *obj, void *private, int ocflags)
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbufphdr_ctor(obj, private, ocflags);
	cl = objcache_get(mclmeta_cache, ocflags);
	m->m_flags |= M_CLCACHE;

mbufcluster_ctor(void *obj, void *private, int ocflags)
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbuf_ctor(obj, private, ocflags);
	cl = objcache_get(mclmeta_cache, ocflags);
	m->m_flags |= M_CLCACHE;
/*
 * Used for both the cluster and cluster PHDR caches.
 *
 * The mbuf may have lost its cluster due to sharing; deal
 * with the situation by checking M_EXT.
 */
mbufcluster_dtor(void *obj, void *private)
	struct mbuf *m = obj;
	struct mbcluster *mcl;

	if (m->m_flags & M_EXT) {
		KKASSERT((m->m_flags & M_EXT_CLUSTER) != 0);
		mcl = m->m_ext.ext_arg;
		KKASSERT(mcl->mcl_refs == 1);
		objcache_put(mclmeta_cache, mcl);

struct objcache_malloc_args mbuf_malloc_args = { MSIZE, M_MBUF };
struct objcache_malloc_args mclmeta_malloc_args =
	{ sizeof(struct mbcluster), M_MCLMETA };

	int mb_limit, cl_limit, mbcl_limit;

	/*
	 * Initialize statistics
	 */
	for (i = 0; i < ncpus; i++) {
		atomic_set_long_nonlocked(&mbstat[i].m_msize, MSIZE);
		atomic_set_long_nonlocked(&mbstat[i].m_mclbytes, MCLBYTES);
		atomic_set_long_nonlocked(&mbstat[i].m_minclsize, MINCLSIZE);
		atomic_set_long_nonlocked(&mbstat[i].m_mlen, MLEN);
		atomic_set_long_nonlocked(&mbstat[i].m_mhlen, MHLEN);
	/*
	 * Create object caches and save cluster limits, which will
	 * be used to adjust backing kmalloc pools' limit later.
	 */
	mb_limit = cl_limit = mbcl_limit = 0;

	mbuf_cache = objcache_create("mbuf", &limit, 0,
	    mbuf_ctor, NULL, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	if (limit > mb_limit)

	mbufphdr_cache = objcache_create("mbuf pkt hdr", &limit, 64,
	    mbufphdr_ctor, NULL, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	if (limit > mb_limit)

	cl_limit = nmbclusters;
	mclmeta_cache = objcache_create("cluster mbuf", &cl_limit, 0,
	    mclmeta_ctor, mclmeta_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args);

	mbufcluster_cache = objcache_create("mbuf + cluster", &limit, 0,
	    mbufcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	if (limit > mbcl_limit)

	mbufphdrcluster_cache = objcache_create("mbuf pkt hdr + cluster",
	    &limit, 64, mbufphdrcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	if (limit > mbcl_limit)

	/*
	 * Adjust backing kmalloc pools' limit
	 *
	 * NOTE: We raise the limit by another 1/8 to take the effect
	 * of loosememuse into account.
	 */
	cl_limit += cl_limit / 8;
	kmalloc_raise_limit(mclmeta_malloc_args.mtype,
	    mclmeta_malloc_args.objsize * cl_limit);
	kmalloc_raise_limit(M_MBUFCL, MCLBYTES * cl_limit);

	mb_limit += mbcl_limit;
	mb_limit += mb_limit / 4;	/* save some space for non-pkthdr mbufs */
	mb_limit += mb_limit / 8;
	kmalloc_raise_limit(mbuf_malloc_args.mtype,
	    mbuf_malloc_args.objsize * mb_limit);
/*
 * Return the number of references to this mbuf's data.  0 is returned
 * if the mbuf is not M_EXT, a reference count is returned if it is
 * M_EXT | M_EXT_CLUSTER, and 99 is returned if it is a special M_EXT.
 */
m_sharecount(struct mbuf *m)
	switch (m->m_flags & (M_EXT | M_EXT_CLUSTER)) {
	case M_EXT | M_EXT_CLUSTER:
		return (((struct mbcluster *)m->m_ext.ext_arg)->mcl_refs);
	return (0);		/* to shut up compiler */
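/*
 * Sketch (an assumption, for illustration only): callers can use the
 * share count to decide whether cluster data may be modified in place;
 * the special value 99 for non-cluster M_EXT storage deliberately fails
 * an "== 1" test because such storage cannot be proven unshared:
 *
 *	int writable = 0;
 *
 *	if ((m->m_flags & M_EXT) == 0 || m_sharecount(m) == 1)
 *		writable = 1;
 */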
/*
 * Change the mbuf to a new type.
 */
m_chtype(struct mbuf *m, int type)
	struct globaldata *gd = mycpu;

	atomic_add_long_nonlocked(&mbtypes[gd->gd_cpuid][type], 1);
	atomic_subtract_long_nonlocked(&mbtypes[gd->gd_cpuid][m->m_type], 1);
	atomic_set_short_nonlocked(&m->m_type, type);

	SLIST_FOREACH(dp, &domains, dom_next) {
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_drain, 1);

updatestats(struct mbuf *m, int type)
	struct globaldata *gd = mycpu;

	atomic_add_long_nonlocked(&mbtypes[gd->gd_cpuid][type], 1);
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mbufs, 1);

m_get(int how, int type)
	int ocf = MBTOM(how);

	m = objcache_get(mbuf_cache, ocf);
	if ((how & MB_TRYWAIT) && ntries++ == 0) {
		struct objcache *reclaimlist[] = {
			mbufcluster_cache, mbufphdrcluster_cache
		const int nreclaims = __arysize(reclaimlist);

		if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
	updatestats(m, type);

m_gethdr(int how, int type)
	int ocf = MBTOM(how);

	m = objcache_get(mbufphdr_cache, ocf);
	if ((how & MB_TRYWAIT) && ntries++ == 0) {
		struct objcache *reclaimlist[] = {
			mbufcluster_cache, mbufphdrcluster_cache
		const int nreclaims = __arysize(reclaimlist);

		if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
	updatestats(m, type);
/*
 * Get an mbuf (not an mbuf cluster!) and zero it.
 */
m_getclr(int how, int type)
	m = m_get(how, type);
	bzero(m->m_data, MLEN);
/*
 * Returns an mbuf with an attached cluster.
 * Because many network drivers use this kind of buffer heavily, it is
 * convenient to keep a small pool of free buffers of this kind.
 * Even a small size such as 10 gives about 10% improvement in the
 * forwarding rate in a bridge or router.
 */
m_getcl(int how, short type, int flags)
	int ocflags = MBTOM(how);

	if (flags & M_PKTHDR)
		m = objcache_get(mbufphdrcluster_cache, ocflags);
	else
		m = objcache_get(mbufcluster_cache, ocflags);
	if ((how & MB_TRYWAIT) && ntries++ == 0) {
		struct objcache *reclaimlist[1];

		if (flags & M_PKTHDR)
			reclaimlist[0] = mbufcluster_cache;
		else
			reclaimlist[0] = mbufphdrcluster_cache;
		if (!objcache_reclaimlist(reclaimlist, 1, ocflags))

	atomic_add_long_nonlocked(&mbtypes[mycpu->gd_cpuid][type], 1);
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
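/*
 * Hypothetical driver-style usage (a sketch, not from this file):
 * receive paths usually allocate the mbuf and its cluster in one call:
 *
 *	struct mbuf *m;
 *
 *	m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_len = m->m_pkthdr.len = MCLBYTES;
 */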
/*
 * Allocate a chain of the requested length.
 */
m_getc(int len, int how, int type)
	struct mbuf *n, *nfirst = NULL, **ntail = &nfirst;

	n = m_getl(len, how, type, 0, &nsize);

/*
 * Allocate len-worth of mbufs and/or mbuf clusters (whatever fits best)
 * and return a pointer to the head of the allocated chain.  If m0 is
 * non-null, then we assume that it is a single mbuf or an mbuf chain to
 * which we want len bytes worth of mbufs and/or clusters attached, and so
 * if we succeed in allocating it, we will just return a pointer to m0.
 *
 * If we happen to fail at any point during the allocation, we will free
 * up everything we have already allocated and return NULL.
 *
 * Deprecated.  Use m_getc() and m_cat() instead.
 */
m_getm(struct mbuf *m0, int len, int type, int how)
	nfirst = m_getc(len, how, type);
	m_last(m0)->m_next = nfirst;
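/*
 * Sketch of the preferred replacement (illustrative, not from the
 * original file): the deprecated m_getm() is equivalent to allocating
 * the chain with m_getc() and appending it with m_cat():
 *
 *	struct mbuf *n;
 *
 *	n = m_getc(len, how, type);
 *	if (n != NULL && m0 != NULL)
 *		m_cat(m0, n);
 */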
/*
 * Adds a cluster to a normal mbuf, M_EXT is set on success.
 * Deprecated.  Use m_getcl() instead.
 */
m_mclget(struct mbuf *m, int how)
	struct mbcluster *mcl;

	KKASSERT((m->m_flags & M_EXT) == 0);
	mcl = objcache_get(mclmeta_cache, MBTOM(how));
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);

/*
 * Updates to mbcluster must be MPSAFE.  Only an entity which already has
 * a reference to the cluster can ref it, so we are in no danger of
 * racing an add with a subtract.  But the operation must still be atomic
 * since multiple entities may have a reference on the cluster.
 *
 * m_mclfree() is almost the same but it must contend with two entities
 * freeing the cluster at the same time.  If there is only one reference
 * count we are the only entity referencing the cluster and no further
 * locking is required.  Otherwise we must protect against a race to 0
 * with the serializer.
 */
	struct mbcluster *mcl = arg;

	atomic_add_int(&mcl->mcl_refs, 1);
/*
 * When dereferencing a cluster we have to deal with an N->0 race, where
 * N entities free their references simultaneously.  To do this we use
 * atomic_fetchadd_int().
 */
	struct mbcluster *mcl = arg;

	if (atomic_fetchadd_int(&mcl->mcl_refs, -1) == 1)
		objcache_put(mclmeta_cache, mcl);
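/*
 * Worked example (illustration, not from the original file): a shared
 * cluster created by m_copym() exercises exactly this ref/free pair.
 * atomic_fetchadd_int() returns the value prior to the add, so exactly
 * one of N concurrent callers observes the 1 -> 0 transition:
 *
 *	n = m_copym(m, 0, M_COPYALL, MB_WAIT);	(cluster refs: 1 -> 2)
 *	m_freem(m);				(refs: 2 -> 1, no free)
 *	m_freem(n);				(refs: 1 -> 0, m_mclfree()
 *						 returns it to the cache)
 */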
extern void db_print_backtrace(void);

/*
 * Free a single mbuf and any associated external storage.  The successor,
 * if any, is returned.
 *
 * We do need to check non-first mbufs for m_aux, since some existing
 * code does not call M_PREPEND properly.
 * (example: call to bpf_mtap from drivers)
 */
m_free(struct mbuf *m)
	struct globaldata *gd = mycpu;

	KASSERT(m->m_type != MT_FREE, ("freeing free mbuf %p", m));
	atomic_subtract_long_nonlocked(&mbtypes[gd->gd_cpuid][m->m_type], 1);

	/*
	 * Make sure the mbuf is in constructed state before returning it
	 */
	KKASSERT(m->m_nextpkt == NULL);
	if (m->m_nextpkt != NULL) {
		static int afewtimes = 10;

		if (afewtimes-- > 0) {
			kprintf("mfree: m->m_nextpkt != NULL\n");
			db_print_backtrace();

	if (m->m_flags & M_PKTHDR) {
		m_tag_delete_chain(m);		/* eliminate XXX JH */

	m->m_flags &= (M_EXT | M_EXT_CLUSTER | M_CLCACHE | M_PHCACHE);

	/*
	 * Clean the M_PKTHDR state so we can return the mbuf to its original
	 * cache.  This is based on the PHCACHE flag which tells us whether
	 * the mbuf was originally allocated out of a packet-header cache
	 * or a non-packet-header cache.
	 */
	if (m->m_flags & M_PHCACHE) {
		m->m_flags |= M_PKTHDR;
		m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
		m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
		m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */
		SLIST_INIT(&m->m_pkthdr.tags);

	/*
	 * Handle remaining flags combinations.  M_CLCACHE tells us whether
	 * the mbuf was originally allocated from a cluster cache or not,
	 * and is totally separate from whether the mbuf is currently
	 * associated with a cluster.
	 */
	switch (m->m_flags & (M_CLCACHE | M_EXT | M_EXT_CLUSTER)) {
	case M_CLCACHE | M_EXT | M_EXT_CLUSTER:
		/*
		 * mbuf+cluster cache case.  The mbuf was allocated from the
		 * combined mbuf_cluster cache and can be returned to the
		 * cache if the cluster hasn't been shared.
		 */
		if (m_sharecount(m) == 1) {
			/*
			 * The cluster has not been shared, we can just
			 * reset the data pointer and return the mbuf
			 * to the cluster cache.  Note that the reference
			 * count is left intact (it is still associated with
			 */
			m->m_data = m->m_ext.ext_buf;
			if (m->m_flags & M_PHCACHE)
				objcache_put(mbufphdrcluster_cache, m);
			else
				objcache_put(mbufcluster_cache, m);
			atomic_subtract_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
			/*
			 * Hell.  Someone else has a ref on this cluster,
			 * we have to disconnect it which means we can't
			 * put it back into the mbufcluster_cache, we
			 * have to destroy the mbuf.
			 *
			 * Other mbuf references to the cluster will typically
			 * be M_EXT | M_EXT_CLUSTER but without M_CLCACHE.
			 *
			 * XXX we could try to connect another cluster to
			 */
			m->m_ext.ext_free(m->m_ext.ext_arg);
			m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
			if (m->m_flags & M_PHCACHE)
				objcache_dtor(mbufphdrcluster_cache, m);
			else
				objcache_dtor(mbufcluster_cache, m);
	case M_EXT | M_EXT_CLUSTER:
		/*
		 * Normal cluster associated with an mbuf that was allocated
		 * from the normal mbuf pool rather than the cluster pool.
		 * The cluster has to be independently disassociated from the
		 */
		if (m_sharecount(m) == 1)
			atomic_subtract_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
		/*
		 * Normal cluster association case, disconnect the cluster from
		 * the mbuf.  The cluster may or may not be custom.
		 */
		m->m_ext.ext_free(m->m_ext.ext_arg);
		m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
		/*
		 * Return the mbuf to the mbuf cache.
		 */
		if (m->m_flags & M_PHCACHE) {
			m->m_data = m->m_pktdat;
			objcache_put(mbufphdr_cache, m);
			m->m_data = m->m_dat;
			objcache_put(mbuf_cache, m);
		atomic_subtract_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mbufs, 1);
		panic("bad mbuf flags %p %08x\n", m, m->m_flags);
m_freem(struct mbuf *m)

/*
 * mbuf utility routines
 */

/*
 * Lesser-used path for M_PREPEND: allocate new mbuf to prepend to chain and
 */
m_prepend(struct mbuf *m, int len, int how)
	if (m->m_flags & M_PKTHDR)
		mn = m_gethdr(how, m->m_type);
	else
		mn = m_get(how, m->m_type);
	if (m->m_flags & M_PKTHDR)
		M_MOVE_PKTHDR(mn, m);
/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of MB_WAIT/MB_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
m_copym(const struct mbuf *m, int off0, int len, int wait)
	struct mbuf *n, **np;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	if (off == 0 && m->m_flags & M_PKTHDR)
	KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
	KASSERT(len == M_COPYALL,
	    ("m_copym, length > size of mbuf chain"));
	/*
	 * Because we are sharing any cluster attachment below,
	 * be sure to get an mbuf that does not have a cluster
	 * associated with it.
	 */
	n = m_gethdr(wait, m->m_type);
	n = m_get(wait, m->m_type);
	if (!m_dup_pkthdr(n, m, wait))
	if (len == M_COPYALL)
		n->m_pkthdr.len -= off0;
	else
		n->m_pkthdr.len = len;
	n->m_len = min(len, m->m_len - off);
	if (m->m_flags & M_EXT) {
		KKASSERT((n->m_flags & M_EXT) == 0);
		n->m_data = m->m_data + off;
		m->m_ext.ext_ref(m->m_ext.ext_arg);
		n->m_ext = m->m_ext;
		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
		bcopy(mtod(m, caddr_t) + off, mtod(n, caddr_t),
		    (unsigned)n->m_len);
	if (len != M_COPYALL)
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
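/*
 * Sketch (an assumption, not from this file): a typical keep-a-copy use,
 * e.g. before handing a packet to a consumer that will free it.  Because
 * clusters are shared, the copy must be treated as read-only:
 *
 *	struct mbuf *n;
 *
 *	n = m_copym(m, 0, M_COPYALL, MB_DONTWAIT);
 *	if (n == NULL)
 *		return (ENOBUFS);
 */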
/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies also have the room available.
 */
m_copypacket(struct mbuf *m, int how)
	struct mbuf *top, *n, *o;

	n = m_gethdr(how, m->m_type);
	if (!m_dup_pkthdr(n, m, how))
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		KKASSERT((n->m_flags & M_EXT) == 0);
		n->m_data = m->m_data;
		m->m_ext.ext_ref(m->m_ext.ext_arg);
		n->m_ext = m->m_ext;
		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);

	o = m_get(how, m->m_type);
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		KKASSERT((n->m_flags & M_EXT) == 0);
		n->m_data = m->m_data;
		m->m_ext.ext_ref(m->m_ext.ext_arg);
		n->m_ext = m->m_ext;
		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);

	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
	KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
	count = min(m->m_len - off, len);
	bcopy(mtod(m, caddr_t) + off, cp, count);
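/*
 * Sketch (an assumption; struct ip from <netinet/ip.h> is used purely for
 * illustration): a common pattern is pulling a fixed-size header out of a
 * possibly fragmented chain into local storage:
 *
 *	struct ip iphdr;
 *
 *	m_copydata(m, 0, sizeof(iphdr), (caddr_t)&iphdr);
 *
 * Unlike m_pullup(), this copies unconditionally but never reshapes or
 * frees the chain.
 */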
/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
m_dup(struct mbuf *m, int how)
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __func__));

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		/* Get the next new mbuf */
		n = m_getl(remain, how, m->m_type, top == NULL ? M_PKTHDR : 0,
		    &nsize);
		if (!m_dup_pkthdr(n, m, how))

		/* Link it into the new chain */

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			if (moff == m->m_len) {

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __func__));

	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
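/*
 * Sketch (an assumption): choose m_dup() over m_copypacket()/m_copym()
 * when the copy will be written to, since every byte, including cluster
 * data, is private to the new chain:
 *
 *	struct mbuf *w;
 *
 *	w = m_dup(m, MB_WAIT);
 *	if (w != NULL)
 *		mtod(w, u_char *)[0] ^= 0xff;	(safe: w shares nothing)
 */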
/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
m_cat(struct mbuf *m, struct mbuf *n)
	if (m->m_flags & M_EXT ||
	    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
		/* just join the two chains */
	/* splat the data from one into the other */
	bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
	m->m_len += n->m_len;
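/*
 * Sketch (an assumption): because m_cat() leaves m_pkthdr alone, callers
 * concatenating into a packet header mbuf fix the length themselves,
 * e.g. by recomputing it with m_lengthm():
 *
 *	m_cat(m, n);
 *	if (m->m_flags & M_PKTHDR)
 *		m->m_pkthdr.len = m_lengthm(m, NULL);
 */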
m_adj(struct mbuf *mp, int req_len)
	if ((m = mp) == NULL)

	while (m != NULL && len > 0) {
		if (m->m_len <= len) {
	if (mp->m_flags & M_PKTHDR)
		m->m_pkthdr.len -= (req_len - len);

	/*
	 * Trim from tail.  Scan the mbuf chain,
	 * calculating its length and finding the last mbuf.
	 * If the adjustment only affects this mbuf, then just
	 * adjust and return.  Otherwise, rescan and truncate
	 * after the remaining size.
	 */
	if (m->m_next == (struct mbuf *)0)
	if (m->m_len >= len) {
		if (mp->m_flags & M_PKTHDR)
			mp->m_pkthdr.len -= len;

	/*
	 * Correct length for chain is "count".
	 * Find the mbuf with last data, adjust its length,
	 * and toss data from remaining mbufs on chain.
	 */
	if (m->m_flags & M_PKTHDR)
		m->m_pkthdr.len = count;
	for (; m; m = m->m_next) {
		if (m->m_len >= count) {
	(m = m->m_next)->m_len = 0;
/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod will work for a structure
 * of size len).  Returns the resulting mbuf chain on success, frees it and
 * returns NULL on failure.  If there is room, it will add up to
 * max_protohdr-len extra bytes to the contiguous region in an attempt to
 * avoid being called next time.
 */
m_pullup(struct mbuf *n, int len)
	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if (!(n->m_flags & M_EXT) &&
	    n->m_data + len < &n->m_dat[MLEN] &&
		if (n->m_len >= len)
	if (n->m_flags & M_PKTHDR)
		m = m_gethdr(MB_DONTWAIT, n->m_type);
	else
		m = m_get(MB_DONTWAIT, n->m_type);
	if (n->m_flags & M_PKTHDR)
		M_MOVE_PKTHDR(m, n);
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	count = min(min(max(len, max_protohdr), space), n->m_len);
	bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
	} while (len > 0 && n);
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
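/*
 * Classic usage sketch (an assumption; struct ip is illustrative): make
 * a protocol header contiguous before casting via mtod().  On failure
 * the chain has already been freed, so it must not be touched again:
 *
 *	if (m->m_len < sizeof(struct ip)) {
 *		m = m_pullup(m, sizeof(struct ip));
 *		if (m == NULL)
 *			return;
 *	}
 *	ip = mtod(m, struct ip *);
 */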
/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf.  Use the
 * M_WRITABLE() macro to check for this case.
 */
m_split(struct mbuf *m0, int len0, int wait)
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		n = m_gethdr(wait, m0->m_type);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m_get(wait, m->m_type);
	if (m->m_flags & M_EXT) {
		KKASSERT((n->m_flags & M_EXT) == 0);
		n->m_data = m->m_data + len;
		m->m_ext.ext_ref(m->m_ext.ext_arg);
		n->m_ext = m->m_ext;
		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	n->m_next = m->m_next;
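/*
 * Sketch (an assumption; "hdrlen" is a hypothetical split point): on
 * failure m0 is intact, and on success both pieces may still share a
 * cluster, so writers must check M_WRITABLE() first:
 *
 *	struct mbuf *tail;
 *
 *	tail = m_split(m0, hdrlen, MB_DONTWAIT);
 *	if (tail == NULL)
 *		return (ENOBUFS);	(m0 unchanged)
 */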
/*
 * Routine to copy from device local memory into mbufs.
 * Note: "offset" is ill-defined and always called as 0, so ignore it.
 */
m_devget(char *buf, int len, int offset, struct ifnet *ifp,
    void (*copy)(volatile const void *from, volatile void *to, size_t length))
	struct mbuf *m, *mfirst = NULL, **mtail;

	m = m_getl(len, MB_DONTWAIT, MT_DATA, flags, &nsize);
	m->m_len = min(len, nsize);
	if (flags & M_PKTHDR) {
		if (len + max_linkhdr <= nsize)
			m->m_data += max_linkhdr;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;
	copy(buf, m->m_data, (unsigned)m->m_len);

/*
 * Routine to pad mbuf to the specified length 'padto'.
 */
m_devpad(struct mbuf *m, int padto)
	struct mbuf *last = NULL;

	if (padto <= m->m_pkthdr.len)
	padlen = padto - m->m_pkthdr.len;

	/* If there's only the packet header and we can pad there, use it. */
	if (m->m_pkthdr.len == m->m_len && M_TRAILINGSPACE(m) >= padlen) {
		/*
		 * Walk packet chain to find last mbuf.  We will either
		 * pad there, or append a new mbuf and pad it
		 */
		for (last = m; last->m_next != NULL; last = last->m_next)

	/* `last' now points to last in chain. */
	if (M_TRAILINGSPACE(last) < padlen) {
		/* Allocate new empty mbuf, pad it.  Compact later. */
		MGET(n, MB_DONTWAIT, MT_DATA);

	KKASSERT(M_TRAILINGSPACE(last) >= padlen);
	KKASSERT(M_WRITABLE(last));

	/* Now zero the pad area */
	bzero(mtod(last, char *) + last->m_len, padlen);
	last->m_len += padlen;
	m->m_pkthdr.len += padlen;
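/*
 * Hypothetical transmit-path sketch (assumes m_devpad() returns 0 on
 * success and an errno on failure, which this excerpt does not show):
 * pad runt Ethernet frames to the minimum wire size before handing them
 * to the hardware; ETHER_MIN_LEN/ETHER_CRC_LEN are from <net/ethernet.h>:
 *
 *	if (m->m_pkthdr.len < ETHER_MIN_LEN - ETHER_CRC_LEN &&
 *	    m_devpad(m, ETHER_MIN_LEN - ETHER_CRC_LEN) != 0) {
 *		m_freem(m);
 *		return (ENOBUFS);
 *	}
 */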
/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
	struct mbuf *m = m0, *n;

	while (off > (mlen = m->m_len)) {
		if (m->m_next == NULL) {
			n = m_getclr(MB_DONTWAIT, m->m_type);
			n->m_len = min(MLEN, len + off);
	mlen = min(m->m_len - off, len);
	bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
	if (m->m_next == NULL) {
		n = m_get(MB_DONTWAIT, m->m_type);
		n->m_len = min(MLEN, len);
out:
	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
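/*
 * Sketch (an assumption; the struct ip checksum field is used purely as
 * an example offset): overwrite a field in place at a known offset; the
 * chain is extended with zeroed mbufs if the region lies past its end:
 *
 *	u_short sum = 0;
 *
 *	m_copyback(m, offsetof(struct ip, ip_sum), sizeof(sum),
 *	    (caddr_t)&sum);
 */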
m_print(const struct mbuf *m)
	const struct mbuf *m2;

	len = m->m_pkthdr.len;
	kprintf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");

/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
	KASSERT((to->m_flags & M_PKTHDR), ("m_move_pkthdr: not packet header"));

	to->m_flags |= from->m_flags & M_COPYFLAGS;
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */

/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
	KASSERT((to->m_flags & M_PKTHDR), ("m_dup_pkthdr: not packet header"));

	to->m_flags = (from->m_flags & M_COPYFLAGS) |
	    (to->m_flags & ~M_COPYFLAGS);
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, how));
/*
 * Defragment an mbuf chain, returning the shortest possible
 * chain of mbufs and clusters.  If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed in chain will be unchanged.  Upon success,
 * the original chain will be freed, and the new chain
 *
 * If a non-packet header is passed in, the original
 * mbuf (chain?) will be returned unharmed.
 *
 * m_defrag_nofree doesn't free the passed in mbuf.
 */
m_defrag(struct mbuf *m0, int how)
	if ((m_new = m_defrag_nofree(m0, how)) == NULL)

m_defrag_nofree(struct mbuf *m0, int how)
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, length, nsize;

	if (!(m0->m_flags & M_PKTHDR))

#ifdef MBUF_STRESS_TEST
	if (m_defragrandomfailures) {
		int temp = karc4random() & 0xff;

	m_final = m_getl(m0->m_pkthdr.len, how, MT_DATA, M_PKTHDR, &nsize);
	if (m_final == NULL)
	m_final->m_len = 0;	/* in case m0->m_pkthdr.len is zero */

	if (m_dup_pkthdr(m_final, m0, how) == 0)

	while (progress < m0->m_pkthdr.len) {
		length = m0->m_pkthdr.len - progress;
		if (length > MCLBYTES)

		if (m_new == NULL) {
			m_new = m_getl(length, how, MT_DATA, 0, &nsize);

		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);

	if (m0->m_next == NULL)
	m_defragbytes += m_final->m_pkthdr.len;
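/*
 * Sketch (an assumption): devices with a small DMA segment budget
 * defragment long chains before transmit.  On failure the original chain
 * is untouched; on success it has already been freed:
 *
 *	struct mbuf *d;
 *
 *	d = m_defrag(m, MB_DONTWAIT);
 *	if (d == NULL) {
 *		m_freem(m);
 *		return (ENOBUFS);
 *	}
 *	m = d;
 */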
/*
 * Move data from uio into mbufs.
 */
m_uiomove(struct uio *uio)
	struct mbuf *m;			/* current working mbuf */
	struct mbuf *head = NULL;	/* result mbuf chain */
	struct mbuf **mp = &head;
	int resid = uio->uio_resid, nsize, flags = M_PKTHDR, error;

		m = m_getl(resid, MB_WAIT, MT_DATA, flags, &nsize);
		m->m_pkthdr.len = 0;
		/* Leave room for protocol headers. */
		m->m_len = min(nsize, resid);
		error = uiomove(mtod(m, caddr_t), m->m_len, uio);
		head->m_pkthdr.len += m->m_len;
	} while (resid > 0);

m_last(struct mbuf *m)

/*
 * Return the number of bytes in an mbuf chain.
 * If lastm is not NULL, also return the last mbuf.
 */
m_lengthm(struct mbuf *m, struct mbuf **lastm)
	struct mbuf *prev = m;

/*
 * Like m_lengthm(), except also keep track of mbuf usage.
 */
m_countm(struct mbuf *m, struct mbuf **lastm, u_int *pmbcnt)
	u_int len = 0, mbcnt = 0;
	struct mbuf *prev = m;

	if (m->m_flags & M_EXT)
		mbcnt += m->m_ext.ext_size;