kernel - Make interrupt thread preemption programmable
[dragonfly.git] / sys / kern / uipc_mbuf.c
1/*
2 * (MPSAFE)
3 *
4 * Copyright (c) 2004 Jeffrey M. Hsu. All rights reserved.
5 * Copyright (c) 2004 The DragonFly Project. All rights reserved.
6 *
7 * This code is derived from software contributed to The DragonFly Project
8 * by Jeffrey M. Hsu.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of The DragonFly Project nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific, prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 */
35
36/*
37 * Copyright (c) 1982, 1986, 1988, 1991, 1993
38 * The Regents of the University of California. All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by the University of
51 * California, Berkeley and its contributors.
52 * 4. Neither the name of the University nor the names of its contributors
53 * may be used to endorse or promote products derived from this software
54 * without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
57 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
58 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
59 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
60 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
61 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
62 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
64 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
65 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66 * SUCH DAMAGE.
67 *
68 * @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94
69 * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
70 * $DragonFly: src/sys/kern/uipc_mbuf.c,v 1.70 2008/11/20 14:21:01 sephe Exp $
71 */
72
73#include "opt_param.h"
74#include "opt_mbuf_stress_test.h"
75#include <sys/param.h>
76#include <sys/systm.h>
77#include <sys/malloc.h>
78#include <sys/mbuf.h>
79#include <sys/kernel.h>
80#include <sys/sysctl.h>
81#include <sys/domain.h>
82#include <sys/objcache.h>
83#include <sys/tree.h>
84#include <sys/protosw.h>
85#include <sys/uio.h>
86#include <sys/thread.h>
87#include <sys/globaldata.h>
88
89#include <sys/thread2.h>
90#include <sys/spinlock2.h>
91
92#include <machine/atomic.h>
93#include <machine/limits.h>
94
95#include <vm/vm.h>
96#include <vm/vm_kern.h>
97#include <vm/vm_extern.h>
98
99#ifdef INVARIANTS
100#include <machine/cpu.h>
101#endif
102
103/*
104 * mbuf cluster meta-data
105 */
106struct mbcluster {
107 int32_t mcl_refs;
108 void *mcl_data;
109};
110
111/*
112 * mbuf tracking for debugging purposes
113 */
114#ifdef MBUF_DEBUG
115
116static MALLOC_DEFINE(M_MTRACK, "mtrack", "mtrack");
117
118struct mbtrack;
119RB_HEAD(mbuf_rb_tree, mbtrack);
120RB_PROTOTYPE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *);
121
122struct mbtrack {
123 RB_ENTRY(mbtrack) rb_node;
124 int trackid;
125 struct mbuf *m;
126};
127
128static int
129mbtrack_cmp(struct mbtrack *mb1, struct mbtrack *mb2)
130{
131 if (mb1->m < mb2->m)
132 return(-1);
133 if (mb1->m > mb2->m)
134 return(1);
135 return(0);
136}
137
138RB_GENERATE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *, m);
139
140struct mbuf_rb_tree mbuf_track_root;
141static struct spinlock mbuf_track_spin = SPINLOCK_INITIALIZER(mbuf_track_spin);
142
143static void
144mbuftrack(struct mbuf *m)
145{
146 struct mbtrack *mbt;
147
148 mbt = kmalloc(sizeof(*mbt), M_MTRACK, M_INTWAIT|M_ZERO);
149 spin_lock(&mbuf_track_spin);
150 mbt->m = m;
151 if (mbuf_rb_tree_RB_INSERT(&mbuf_track_root, mbt)) {
152 spin_unlock(&mbuf_track_spin);
153 panic("mbuftrack: mbuf %p already being tracked\n", m);
154 }
155 spin_unlock(&mbuf_track_spin);
156}
157
158static void
159mbufuntrack(struct mbuf *m)
160{
161 struct mbtrack *mbt;
162
163 spin_lock(&mbuf_track_spin);
164 mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
165 if (mbt == NULL) {
166 spin_unlock(&mbuf_track_spin);
167 panic("mbufuntrack: mbuf %p was not tracked\n", m);
168 } else {
169 mbuf_rb_tree_RB_REMOVE(&mbuf_track_root, mbt);
170 spin_unlock(&mbuf_track_spin);
171 kfree(mbt, M_MTRACK);
172 }
173}
174
175void
176mbuftrackid(struct mbuf *m, int trackid)
177{
178 struct mbtrack *mbt;
179 struct mbuf *n;
180
181 spin_lock(&mbuf_track_spin);
182 while (m) {
183 n = m->m_nextpkt;
184 while (m) {
185 mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
186 if (mbt == NULL) {
187 spin_unlock(&mbuf_track_spin);
188 panic("mbuftrackid: mbuf %p not tracked", m);
189 }
190 mbt->trackid = trackid;
191 m = m->m_next;
192 }
193 m = n;
194 }
195 spin_unlock(&mbuf_track_spin);
196}
197
198static int
199mbuftrack_callback(struct mbtrack *mbt, void *arg)
200{
201 struct sysctl_req *req = arg;
202 char buf[64];
203 int error;
204
205 ksnprintf(buf, sizeof(buf), "mbuf %p track %d\n", mbt->m, mbt->trackid);
206
207 spin_unlock(&mbuf_track_spin);
208 error = SYSCTL_OUT(req, buf, strlen(buf));
209 spin_lock(&mbuf_track_spin);
210 if (error)
211 return(-error);
212 return(0);
213}
214
215static int
216mbuftrack_show(SYSCTL_HANDLER_ARGS)
217{
218 int error;
219
220 spin_lock(&mbuf_track_spin);
221 error = mbuf_rb_tree_RB_SCAN(&mbuf_track_root, NULL,
222 mbuftrack_callback, req);
223 spin_unlock(&mbuf_track_spin);
224 return (-error);
225}
226SYSCTL_PROC(_kern_ipc, OID_AUTO, showmbufs, CTLFLAG_RD|CTLTYPE_STRING,
227 0, 0, mbuftrack_show, "A", "Show all in-use mbufs");
228
229#else
230
231#define mbuftrack(m)
232#define mbufuntrack(m)
233
234#endif
235
236static void mbinit(void *);
237SYSINIT(mbuf, SI_BOOT2_MACHDEP, SI_ORDER_FIRST, mbinit, NULL)
238
239static u_long mbtypes[SMP_MAXCPU][MT_NTYPES];
240
241static struct mbstat mbstat[SMP_MAXCPU];
242int max_linkhdr;
243int max_protohdr;
244int max_hdr;
245int max_datalen;
246int m_defragpackets;
247int m_defragbytes;
248int m_defraguseless;
249int m_defragfailure;
250#ifdef MBUF_STRESS_TEST
251int m_defragrandomfailures;
252#endif
253
254struct objcache *mbuf_cache, *mbufphdr_cache;
255struct objcache *mclmeta_cache;
256struct objcache *mbufcluster_cache, *mbufphdrcluster_cache;
257
258int nmbclusters;
259int nmbufs;
260
261SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
262 &max_linkhdr, 0, "");
263SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
264 &max_protohdr, 0, "");
265SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
266SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
267 &max_datalen, 0, "");
268SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
269 &mbuf_wait, 0, "");
270static int do_mbstat(SYSCTL_HANDLER_ARGS);
271
272SYSCTL_PROC(_kern_ipc, KIPC_MBSTAT, mbstat, CTLTYPE_STRUCT|CTLFLAG_RD,
273 0, 0, do_mbstat, "S,mbstat", "");
274
275static int do_mbtypes(SYSCTL_HANDLER_ARGS);
276
277SYSCTL_PROC(_kern_ipc, OID_AUTO, mbtypes, CTLTYPE_ULONG|CTLFLAG_RD,
278 0, 0, do_mbtypes, "LU", "");
279
280static int
281do_mbstat(SYSCTL_HANDLER_ARGS)
282{
283 struct mbstat mbstat_total;
284 struct mbstat *mbstat_totalp;
285 int i;
286
287 bzero(&mbstat_total, sizeof(mbstat_total));
288 mbstat_totalp = &mbstat_total;
289
290 for (i = 0; i < ncpus; i++)
291 {
292 mbstat_total.m_mbufs += mbstat[i].m_mbufs;
293 mbstat_total.m_clusters += mbstat[i].m_clusters;
294 mbstat_total.m_spare += mbstat[i].m_spare;
295 mbstat_total.m_clfree += mbstat[i].m_clfree;
296 mbstat_total.m_drops += mbstat[i].m_drops;
297 mbstat_total.m_wait += mbstat[i].m_wait;
298 mbstat_total.m_drain += mbstat[i].m_drain;
299 mbstat_total.m_mcfail += mbstat[i].m_mcfail;
300 mbstat_total.m_mpfail += mbstat[i].m_mpfail;
301
302 }
303 /*
304 * The following fields are not cumulative fields so just
305 * get their values once.
306 */
307 mbstat_total.m_msize = mbstat[0].m_msize;
308 mbstat_total.m_mclbytes = mbstat[0].m_mclbytes;
309 mbstat_total.m_minclsize = mbstat[0].m_minclsize;
310 mbstat_total.m_mlen = mbstat[0].m_mlen;
311 mbstat_total.m_mhlen = mbstat[0].m_mhlen;
312
313 return(sysctl_handle_opaque(oidp, mbstat_totalp, sizeof(mbstat_total), req));
314}
315
316static int
317do_mbtypes(SYSCTL_HANDLER_ARGS)
318{
319 u_long totals[MT_NTYPES];
320 int i, j;
321
322 for (i = 0; i < MT_NTYPES; i++)
323 totals[i] = 0;
324
325 for (i = 0; i < ncpus; i++)
326 {
327 for (j = 0; j < MT_NTYPES; j++)
328 totals[j] += mbtypes[i][j];
329 }
330
331 return(sysctl_handle_opaque(oidp, totals, sizeof(totals), req));
332}
333
334/*
335 * These are read-only because we do not currently have any code
336 * to adjust the objcache limits after the fact. The variables
337 * may only be set as boot-time tunables.
338 */
339SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
340 &nmbclusters, 0, "Maximum number of mbuf clusters available");
341SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
342 "Maximum number of mbufs available");
343
344SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
345 &m_defragpackets, 0, "");
346SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
347 &m_defragbytes, 0, "");
348SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
349 &m_defraguseless, 0, "");
350SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
351 &m_defragfailure, 0, "");
352#ifdef MBUF_STRESS_TEST
353SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
354 &m_defragrandomfailures, 0, "");
355#endif
356
357static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");
358static MALLOC_DEFINE(M_MBUFCL, "mbufcl", "mbufcl");
359static MALLOC_DEFINE(M_MCLMETA, "mclmeta", "mclmeta");
360
361static void m_reclaim (void);
362static void m_mclref(void *arg);
363static void m_mclfree(void *arg);
364
365#ifndef NMBCLUSTERS
366#define NMBCLUSTERS (512 + maxusers * 16)
367#endif
368#ifndef NMBUFS
369#define NMBUFS (nmbclusters * 2)
370#endif
371
372/*
373 * Perform sanity checks of tunables declared above.
374 */
375static void
376tunable_mbinit(void *dummy)
377{
378 /*
379 * This has to be done before VM init.
380 */
381 nmbclusters = NMBCLUSTERS;
382 TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
383 nmbufs = NMBUFS;
384 TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
385 /* Sanity checks */
386 if (nmbufs < nmbclusters * 2)
387 nmbufs = nmbclusters * 2;
388}
389SYSINIT(tunable_mbinit, SI_BOOT1_TUNABLES, SI_ORDER_ANY,
390 tunable_mbinit, NULL);
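/*
 * Illustrative example, not part of this file: since nmbclusters and nmbufs
 * may only be set as boot-time tunables (see the read-only sysctls above),
 * they are normally raised from /boot/loader.conf before the kernel starts,
 * e.g. (the values shown are arbitrary):
 *
 *	kern.ipc.nmbclusters="65536"
 *	kern.ipc.nmbufs="131072"
 *
 * tunable_mbinit() will still bump nmbufs to at least nmbclusters * 2.
 */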
391
392/* "number of clusters of pages" */
393#define NCL_INIT 1
394
395#define NMB_INIT 16
396
397/*
398 * The mbuf object cache only guarantees that m_next and m_nextpkt are
399 * NULL and that m_data points to the beginning of the data area. In
400 * particular, m_len and m_pkthdr.len are uninitialized. It is the
401 * responsibility of the caller to initialize those fields before use.
402 */
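/*
 * Illustrative sketch, not part of this file: because of the guarantee
 * described above, a caller must initialize the length fields itself after
 * pulling an mbuf out of the cache, e.g.:
 *
 *	m = m_gethdr(MB_DONTWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_len = 0;
 *	m->m_pkthdr.len = 0;
 */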
403
404static boolean_t __inline
405mbuf_ctor(void *obj, void *private, int ocflags)
406{
407 struct mbuf *m = obj;
408
409 m->m_next = NULL;
410 m->m_nextpkt = NULL;
411 m->m_data = m->m_dat;
412 m->m_flags = 0;
413
414 return (TRUE);
415}
416
417/*
418 * Initialize the mbuf and the packet header fields.
419 */
420static boolean_t
421mbufphdr_ctor(void *obj, void *private, int ocflags)
422{
423 struct mbuf *m = obj;
424
425 m->m_next = NULL;
426 m->m_nextpkt = NULL;
427 m->m_data = m->m_pktdat;
428 m->m_flags = M_PKTHDR | M_PHCACHE;
429
430 m->m_pkthdr.rcvif = NULL; /* eliminate XXX JH */
431 SLIST_INIT(&m->m_pkthdr.tags);
432 m->m_pkthdr.csum_flags = 0; /* eliminate XXX JH */
433 m->m_pkthdr.fw_flags = 0; /* eliminate XXX JH */
434
435 return (TRUE);
436}
437
438/*
439 * An mbcluster object consists of a 2K (MCLBYTES) cluster and a refcount.
440 */
441static boolean_t
442mclmeta_ctor(void *obj, void *private, int ocflags)
443{
444 struct mbcluster *cl = obj;
445 void *buf;
446
447 if (ocflags & M_NOWAIT)
448 buf = kmalloc(MCLBYTES, M_MBUFCL, M_NOWAIT | M_ZERO);
449 else
450 buf = kmalloc(MCLBYTES, M_MBUFCL, M_INTWAIT | M_ZERO);
451 if (buf == NULL)
452 return (FALSE);
453 cl->mcl_refs = 0;
454 cl->mcl_data = buf;
455 return (TRUE);
456}
457
458static void
459mclmeta_dtor(void *obj, void *private)
460{
461 struct mbcluster *mcl = obj;
462
463 KKASSERT(mcl->mcl_refs == 0);
464 kfree(mcl->mcl_data, M_MBUFCL);
465}
466
467static void
468linkcluster(struct mbuf *m, struct mbcluster *cl)
469{
470 /*
471 * Add the cluster to the mbuf. The caller will detect that the
472 * mbuf now has an attached cluster.
473 */
474 m->m_ext.ext_arg = cl;
475 m->m_ext.ext_buf = cl->mcl_data;
476 m->m_ext.ext_ref = m_mclref;
477 m->m_ext.ext_free = m_mclfree;
478 m->m_ext.ext_size = MCLBYTES;
479 atomic_add_int(&cl->mcl_refs, 1);
480
481 m->m_data = m->m_ext.ext_buf;
482 m->m_flags |= M_EXT | M_EXT_CLUSTER;
483}
484
485static boolean_t
486mbufphdrcluster_ctor(void *obj, void *private, int ocflags)
487{
488 struct mbuf *m = obj;
489 struct mbcluster *cl;
490
491 mbufphdr_ctor(obj, private, ocflags);
492 cl = objcache_get(mclmeta_cache, ocflags);
493 if (cl == NULL) {
494 ++mbstat[mycpu->gd_cpuid].m_drops;
495 return (FALSE);
496 }
497 m->m_flags |= M_CLCACHE;
498 linkcluster(m, cl);
499 return (TRUE);
500}
501
502static boolean_t
503mbufcluster_ctor(void *obj, void *private, int ocflags)
504{
505 struct mbuf *m = obj;
506 struct mbcluster *cl;
507
508 mbuf_ctor(obj, private, ocflags);
509 cl = objcache_get(mclmeta_cache, ocflags);
510 if (cl == NULL) {
511 ++mbstat[mycpu->gd_cpuid].m_drops;
512 return (FALSE);
513 }
514 m->m_flags |= M_CLCACHE;
515 linkcluster(m, cl);
516 return (TRUE);
517}
518
519/*
520 * Used for both the cluster and cluster PHDR caches.
521 *
522 * The mbuf may have lost its cluster due to sharing, deal
523 * with the situation by checking M_EXT.
524 */
525static void
526mbufcluster_dtor(void *obj, void *private)
527{
528 struct mbuf *m = obj;
529 struct mbcluster *mcl;
530
531 if (m->m_flags & M_EXT) {
532 KKASSERT((m->m_flags & M_EXT_CLUSTER) != 0);
533 mcl = m->m_ext.ext_arg;
534 KKASSERT(mcl->mcl_refs == 1);
535 mcl->mcl_refs = 0;
536 objcache_put(mclmeta_cache, mcl);
537 }
538}
539
540struct objcache_malloc_args mbuf_malloc_args = { MSIZE, M_MBUF };
541struct objcache_malloc_args mclmeta_malloc_args =
542 { sizeof(struct mbcluster), M_MCLMETA };
543
544/* ARGSUSED*/
545static void
546mbinit(void *dummy)
547{
548 int mb_limit, cl_limit;
549 int limit;
550 int i;
551
552 /*
553 * Initialize statistics
554 */
555 for (i = 0; i < ncpus; i++) {
556 atomic_set_long_nonlocked(&mbstat[i].m_msize, MSIZE);
557 atomic_set_long_nonlocked(&mbstat[i].m_mclbytes, MCLBYTES);
558 atomic_set_long_nonlocked(&mbstat[i].m_minclsize, MINCLSIZE);
559 atomic_set_long_nonlocked(&mbstat[i].m_mlen, MLEN);
560 atomic_set_long_nonlocked(&mbstat[i].m_mhlen, MHLEN);
561 }
562
563 /*
564 * Create object caches and save cluster limits, which will
565 * be used to adjust backing kmalloc pools' limit later.
566 */
567
568 mb_limit = cl_limit = 0;
569
570 limit = nmbufs;
571 mbuf_cache = objcache_create("mbuf", &limit, 0,
572 mbuf_ctor, NULL, NULL,
573 objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
574 mb_limit += limit;
575
576 limit = nmbufs;
577 mbufphdr_cache = objcache_create("mbuf pkt hdr", &limit, 64,
578 mbufphdr_ctor, NULL, NULL,
579 objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
580 mb_limit += limit;
581
582 cl_limit = nmbclusters;
583 mclmeta_cache = objcache_create("cluster mbuf", &cl_limit, 0,
584 mclmeta_ctor, mclmeta_dtor, NULL,
585 objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args);
586
587 limit = nmbclusters;
588 mbufcluster_cache = objcache_create("mbuf + cluster", &limit, 0,
589 mbufcluster_ctor, mbufcluster_dtor, NULL,
590 objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
591 mb_limit += limit;
592
593 limit = nmbclusters;
594 mbufphdrcluster_cache = objcache_create("mbuf pkt hdr + cluster",
595 &limit, 64, mbufphdrcluster_ctor, mbufcluster_dtor, NULL,
596 objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
597 mb_limit += limit;
598
599 /*
600 * Adjust backing kmalloc pools' limit
601 *
602 * NOTE: We raise the limit by another 1/8 to take the effect
603 * of loosememuse into account.
604 */
605 cl_limit += cl_limit / 8;
606 kmalloc_raise_limit(mclmeta_malloc_args.mtype,
607 mclmeta_malloc_args.objsize * cl_limit);
608 kmalloc_raise_limit(M_MBUFCL, MCLBYTES * cl_limit);
609
610 mb_limit += mb_limit / 8;
611 kmalloc_raise_limit(mbuf_malloc_args.mtype,
612 mbuf_malloc_args.objsize * mb_limit);
613}
614
615/*
616 * Return the number of references to this mbuf's data. 0 is returned
617 * if the mbuf is not M_EXT, a reference count is returned if it is
618 * M_EXT | M_EXT_CLUSTER, and 99 is returned if it is a special M_EXT.
619 */
620int
621m_sharecount(struct mbuf *m)
622{
623 switch (m->m_flags & (M_EXT | M_EXT_CLUSTER)) {
624 case 0:
625 return (0);
626 case M_EXT:
627 return (99);
628 case M_EXT | M_EXT_CLUSTER:
629 return (((struct mbcluster *)m->m_ext.ext_arg)->mcl_refs);
630 }
631 /* NOTREACHED */
632 return (0); /* to shut up compiler */
633}
634
635/*
636 * change mbuf to new type
637 */
638void
639m_chtype(struct mbuf *m, int type)
640{
641 struct globaldata *gd = mycpu;
642
643 atomic_add_long_nonlocked(&mbtypes[gd->gd_cpuid][type], 1);
644 atomic_subtract_long_nonlocked(&mbtypes[gd->gd_cpuid][m->m_type], 1);
645 atomic_set_short_nonlocked(&m->m_type, type);
646}
647
648static void
649m_reclaim(void)
650{
651 struct domain *dp;
652 struct protosw *pr;
653
654 kprintf("Debug: m_reclaim() called\n");
655
656 SLIST_FOREACH(dp, &domains, dom_next) {
657 for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
658 if (pr->pr_drain)
659 (*pr->pr_drain)();
660 }
661 }
662 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_drain, 1);
663}
664
665static void __inline
666updatestats(struct mbuf *m, int type)
667{
668 struct globaldata *gd = mycpu;
669
670 m->m_type = type;
671 mbuftrack(m);
672 KKASSERT(m->m_next == NULL);
673 KKASSERT(m->m_nextpkt == NULL);
674
675 atomic_add_long_nonlocked(&mbtypes[gd->gd_cpuid][type], 1);
676 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mbufs, 1);
677
678}
679
680/*
681 * Allocate an mbuf.
682 */
683struct mbuf *
684m_get(int how, int type)
685{
686 struct mbuf *m;
687 int ntries = 0;
688 int ocf = MBTOM(how);
689
690retryonce:
691
692 m = objcache_get(mbuf_cache, ocf);
693
694 if (m == NULL) {
695 if ((how & MB_TRYWAIT) && ntries++ == 0) {
696 struct objcache *reclaimlist[] = {
697 mbufphdr_cache,
698 mbufcluster_cache,
699 mbufphdrcluster_cache
700 };
701 const int nreclaims = __arysize(reclaimlist);
702
703 if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
704 m_reclaim();
705 goto retryonce;
706 }
707 ++mbstat[mycpu->gd_cpuid].m_drops;
708 return (NULL);
709 }
710
711 updatestats(m, type);
712 return (m);
713}
714
715struct mbuf *
716m_gethdr(int how, int type)
717{
718 struct mbuf *m;
719 int ocf = MBTOM(how);
720 int ntries = 0;
721
722retryonce:
723
724 m = objcache_get(mbufphdr_cache, ocf);
725
726 if (m == NULL) {
727 if ((how & MB_TRYWAIT) && ntries++ == 0) {
728 struct objcache *reclaimlist[] = {
729 mbuf_cache,
730 mbufcluster_cache, mbufphdrcluster_cache
731 };
732 const int nreclaims = __arysize(reclaimlist);
733
734 if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
735 m_reclaim();
736 goto retryonce;
737 }
738 ++mbstat[mycpu->gd_cpuid].m_drops;
739 return (NULL);
740 }
741
742 updatestats(m, type);
743 return (m);
744}
745
746/*
747 * Get an mbuf (not an mbuf cluster!) and zero it.
748 * Deprecated.
749 */
750struct mbuf *
751m_getclr(int how, int type)
752{
753 struct mbuf *m;
754
755 m = m_get(how, type);
756 if (m != NULL)
757 bzero(m->m_data, MLEN);
758 return (m);
759}
760
761/*
762 * Returns an mbuf with an attached cluster.
763 * Because many network drivers use this kind of buffer a lot, it is
764 * convenient to keep a small pool of free buffers of this kind.
765 * Even a small size such as 10 gives about 10% improvement in the
766 * forwarding rate in a bridge or router.
767 */
768struct mbuf *
769m_getcl(int how, short type, int flags)
770{
771 struct mbuf *m;
772 int ocflags = MBTOM(how);
773 int ntries = 0;
774
775retryonce:
776
777 if (flags & M_PKTHDR)
778 m = objcache_get(mbufphdrcluster_cache, ocflags);
779 else
780 m = objcache_get(mbufcluster_cache, ocflags);
781
782 if (m == NULL) {
783 if ((how & MB_TRYWAIT) && ntries++ == 0) {
784 struct objcache *reclaimlist[1];
785
786 if (flags & M_PKTHDR)
787 reclaimlist[0] = mbufcluster_cache;
788 else
789 reclaimlist[0] = mbufphdrcluster_cache;
790 if (!objcache_reclaimlist(reclaimlist, 1, ocflags))
791 m_reclaim();
792 goto retryonce;
793 }
794 ++mbstat[mycpu->gd_cpuid].m_drops;
795 return (NULL);
796 }
797
798 m->m_type = type;
799
800 mbuftrack(m);
801
802 atomic_add_long_nonlocked(&mbtypes[mycpu->gd_cpuid][type], 1);
803 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
804 return (m);
805}
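/*
 * Illustrative sketch, not part of this file: a typical receive-buffer
 * allocation using m_getcl().  As with the plain mbuf caches, the
 * constructor leaves the length fields uninitialized:
 *
 *	m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
 *	if (m == NULL)
 *		return;				(drop: no cluster available)
 *	m->m_len = m->m_pkthdr.len = MCLBYTES;
 */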
806
807/*
808 * Allocate chain of requested length.
809 */
810struct mbuf *
811m_getc(int len, int how, int type)
812{
813 struct mbuf *n, *nfirst = NULL, **ntail = &nfirst;
814 int nsize;
815
816 while (len > 0) {
817 n = m_getl(len, how, type, 0, &nsize);
818 if (n == NULL)
819 goto failed;
820 n->m_len = 0;
821 *ntail = n;
822 ntail = &n->m_next;
823 len -= nsize;
824 }
825 return (nfirst);
826
827failed:
828 m_freem(nfirst);
829 return (NULL);
830}
831
832/*
833 * Allocate len-worth of mbufs and/or mbuf clusters (whatever fits best)
834 * and return a pointer to the head of the allocated chain. If m0 is
835 * non-null, then we assume that it is a single mbuf or an mbuf chain to
836 * which we want len bytes worth of mbufs and/or clusters attached, and so
837 * if we succeed in allocating it, we will just return a pointer to m0.
838 *
839 * If we happen to fail at any point during the allocation, we will free
840 * up everything we have already allocated and return NULL.
841 *
842 * Deprecated. Use m_getc() and m_cat() instead.
843 */
844struct mbuf *
845m_getm(struct mbuf *m0, int len, int type, int how)
846{
847 struct mbuf *nfirst;
848
849 nfirst = m_getc(len, how, type);
850
851 if (m0 != NULL) {
852 m_last(m0)->m_next = nfirst;
853 return (m0);
854 }
855
856 return (nfirst);
857}
858
859/*
860 * Adds a cluster to a normal mbuf, M_EXT is set on success.
861 * Deprecated. Use m_getcl() instead.
862 */
863void
864m_mclget(struct mbuf *m, int how)
865{
866 struct mbcluster *mcl;
867
868 KKASSERT((m->m_flags & M_EXT) == 0);
869 mcl = objcache_get(mclmeta_cache, MBTOM(how));
870 if (mcl != NULL) {
871 linkcluster(m, mcl);
872 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters,
873 1);
874 } else {
875 ++mbstat[mycpu->gd_cpuid].m_drops;
876 }
877}
878
879/*
880 * Updates to mbcluster must be MPSAFE. Only an entity which already has
881 * a reference to the cluster can ref it, so we are in no danger of
882 * racing an add with a subtract. But the operation must still be atomic
883 * since multiple entities may have a reference on the cluster.
884 *
885 * m_mclfree() is almost the same but it must contend with two entities
886 * freeing the cluster at the same time.
887 */
888static void
889m_mclref(void *arg)
890{
891 struct mbcluster *mcl = arg;
892
893 atomic_add_int(&mcl->mcl_refs, 1);
894}
895
896/*
897 * When dereferencing a cluster we have to deal with an N->0 race, where
898 * N entities free their references simultaneously. To do this we use
899 * atomic_fetchadd_int().
900 */
901static void
902m_mclfree(void *arg)
903{
904 struct mbcluster *mcl = arg;
905
906 if (atomic_fetchadd_int(&mcl->mcl_refs, -1) == 1)
907 objcache_put(mclmeta_cache, mcl);
908}
909
910/*
911 * Free a single mbuf and any associated external storage. The successor,
912 * if any, is returned.
913 *
914 * We do need to check non-first mbufs for m_aux, since some existing
915 * code does not call M_PREPEND properly.
916 * (example: call to bpf_mtap from drivers)
917 */
918struct mbuf *
919m_free(struct mbuf *m)
920{
921 struct mbuf *n;
922 struct globaldata *gd = mycpu;
923
924 KASSERT(m->m_type != MT_FREE, ("freeing free mbuf %p", m));
925 KASSERT(M_TRAILINGSPACE(m) >= 0, ("overflowed mbuf %p", m));
926 atomic_subtract_long_nonlocked(&mbtypes[gd->gd_cpuid][m->m_type], 1);
927
928 n = m->m_next;
929
930 /*
931 * Make sure the mbuf is in constructed state before returning it
932 * to the objcache.
933 */
934 m->m_next = NULL;
935 mbufuntrack(m);
936#ifdef notyet
937 KKASSERT(m->m_nextpkt == NULL);
938#else
939 if (m->m_nextpkt != NULL) {
940 static int afewtimes = 10;
941
942 if (afewtimes-- > 0) {
943 kprintf("mfree: m->m_nextpkt != NULL\n");
944 print_backtrace(-1);
945 }
946 m->m_nextpkt = NULL;
947 }
948#endif
949 if (m->m_flags & M_PKTHDR) {
950 m_tag_delete_chain(m); /* eliminate XXX JH */
951 }
952
953 m->m_flags &= (M_EXT | M_EXT_CLUSTER | M_CLCACHE | M_PHCACHE);
954
955 /*
956 * Clean the M_PKTHDR state so we can return the mbuf to its original
957 * cache. This is based on the PHCACHE flag which tells us whether
958 * the mbuf was originally allocated out of a packet-header cache
959 * or a non-packet-header cache.
960 */
961 if (m->m_flags & M_PHCACHE) {
962 m->m_flags |= M_PKTHDR;
963 m->m_pkthdr.rcvif = NULL; /* eliminate XXX JH */
964 m->m_pkthdr.csum_flags = 0; /* eliminate XXX JH */
965 m->m_pkthdr.fw_flags = 0; /* eliminate XXX JH */
966 SLIST_INIT(&m->m_pkthdr.tags);
967 }
968
969 /*
970 * Handle remaining flags combinations. M_CLCACHE tells us whether
971 * the mbuf was originally allocated from a cluster cache or not,
972 * and is totally separate from whether the mbuf is currently
973 * associated with a cluster.
974 */
975 switch(m->m_flags & (M_CLCACHE | M_EXT | M_EXT_CLUSTER)) {
976 case M_CLCACHE | M_EXT | M_EXT_CLUSTER:
977 /*
978 * mbuf+cluster cache case. The mbuf was allocated from the
979 * combined mbuf_cluster cache and can be returned to the
980 * cache if the cluster hasn't been shared.
981 */
982 if (m_sharecount(m) == 1) {
983 /*
984 * The cluster has not been shared, we can just
985 * reset the data pointer and return the mbuf
986 * to the cluster cache. Note that the reference
987 * count is left intact (it is still associated with
988 * an mbuf).
989 */
990 m->m_data = m->m_ext.ext_buf;
991 if (m->m_flags & M_PHCACHE)
992 objcache_put(mbufphdrcluster_cache, m);
993 else
994 objcache_put(mbufcluster_cache, m);
995 atomic_subtract_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
996 } else {
997 /*
998 * Hell. Someone else has a ref on this cluster,
999 * we have to disconnect it which means we can't
1000 * put it back into the mbufcluster_cache, we
1001 * have to destroy the mbuf.
1002 *
1003 * Other mbuf references to the cluster will typically
1004 * be M_EXT | M_EXT_CLUSTER but without M_CLCACHE.
1005 *
1006 * XXX we could try to connect another cluster to
1007 * it.
1008 */
1009 m->m_ext.ext_free(m->m_ext.ext_arg);
1010 m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
1011 if (m->m_flags & M_PHCACHE)
1012 objcache_dtor(mbufphdrcluster_cache, m);
1013 else
1014 objcache_dtor(mbufcluster_cache, m);
1015 }
1016 break;
1017 case M_EXT | M_EXT_CLUSTER:
1018 /*
1019 * Normal cluster associated with an mbuf that was allocated
1020 * from the normal mbuf pool rather than the cluster pool.
1021 * The cluster has to be independently disassociated from the
1022 * mbuf.
1023 */
1024 if (m_sharecount(m) == 1)
1025 atomic_subtract_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
1026 /* fall through */
1027 case M_EXT:
1028 /*
1029 * Normal cluster association case, disconnect the cluster from
1030 * the mbuf. The cluster may or may not be custom.
1031 */
1032 m->m_ext.ext_free(m->m_ext.ext_arg);
1033 m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
1034 /* fall through */
1035 case 0:
1036 /*
1037 * return the mbuf to the mbuf cache.
1038 */
1039 if (m->m_flags & M_PHCACHE) {
1040 m->m_data = m->m_pktdat;
1041 objcache_put(mbufphdr_cache, m);
1042 } else {
1043 m->m_data = m->m_dat;
1044 objcache_put(mbuf_cache, m);
1045 }
1046 atomic_subtract_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mbufs, 1);
1047 break;
1048 default:
1049 if (!panicstr)
1050 panic("bad mbuf flags %p %08x\n", m, m->m_flags);
1051 break;
1052 }
1053 return (n);
1054}
1055
1056void
1057m_freem(struct mbuf *m)
1058{
1059 while (m)
1060 m = m_free(m);
1061}
1062
1063/*
1064 * mbuf utility routines
1065 */
1066
1067/*
1068 * Lesser-used path for M_PREPEND: allocate new mbuf to prepend to chain and
1069 * copy junk along.
1070 */
1071struct mbuf *
1072m_prepend(struct mbuf *m, int len, int how)
1073{
1074 struct mbuf *mn;
1075
1076 if (m->m_flags & M_PKTHDR)
1077 mn = m_gethdr(how, m->m_type);
1078 else
1079 mn = m_get(how, m->m_type);
1080 if (mn == NULL) {
1081 m_freem(m);
1082 return (NULL);
1083 }
1084 if (m->m_flags & M_PKTHDR)
1085 M_MOVE_PKTHDR(mn, m);
1086 mn->m_next = m;
1087 m = mn;
1088 if (len < MHLEN)
1089 MH_ALIGN(m, len);
1090 m->m_len = len;
1091 return (m);
1092}
1093
1094/*
1095 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
1096 * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
1097 * The wait parameter is a choice of MB_WAIT/MB_DONTWAIT from caller.
1098 * Note that the copy is read-only, because clusters are not copied,
1099 * only their reference counts are incremented.
1100 */
1101struct mbuf *
1102m_copym(const struct mbuf *m, int off0, int len, int wait)
1103{
1104 struct mbuf *n, **np;
1105 int off = off0;
1106 struct mbuf *top;
1107 int copyhdr = 0;
1108
1109 KASSERT(off >= 0, ("m_copym, negative off %d", off));
1110 KASSERT(len >= 0, ("m_copym, negative len %d", len));
1111 if (off == 0 && (m->m_flags & M_PKTHDR))
1112 copyhdr = 1;
1113 while (off > 0) {
1114 KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
1115 if (off < m->m_len)
1116 break;
1117 off -= m->m_len;
1118 m = m->m_next;
1119 }
1120 np = &top;
1121 top = NULL;
1122 while (len > 0) {
1123 if (m == NULL) {
1124 KASSERT(len == M_COPYALL,
1125 ("m_copym, length > size of mbuf chain"));
1126 break;
1127 }
1128 /*
1129 * Because we are sharing any cluster attachment below,
1130 * be sure to get an mbuf that does not have a cluster
1131 * associated with it.
1132 */
1133 if (copyhdr)
1134 n = m_gethdr(wait, m->m_type);
1135 else
1136 n = m_get(wait, m->m_type);
1137 *np = n;
1138 if (n == NULL)
1139 goto nospace;
1140 if (copyhdr) {
1141 if (!m_dup_pkthdr(n, m, wait))
1142 goto nospace;
1143 if (len == M_COPYALL)
1144 n->m_pkthdr.len -= off0;
1145 else
1146 n->m_pkthdr.len = len;
1147 copyhdr = 0;
1148 }
1149 n->m_len = min(len, m->m_len - off);
1150 if (m->m_flags & M_EXT) {
1151 KKASSERT((n->m_flags & M_EXT) == 0);
1152 n->m_data = m->m_data + off;
1153 m->m_ext.ext_ref(m->m_ext.ext_arg);
1154 n->m_ext = m->m_ext;
1155 n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
1156 } else {
1157 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
1158 (unsigned)n->m_len);
1159 }
1160 if (len != M_COPYALL)
1161 len -= n->m_len;
1162 off = 0;
1163 m = m->m_next;
1164 np = &n->m_next;
1165 }
1166 if (top == NULL)
1167 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
1168 return (top);
1169nospace:
1170 m_freem(top);
1171 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
1172 return (NULL);
1173}
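/*
 * Illustrative sketch, not part of this file: taking a reference-counted,
 * read-only copy of a packet so a second consumer can inspect it while the
 * original is transmitted.  Because the clusters are shared, neither chain
 * may be written to afterwards:
 *
 *	n = m_copym(m, 0, M_COPYALL, MB_DONTWAIT);
 *	if (n != NULL)
 *		(hand n to the read-only consumer)
 */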
1174
1175/*
1176 * Copy an entire packet, including header (which must be present).
1177 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
1178 * Note that the copy is read-only, because clusters are not copied,
1179 * only their reference counts are incremented.
1180 * Preserve alignment of the first mbuf so if the creator has left
1181 * some room at the beginning (e.g. for inserting protocol headers)
1182 * the copies also have the room available.
1183 */
1184struct mbuf *
1185m_copypacket(struct mbuf *m, int how)
1186{
1187 struct mbuf *top, *n, *o;
1188
1189 n = m_gethdr(how, m->m_type);
1190 top = n;
1191 if (!n)
1192 goto nospace;
1193
1194 if (!m_dup_pkthdr(n, m, how))
1195 goto nospace;
1196 n->m_len = m->m_len;
1197 if (m->m_flags & M_EXT) {
1198 KKASSERT((n->m_flags & M_EXT) == 0);
1199 n->m_data = m->m_data;
1200 m->m_ext.ext_ref(m->m_ext.ext_arg);
1201 n->m_ext = m->m_ext;
1202 n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
1203 } else {
1204 n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
1205 bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
1206 }
1207
1208 m = m->m_next;
1209 while (m) {
1210 o = m_get(how, m->m_type);
1211 if (!o)
1212 goto nospace;
1213
1214 n->m_next = o;
1215 n = n->m_next;
1216
1217 n->m_len = m->m_len;
1218 if (m->m_flags & M_EXT) {
1219 KKASSERT((n->m_flags & M_EXT) == 0);
1220 n->m_data = m->m_data;
1221 m->m_ext.ext_ref(m->m_ext.ext_arg);
1222 n->m_ext = m->m_ext;
1223 n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
1224 } else {
1225 bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
1226 }
1227
1228 m = m->m_next;
1229 }
1230 return top;
1231nospace:
1232 m_freem(top);
1233 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
1234 return (NULL);
1235}
1236
1237/*
1238 * Copy data from an mbuf chain starting "off" bytes from the beginning,
1239 * continuing for "len" bytes, into the indicated buffer.
1240 */
1241void
1242m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
1243{
1244 unsigned count;
1245
1246 KASSERT(off >= 0, ("m_copydata, negative off %d", off));
1247 KASSERT(len >= 0, ("m_copydata, negative len %d", len));
1248 while (off > 0) {
1249 KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
1250 if (off < m->m_len)
1251 break;
1252 off -= m->m_len;
1253 m = m->m_next;
1254 }
1255 while (len > 0) {
1256 KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
1257 count = min(m->m_len - off, len);
1258 bcopy(mtod(m, caddr_t) + off, cp, count);
1259 len -= count;
1260 cp += count;
1261 off = 0;
1262 m = m->m_next;
1263 }
1264}
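/*
 * Illustrative sketch, not part of this file: copying a protocol header out
 * of a chain into properly aligned local storage (assumes struct ip from
 * <netinet/ip.h>):
 *
 *	struct ip iphdr;
 *
 *	if (m->m_pkthdr.len < sizeof(iphdr))
 *		goto drop;
 *	m_copydata(m, 0, sizeof(iphdr), (caddr_t)&iphdr);
 */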
1265
1266/*
1267 * Copy a packet header mbuf chain into a completely new chain, including
1268 * copying any mbuf clusters. Use this instead of m_copypacket() when
1269 * you need a writable copy of an mbuf chain.
1270 */
1271struct mbuf *
1272m_dup(struct mbuf *m, int how)
1273{
1274 struct mbuf **p, *top = NULL;
1275 int remain, moff, nsize;
1276
1277 /* Sanity check */
1278 if (m == NULL)
1279 return (NULL);
1280 KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __func__));
1281
1282 /* While there's more data, get a new mbuf, tack it on, and fill it */
1283 remain = m->m_pkthdr.len;
1284 moff = 0;
1285 p = &top;
1286 while (remain > 0 || top == NULL) { /* allow m->m_pkthdr.len == 0 */
1287 struct mbuf *n;
1288
1289 /* Get the next new mbuf */
1290 n = m_getl(remain, how, m->m_type, top == NULL ? M_PKTHDR : 0,
1291 &nsize);
1292 if (n == NULL)
1293 goto nospace;
1294 if (top == NULL)
1295 if (!m_dup_pkthdr(n, m, how))
1296 goto nospace0;
1297
1298 /* Link it into the new chain */
1299 *p = n;
1300 p = &n->m_next;
1301
1302 /* Copy data from original mbuf(s) into new mbuf */
1303 n->m_len = 0;
1304 while (n->m_len < nsize && m != NULL) {
1305 int chunk = min(nsize - n->m_len, m->m_len - moff);
1306
1307 bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
1308 moff += chunk;
1309 n->m_len += chunk;
1310 remain -= chunk;
1311 if (moff == m->m_len) {
1312 m = m->m_next;
1313 moff = 0;
1314 }
1315 }
1316
1317 /* Check correct total mbuf length */
1318 KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
1319 ("%s: bogus m_pkthdr.len", __func__));
1320 }
1321 return (top);
1322
1323nospace:
1324 m_freem(top);
1325nospace0:
1326 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
1327 return (NULL);
1328}
1329
1330/*
1331 * Copy the non-packet mbuf data chain into a new set of mbufs, including
1332 * copying any mbuf clusters. This is typically used to realign a data
1333 * chain by nfs_realign().
1334 *
1335 * The original chain is left intact. how should be MB_WAIT or MB_DONTWAIT
1336 * and NULL can be returned if MB_DONTWAIT is passed.
1337 *
1338 * Be careful to use cluster mbufs: a large mbuf chain converted to
1339 * non-cluster mbufs can exhaust our supply of mbufs.
1340 */
1341struct mbuf *
1342m_dup_data(struct mbuf *m, int how)
1343{
1344 struct mbuf **p, *n, *top = NULL;
1345 int mlen, moff, chunk, gsize, nsize;
1346
1347 /*
1348 * Degenerate case
1349 */
1350 if (m == NULL)
1351 return (NULL);
1352
1353 /*
1354 * Optimize the mbuf allocation but do not get too carried away.
1355 */
1356 if (m->m_next || m->m_len > MLEN)
1357 gsize = MCLBYTES;
1358 else
1359 gsize = MLEN;
1360
1361 /* Chain control */
1362 p = &top;
1363 n = NULL;
1364 nsize = 0;
1365
1366 /*
1367 * Scan the mbuf chain until nothing is left, the new mbuf chain
1368 * will be allocated on the fly as needed.
1369 */
1370 while (m) {
1371 mlen = m->m_len;
1372 moff = 0;
1373
1374 while (mlen) {
1375 KKASSERT(m->m_type == MT_DATA);
1376 if (n == NULL) {
1377 n = m_getl(gsize, how, MT_DATA, 0, &nsize);
1378 if (n == NULL)
1379 goto nospace;
1380 n->m_len = 0;
1381 *p = n;
1382 p = &n->m_next;
1383 }
1384 chunk = imin(mlen, nsize);
1385 bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
1386 mlen -= chunk;
1387 moff += chunk;
1388 n->m_len += chunk;
1389 nsize -= chunk;
1390 if (nsize == 0)
1391 n = NULL;
1392 }
1393 m = m->m_next;
1394 }
1395 *p = NULL;
1396 return(top);
1397nospace:
1398 *p = NULL;
1399 m_freem(top);
1400 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
1401 return (NULL);
1402}
1403
1404/*
1405 * Concatenate mbuf chain n to m.
1406 * Both chains must be of the same type (e.g. MT_DATA).
1407 * Any m_pkthdr is not updated.
1408 */
1409void
1410m_cat(struct mbuf *m, struct mbuf *n)
1411{
1412 m = m_last(m);
1413 while (n) {
1414 if (m->m_flags & M_EXT ||
1415 m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
1416 /* just join the two chains */
1417 m->m_next = n;
1418 return;
1419 }
1420 /* splat the data from one into the other */
1421 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
1422 (u_int)n->m_len);
1423 m->m_len += n->m_len;
1424 n = m_free(n);
1425 }
1426}
1427
1428void
1429m_adj(struct mbuf *mp, int req_len)
1430{
1431 int len = req_len;
1432 struct mbuf *m;
1433 int count;
1434
1435 if ((m = mp) == NULL)
1436 return;
1437 if (len >= 0) {
1438 /*
1439 * Trim from head.
1440 */
1441 while (m != NULL && len > 0) {
1442 if (m->m_len <= len) {
1443 len -= m->m_len;
1444 m->m_len = 0;
1445 m = m->m_next;
1446 } else {
1447 m->m_len -= len;
1448 m->m_data += len;
1449 len = 0;
1450 }
1451 }
1452 m = mp;
1453 if (mp->m_flags & M_PKTHDR)
1454 m->m_pkthdr.len -= (req_len - len);
1455 } else {
1456 /*
1457 * Trim from tail. Scan the mbuf chain,
1458 * calculating its length and finding the last mbuf.
1459 * If the adjustment only affects this mbuf, then just
1460 * adjust and return. Otherwise, rescan and truncate
1461 * after the remaining size.
1462 */
1463 len = -len;
1464 count = 0;
1465 for (;;) {
1466 count += m->m_len;
1467 if (m->m_next == NULL)
1468 break;
1469 m = m->m_next;
1470 }
1471 if (m->m_len >= len) {
1472 m->m_len -= len;
1473 if (mp->m_flags & M_PKTHDR)
1474 mp->m_pkthdr.len -= len;
1475 return;
1476 }
1477 count -= len;
1478 if (count < 0)
1479 count = 0;
1480 /*
1481 * Correct length for chain is "count".
1482 * Find the mbuf with last data, adjust its length,
1483 * and toss data from remaining mbufs on chain.
1484 */
1485 m = mp;
1486 if (m->m_flags & M_PKTHDR)
1487 m->m_pkthdr.len = count;
1488 for (; m; m = m->m_next) {
1489 if (m->m_len >= count) {
1490 m->m_len = count;
1491 break;
1492 }
1493 count -= m->m_len;
1494 }
1495 while (m->m_next)
1496 (m = m->m_next)->m_len = 0;
1497 }
1498}
1499
1500/*
1501 * Set the m_data pointer of a newly-allocated mbuf
1502 * to place an object of the specified size at the
1503 * end of the mbuf, longword aligned.
1504 */
1505void
1506m_align(struct mbuf *m, int len)
1507{
1508 int adjust;
1509
1510 if (m->m_flags & M_EXT)
1511 adjust = m->m_ext.ext_size - len;
1512 else if (m->m_flags & M_PKTHDR)
1513 adjust = MHLEN - len;
1514 else
1515 adjust = MLEN - len;
1516 m->m_data += adjust &~ (sizeof(long)-1);
1517}
1518
1519/*
1520 * Rearrange an mbuf chain so that len bytes are contiguous
1521 * and in the data area of an mbuf (so that mtod will work for a structure
1522 * of size len). Returns the resulting mbuf chain on success, frees it and
1523 * returns null on failure. If there is room, it will add up to
1524 * max_protohdr-len extra bytes to the contiguous region in an attempt to
1525 * avoid being called next time.
1526 */
1527struct mbuf *
1528m_pullup(struct mbuf *n, int len)
1529{
1530 struct mbuf *m;
1531 int count;
1532 int space;
1533
1534 /*
1535 * If first mbuf has no cluster, and has room for len bytes
1536 * without shifting current data, pullup into it,
1537 * otherwise allocate a new mbuf to prepend to the chain.
1538 */
1539 if (!(n->m_flags & M_EXT) &&
1540 n->m_data + len < &n->m_dat[MLEN] &&
1541 n->m_next) {
1542 if (n->m_len >= len)
1543 return (n);
1544 m = n;
1545 n = n->m_next;
1546 len -= m->m_len;
1547 } else {
1548 if (len > MHLEN)
1549 goto bad;
1550 if (n->m_flags & M_PKTHDR)
1551 m = m_gethdr(MB_DONTWAIT, n->m_type);
1552 else
1553 m = m_get(MB_DONTWAIT, n->m_type);
1554 if (m == NULL)
1555 goto bad;
1556 m->m_len = 0;
1557 if (n->m_flags & M_PKTHDR)
1558 M_MOVE_PKTHDR(m, n);
1559 }
1560 space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
1561 do {
1562 count = min(min(max(len, max_protohdr), space), n->m_len);
1563 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
1564 (unsigned)count);
1565 len -= count;
1566 m->m_len += count;
1567 n->m_len -= count;
1568 space -= count;
1569 if (n->m_len)
1570 n->m_data += count;
1571 else
1572 n = m_free(n);
1573 } while (len > 0 && n);
1574 if (len > 0) {
1575 m_free(m);
1576 goto bad;
1577 }
1578 m->m_next = n;
1579 return (m);
1580bad:
1581 m_freem(n);
1582 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
1583 return (NULL);
1584}
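/*
 * Illustrative sketch, not part of this file: the usual protocol-input idiom
 * around m_pullup() (assumes struct ip from <netinet/ip.h>).  Note that the
 * whole chain has already been freed when NULL is returned:
 *
 *	if (m->m_len < sizeof(struct ip)) {
 *		m = m_pullup(m, sizeof(struct ip));
 *		if (m == NULL)
 *			return;
 *	}
 *	ip = mtod(m, struct ip *);
 */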
1585
1586/*
1587 * Partition an mbuf chain in two pieces, returning the tail --
1588 * all but the first len0 bytes. In case of failure, it returns NULL and
1589 * attempts to restore the chain to its original state.
1590 *
1591 * Note that the resulting mbufs might be read-only, because the new
1592 * mbuf can end up sharing an mbuf cluster with the original mbuf if
1593 * the "breaking point" happens to lie within a cluster mbuf. Use the
1594 * M_WRITABLE() macro to check for this case.
1595 */
1596struct mbuf *
1597m_split(struct mbuf *m0, int len0, int wait)
1598{
1599 struct mbuf *m, *n;
1600 unsigned len = len0, remain;
1601
1602 for (m = m0; m && len > m->m_len; m = m->m_next)
1603 len -= m->m_len;
1604 if (m == NULL)
1605 return (NULL);
1606 remain = m->m_len - len;
1607 if (m0->m_flags & M_PKTHDR) {
1608 n = m_gethdr(wait, m0->m_type);
1609 if (n == NULL)
1610 return (NULL);
1611 n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
1612 n->m_pkthdr.len = m0->m_pkthdr.len - len0;
1613 m0->m_pkthdr.len = len0;
1614 if (m->m_flags & M_EXT)
1615 goto extpacket;
1616 if (remain > MHLEN) {
1617 /* m can't be the lead packet */
1618 MH_ALIGN(n, 0);
1619 n->m_next = m_split(m, len, wait);
1620 if (n->m_next == NULL) {
1621 m_free(n);
1622 return (NULL);
1623 } else {
1624 n->m_len = 0;
1625 return (n);
1626 }
1627 } else
1628 MH_ALIGN(n, remain);
1629 } else if (remain == 0) {
1630 n = m->m_next;
1631 m->m_next = 0;
1632 return (n);
1633 } else {
1634 n = m_get(wait, m->m_type);
1635 if (n == NULL)
1636 return (NULL);
1637 M_ALIGN(n, remain);
1638 }
1639extpacket:
1640 if (m->m_flags & M_EXT) {
1641 KKASSERT((n->m_flags & M_EXT) == 0);
1642 n->m_data = m->m_data + len;
1643 m->m_ext.ext_ref(m->m_ext.ext_arg);
1644 n->m_ext = m->m_ext;
1645 n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
1646 } else {
1647 bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
1648 }
1649 n->m_len = remain;
1650 m->m_len = len;
1651 n->m_next = m->m_next;
1652 m->m_next = 0;
1653 return (n);
1654}
1655
1656/*
1657 * Routine to copy from device local memory into mbufs.
1658 * Note: "offset" is ill-defined and always called as 0, so ignore it.
1659 */
1660struct mbuf *
1661m_devget(char *buf, int len, int offset, struct ifnet *ifp,
1662 void (*copy)(volatile const void *from, volatile void *to, size_t length))
1663{
1664 struct mbuf *m, *mfirst = NULL, **mtail;
1665 int nsize, flags;
1666
1667 if (copy == NULL)
1668 copy = bcopy;
1669 mtail = &mfirst;
1670 flags = M_PKTHDR;
1671
1672 while (len > 0) {
1673 m = m_getl(len, MB_DONTWAIT, MT_DATA, flags, &nsize);
1674 if (m == NULL) {
1675 m_freem(mfirst);
1676 return (NULL);
1677 }
1678 m->m_len = min(len, nsize);
1679
1680 if (flags & M_PKTHDR) {
1681 if (len + max_linkhdr <= nsize)
1682 m->m_data += max_linkhdr;
1683 m->m_pkthdr.rcvif = ifp;
1684 m->m_pkthdr.len = len;
1685 flags = 0;
1686 }
1687
1688 copy(buf, m->m_data, (unsigned)m->m_len);
1689 buf += m->m_len;
1690 len -= m->m_len;
1691 *mtail = m;
1692 mtail = &m->m_next;
1693 }
1694
1695 return (mfirst);
1696}
1697
1698/*
1699 * Routine to pad mbuf to the specified length 'padto'.
1700 */
1701int
1702m_devpad(struct mbuf *m, int padto)
1703{
1704 struct mbuf *last = NULL;
1705 int padlen;
1706
1707 if (padto <= m->m_pkthdr.len)
1708 return 0;
1709
1710 padlen = padto - m->m_pkthdr.len;
1711
1712 /* if there's only the packet-header and we can pad there, use it. */
1713 if (m->m_pkthdr.len == m->m_len && M_TRAILINGSPACE(m) >= padlen) {
1714 last = m;
1715 } else {
1716 /*
1717 * Walk packet chain to find last mbuf. We will either
1718 * pad there, or append a new mbuf and pad it
1719 */
1720 for (last = m; last->m_next != NULL; last = last->m_next)
1721 ; /* EMPTY */
1722
1723 /* `last' now points to last in chain. */
1724 if (M_TRAILINGSPACE(last) < padlen) {
1725 struct mbuf *n;
1726
1727 /* Allocate new empty mbuf, pad it. Compact later. */
1728 MGET(n, MB_DONTWAIT, MT_DATA);
1729 if (n == NULL)
1730 return ENOBUFS;
1731 n->m_len = 0;
1732 last->m_next = n;
1733 last = n;
1734 }
1735 }
1736 KKASSERT(M_TRAILINGSPACE(last) >= padlen);
1737 KKASSERT(M_WRITABLE(last));
1738
1739 /* Now zero the pad area */
1740 bzero(mtod(last, char *) + last->m_len, padlen);
1741 last->m_len += padlen;
1742 m->m_pkthdr.len += padlen;
1743 return 0;
1744}
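/*
 * Illustrative sketch, not part of this file: padding a short ethernet frame
 * to the minimum length before transmission (ETHER_MIN_LEN and ETHER_CRC_LEN
 * come from <net/ethernet.h>; the chain must be writable):
 *
 *	if (m->m_pkthdr.len < ETHER_MIN_LEN - ETHER_CRC_LEN &&
 *	    m_devpad(m, ETHER_MIN_LEN - ETHER_CRC_LEN) != 0) {
 *		m_freem(m);
 *		return;
 *	}
 */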
1745
1746/*
1747 * Copy data from a buffer back into the indicated mbuf chain,
1748 * starting "off" bytes from the beginning, extending the mbuf
1749 * chain if necessary.
1750 */
1751void
1752m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
1753{
1754 int mlen;
1755 struct mbuf *m = m0, *n;
1756 int totlen = 0;
1757
1758 if (m0 == NULL)
1759 return;
1760 while (off > (mlen = m->m_len)) {
1761 off -= mlen;
1762 totlen += mlen;
1763 if (m->m_next == NULL) {
1764 n = m_getclr(MB_DONTWAIT, m->m_type);
1765 if (n == NULL)
1766 goto out;
1767 n->m_len = min(MLEN, len + off);
1768 m->m_next = n;
1769 }
1770 m = m->m_next;
1771 }
1772 while (len > 0) {
1773 mlen = min(m->m_len - off, len);
1774 bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
1775 cp += mlen;
1776 len -= mlen;
1777 mlen += off;
1778 off = 0;
1779 totlen += mlen;
1780 if (len == 0)
1781 break;
1782 if (m->m_next == NULL) {
1783 n = m_get(MB_DONTWAIT, m->m_type);
1784 if (n == NULL)
1785 break;
1786 n->m_len = min(MLEN, len);
1787 m->m_next = n;
1788 }
1789 m = m->m_next;
1790 }
1791out: if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
1792 m->m_pkthdr.len = totlen;
1793}
1794
1795/*
1796 * Append the specified data to the indicated mbuf chain,
1797 * Extend the mbuf chain if the new data does not fit in
1798 * existing space.
1799 *
1800 * Return 1 if able to complete the job; otherwise 0.
1801 */
1802int
1803m_append(struct mbuf *m0, int len, c_caddr_t cp)
1804{
1805 struct mbuf *m, *n;
1806 int remainder, space;
1807
1808 for (m = m0; m->m_next != NULL; m = m->m_next)
1809 ;
1810 remainder = len;
1811 space = M_TRAILINGSPACE(m);
1812 if (space > 0) {
1813 /*
1814 * Copy into available space.
1815 */
1816 if (space > remainder)
1817 space = remainder;
1818 bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
1819 m->m_len += space;
1820 cp += space, remainder -= space;
1821 }
1822 while (remainder > 0) {
1823 /*
1824 * Allocate a new mbuf; could check space
1825 * and allocate a cluster instead.
1826 */
1827 n = m_get(MB_DONTWAIT, m->m_type);
1828 if (n == NULL)
1829 break;
1830 n->m_len = min(MLEN, remainder);
1831 bcopy(cp, mtod(n, caddr_t), n->m_len);
1832 cp += n->m_len, remainder -= n->m_len;
1833 m->m_next = n;
1834 m = n;
1835 }
1836 if (m0->m_flags & M_PKTHDR)
1837 m0->m_pkthdr.len += len - remainder;
1838 return (remainder == 0);
1839}
1840
1841/*
1842 * Apply function f to the data in an mbuf chain starting "off" bytes from
1843 * the beginning, continuing for "len" bytes.
1844 */
1845int
1846m_apply(struct mbuf *m, int off, int len,
1847 int (*f)(void *, void *, u_int), void *arg)
1848{
1849 u_int count;
1850 int rval;
1851
1852 KASSERT(off >= 0, ("m_apply, negative off %d", off));
1853 KASSERT(len >= 0, ("m_apply, negative len %d", len));
1854 while (off > 0) {
1855 KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
1856 if (off < m->m_len)
1857 break;
1858 off -= m->m_len;
1859 m = m->m_next;
1860 }
1861 while (len > 0) {
1862 KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
1863 count = min(m->m_len - off, len);
1864 rval = (*f)(arg, mtod(m, caddr_t) + off, count);
1865 if (rval)
1866 return (rval);
1867 len -= count;
1868 off = 0;
1869 m = m->m_next;
1870 }
1871 return (0);
1872}
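/*
 * Illustrative sketch, not part of this file: using m_apply() to run a
 * callback over every byte of a chain without linearizing it.  The callback
 * name and the summing logic are hypothetical; a nonzero return from the
 * callback aborts the walk:
 *
 *	static int
 *	sum_bytes(void *arg, void *data, u_int len)
 *	{
 *		u_int *sump = arg;
 *		const u_char *p = data;
 *
 *		while (len--)
 *			*sump += *p++;
 *		return (0);
 *	}
 *
 *	u_int sum = 0;
 *	m_apply(m, 0, m_lengthm(m, NULL), sum_bytes, &sum);
 */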
1873
1874/*
1875 * Return a pointer to mbuf/offset of location in mbuf chain.
1876 */
1877struct mbuf *
1878m_getptr(struct mbuf *m, int loc, int *off)
1879{
1880
1881 while (loc >= 0) {
1882 /* Normal end of search. */
1883 if (m->m_len > loc) {
1884 *off = loc;
1885 return (m);
1886 } else {
1887 loc -= m->m_len;
1888 if (m->m_next == NULL) {
1889 if (loc == 0) {
1890 /* Point at the end of valid data. */
1891 *off = m->m_len;
1892 return (m);
1893 }
1894 return (NULL);
1895 }
1896 m = m->m_next;
1897 }
1898 }
1899 return (NULL);
1900}
1901
1902void
1903m_print(const struct mbuf *m)
1904{
1905 int len;
1906 const struct mbuf *m2;
1907
1908 len = m->m_pkthdr.len;
1909 m2 = m;
1910 while (len) {
1911 kprintf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
1912 len -= m2->m_len;
1913 m2 = m2->m_next;
1914 }
1915 return;
1916}
1917
1918/*
1919 * "Move" mbuf pkthdr from "from" to "to".
1920 * "from" must have M_PKTHDR set, and "to" must be empty.
1921 */
1922void
1923m_move_pkthdr(struct mbuf *to, struct mbuf *from)
1924{
1925 KASSERT((to->m_flags & M_PKTHDR), ("m_move_pkthdr: not packet header"));
1926
1927 to->m_flags |= from->m_flags & M_COPYFLAGS;
1928 to->m_pkthdr = from->m_pkthdr; /* especially tags */
1929 SLIST_INIT(&from->m_pkthdr.tags); /* purge tags from src */
1930}
1931
1932/*
1933 * Duplicate "from"'s mbuf pkthdr in "to".
1934 * "from" must have M_PKTHDR set, and "to" must be empty.
1935 * In particular, this does a deep copy of the packet tags.
1936 */
1937int
1938m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
1939{
1940 KASSERT((to->m_flags & M_PKTHDR), ("m_dup_pkthdr: not packet header"));
1941
1942 to->m_flags = (from->m_flags & M_COPYFLAGS) |
1943 (to->m_flags & ~M_COPYFLAGS);
1944 to->m_pkthdr = from->m_pkthdr;
1945 SLIST_INIT(&to->m_pkthdr.tags);
1946 return (m_tag_copy_chain(to, from, how));
1947}
1948
1949/*
1950 * Defragment a mbuf chain, returning the shortest possible
1951 * chain of mbufs and clusters. If allocation fails and
1952 * this cannot be completed, NULL will be returned, but
1953 * the passed in chain will be unchanged. Upon success,
1954 * the original chain will be freed, and the new chain
1955 * will be returned.
1956 *
1957 * If a non-packet header is passed in, the original
1958 * mbuf (chain?) will be returned unharmed.
1959 *
1960 * m_defrag_nofree doesn't free the passed in mbuf.
1961 */
1962struct mbuf *
1963m_defrag(struct mbuf *m0, int how)
1964{
1965 struct mbuf *m_new;
1966
1967 if ((m_new = m_defrag_nofree(m0, how)) == NULL)
1968 return (NULL);
1969 if (m_new != m0)
1970 m_freem(m0);
1971 return (m_new);
1972}
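/*
 * Illustrative sketch, not part of this file: a transmit path that needs a
 * more compact chain (e.g. because the device only supports a few DMA
 * segments) might use m_defrag() like this:
 *
 *	n = m_defrag(m, MB_DONTWAIT);
 *	if (n == NULL) {
 *		m_freem(m);		(original chain is still intact here)
 *		return;
 *	}
 *	m = n;				(old chain already freed if it differed)
 */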
1973
1974struct mbuf *
1975m_defrag_nofree(struct mbuf *m0, int how)
1976{
1977 struct mbuf *m_new = NULL, *m_final = NULL;
1978 int progress = 0, length, nsize;
1979
1980 if (!(m0->m_flags & M_PKTHDR))
1981 return (m0);
1982
1983#ifdef MBUF_STRESS_TEST
1984 if (m_defragrandomfailures) {
1985 int temp = karc4random() & 0xff;
1986 if (temp == 0xba)
1987 goto nospace;
1988 }
1989#endif
1990
1991 m_final = m_getl(m0->m_pkthdr.len, how, MT_DATA, M_PKTHDR, &nsize);
1992 if (m_final == NULL)
1993 goto nospace;
1994 m_final->m_len = 0; /* in case m0->m_pkthdr.len is zero */
1995
1996 if (m_dup_pkthdr(m_final, m0, how) == 0)
1997 goto nospace;
1998
1999 m_new = m_final;
2000
2001 while (progress < m0->m_pkthdr.len) {
2002 length = m0->m_pkthdr.len - progress;
2003 if (length > MCLBYTES)
2004 length = MCLBYTES;
2005
2006 if (m_new == NULL) {
2007 m_new = m_getl(length, how, MT_DATA, 0, &nsize);
2008 if (m_new == NULL)
2009 goto nospace;
2010 }
2011
2012 m_copydata(m0, progress, length, mtod(m_new, caddr_t));
2013 progress += length;
2014 m_new->m_len = length;
2015 if (m_new != m_final)
2016 m_cat(m_final, m_new);
2017 m_new = NULL;
2018 }
2019 if (m0->m_next == NULL)
2020 m_defraguseless++;
2021 m_defragpackets++;
2022 m_defragbytes += m_final->m_pkthdr.len;
2023 return (m_final);
2024nospace:
2025 m_defragfailure++;
2026 if (m_new)
2027 m_free(m_new);
2028 m_freem(m_final);
2029 return (NULL);
2030}
2031
2032/*
2033 * Move data from uio into mbufs.
2034 */
2035struct mbuf *
2036m_uiomove(struct uio *uio)
2037{
2038 struct mbuf *m; /* current working mbuf */
2039 struct mbuf *head = NULL; /* result mbuf chain */
2040 struct mbuf **mp = &head;
2041 int flags = M_PKTHDR;
2042 int nsize;
2043 int error;
2044 int resid;
2045
2046 do {
2047 if (uio->uio_resid > INT_MAX)
2048 resid = INT_MAX;
2049 else
2050 resid = (int)uio->uio_resid;
2051 m = m_getl(resid, MB_WAIT, MT_DATA, flags, &nsize);
2052 if (flags) {
2053 m->m_pkthdr.len = 0;
2054 /* Leave room for protocol headers. */
2055 if (resid < MHLEN)
2056 MH_ALIGN(m, resid);
2057 flags = 0;
2058 }
2059 m->m_len = imin(nsize, resid);
2060 error = uiomove(mtod(m, caddr_t), m->m_len, uio);
2061 if (error) {
2062 m_free(m);
2063 goto failed;
2064 }
2065 *mp = m;
2066 mp = &m->m_next;
2067 head->m_pkthdr.len += m->m_len;
2068 } while (uio->uio_resid > 0);
2069
2070 return (head);
2071
2072failed:
2073 m_freem(head);
2074 return (NULL);
2075}
2076
2077struct mbuf *
2078m_last(struct mbuf *m)
2079{
2080 while (m->m_next)
2081 m = m->m_next;
2082 return (m);
2083}
2084
2085/*
2086 * Return the number of bytes in an mbuf chain.
2087 * If lastm is not NULL, also return the last mbuf.
2088 */
2089u_int
2090m_lengthm(struct mbuf *m, struct mbuf **lastm)
2091{
2092 u_int len = 0;
2093 struct mbuf *prev = m;
2094
2095 while (m) {
2096 len += m->m_len;
2097 prev = m;
2098 m = m->m_next;
2099 }
2100 if (lastm != NULL)
2101 *lastm = prev;
2102 return (len);
2103}
2104
2105/*
2106 * Like m_lengthm(), except also keep track of mbuf usage.
2107 */
2108u_int
2109m_countm(struct mbuf *m, struct mbuf **lastm, u_int *pmbcnt)
2110{
2111 u_int len = 0, mbcnt = 0;
2112 struct mbuf *prev = m;
2113
2114 while (m) {
2115 len += m->m_len;
2116 mbcnt += MSIZE;
2117 if (m->m_flags & M_EXT)
2118 mbcnt += m->m_ext.ext_size;
2119 prev = m;
2120 m = m->m_next;
2121 }
2122 if (lastm != NULL)
2123 *lastm = prev;
2124 *pmbcnt = mbcnt;
2125 return (len);
2126}