[dragonfly.git] / sys / kern / uipc_mbuf.c
984263bc 1/*
5bd48c1d
MD
2 * (MPSAFE)
3 *
0c33f36d 4 * Copyright (c) 2004 Jeffrey M. Hsu. All rights reserved.
66d6c637
JH
5 * Copyright (c) 2004 The DragonFly Project. All rights reserved.
6 *
7 * This code is derived from software contributed to The DragonFly Project
8 * by Jeffrey M. Hsu.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of The DragonFly Project nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific, prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 */
35
66d6c637 36/*
984263bc
MD
37 * Copyright (c) 1982, 1986, 1988, 1991, 1993
38 * The Regents of the University of California. All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by the University of
51 * California, Berkeley and its contributors.
52 * 4. Neither the name of the University nor the names of its contributors
53 * may be used to endorse or promote products derived from this software
54 * without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
57 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
58 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
59 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
60 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
61 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
62 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
64 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
65 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66 * SUCH DAMAGE.
67 *
8a3125c6 68 * @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94
984263bc 69 * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
3f98f485 70 * $DragonFly: src/sys/kern/uipc_mbuf.c,v 1.70 2008/11/20 14:21:01 sephe Exp $
984263bc
MD
71 */
72
73#include "opt_param.h"
74#include "opt_mbuf_stress_test.h"
75#include <sys/param.h>
76#include <sys/systm.h>
4e23f366 77#include <sys/file.h>
984263bc
MD
78#include <sys/malloc.h>
79#include <sys/mbuf.h>
80#include <sys/kernel.h>
81#include <sys/sysctl.h>
82#include <sys/domain.h>
7b6f875f 83#include <sys/objcache.h>
e9fa4b60 84#include <sys/tree.h>
984263bc 85#include <sys/protosw.h>
0c33f36d 86#include <sys/uio.h>
ef0fdad1 87#include <sys/thread.h>
a2a5ad0d 88#include <sys/globaldata.h>
5bd48c1d 89
90775e29 90#include <sys/thread2.h>
5bd48c1d 91#include <sys/spinlock2.h>
984263bc 92
1d16b2b5 93#include <machine/atomic.h>
e54488bb 94#include <machine/limits.h>
1d16b2b5 95
984263bc
MD
96#include <vm/vm.h>
97#include <vm/vm_kern.h>
98#include <vm/vm_extern.h>
99
100#ifdef INVARIANTS
101#include <machine/cpu.h>
102#endif
103
90775e29
MD
104/*
105 * mbuf cluster meta-data
106 */
7b6f875f 107struct mbcluster {
90775e29
MD
108 int32_t mcl_refs;
109 void *mcl_data;
7b6f875f 110};
90775e29 111
e9fa4b60
MD
112/*
113 * mbuf tracking for debugging purposes
114 */
115#ifdef MBUF_DEBUG
116
117static MALLOC_DEFINE(M_MTRACK, "mtrack", "mtrack");
118
119struct mbtrack;
120RB_HEAD(mbuf_rb_tree, mbtrack);
121RB_PROTOTYPE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *);
122
123struct mbtrack {
124 RB_ENTRY(mbtrack) rb_node;
125 int trackid;
126 struct mbuf *m;
127};
128
129static int
130mbtrack_cmp(struct mbtrack *mb1, struct mbtrack *mb2)
131{
132 if (mb1->m < mb2->m)
133 return(-1);
134 if (mb1->m > mb2->m)
135 return(1);
136 return(0);
137}
138
139RB_GENERATE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *, m);
140
141struct mbuf_rb_tree mbuf_track_root;
5bd48c1d 142static struct spinlock mbuf_track_spin = SPINLOCK_INITIALIZER(mbuf_track_spin);
e9fa4b60
MD
143
144static void
145mbuftrack(struct mbuf *m)
146{
147 struct mbtrack *mbt;
148
e9fa4b60 149 mbt = kmalloc(sizeof(*mbt), M_MTRACK, M_INTWAIT|M_ZERO);
5bd48c1d 150 spin_lock(&mbuf_track_spin);
e9fa4b60 151 mbt->m = m;
5bd48c1d
MD
152 if (mbuf_rb_tree_RB_INSERT(&mbuf_track_root, mbt)) {
153 spin_unlock(&mbuf_track_spin);
e9fa4b60 154 panic("mbuftrack: mbuf %p already being tracked\n", m);
5bd48c1d
MD
155 }
156 spin_unlock(&mbuf_track_spin);
e9fa4b60
MD
157}
158
159static void
160mbufuntrack(struct mbuf *m)
161{
162 struct mbtrack *mbt;
163
5bd48c1d 164 spin_lock(&mbuf_track_spin);
e9fa4b60
MD
165 mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
166 if (mbt == NULL) {
5bd48c1d
MD
167 spin_unlock(&mbuf_track_spin);
168 panic("mbufuntrack: mbuf %p was not tracked\n", m);
e9fa4b60
MD
169 } else {
170 mbuf_rb_tree_RB_REMOVE(&mbuf_track_root, mbt);
6cef7136 171 spin_unlock(&mbuf_track_spin);
e9fa4b60
MD
172 kfree(mbt, M_MTRACK);
173 }
e9fa4b60
MD
174}
175
176void
177mbuftrackid(struct mbuf *m, int trackid)
178{
179 struct mbtrack *mbt;
180 struct mbuf *n;
181
5bd48c1d 182 spin_lock(&mbuf_track_spin);
e9fa4b60
MD
183 while (m) {
184 n = m->m_nextpkt;
185 while (m) {
186 mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
5bd48c1d
MD
187 if (mbt == NULL) {
188 spin_unlock(&mbuf_track_spin);
189 panic("mbuftrackid: mbuf %p not tracked", m);
190 }
191 mbt->trackid = trackid;
e9fa4b60
MD
192 m = m->m_next;
193 }
194 m = n;
195 }
5bd48c1d 196 spin_unlock(&mbuf_track_spin);
e9fa4b60
MD
197}
198
199static int
200mbuftrack_callback(struct mbtrack *mbt, void *arg)
201{
202 struct sysctl_req *req = arg;
203 char buf[64];
204 int error;
205
206 ksnprintf(buf, sizeof(buf), "mbuf %p track %d\n", mbt->m, mbt->trackid);
207
5bd48c1d 208 spin_unlock(&mbuf_track_spin);
e9fa4b60 209 error = SYSCTL_OUT(req, buf, strlen(buf));
5bd48c1d 210 spin_lock(&mbuf_track_spin);
e9fa4b60
MD
211 if (error)
212 return(-error);
213 return(0);
214}
215
216static int
217mbuftrack_show(SYSCTL_HANDLER_ARGS)
218{
219 int error;
220
5bd48c1d 221 spin_lock(&mbuf_track_spin);
e9fa4b60
MD
222 error = mbuf_rb_tree_RB_SCAN(&mbuf_track_root, NULL,
223 mbuftrack_callback, req);
5bd48c1d 224 spin_unlock(&mbuf_track_spin);
e9fa4b60
MD
225 return (-error);
226}
227SYSCTL_PROC(_kern_ipc, OID_AUTO, showmbufs, CTLFLAG_RD|CTLTYPE_STRING,
228 0, 0, mbuftrack_show, "A", "Show all in-use mbufs");
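
/*
 * Editor's note (illustrative, not part of the original source): on a
 * kernel built with MBUF_DEBUG, the tracked mbufs registered above can
 * be listed from userland through the sysctl declared here, e.g.:
 *
 *	sysctl kern.ipc.showmbufs
 */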
229
230#else
231
232#define mbuftrack(m)
233#define mbufuntrack(m)
234
235#endif
236
7b6f875f 237static void mbinit(void *);
ba39e2e0 238SYSINIT(mbuf, SI_BOOT2_MACHDEP, SI_ORDER_FIRST, mbinit, NULL)
984263bc 239
4c1e2509 240static u_long mbtypes[SMP_MAXCPU][MT_NTYPES];
90775e29 241
4c1e2509 242static struct mbstat mbstat[SMP_MAXCPU];
984263bc
MD
243int max_linkhdr;
244int max_protohdr;
245int max_hdr;
246int max_datalen;
247int m_defragpackets;
248int m_defragbytes;
249int m_defraguseless;
250int m_defragfailure;
251#ifdef MBUF_STRESS_TEST
252int m_defragrandomfailures;
253#endif
254
7b6f875f
JH
255struct objcache *mbuf_cache, *mbufphdr_cache;
256struct objcache *mclmeta_cache;
257struct objcache *mbufcluster_cache, *mbufphdrcluster_cache;
258
984263bc
MD
259int nmbclusters;
260int nmbufs;
984263bc 261
984263bc
MD
262SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
263 &max_linkhdr, 0, "");
264SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
265 &max_protohdr, 0, "");
266SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
267SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
268 &max_datalen, 0, "");
269SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
270 &mbuf_wait, 0, "");
4c1e2509
JT
271static int do_mbstat(SYSCTL_HANDLER_ARGS);
272
273SYSCTL_PROC(_kern_ipc, KIPC_MBSTAT, mbstat, CTLTYPE_STRUCT|CTLFLAG_RD,
274 0, 0, do_mbstat, "S,mbstat", "");
275
276static int do_mbtypes(SYSCTL_HANDLER_ARGS);
277
278SYSCTL_PROC(_kern_ipc, OID_AUTO, mbtypes, CTLTYPE_ULONG|CTLFLAG_RD,
279 0, 0, do_mbtypes, "LU", "");
280
281static int
282do_mbstat(SYSCTL_HANDLER_ARGS)
283{
284 struct mbstat mbstat_total;
285 struct mbstat *mbstat_totalp;
286 int i;
287
288 bzero(&mbstat_total, sizeof(mbstat_total));
289 mbstat_totalp = &mbstat_total;
290
291 for (i = 0; i < ncpus; i++)
292 {
293 mbstat_total.m_mbufs += mbstat[i].m_mbufs;
294 mbstat_total.m_clusters += mbstat[i].m_clusters;
295 mbstat_total.m_spare += mbstat[i].m_spare;
296 mbstat_total.m_clfree += mbstat[i].m_clfree;
297 mbstat_total.m_drops += mbstat[i].m_drops;
298 mbstat_total.m_wait += mbstat[i].m_wait;
299 mbstat_total.m_drain += mbstat[i].m_drain;
300 mbstat_total.m_mcfail += mbstat[i].m_mcfail;
301 mbstat_total.m_mpfail += mbstat[i].m_mpfail;
302
303 }
304 /*
305 * The following fields are not cumulative fields so just
306 * get their values once.
307 */
308 mbstat_total.m_msize = mbstat[0].m_msize;
309 mbstat_total.m_mclbytes = mbstat[0].m_mclbytes;
310 mbstat_total.m_minclsize = mbstat[0].m_minclsize;
311 mbstat_total.m_mlen = mbstat[0].m_mlen;
312 mbstat_total.m_mhlen = mbstat[0].m_mhlen;
313
314 return(sysctl_handle_opaque(oidp, mbstat_totalp, sizeof(mbstat_total), req));
315}
316
317static int
318do_mbtypes(SYSCTL_HANDLER_ARGS)
319{
320 u_long totals[MT_NTYPES];
321 int i, j;
322
323 for (i = 0; i < MT_NTYPES; i++)
324 totals[i] = 0;
325
326 for (i = 0; i < ncpus; i++)
327 {
328 for (j = 0; j < MT_NTYPES; j++)
329 totals[j] += mbtypes[i][j];
330 }
331
332 return(sysctl_handle_opaque(oidp, totals, sizeof(totals), req));
333}
18c48b9c
MD
334
335/*
336 * These are read-only because we do not currently have any code
337 * to adjust the objcache limits after the fact. The variables
338 * may only be set as boot-time tunables.
339 */
340SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
984263bc 341 &nmbclusters, 0, "Maximum number of mbuf clusters available");
18c48b9c 342SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
984263bc 343 "Maximum number of mbufs available");
7b6f875f 344
984263bc
MD
345SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
346 &m_defragpackets, 0, "");
347SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
348 &m_defragbytes, 0, "");
349SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
350 &m_defraguseless, 0, "");
351SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
352 &m_defragfailure, 0, "");
353#ifdef MBUF_STRESS_TEST
354SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
355 &m_defragrandomfailures, 0, "");
356#endif
357
90775e29
MD
358static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");
359static MALLOC_DEFINE(M_MBUFCL, "mbufcl", "mbufcl");
7b6f875f 360static MALLOC_DEFINE(M_MCLMETA, "mclmeta", "mclmeta");
90775e29
MD
361
362static void m_reclaim (void);
90775e29
MD
363static void m_mclref(void *arg);
364static void m_mclfree(void *arg);
984263bc 365
4e23f366
MD
366/*
367 * NOTE: Default NMBUFS must take into account a possible DOS attack
368 * using fd passing on unix domain sockets.
369 */
984263bc
MD
370#ifndef NMBCLUSTERS
371#define NMBCLUSTERS (512 + maxusers * 16)
372#endif
373#ifndef NMBUFS
4e23f366 374#define NMBUFS (nmbclusters * 2 + maxfiles)
984263bc
MD
375#endif
376
377/*
378 * Perform sanity checks of tunables declared above.
379 */
380static void
381tunable_mbinit(void *dummy)
382{
984263bc
MD
383 /*
384 * This has to be done before VM init.
385 */
386 nmbclusters = NMBCLUSTERS;
387 TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
388 nmbufs = NMBUFS;
389 TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
390 /* Sanity checks */
391 if (nmbufs < nmbclusters * 2)
392 nmbufs = nmbclusters * 2;
984263bc 393}
ba39e2e0
MD
394SYSINIT(tunable_mbinit, SI_BOOT1_TUNABLES, SI_ORDER_ANY,
395 tunable_mbinit, NULL);
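
/*
 * Illustrative example (editor's addition): because nmbclusters and nmbufs
 * may only be set as boot-time tunables, they would typically be adjusted
 * from /boot/loader.conf.  The values shown are hypothetical.
 *
 *	kern.ipc.nmbclusters="65536"
 *	kern.ipc.nmbufs="131072"
 */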
984263bc
MD
396
397/* "number of clusters of pages" */
398#define NCL_INIT 1
399
400#define NMB_INIT 16
401
7b6f875f
JH
402/*
403 * The mbuf object cache only guarantees that m_next and m_nextpkt are
404 * NULL and that m_data points to the beginning of the data area. In
405 * particular, m_len and m_pkthdr.len are uninitialized. It is the
406 * responsibility of the caller to initialize those fields before use.
407 */
408
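/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * a typical caller pattern showing the length fields being initialized
 * before use, as required above.  'src' and 'src_len' are hypothetical.
 */
#if 0	/* example only */
	struct mbuf *m;

	m = m_gethdr(MB_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	bcopy(src, mtod(m, caddr_t), src_len);	/* src_len <= MHLEN assumed */
	m->m_len = src_len;
	m->m_pkthdr.len = src_len;
#endif
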
409static boolean_t __inline
410mbuf_ctor(void *obj, void *private, int ocflags)
984263bc 411{
7b6f875f 412 struct mbuf *m = obj;
984263bc 413
7b6f875f
JH
414 m->m_next = NULL;
415 m->m_nextpkt = NULL;
416 m->m_data = m->m_dat;
417 m->m_flags = 0;
418
419 return (TRUE);
984263bc
MD
420}
421
422/*
7b6f875f 423 * Initialize the mbuf and the packet header fields.
984263bc 424 */
7b6f875f
JH
425static boolean_t
426mbufphdr_ctor(void *obj, void *private, int ocflags)
984263bc 427{
7b6f875f 428 struct mbuf *m = obj;
984263bc 429
7b6f875f
JH
430 m->m_next = NULL;
431 m->m_nextpkt = NULL;
432 m->m_data = m->m_pktdat;
77e294a1 433 m->m_flags = M_PKTHDR | M_PHCACHE;
984263bc 434
7b6f875f
JH
435 m->m_pkthdr.rcvif = NULL; /* eliminate XXX JH */
436 SLIST_INIT(&m->m_pkthdr.tags);
437 m->m_pkthdr.csum_flags = 0; /* eliminate XXX JH */
438 m->m_pkthdr.fw_flags = 0; /* eliminate XXX JH */
439
440 return (TRUE);
984263bc
MD
441}
442
443/*
7b6f875f 444 * An mbcluster object consists of a 2K (MCLBYTES) cluster and a refcount.
984263bc 445 */
7b6f875f
JH
446static boolean_t
447mclmeta_ctor(void *obj, void *private, int ocflags)
984263bc 448{
7b6f875f
JH
449 struct mbcluster *cl = obj;
450 void *buf;
451
452 if (ocflags & M_NOWAIT)
efda3bd0 453 buf = kmalloc(MCLBYTES, M_MBUFCL, M_NOWAIT | M_ZERO);
7b6f875f 454 else
efda3bd0 455 buf = kmalloc(MCLBYTES, M_MBUFCL, M_INTWAIT | M_ZERO);
7b6f875f
JH
456 if (buf == NULL)
457 return (FALSE);
77e294a1 458 cl->mcl_refs = 0;
7b6f875f
JH
459 cl->mcl_data = buf;
460 return (TRUE);
461}
984263bc 462
c3ef87ca
MD
463static void
464mclmeta_dtor(void *obj, void *private)
465{
466 struct mbcluster *mcl = obj;
467
468 KKASSERT(mcl->mcl_refs == 0);
efda3bd0 469 kfree(mcl->mcl_data, M_MBUFCL);
c3ef87ca
MD
470}
471
7b6f875f
JH
472static void
473linkcluster(struct mbuf *m, struct mbcluster *cl)
474{
984263bc 475 /*
7b6f875f
JH
476 * Add the cluster to the mbuf. The caller will detect that the
477 * mbuf now has an attached cluster.
984263bc 478 */
7b6f875f
JH
479 m->m_ext.ext_arg = cl;
480 m->m_ext.ext_buf = cl->mcl_data;
481 m->m_ext.ext_ref = m_mclref;
482 m->m_ext.ext_free = m_mclfree;
483 m->m_ext.ext_size = MCLBYTES;
df8d1020 484 atomic_add_int(&cl->mcl_refs, 1);
984263bc 485
7b6f875f
JH
486 m->m_data = m->m_ext.ext_buf;
487 m->m_flags |= M_EXT | M_EXT_CLUSTER;
984263bc
MD
488}
489
7b6f875f
JH
490static boolean_t
491mbufphdrcluster_ctor(void *obj, void *private, int ocflags)
492{
493 struct mbuf *m = obj;
494 struct mbcluster *cl;
495
496 mbufphdr_ctor(obj, private, ocflags);
497 cl = objcache_get(mclmeta_cache, ocflags);
a5955b15
MD
498 if (cl == NULL) {
499 ++mbstat[mycpu->gd_cpuid].m_drops;
7b6f875f 500 return (FALSE);
a5955b15 501 }
77e294a1 502 m->m_flags |= M_CLCACHE;
7b6f875f
JH
503 linkcluster(m, cl);
504 return (TRUE);
505}
984263bc 506
7b6f875f
JH
507static boolean_t
508mbufcluster_ctor(void *obj, void *private, int ocflags)
984263bc 509{
7b6f875f
JH
510 struct mbuf *m = obj;
511 struct mbcluster *cl;
512
513 mbuf_ctor(obj, private, ocflags);
514 cl = objcache_get(mclmeta_cache, ocflags);
a5955b15
MD
515 if (cl == NULL) {
516 ++mbstat[mycpu->gd_cpuid].m_drops;
7b6f875f 517 return (FALSE);
a5955b15 518 }
77e294a1 519 m->m_flags |= M_CLCACHE;
7b6f875f
JH
520 linkcluster(m, cl);
521 return (TRUE);
522}
984263bc 523
77e294a1
MD
524/*
525 * Used for both the cluster and cluster PHDR caches.
526 *
527 * The mbuf may have lost its cluster due to sharing, deal
528 * with the situation by checking M_EXT.
529 */
7b6f875f
JH
530static void
531mbufcluster_dtor(void *obj, void *private)
984263bc 532{
7b6f875f 533 struct mbuf *m = obj;
77e294a1 534 struct mbcluster *mcl;
984263bc 535
77e294a1
MD
536 if (m->m_flags & M_EXT) {
537 KKASSERT((m->m_flags & M_EXT_CLUSTER) != 0);
538 mcl = m->m_ext.ext_arg;
539 KKASSERT(mcl->mcl_refs == 1);
540 mcl->mcl_refs = 0;
541 objcache_put(mclmeta_cache, mcl);
542 }
984263bc
MD
543}
544
7b6f875f
JH
545struct objcache_malloc_args mbuf_malloc_args = { MSIZE, M_MBUF };
546struct objcache_malloc_args mclmeta_malloc_args =
547 { sizeof(struct mbcluster), M_MCLMETA };
548
549/* ARGSUSED*/
90775e29 550static void
7b6f875f 551mbinit(void *dummy)
984263bc 552{
6f21e2f4 553 int mb_limit, cl_limit;
0aa16b5d 554 int limit;
4c1e2509
JT
555 int i;
556
0aa16b5d
SZ
557 /*
558 * Initialize statistics
559 */
560 for (i = 0; i < ncpus; i++) {
4c1e2509
JT
561 atomic_set_long_nonlocked(&mbstat[i].m_msize, MSIZE);
562 atomic_set_long_nonlocked(&mbstat[i].m_mclbytes, MCLBYTES);
563 atomic_set_long_nonlocked(&mbstat[i].m_minclsize, MINCLSIZE);
564 atomic_set_long_nonlocked(&mbstat[i].m_mlen, MLEN);
565 atomic_set_long_nonlocked(&mbstat[i].m_mhlen, MHLEN);
566 }
984263bc 567
0aa16b5d
SZ
568 /*
 569 * Create object caches and save cluster limits, which will
570 * be used to adjust backing kmalloc pools' limit later.
571 */
572
6f21e2f4 573 mb_limit = cl_limit = 0;
0aa16b5d
SZ
574
575 limit = nmbufs;
576 mbuf_cache = objcache_create("mbuf", &limit, 0,
5b7da64a 577 mbuf_ctor, NULL, NULL,
7b6f875f 578 objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
6f21e2f4 579 mb_limit += limit;
0aa16b5d
SZ
580
581 limit = nmbufs;
582 mbufphdr_cache = objcache_create("mbuf pkt hdr", &limit, 64,
5b7da64a 583 mbufphdr_ctor, NULL, NULL,
7b6f875f 584 objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
6f21e2f4 585 mb_limit += limit;
0aa16b5d
SZ
586
587 cl_limit = nmbclusters;
588 mclmeta_cache = objcache_create("cluster mbuf", &cl_limit, 0,
7b6f875f
JH
589 mclmeta_ctor, mclmeta_dtor, NULL,
590 objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args);
0aa16b5d
SZ
591
592 limit = nmbclusters;
593 mbufcluster_cache = objcache_create("mbuf + cluster", &limit, 0,
7b6f875f
JH
594 mbufcluster_ctor, mbufcluster_dtor, NULL,
595 objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
6f21e2f4 596 mb_limit += limit;
0aa16b5d
SZ
597
598 limit = nmbclusters;
7b6f875f 599 mbufphdrcluster_cache = objcache_create("mbuf pkt hdr + cluster",
0aa16b5d 600 &limit, 64, mbufphdrcluster_ctor, mbufcluster_dtor, NULL,
7b6f875f 601 objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
6f21e2f4 602 mb_limit += limit;
0aa16b5d
SZ
603
604 /*
605 * Adjust backing kmalloc pools' limit
3f98f485
SZ
606 *
607 * NOTE: We raise the limit by another 1/8 to take the effect
608 * of loosememuse into account.
0aa16b5d 609 */
3f98f485 610 cl_limit += cl_limit / 8;
0aa16b5d
SZ
611 kmalloc_raise_limit(mclmeta_malloc_args.mtype,
612 mclmeta_malloc_args.objsize * cl_limit);
613 kmalloc_raise_limit(M_MBUFCL, MCLBYTES * cl_limit);
614
3f98f485 615 mb_limit += mb_limit / 8;
0aa16b5d
SZ
616 kmalloc_raise_limit(mbuf_malloc_args.mtype,
617 mbuf_malloc_args.objsize * mb_limit);
90775e29 618}
984263bc 619
90775e29
MD
620/*
621 * Return the number of references to this mbuf's data. 0 is returned
622 * if the mbuf is not M_EXT, a reference count is returned if it is
7b6f875f 623 * M_EXT | M_EXT_CLUSTER, and 99 is returned if it is a special M_EXT.
90775e29
MD
624 */
625int
626m_sharecount(struct mbuf *m)
627{
7b6f875f
JH
628 switch (m->m_flags & (M_EXT | M_EXT_CLUSTER)) {
629 case 0:
630 return (0);
631 case M_EXT:
632 return (99);
633 case M_EXT | M_EXT_CLUSTER:
634 return (((struct mbcluster *)m->m_ext.ext_arg)->mcl_refs);
635 }
636 /* NOTREACHED */
637 return (0); /* to shut up compiler */
90775e29
MD
638}
639
640/*
641 * change mbuf to new type
642 */
643void
644m_chtype(struct mbuf *m, int type)
645{
4c1e2509
JT
646 struct globaldata *gd = mycpu;
647
648 atomic_add_long_nonlocked(&mbtypes[gd->gd_cpuid][type], 1);
649 atomic_subtract_long_nonlocked(&mbtypes[gd->gd_cpuid][m->m_type], 1);
650 atomic_set_short_nonlocked(&m->m_type, type);
984263bc
MD
651}
652
984263bc 653static void
8a3125c6 654m_reclaim(void)
984263bc 655{
1fd87d54
RG
656 struct domain *dp;
657 struct protosw *pr;
984263bc 658
5bd48c1d
MD
659 kprintf("Debug: m_reclaim() called\n");
660
9c70fe43 661 SLIST_FOREACH(dp, &domains, dom_next) {
8a3125c6 662 for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
984263bc
MD
663 if (pr->pr_drain)
664 (*pr->pr_drain)();
8a3125c6
MD
665 }
666 }
4c1e2509 667 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_drain, 1);
984263bc
MD
668}
669
7b6f875f
JH
670static void __inline
671updatestats(struct mbuf *m, int type)
672{
4c1e2509 673 struct globaldata *gd = mycpu;
7b6f875f 674
fcd1202a 675 m->m_type = type;
e9fa4b60 676 mbuftrack(m);
982f999d
MD
677#ifdef MBUF_DEBUG
678 KASSERT(m->m_next == NULL, ("mbuf %p: bad m_next in get", m));
679 KASSERT(m->m_nextpkt == NULL, ("mbuf %p: bad m_nextpkt in get", m));
680#endif
4c1e2509
JT
681
682 atomic_add_long_nonlocked(&mbtypes[gd->gd_cpuid][type], 1);
683 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mbufs, 1);
684
7b6f875f
JH
685}
686
984263bc 687/*
7b6f875f 688 * Allocate an mbuf.
984263bc
MD
689 */
690struct mbuf *
8a3125c6 691m_get(int how, int type)
984263bc 692{
12496bdf 693 struct mbuf *m;
7b6f875f
JH
694 int ntries = 0;
695 int ocf = MBTOM(how);
12496bdf 696
7b6f875f
JH
697retryonce:
698
699 m = objcache_get(mbuf_cache, ocf);
700
701 if (m == NULL) {
702 if ((how & MB_TRYWAIT) && ntries++ == 0) {
703 struct objcache *reclaimlist[] = {
704 mbufphdr_cache,
5bd48c1d
MD
705 mbufcluster_cache,
706 mbufphdrcluster_cache
7b6f875f 707 };
a3034532 708 const int nreclaims = NELEM(reclaimlist);
7b6f875f
JH
709
710 if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
711 m_reclaim();
712 goto retryonce;
c6339e39 713 }
a5955b15 714 ++mbstat[mycpu->gd_cpuid].m_drops;
7b6f875f 715 return (NULL);
12496bdf 716 }
982f999d
MD
717#ifdef MBUF_DEBUG
718 KASSERT(m->m_data == m->m_dat, ("mbuf %p: bad m_data in get", m));
719#endif
5bd08532 720 m->m_len = 0;
c6339e39 721
7b6f875f 722 updatestats(m, type);
984263bc
MD
723 return (m);
724}
725
726struct mbuf *
8a3125c6 727m_gethdr(int how, int type)
984263bc 728{
12496bdf 729 struct mbuf *m;
7b6f875f
JH
730 int ocf = MBTOM(how);
731 int ntries = 0;
12496bdf 732
7b6f875f
JH
733retryonce:
734
735 m = objcache_get(mbufphdr_cache, ocf);
736
737 if (m == NULL) {
738 if ((how & MB_TRYWAIT) && ntries++ == 0) {
739 struct objcache *reclaimlist[] = {
740 mbuf_cache,
741 mbufcluster_cache, mbufphdrcluster_cache
742 };
a3034532 743 const int nreclaims = NELEM(reclaimlist);
7b6f875f
JH
744
745 if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
746 m_reclaim();
747 goto retryonce;
c6339e39 748 }
a5955b15 749 ++mbstat[mycpu->gd_cpuid].m_drops;
7b6f875f 750 return (NULL);
12496bdf 751 }
982f999d
MD
752#ifdef MBUF_DEBUG
753 KASSERT(m->m_data == m->m_pktdat, ("mbuf %p: bad m_data in get", m));
754#endif
5bd08532
MD
755 m->m_len = 0;
756 m->m_pkthdr.len = 0;
c6339e39 757
7b6f875f 758 updatestats(m, type);
984263bc
MD
759 return (m);
760}
761
7b6f875f
JH
762/*
 763 * Get an mbuf (not an mbuf cluster!) and zero it.
764 * Deprecated.
765 */
984263bc 766struct mbuf *
8a3125c6 767m_getclr(int how, int type)
984263bc 768{
1fd87d54 769 struct mbuf *m;
984263bc 770
7b6f875f
JH
771 m = m_get(how, type);
772 if (m != NULL)
773 bzero(m->m_data, MLEN);
984263bc
MD
774 return (m);
775}
776
777/*
7b6f875f 778 * Returns an mbuf with an attached cluster.
984263bc
MD
 779 * Because many network drivers use this kind of buffer a lot, it is
780 * convenient to keep a small pool of free buffers of this kind.
781 * Even a small size such as 10 gives about 10% improvement in the
782 * forwarding rate in a bridge or router.
984263bc 783 */
984263bc
MD
784struct mbuf *
785m_getcl(int how, short type, int flags)
786{
7b6f875f
JH
787 struct mbuf *m;
788 int ocflags = MBTOM(how);
789 int ntries = 0;
984263bc 790
7b6f875f
JH
791retryonce:
792
793 if (flags & M_PKTHDR)
794 m = objcache_get(mbufphdrcluster_cache, ocflags);
795 else
796 m = objcache_get(mbufcluster_cache, ocflags);
797
798 if (m == NULL) {
799 if ((how & MB_TRYWAIT) && ntries++ == 0) {
800 struct objcache *reclaimlist[1];
801
802 if (flags & M_PKTHDR)
803 reclaimlist[0] = mbufcluster_cache;
804 else
805 reclaimlist[0] = mbufphdrcluster_cache;
806 if (!objcache_reclaimlist(reclaimlist, 1, ocflags))
807 m_reclaim();
808 goto retryonce;
984263bc 809 }
a5955b15 810 ++mbstat[mycpu->gd_cpuid].m_drops;
7b6f875f 811 return (NULL);
984263bc 812 }
7b6f875f 813
982f999d
MD
814#ifdef MBUF_DEBUG
815 KASSERT(m->m_data == m->m_ext.ext_buf,
816 ("mbuf %p: bad m_data in get", m));
817#endif
7b6f875f 818 m->m_type = type;
5bd08532
MD
819 m->m_len = 0;
 820 m->m_pkthdr.len = 0; /* just do it unconditionally */
7b6f875f 821
e9fa4b60 822 mbuftrack(m);
4c1e2509
JT
823
824 atomic_add_long_nonlocked(&mbtypes[mycpu->gd_cpuid][type], 1);
825 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
7b6f875f 826 return (m);
984263bc
MD
827}
828
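/*
 * Illustrative sketch (editor's addition): the typical receive-buffer
 * allocation pattern in a network driver that motivates this cache.
 */
#if 0	/* example only */
	struct mbuf *m;

	m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;	/* trimmed to the frame size later */
#endif
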
829/*
50503f0f
JH
830 * Allocate chain of requested length.
831 */
832struct mbuf *
833m_getc(int len, int how, int type)
834{
835 struct mbuf *n, *nfirst = NULL, **ntail = &nfirst;
836 int nsize;
837
838 while (len > 0) {
839 n = m_getl(len, how, type, 0, &nsize);
840 if (n == NULL)
841 goto failed;
842 n->m_len = 0;
843 *ntail = n;
844 ntail = &n->m_next;
845 len -= nsize;
846 }
847 return (nfirst);
848
849failed:
850 m_freem(nfirst);
851 return (NULL);
852}
853
854/*
855 * Allocate len-worth of mbufs and/or mbuf clusters (whatever fits best)
856 * and return a pointer to the head of the allocated chain. If m0 is
984263bc
MD
857 * non-null, then we assume that it is a single mbuf or an mbuf chain to
858 * which we want len bytes worth of mbufs and/or clusters attached, and so
50503f0f 859 * if we succeed in allocating it, we will just return a pointer to m0.
984263bc
MD
860 *
861 * If we happen to fail at any point during the allocation, we will free
862 * up everything we have already allocated and return NULL.
863 *
50503f0f 864 * Deprecated. Use m_getc() and m_cat() instead.
984263bc
MD
865 */
866struct mbuf *
dc14b0a9 867m_getm(struct mbuf *m0, int len, int type, int how)
984263bc 868{
50503f0f 869 struct mbuf *nfirst;
984263bc 870
50503f0f 871 nfirst = m_getc(len, how, type);
984263bc 872
50503f0f
JH
873 if (m0 != NULL) {
874 m_last(m0)->m_next = nfirst;
875 return (m0);
984263bc
MD
876 }
877
50503f0f 878 return (nfirst);
984263bc
MD
879}
880
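/*
 * Illustrative sketch (editor's addition): the m_getc()/m_cat() combination
 * recommended above in place of the deprecated m_getm().
 */
#if 0	/* example only */
	struct mbuf *tail;

	tail = m_getc(len, MB_DONTWAIT, MT_DATA);
	if (tail == NULL)
		return (NULL);
	m_cat(m0, tail);	/* append the new chain to the existing chain m0 */
#endif
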
881/*
7b6f875f
JH
882 * Adds a cluster to a normal mbuf, M_EXT is set on success.
883 * Deprecated. Use m_getcl() instead.
b6650ec0 884 */
90775e29
MD
885void
886m_mclget(struct mbuf *m, int how)
b6650ec0 887{
7b6f875f 888 struct mbcluster *mcl;
b6650ec0 889
77e294a1 890 KKASSERT((m->m_flags & M_EXT) == 0);
7b6f875f 891 mcl = objcache_get(mclmeta_cache, MBTOM(how));
c3ef87ca
MD
892 if (mcl != NULL) {
893 linkcluster(m, mcl);
5bd48c1d
MD
894 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters,
895 1);
a5955b15
MD
896 } else {
897 ++mbstat[mycpu->gd_cpuid].m_drops;
c3ef87ca 898 }
b6650ec0
MD
899}
900
df8d1020
MD
901/*
902 * Updates to mbcluster must be MPSAFE. Only an entity which already has
903 * a reference to the cluster can ref it, so we are in no danger of
904 * racing an add with a subtract. But the operation must still be atomic
905 * since multiple entities may have a reference on the cluster.
906 *
907 * m_mclfree() is almost the same but it must contend with two entities
5bd48c1d 908 * freeing the cluster at the same time.
df8d1020 909 */
90775e29 910static void
7b6f875f 911m_mclref(void *arg)
b6650ec0 912{
7b6f875f 913 struct mbcluster *mcl = arg;
90775e29 914
7b6f875f 915 atomic_add_int(&mcl->mcl_refs, 1);
b6650ec0
MD
916}
917
1d16b2b5
MD
918/*
 919 * When dereferencing a cluster we have to deal with an N->0 race, where
 920 * N entities free their references simultaneously. To do this we use
dee87a60 921 * atomic_fetchadd_int().
1d16b2b5 922 */
90775e29 923static void
7b6f875f 924m_mclfree(void *arg)
b6650ec0 925{
7b6f875f 926 struct mbcluster *mcl = arg;
90775e29 927
dee87a60 928 if (atomic_fetchadd_int(&mcl->mcl_refs, -1) == 1)
77e294a1 929 objcache_put(mclmeta_cache, mcl);
b6650ec0
MD
930}
931
932/*
b6650ec0
MD
933 * Free a single mbuf and any associated external storage. The successor,
934 * if any, is returned.
984263bc 935 *
b6650ec0 936 * We do need to check non-first mbuf for m_aux, since some of existing
984263bc
MD
937 * code does not call M_PREPEND properly.
938 * (example: call to bpf_mtap from drivers)
939 */
982f999d
MD
940
941#ifdef MBUF_DEBUG
942
943struct mbuf *
944_m_free(struct mbuf *m, const char *func)
945
946#else
947
984263bc 948struct mbuf *
b6650ec0 949m_free(struct mbuf *m)
982f999d
MD
950
951#endif
984263bc 952{
b6650ec0 953 struct mbuf *n;
4c1e2509 954 struct globaldata *gd = mycpu;
b6650ec0 955
361af367 956 KASSERT(m->m_type != MT_FREE, ("freeing free mbuf %p", m));
f3f0fc49 957 KASSERT(M_TRAILINGSPACE(m) >= 0, ("overflowed mbuf %p", m));
4c1e2509 958 atomic_subtract_long_nonlocked(&mbtypes[gd->gd_cpuid][m->m_type], 1);
90775e29 959
7b6f875f 960 n = m->m_next;
90775e29
MD
961
962 /*
7b6f875f
JH
963 * Make sure the mbuf is in constructed state before returning it
964 * to the objcache.
90775e29 965 */
90775e29 966 m->m_next = NULL;
e9fa4b60 967 mbufuntrack(m);
982f999d
MD
968#ifdef MBUF_DEBUG
969 m->m_hdr.mh_lastfunc = func;
970#endif
7b6f875f
JH
971#ifdef notyet
972 KKASSERT(m->m_nextpkt == NULL);
973#else
974 if (m->m_nextpkt != NULL) {
7b6f875f
JH
975 static int afewtimes = 10;
976
977 if (afewtimes-- > 0) {
6ea70f76 978 kprintf("mfree: m->m_nextpkt != NULL\n");
7ce2998e 979 print_backtrace(-1);
90775e29 980 }
7b6f875f
JH
981 m->m_nextpkt = NULL;
982 }
983#endif
984 if (m->m_flags & M_PKTHDR) {
7b6f875f 985 m_tag_delete_chain(m); /* eliminate XXX JH */
77e294a1
MD
986 }
987
988 m->m_flags &= (M_EXT | M_EXT_CLUSTER | M_CLCACHE | M_PHCACHE);
989
990 /*
991 * Clean the M_PKTHDR state so we can return the mbuf to its original
992 * cache. This is based on the PHCACHE flag which tells us whether
993 * the mbuf was originally allocated out of a packet-header cache
994 * or a non-packet-header cache.
995 */
996 if (m->m_flags & M_PHCACHE) {
997 m->m_flags |= M_PKTHDR;
998 m->m_pkthdr.rcvif = NULL; /* eliminate XXX JH */
7b6f875f
JH
999 m->m_pkthdr.csum_flags = 0; /* eliminate XXX JH */
1000 m->m_pkthdr.fw_flags = 0; /* eliminate XXX JH */
6b1d6bed 1001 SLIST_INIT(&m->m_pkthdr.tags);
90775e29 1002 }
7b6f875f 1003
77e294a1
MD
1004 /*
1005 * Handle remaining flags combinations. M_CLCACHE tells us whether
1006 * the mbuf was originally allocated from a cluster cache or not,
1007 * and is totally separate from whether the mbuf is currently
1008 * associated with a cluster.
1009 */
77e294a1
MD
1010 switch(m->m_flags & (M_CLCACHE | M_EXT | M_EXT_CLUSTER)) {
1011 case M_CLCACHE | M_EXT | M_EXT_CLUSTER:
1012 /*
1013 * mbuf+cluster cache case. The mbuf was allocated from the
1014 * combined mbuf_cluster cache and can be returned to the
1015 * cache if the cluster hasn't been shared.
1016 */
1017 if (m_sharecount(m) == 1) {
1018 /*
1019 * The cluster has not been shared, we can just
1020 * reset the data pointer and return the mbuf
1021 * to the cluster cache. Note that the reference
1022 * count is left intact (it is still associated with
1023 * an mbuf).
1024 */
1025 m->m_data = m->m_ext.ext_buf;
1026 if (m->m_flags & M_PHCACHE)
1027 objcache_put(mbufphdrcluster_cache, m);
1028 else
1029 objcache_put(mbufcluster_cache, m);
4c1e2509 1030 atomic_subtract_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
77e294a1
MD
1031 } else {
1032 /*
1033 * Hell. Someone else has a ref on this cluster,
1034 * we have to disconnect it which means we can't
1035 * put it back into the mbufcluster_cache, we
1036 * have to destroy the mbuf.
1037 *
cb086467
MD
1038 * Other mbuf references to the cluster will typically
1039 * be M_EXT | M_EXT_CLUSTER but without M_CLCACHE.
1040 *
77e294a1
MD
1041 * XXX we could try to connect another cluster to
1042 * it.
1043 */
7b6f875f
JH
1044 m->m_ext.ext_free(m->m_ext.ext_arg);
1045 m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
77e294a1
MD
1046 if (m->m_flags & M_PHCACHE)
1047 objcache_dtor(mbufphdrcluster_cache, m);
1048 else
1049 objcache_dtor(mbufcluster_cache, m);
7b6f875f 1050 }
77e294a1
MD
1051 break;
1052 case M_EXT | M_EXT_CLUSTER:
1053 /*
1054 * Normal cluster associated with an mbuf that was allocated
 1055 * from the normal mbuf pool rather than the cluster pool.
 1056 * The cluster has to be independently disassociated from the
1057 * mbuf.
1058 */
cb086467 1059 if (m_sharecount(m) == 1)
4c1e2509 1060 atomic_subtract_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
77e294a1
MD
1061 /* fall through */
1062 case M_EXT:
1063 /*
1064 * Normal cluster association case, disconnect the cluster from
1065 * the mbuf. The cluster may or may not be custom.
1066 */
1067 m->m_ext.ext_free(m->m_ext.ext_arg);
1068 m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
1069 /* fall through */
1070 case 0:
1071 /*
1072 * return the mbuf to the mbuf cache.
1073 */
1074 if (m->m_flags & M_PHCACHE) {
7b6f875f
JH
1075 m->m_data = m->m_pktdat;
1076 objcache_put(mbufphdr_cache, m);
90775e29 1077 } else {
7b6f875f
JH
1078 m->m_data = m->m_dat;
1079 objcache_put(mbuf_cache, m);
90775e29 1080 }
4c1e2509 1081 atomic_subtract_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mbufs, 1);
77e294a1
MD
1082 break;
1083 default:
1084 if (!panicstr)
1085 panic("bad mbuf flags %p %08x\n", m, m->m_flags);
1086 break;
b6650ec0 1087 }
984263bc
MD
1088 return (n);
1089}
1090
982f999d
MD
1091#ifdef MBUF_DEBUG
1092
1093void
1094_m_freem(struct mbuf *m, const char *func)
1095{
1096 while (m)
1097 m = _m_free(m, func);
1098}
1099
1100#else
1101
984263bc 1102void
b6650ec0 1103m_freem(struct mbuf *m)
984263bc 1104{
90775e29
MD
1105 while (m)
1106 m = m_free(m);
984263bc
MD
1107}
1108
982f999d
MD
1109#endif
1110
984263bc 1111/*
df80f2ea 1112 * mbuf utility routines
984263bc
MD
1113 */
1114
1115/*
7b6f875f 1116 * Lesser-used path for M_PREPEND: allocate new mbuf to prepend to chain and
984263bc
MD
1117 * copy junk along.
1118 */
1119struct mbuf *
8a3125c6 1120m_prepend(struct mbuf *m, int len, int how)
984263bc
MD
1121{
1122 struct mbuf *mn;
1123
c3ef87ca
MD
1124 if (m->m_flags & M_PKTHDR)
1125 mn = m_gethdr(how, m->m_type);
1126 else
1127 mn = m_get(how, m->m_type);
7b6f875f 1128 if (mn == NULL) {
984263bc 1129 m_freem(m);
7b6f875f 1130 return (NULL);
984263bc
MD
1131 }
1132 if (m->m_flags & M_PKTHDR)
1133 M_MOVE_PKTHDR(mn, m);
1134 mn->m_next = m;
1135 m = mn;
1136 if (len < MHLEN)
1137 MH_ALIGN(m, len);
1138 m->m_len = len;
1139 return (m);
1140}
1141
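/*
 * Illustrative sketch (editor's addition): protocol code normally reaches
 * m_prepend() through the M_PREPEND() macro from <sys/mbuf.h>; the header
 * structure named here is hypothetical.
 */
#if 0	/* example only */
	M_PREPEND(m, sizeof(struct proto_hdr), MB_DONTWAIT);
	if (m == NULL)
		return (ENOBUFS);
#endif
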
1142/*
1143 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
1144 * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
74f1caca 1145 * The wait parameter is a choice of MB_WAIT/MB_DONTWAIT from caller.
984263bc
MD
1146 * Note that the copy is read-only, because clusters are not copied,
1147 * only their reference counts are incremented.
1148 */
984263bc 1149struct mbuf *
8a3125c6 1150m_copym(const struct mbuf *m, int off0, int len, int wait)
984263bc 1151{
1fd87d54
RG
1152 struct mbuf *n, **np;
1153 int off = off0;
984263bc
MD
1154 struct mbuf *top;
1155 int copyhdr = 0;
1156
1157 KASSERT(off >= 0, ("m_copym, negative off %d", off));
1158 KASSERT(len >= 0, ("m_copym, negative len %d", len));
5bd48c1d 1159 if (off == 0 && (m->m_flags & M_PKTHDR))
984263bc
MD
1160 copyhdr = 1;
1161 while (off > 0) {
1162 KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
1163 if (off < m->m_len)
1164 break;
1165 off -= m->m_len;
1166 m = m->m_next;
1167 }
1168 np = &top;
5bd48c1d 1169 top = NULL;
984263bc 1170 while (len > 0) {
7b6f875f 1171 if (m == NULL) {
984263bc
MD
1172 KASSERT(len == M_COPYALL,
1173 ("m_copym, length > size of mbuf chain"));
1174 break;
1175 }
c3ef87ca
MD
1176 /*
1177 * Because we are sharing any cluster attachment below,
1178 * be sure to get an mbuf that does not have a cluster
1179 * associated with it.
1180 */
1181 if (copyhdr)
1182 n = m_gethdr(wait, m->m_type);
1183 else
1184 n = m_get(wait, m->m_type);
984263bc 1185 *np = n;
7b6f875f 1186 if (n == NULL)
984263bc
MD
1187 goto nospace;
1188 if (copyhdr) {
1189 if (!m_dup_pkthdr(n, m, wait))
1190 goto nospace;
1191 if (len == M_COPYALL)
1192 n->m_pkthdr.len -= off0;
1193 else
1194 n->m_pkthdr.len = len;
1195 copyhdr = 0;
1196 }
1197 n->m_len = min(len, m->m_len - off);
1198 if (m->m_flags & M_EXT) {
c3ef87ca 1199 KKASSERT((n->m_flags & M_EXT) == 0);
984263bc 1200 n->m_data = m->m_data + off;
7b6f875f 1201 m->m_ext.ext_ref(m->m_ext.ext_arg);
984263bc 1202 n->m_ext = m->m_ext;
b542cd49 1203 n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
7eccf245 1204 } else {
984263bc
MD
1205 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
1206 (unsigned)n->m_len);
7eccf245 1207 }
984263bc
MD
1208 if (len != M_COPYALL)
1209 len -= n->m_len;
1210 off = 0;
1211 m = m->m_next;
1212 np = &n->m_next;
1213 }
7b6f875f 1214 if (top == NULL)
4c1e2509 1215 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
984263bc
MD
1216 return (top);
1217nospace:
1218 m_freem(top);
4c1e2509 1219 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
7b6f875f 1220 return (NULL);
984263bc
MD
1221}
1222
1223/*
1224 * Copy an entire packet, including header (which must be present).
1225 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
1226 * Note that the copy is read-only, because clusters are not copied,
1227 * only their reference counts are incremented.
1228 * Preserve alignment of the first mbuf so if the creator has left
1229 * some room at the beginning (e.g. for inserting protocol headers)
1230 * the copies also have the room available.
1231 */
1232struct mbuf *
8a3125c6 1233m_copypacket(struct mbuf *m, int how)
984263bc
MD
1234{
1235 struct mbuf *top, *n, *o;
1236
7f3602fe 1237 n = m_gethdr(how, m->m_type);
984263bc
MD
1238 top = n;
1239 if (!n)
1240 goto nospace;
1241
1242 if (!m_dup_pkthdr(n, m, how))
1243 goto nospace;
1244 n->m_len = m->m_len;
1245 if (m->m_flags & M_EXT) {
c3ef87ca 1246 KKASSERT((n->m_flags & M_EXT) == 0);
984263bc 1247 n->m_data = m->m_data;
7b6f875f 1248 m->m_ext.ext_ref(m->m_ext.ext_arg);
984263bc 1249 n->m_ext = m->m_ext;
b542cd49 1250 n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
984263bc
MD
1251 } else {
1252 n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat );
1253 bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
1254 }
1255
1256 m = m->m_next;
1257 while (m) {
7b6f875f 1258 o = m_get(how, m->m_type);
984263bc
MD
1259 if (!o)
1260 goto nospace;
1261
1262 n->m_next = o;
1263 n = n->m_next;
1264
1265 n->m_len = m->m_len;
1266 if (m->m_flags & M_EXT) {
c3ef87ca 1267 KKASSERT((n->m_flags & M_EXT) == 0);
984263bc 1268 n->m_data = m->m_data;
7b6f875f 1269 m->m_ext.ext_ref(m->m_ext.ext_arg);
984263bc 1270 n->m_ext = m->m_ext;
b542cd49 1271 n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
984263bc
MD
1272 } else {
1273 bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
1274 }
1275
1276 m = m->m_next;
1277 }
1278 return top;
1279nospace:
1280 m_freem(top);
4c1e2509 1281 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
7b6f875f 1282 return (NULL);
984263bc
MD
1283}
1284
1285/*
1286 * Copy data from an mbuf chain starting "off" bytes from the beginning,
1287 * continuing for "len" bytes, into the indicated buffer.
1288 */
1289void
8a3125c6 1290m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
984263bc 1291{
1fd87d54 1292 unsigned count;
984263bc
MD
1293
1294 KASSERT(off >= 0, ("m_copydata, negative off %d", off));
1295 KASSERT(len >= 0, ("m_copydata, negative len %d", len));
1296 while (off > 0) {
1297 KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
1298 if (off < m->m_len)
1299 break;
1300 off -= m->m_len;
1301 m = m->m_next;
1302 }
1303 while (len > 0) {
1304 KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
1305 count = min(m->m_len - off, len);
1306 bcopy(mtod(m, caddr_t) + off, cp, count);
1307 len -= count;
1308 cp += count;
1309 off = 0;
1310 m = m->m_next;
1311 }
1312}
1313
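/*
 * Illustrative sketch (editor's addition): copying a fixed-size header out
 * of a possibly fragmented chain into a local buffer; 'proto_hdr' is a
 * hypothetical structure and the chain is assumed to be long enough.
 */
#if 0	/* example only */
	struct proto_hdr hdr;

	m_copydata(m, 0, sizeof(hdr), (caddr_t)&hdr);
#endif
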
1314/*
1315 * Copy a packet header mbuf chain into a completely new chain, including
1316 * copying any mbuf clusters. Use this instead of m_copypacket() when
1317 * you need a writable copy of an mbuf chain.
1318 */
1319struct mbuf *
8a3125c6 1320m_dup(struct mbuf *m, int how)
984263bc
MD
1321{
1322 struct mbuf **p, *top = NULL;
1323 int remain, moff, nsize;
1324
1325 /* Sanity check */
1326 if (m == NULL)
50503f0f 1327 return (NULL);
5e2195bf 1328 KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __func__));
984263bc
MD
1329
1330 /* While there's more data, get a new mbuf, tack it on, and fill it */
1331 remain = m->m_pkthdr.len;
1332 moff = 0;
1333 p = &top;
1334 while (remain > 0 || top == NULL) { /* allow m->m_pkthdr.len == 0 */
1335 struct mbuf *n;
1336
1337 /* Get the next new mbuf */
50503f0f
JH
1338 n = m_getl(remain, how, m->m_type, top == NULL ? M_PKTHDR : 0,
1339 &nsize);
984263bc
MD
1340 if (n == NULL)
1341 goto nospace;
50503f0f 1342 if (top == NULL)
984263bc 1343 if (!m_dup_pkthdr(n, m, how))
50503f0f 1344 goto nospace0;
984263bc
MD
1345
1346 /* Link it into the new chain */
1347 *p = n;
1348 p = &n->m_next;
1349
1350 /* Copy data from original mbuf(s) into new mbuf */
50503f0f 1351 n->m_len = 0;
984263bc
MD
1352 while (n->m_len < nsize && m != NULL) {
1353 int chunk = min(nsize - n->m_len, m->m_len - moff);
1354
1355 bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
1356 moff += chunk;
1357 n->m_len += chunk;
1358 remain -= chunk;
1359 if (moff == m->m_len) {
1360 m = m->m_next;
1361 moff = 0;
1362 }
1363 }
1364
1365 /* Check correct total mbuf length */
1366 KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
50503f0f 1367 ("%s: bogus m_pkthdr.len", __func__));
984263bc
MD
1368 }
1369 return (top);
1370
1371nospace:
1372 m_freem(top);
50503f0f 1373nospace0:
4c1e2509 1374 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
50503f0f 1375 return (NULL);
984263bc
MD
1376}
1377
3bf6fec3
MD
1378/*
1379 * Copy the non-packet mbuf data chain into a new set of mbufs, including
1380 * copying any mbuf clusters. This is typically used to realign a data
1381 * chain by nfs_realign().
1382 *
1383 * The original chain is left intact. how should be MB_WAIT or MB_DONTWAIT
1384 * and NULL can be returned if MB_DONTWAIT is passed.
1385 *
 1386 * Be careful to use cluster mbufs: a large mbuf chain converted to
 1387 * non-cluster mbufs can exhaust our supply of mbufs.
1388 */
1389struct mbuf *
1390m_dup_data(struct mbuf *m, int how)
1391{
1392 struct mbuf **p, *n, *top = NULL;
1393 int mlen, moff, chunk, gsize, nsize;
1394
1395 /*
1396 * Degenerate case
1397 */
1398 if (m == NULL)
1399 return (NULL);
1400
1401 /*
1402 * Optimize the mbuf allocation but do not get too carried away.
1403 */
1404 if (m->m_next || m->m_len > MLEN)
1405 gsize = MCLBYTES;
1406 else
1407 gsize = MLEN;
1408
1409 /* Chain control */
1410 p = &top;
1411 n = NULL;
1412 nsize = 0;
1413
1414 /*
1415 * Scan the mbuf chain until nothing is left, the new mbuf chain
1416 * will be allocated on the fly as needed.
1417 */
1418 while (m) {
1419 mlen = m->m_len;
1420 moff = 0;
1421
1422 while (mlen) {
1423 KKASSERT(m->m_type == MT_DATA);
1424 if (n == NULL) {
1425 n = m_getl(gsize, how, MT_DATA, 0, &nsize);
 1426 if (n == NULL)
 1427 goto nospace;
 1428 n->m_len = 0;
1429 *p = n;
1430 p = &n->m_next;
1431 }
1432 chunk = imin(mlen, nsize);
1433 bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
1434 mlen -= chunk;
1435 moff += chunk;
1436 n->m_len += chunk;
1437 nsize -= chunk;
1438 if (nsize == 0)
1439 n = NULL;
1440 }
1441 m = m->m_next;
1442 }
1443 *p = NULL;
1444 return(top);
1445nospace:
1446 *p = NULL;
1447 m_freem(top);
1448 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
1449 return (NULL);
1450}
1451
984263bc
MD
1452/*
1453 * Concatenate mbuf chain n to m.
1454 * Both chains must be of the same type (e.g. MT_DATA).
1455 * Any m_pkthdr is not updated.
1456 */
1457void
8a3125c6 1458m_cat(struct mbuf *m, struct mbuf *n)
984263bc 1459{
50503f0f 1460 m = m_last(m);
984263bc
MD
1461 while (n) {
1462 if (m->m_flags & M_EXT ||
1463 m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
1464 /* just join the two chains */
1465 m->m_next = n;
1466 return;
1467 }
1468 /* splat the data from one into the other */
1469 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
1470 (u_int)n->m_len);
1471 m->m_len += n->m_len;
1472 n = m_free(n);
1473 }
1474}
1475
1476void
8a3125c6 1477m_adj(struct mbuf *mp, int req_len)
984263bc 1478{
1fd87d54
RG
1479 int len = req_len;
1480 struct mbuf *m;
1481 int count;
984263bc
MD
1482
1483 if ((m = mp) == NULL)
1484 return;
1485 if (len >= 0) {
1486 /*
1487 * Trim from head.
1488 */
1489 while (m != NULL && len > 0) {
1490 if (m->m_len <= len) {
1491 len -= m->m_len;
1492 m->m_len = 0;
1493 m = m->m_next;
1494 } else {
1495 m->m_len -= len;
1496 m->m_data += len;
1497 len = 0;
1498 }
1499 }
1500 m = mp;
1501 if (mp->m_flags & M_PKTHDR)
1502 m->m_pkthdr.len -= (req_len - len);
1503 } else {
1504 /*
1505 * Trim from tail. Scan the mbuf chain,
1506 * calculating its length and finding the last mbuf.
1507 * If the adjustment only affects this mbuf, then just
1508 * adjust and return. Otherwise, rescan and truncate
1509 * after the remaining size.
1510 */
1511 len = -len;
1512 count = 0;
1513 for (;;) {
1514 count += m->m_len;
60233e58 1515 if (m->m_next == NULL)
984263bc
MD
1516 break;
1517 m = m->m_next;
1518 }
1519 if (m->m_len >= len) {
1520 m->m_len -= len;
1521 if (mp->m_flags & M_PKTHDR)
1522 mp->m_pkthdr.len -= len;
1523 return;
1524 }
1525 count -= len;
1526 if (count < 0)
1527 count = 0;
1528 /*
1529 * Correct length for chain is "count".
1530 * Find the mbuf with last data, adjust its length,
1531 * and toss data from remaining mbufs on chain.
1532 */
1533 m = mp;
1534 if (m->m_flags & M_PKTHDR)
1535 m->m_pkthdr.len = count;
1536 for (; m; m = m->m_next) {
1537 if (m->m_len >= count) {
1538 m->m_len = count;
1539 break;
1540 }
1541 count -= m->m_len;
1542 }
1543 while (m->m_next)
1544 (m = m->m_next) ->m_len = 0;
1545 }
1546}
1547
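/*
 * Illustrative sketch (editor's addition): trimming from the front and the
 * back of a packet; ETHER_HDR_LEN/ETHER_CRC_LEN come from <net/ethernet.h>.
 */
#if 0	/* example only */
	m_adj(m, ETHER_HDR_LEN);	/* strip the link-level header */
	m_adj(m, -ETHER_CRC_LEN);	/* drop a trailing CRC, if present */
#endif
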
a3768f58
RP
1548/*
1549 * Set the m_data pointer of a newly-allocated mbuf
1550 * to place an object of the specified size at the
1551 * end of the mbuf, longword aligned.
1552 */
1553void
1554m_align(struct mbuf *m, int len)
1555{
1556 int adjust;
1557
1558 if (m->m_flags & M_EXT)
1559 adjust = m->m_ext.ext_size - len;
1560 else if (m->m_flags & M_PKTHDR)
1561 adjust = MHLEN - len;
1562 else
1563 adjust = MLEN - len;
1564 m->m_data += adjust &~ (sizeof(long)-1);
1565}
1566
984263bc 1567/*
7b6f875f 1568 * Rearrange an mbuf chain so that len bytes are contiguous
9e4465af
MD
1569 * and in the data area of an mbuf (so that mtod will work for a structure
1570 * of size len). Returns the resulting mbuf chain on success, frees it and
1571 * returns null on failure. If there is room, it will add up to
1572 * max_protohdr-len extra bytes to the contiguous region in an attempt to
1573 * avoid being called next time.
984263bc 1574 */
984263bc 1575struct mbuf *
8a3125c6 1576m_pullup(struct mbuf *n, int len)
984263bc 1577{
1fd87d54
RG
1578 struct mbuf *m;
1579 int count;
984263bc
MD
1580 int space;
1581
1582 /*
1583 * If first mbuf has no cluster, and has room for len bytes
1584 * without shifting current data, pullup into it,
1585 * otherwise allocate a new mbuf to prepend to the chain.
1586 */
7b6f875f
JH
1587 if (!(n->m_flags & M_EXT) &&
1588 n->m_data + len < &n->m_dat[MLEN] &&
1589 n->m_next) {
984263bc
MD
1590 if (n->m_len >= len)
1591 return (n);
1592 m = n;
1593 n = n->m_next;
1594 len -= m->m_len;
1595 } else {
1596 if (len > MHLEN)
1597 goto bad;
c3ef87ca
MD
1598 if (n->m_flags & M_PKTHDR)
1599 m = m_gethdr(MB_DONTWAIT, n->m_type);
1600 else
1601 m = m_get(MB_DONTWAIT, n->m_type);
7b6f875f 1602 if (m == NULL)
984263bc
MD
1603 goto bad;
1604 m->m_len = 0;
1605 if (n->m_flags & M_PKTHDR)
1606 M_MOVE_PKTHDR(m, n);
1607 }
1608 space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
1609 do {
1610 count = min(min(max(len, max_protohdr), space), n->m_len);
1611 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
1612 (unsigned)count);
1613 len -= count;
1614 m->m_len += count;
1615 n->m_len -= count;
1616 space -= count;
1617 if (n->m_len)
1618 n->m_data += count;
1619 else
1620 n = m_free(n);
1621 } while (len > 0 && n);
1622 if (len > 0) {
7b6f875f 1623 m_free(m);
984263bc
MD
1624 goto bad;
1625 }
1626 m->m_next = n;
1627 return (m);
1628bad:
1629 m_freem(n);
4c1e2509 1630 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
7b6f875f 1631 return (NULL);
984263bc
MD
1632}
1633
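/*
 * Illustrative sketch (editor's addition): the classic m_pullup() idiom used
 * before dereferencing a protocol header; assumes <netinet/ip.h>.
 */
#if 0	/* example only */
	struct ip *ip;

	if (m->m_len < sizeof(struct ip) &&
	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
		return;		/* m_pullup() already freed the chain */
	ip = mtod(m, struct ip *);
#endif
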
1634/*
1635 * Partition an mbuf chain in two pieces, returning the tail --
1636 * all but the first len0 bytes. In case of failure, it returns NULL and
1637 * attempts to restore the chain to its original state.
1638 *
1639 * Note that the resulting mbufs might be read-only, because the new
1640 * mbuf can end up sharing an mbuf cluster with the original mbuf if
1641 * the "breaking point" happens to lie within a cluster mbuf. Use the
1642 * M_WRITABLE() macro to check for this case.
1643 */
1644struct mbuf *
8a3125c6 1645m_split(struct mbuf *m0, int len0, int wait)
984263bc 1646{
1fd87d54 1647 struct mbuf *m, *n;
984263bc
MD
1648 unsigned len = len0, remain;
1649
1650 for (m = m0; m && len > m->m_len; m = m->m_next)
1651 len -= m->m_len;
7b6f875f
JH
1652 if (m == NULL)
1653 return (NULL);
984263bc
MD
1654 remain = m->m_len - len;
1655 if (m0->m_flags & M_PKTHDR) {
7b6f875f
JH
1656 n = m_gethdr(wait, m0->m_type);
1657 if (n == NULL)
1658 return (NULL);
984263bc
MD
1659 n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
1660 n->m_pkthdr.len = m0->m_pkthdr.len - len0;
1661 m0->m_pkthdr.len = len0;
1662 if (m->m_flags & M_EXT)
1663 goto extpacket;
1664 if (remain > MHLEN) {
1665 /* m can't be the lead packet */
1666 MH_ALIGN(n, 0);
1667 n->m_next = m_split(m, len, wait);
7b6f875f
JH
1668 if (n->m_next == NULL) {
1669 m_free(n);
1670 return (NULL);
984263bc
MD
1671 } else {
1672 n->m_len = 0;
1673 return (n);
1674 }
1675 } else
1676 MH_ALIGN(n, remain);
1677 } else if (remain == 0) {
1678 n = m->m_next;
1679 m->m_next = 0;
1680 return (n);
1681 } else {
7b6f875f
JH
1682 n = m_get(wait, m->m_type);
1683 if (n == NULL)
1684 return (NULL);
984263bc
MD
1685 M_ALIGN(n, remain);
1686 }
1687extpacket:
1688 if (m->m_flags & M_EXT) {
c3ef87ca 1689 KKASSERT((n->m_flags & M_EXT) == 0);
984263bc 1690 n->m_data = m->m_data + len;
7b6f875f 1691 m->m_ext.ext_ref(m->m_ext.ext_arg);
7eccf245 1692 n->m_ext = m->m_ext;
b542cd49 1693 n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
984263bc
MD
1694 } else {
1695 bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
1696 }
1697 n->m_len = remain;
1698 m->m_len = len;
1699 n->m_next = m->m_next;
1700 m->m_next = 0;
1701 return (n);
1702}
50503f0f 1703
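/*
 * Illustrative sketch (editor's addition): as noted above, mbufs returned by
 * m_split()/m_copym() may share cluster storage; a caller that needs to
 * modify the data in place can check M_WRITABLE() and fall back to m_dup()
 * (packet-header chains only).
 */
#if 0	/* example only */
	if (!M_WRITABLE(m)) {
		struct mbuf *mw;

		mw = m_dup(m, MB_DONTWAIT);
		if (mw == NULL)
			return (ENOBUFS);
		m_freem(m);
		m = mw;
	}
#endif
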
984263bc
MD
1704/*
1705 * Routine to copy from device local memory into mbufs.
50503f0f 1706 * Note: "offset" is ill-defined and always called as 0, so ignore it.
984263bc
MD
1707 */
1708struct mbuf *
50503f0f
JH
1709m_devget(char *buf, int len, int offset, struct ifnet *ifp,
1710 void (*copy)(volatile const void *from, volatile void *to, size_t length))
984263bc 1711{
50503f0f
JH
1712 struct mbuf *m, *mfirst = NULL, **mtail;
1713 int nsize, flags;
1714
1715 if (copy == NULL)
1716 copy = bcopy;
1717 mtail = &mfirst;
1718 flags = M_PKTHDR;
1719
1720 while (len > 0) {
1721 m = m_getl(len, MB_DONTWAIT, MT_DATA, flags, &nsize);
1722 if (m == NULL) {
1723 m_freem(mfirst);
1724 return (NULL);
984263bc 1725 }
50503f0f
JH
1726 m->m_len = min(len, nsize);
1727
1728 if (flags & M_PKTHDR) {
1729 if (len + max_linkhdr <= nsize)
1730 m->m_data += max_linkhdr;
1731 m->m_pkthdr.rcvif = ifp;
1732 m->m_pkthdr.len = len;
1733 flags = 0;
984263bc 1734 }
50503f0f
JH
1735
1736 copy(buf, m->m_data, (unsigned)m->m_len);
1737 buf += m->m_len;
1738 len -= m->m_len;
1739 *mtail = m;
1740 mtail = &m->m_next;
984263bc 1741 }
50503f0f
JH
1742
1743 return (mfirst);
984263bc
MD
1744}
1745
cf12ba3c
SZ
1746/*
1747 * Routine to pad mbuf to the specified length 'padto'.
1748 */
1749int
1750m_devpad(struct mbuf *m, int padto)
1751{
1752 struct mbuf *last = NULL;
1753 int padlen;
1754
1755 if (padto <= m->m_pkthdr.len)
1756 return 0;
1757
1758 padlen = padto - m->m_pkthdr.len;
1759
1760 /* if there's only the packet-header and we can pad there, use it. */
1761 if (m->m_pkthdr.len == m->m_len && M_TRAILINGSPACE(m) >= padlen) {
1762 last = m;
1763 } else {
1764 /*
1765 * Walk packet chain to find last mbuf. We will either
1766 * pad there, or append a new mbuf and pad it
1767 */
1768 for (last = m; last->m_next != NULL; last = last->m_next)
1769 ; /* EMPTY */
1770
1771 /* `last' now points to last in chain. */
1772 if (M_TRAILINGSPACE(last) < padlen) {
1773 struct mbuf *n;
1774
1775 /* Allocate new empty mbuf, pad it. Compact later. */
1776 MGET(n, MB_DONTWAIT, MT_DATA);
1777 if (n == NULL)
1778 return ENOBUFS;
1779 n->m_len = 0;
1780 last->m_next = n;
1781 last = n;
1782 }
1783 }
1784 KKASSERT(M_TRAILINGSPACE(last) >= padlen);
1785 KKASSERT(M_WRITABLE(last));
1786
1787 /* Now zero the pad area */
1788 bzero(mtod(last, char *) + last->m_len, padlen);
1789 last->m_len += padlen;
1790 m->m_pkthdr.len += padlen;
1791 return 0;
1792}
1793
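/*
 * Illustrative sketch (editor's addition): padding a short Ethernet frame to
 * the 60-byte minimum (excluding CRC) before handing it to hardware.
 */
#if 0	/* example only */
	if (m->m_pkthdr.len < 60 && m_devpad(m, 60) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
#endif
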
984263bc
MD
1794/*
1795 * Copy data from a buffer back into the indicated mbuf chain,
1796 * starting "off" bytes from the beginning, extending the mbuf
1797 * chain if necessary.
1798 */
1799void
8a3125c6 1800m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
984263bc 1801{
1fd87d54
RG
1802 int mlen;
1803 struct mbuf *m = m0, *n;
984263bc
MD
1804 int totlen = 0;
1805
7b6f875f 1806 if (m0 == NULL)
984263bc
MD
1807 return;
1808 while (off > (mlen = m->m_len)) {
1809 off -= mlen;
1810 totlen += mlen;
7b6f875f 1811 if (m->m_next == NULL) {
74f1caca 1812 n = m_getclr(MB_DONTWAIT, m->m_type);
7b6f875f 1813 if (n == NULL)
984263bc
MD
1814 goto out;
1815 n->m_len = min(MLEN, len + off);
1816 m->m_next = n;
1817 }
1818 m = m->m_next;
1819 }
1820 while (len > 0) {
1821 mlen = min (m->m_len - off, len);
1822 bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
1823 cp += mlen;
1824 len -= mlen;
1825 mlen += off;
1826 off = 0;
1827 totlen += mlen;
1828 if (len == 0)
1829 break;
7b6f875f 1830 if (m->m_next == NULL) {
74f1caca 1831 n = m_get(MB_DONTWAIT, m->m_type);
7b6f875f 1832 if (n == NULL)
984263bc
MD
1833 break;
1834 n->m_len = min(MLEN, len);
1835 m->m_next = n;
1836 }
1837 m = m->m_next;
1838 }
1839out: if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
1840 m->m_pkthdr.len = totlen;
1841}
1842
/*
 * Append the specified data to the indicated mbuf chain.
 * Extend the mbuf chain if the new data does not fit in
 * existing space.
 *
 * Return 1 if able to complete the job; otherwise 0.
 */
int
m_append(struct mbuf *m0, int len, c_caddr_t cp)
{
	struct mbuf *m, *n;
	int remainder, space;

	for (m = m0; m->m_next != NULL; m = m->m_next)
		;
	remainder = len;
	space = M_TRAILINGSPACE(m);
	if (space > 0) {
		/*
		 * Copy into available space.
		 */
		if (space > remainder)
			space = remainder;
		bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
		m->m_len += space;
		cp += space, remainder -= space;
	}
	while (remainder > 0) {
		/*
		 * Allocate a new mbuf; could check space
		 * and allocate a cluster instead.
		 */
		n = m_get(MB_DONTWAIT, m->m_type);
		if (n == NULL)
			break;
		n->m_len = min(MLEN, remainder);
		bcopy(cp, mtod(n, caddr_t), n->m_len);
		cp += n->m_len, remainder -= n->m_len;
		m->m_next = n;
		m = n;
	}
	if (m0->m_flags & M_PKTHDR)
		m0->m_pkthdr.len += len - remainder;
	return (remainder == 0);
}

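/*
 * Usage sketch for m_append(): add a small trailer to a packet,
 * growing the chain if the last mbuf has no room.  The option bytes
 * here are made up for illustration; a zero return means the append
 * could not be completed:
 *
 *	static const u_char pad_opt[4] = { 0x01, 0x01, 0x01, 0x00 };
 *
 *	if (m_append(m, sizeof(pad_opt), (c_caddr_t)pad_opt) == 0) {
 *		m_freem(m);
 *		return (ENOBUFS);
 *	}
 */
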
/*
 * Apply function f to the data in an mbuf chain starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
int
m_apply(struct mbuf *m, int off, int len,
    int (*f)(void *, void *, u_int), void *arg)
{
	u_int count;
	int rval;

	KASSERT(off >= 0, ("m_apply, negative off %d", off));
	KASSERT(len >= 0, ("m_apply, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
		count = min(m->m_len - off, len);
		rval = (*f)(arg, mtod(m, caddr_t) + off, count);
		if (rval)
			return (rval);
		len -= count;
		off = 0;
		m = m->m_next;
	}
	return (0);
}

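/*
 * Usage sketch for m_apply(): the callback is invoked once for each
 * mbuf segment overlapping [off, off+len), and a non-zero return
 * aborts the walk.  A hypothetical byte-sum over a payload region
 * could look like this (m_sum_func and hdrlen are assumptions, not
 * existing kernel symbols):
 *
 *	static int
 *	m_sum_func(void *arg, void *data, u_int len)
 *	{
 *		uint32_t *sump = arg;
 *		const u_char *p = data;
 *
 *		while (len-- > 0)
 *			*sump += *p++;
 *		return (0);
 *	}
 *
 *	uint32_t sum = 0;
 *	error = m_apply(m, hdrlen, m->m_pkthdr.len - hdrlen,
 *	    m_sum_func, &sum);
 */
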
/*
 * Return a pointer to mbuf/offset of location in mbuf chain.
 */
struct mbuf *
m_getptr(struct mbuf *m, int loc, int *off)
{

	while (loc >= 0) {
		/* Normal end of search. */
		if (m->m_len > loc) {
			*off = loc;
			return (m);
		} else {
			loc -= m->m_len;
			if (m->m_next == NULL) {
				if (loc == 0) {
					/* Point at the end of valid data. */
					*off = m->m_len;
					return (m);
				}
				return (NULL);
			}
			m = m->m_next;
		}
	}
	return (NULL);
}

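/*
 * Usage sketch for m_getptr(): translate a byte offset within a chain
 * into an (mbuf, offset) pair.  Only the bytes within that one mbuf
 * are contiguous; larger contiguous access needs m_copydata() or
 * m_pullup().  payload_off is a hypothetical caller variable:
 *
 *	int off;
 *	struct mbuf *n;
 *
 *	n = m_getptr(m, payload_off, &off);
 *	if (n != NULL)
 *		p = mtod(n, u_char *) + off;
 */
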
/*
 * Debugging aid: dump the contents of an mbuf chain to the console.
 */
void
m_print(const struct mbuf *m)
{
	int len;
	const struct mbuf *m2;

	len = m->m_pkthdr.len;
	m2 = m;
	while (len) {
		kprintf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
		len -= m2->m_len;
		m2 = m2->m_next;
	}
	return;
}

/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{
	KASSERT((to->m_flags & M_PKTHDR), ("m_move_pkthdr: not packet header"));

	to->m_flags |= from->m_flags & M_COPYFLAGS;
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
}

/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
{
	KASSERT((to->m_flags & M_PKTHDR), ("m_dup_pkthdr: not packet header"));

	to->m_flags = (from->m_flags & M_COPYFLAGS) |
		      (to->m_flags & ~M_COPYFLAGS);
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, how));
}

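/*
 * Usage sketch: when prepending a freshly allocated header mbuf to an
 * existing packet, the pkthdr (including tags) can be duplicated with
 * m_dup_pkthdr(), or transferred with m_move_pkthdr() when the old
 * header is being abandoned.  A zero return from m_dup_pkthdr() means
 * the tag copy failed:
 *
 *	MGETHDR(n, MB_DONTWAIT, MT_DATA);
 *	if (n == NULL)
 *		return (ENOBUFS);
 *	if (m_dup_pkthdr(n, m, MB_DONTWAIT) == 0) {
 *		m_free(n);
 *		return (ENOBUFS);
 *	}
 */
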
/*
 * Defragment a mbuf chain, returning the shortest possible
 * chain of mbufs and clusters.  If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed in chain will be unchanged.  Upon success,
 * the original chain will be freed, and the new chain
 * will be returned.
 *
 * If a non-packet header is passed in, the original
 * mbuf (chain?) will be returned unharmed.
 *
 * m_defrag_nofree doesn't free the passed in mbuf.
 */
struct mbuf *
m_defrag(struct mbuf *m0, int how)
{
	struct mbuf *m_new;

	if ((m_new = m_defrag_nofree(m0, how)) == NULL)
		return (NULL);
	if (m_new != m0)
		m_freem(m0);
	return (m_new);
}

struct mbuf *
m_defrag_nofree(struct mbuf *m0, int how)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, length, nsize;

	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

#ifdef MBUF_STRESS_TEST
	if (m_defragrandomfailures) {
		int temp = karc4random() & 0xff;
		if (temp == 0xba)
			goto nospace;
	}
#endif

	m_final = m_getl(m0->m_pkthdr.len, how, MT_DATA, M_PKTHDR, &nsize);
	if (m_final == NULL)
		goto nospace;
	m_final->m_len = 0;	/* in case m0->m_pkthdr.len is zero */

	if (m_dup_pkthdr(m_final, m0, how) == 0)
		goto nospace;

	m_new = m_final;

	while (progress < m0->m_pkthdr.len) {
		length = m0->m_pkthdr.len - progress;
		if (length > MCLBYTES)
			length = MCLBYTES;

		if (m_new == NULL) {
			m_new = m_getl(length, how, MT_DATA, 0, &nsize);
			if (m_new == NULL)
				goto nospace;
		}

		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
		progress += length;
		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
	if (m0->m_next == NULL)
		m_defraguseless++;
	m_defragpackets++;
	m_defragbytes += m_final->m_pkthdr.len;
	return (m_final);
nospace:
	m_defragfailure++;
	if (m_new)
		m_free(m_new);
	m_freem(m_final);
	return (NULL);
}
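
/*
 * Usage sketch for m_defrag(): drivers with a limited number of DMA
 * segments typically defragment a long chain before retrying the
 * load.  On success the original chain has already been freed; on
 * failure it is left untouched and the caller decides its fate:
 *
 *	struct mbuf *m_new;
 *
 *	m_new = m_defrag(m, MB_DONTWAIT);
 *	if (m_new == NULL) {
 *		m_freem(m);
 *		return (ENOBUFS);
 *	}
 *	m = m_new;
 */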

/*
 * Move data from uio into mbufs.
 */
struct mbuf *
m_uiomove(struct uio *uio)
{
	struct mbuf *m;			/* current working mbuf */
	struct mbuf *head = NULL;	/* result mbuf chain */
	struct mbuf **mp = &head;
	int flags = M_PKTHDR;
	int nsize;
	int error;
	int resid;

	do {
		if (uio->uio_resid > INT_MAX)
			resid = INT_MAX;
		else
			resid = (int)uio->uio_resid;
		m = m_getl(resid, MB_WAIT, MT_DATA, flags, &nsize);
		if (flags) {
			m->m_pkthdr.len = 0;
			/* Leave room for protocol headers. */
			if (resid < MHLEN)
				MH_ALIGN(m, resid);
			flags = 0;
		}
		m->m_len = imin(nsize, resid);
		error = uiomove(mtod(m, caddr_t), m->m_len, uio);
		if (error) {
			m_free(m);
			goto failed;
		}
		*mp = m;
		mp = &m->m_next;
		head->m_pkthdr.len += m->m_len;
	} while (uio->uio_resid > 0);

	return (head);

failed:
	m_freem(head);
	return (NULL);
}
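
/*
 * Usage sketch for m_uiomove(): a write path can turn the data
 * described by a struct uio into a packet-header mbuf chain in one
 * call; m->m_pkthdr.len then reflects the number of bytes moved:
 *
 *	struct mbuf *m;
 *
 *	m = m_uiomove(uio);
 *	if (m == NULL)
 *		return (ENOBUFS);
 */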

/*
 * Return the last mbuf in a chain.
 */
struct mbuf *
m_last(struct mbuf *m)
{
	while (m->m_next)
		m = m->m_next;
	return (m);
}

/*
 * Return the number of bytes in an mbuf chain.
 * If lastm is not NULL, also return the last mbuf.
 */
u_int
m_lengthm(struct mbuf *m, struct mbuf **lastm)
{
	u_int len = 0;
	struct mbuf *prev = m;

	while (m) {
		len += m->m_len;
		prev = m;
		m = m->m_next;
	}
	if (lastm != NULL)
		*lastm = prev;
	return (len);
}

/*
 * Like m_lengthm(), except also keep track of mbuf usage.
 */
u_int
m_countm(struct mbuf *m, struct mbuf **lastm, u_int *pmbcnt)
{
	u_int len = 0, mbcnt = 0;
	struct mbuf *prev = m;

	while (m) {
		len += m->m_len;
		mbcnt += MSIZE;
		if (m->m_flags & M_EXT)
			mbcnt += m->m_ext.ext_size;
		prev = m;
		m = m->m_next;
	}
	if (lastm != NULL)
		*lastm = prev;
	*pmbcnt = mbcnt;
	return (len);
}
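
/*
 * Usage sketch: m_lengthm() and m_countm() are convenient when
 * queueing, e.g. to account for both the data bytes and the mbuf
 * storage of a chain in a single pass, much as socket-buffer
 * bookkeeping does (the sb fields below stand in for whatever
 * accounting the caller maintains):
 *
 *	struct mbuf *last;
 *	u_int bytes, mbcnt;
 *
 *	bytes = m_countm(m, &last, &mbcnt);
 *	sb->sb_cc += bytes;
 *	sb->sb_mbcnt += mbcnt;
 */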