mbuf - Add MJUMPAGESIZE mbuf cluster support.
[dragonfly.git] / sys/kern/uipc_mbuf.c
984263bc 1/*
5bd48c1d
MD
2 * (MPSAFE)
3 *
0c33f36d 4 * Copyright (c) 2004 Jeffrey M. Hsu. All rights reserved.
66d6c637
JH
5 * Copyright (c) 2004 The DragonFly Project. All rights reserved.
6 *
7 * This code is derived from software contributed to The DragonFly Project
8 * by Jeffrey M. Hsu.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of The DragonFly Project nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific, prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 */
35
36/*
984263bc
MD
37 * Copyright (c) 1982, 1986, 1988, 1991, 1993
38 * The Regents of the University of California. All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by the University of
51 * California, Berkeley and its contributors.
52 * 4. Neither the name of the University nor the names of its contributors
53 * may be used to endorse or promote products derived from this software
54 * without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
57 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
58 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
59 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
60 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
61 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
62 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
64 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
65 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66 * SUCH DAMAGE.
67 *
8a3125c6 68 * @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94
984263bc 69 * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
3f98f485 70 * $DragonFly: src/sys/kern/uipc_mbuf.c,v 1.70 2008/11/20 14:21:01 sephe Exp $
984263bc
MD
71 */
72
73#include "opt_param.h"
74#include "opt_mbuf_stress_test.h"
75#include <sys/param.h>
76#include <sys/systm.h>
4e23f366 77#include <sys/file.h>
984263bc
MD
78#include <sys/malloc.h>
79#include <sys/mbuf.h>
80#include <sys/kernel.h>
81#include <sys/sysctl.h>
82#include <sys/domain.h>
7b6f875f 83#include <sys/objcache.h>
e9fa4b60 84#include <sys/tree.h>
984263bc 85#include <sys/protosw.h>
0c33f36d 86#include <sys/uio.h>
ef0fdad1 87#include <sys/thread.h>
a2a5ad0d 88#include <sys/globaldata.h>
5bd48c1d 89
90775e29 90#include <sys/thread2.h>
5bd48c1d 91#include <sys/spinlock2.h>
984263bc 92
1d16b2b5 93#include <machine/atomic.h>
e54488bb 94#include <machine/limits.h>
1d16b2b5 95
984263bc
MD
96#include <vm/vm.h>
97#include <vm/vm_kern.h>
98#include <vm/vm_extern.h>
99
100#ifdef INVARIANTS
101#include <machine/cpu.h>
102#endif
103
90775e29
MD
104/*
105 * mbuf cluster meta-data
106 */
7b6f875f 107struct mbcluster {
90775e29
MD
108 int32_t mcl_refs;
109 void *mcl_data;
7b6f875f 110};
90775e29 111
e9fa4b60
MD
112/*
113 * mbuf tracking for debugging purposes
114 */
115#ifdef MBUF_DEBUG
116
117static MALLOC_DEFINE(M_MTRACK, "mtrack", "mtrack");
118
 119struct mbtrack;
120RB_HEAD(mbuf_rb_tree, mbtrack);
121RB_PROTOTYPE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *);
122
123struct mbtrack {
124 RB_ENTRY(mbtrack) rb_node;
125 int trackid;
126 struct mbuf *m;
127};
128
129static int
130mbtrack_cmp(struct mbtrack *mb1, struct mbtrack *mb2)
131{
132 if (mb1->m < mb2->m)
133 return(-1);
134 if (mb1->m > mb2->m)
135 return(1);
136 return(0);
137}
138
139RB_GENERATE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *, m);
140
141struct mbuf_rb_tree mbuf_track_root;
5bd48c1d 142static struct spinlock mbuf_track_spin = SPINLOCK_INITIALIZER(mbuf_track_spin);
e9fa4b60
MD
143
144static void
145mbuftrack(struct mbuf *m)
146{
147 struct mbtrack *mbt;
148
e9fa4b60 149 mbt = kmalloc(sizeof(*mbt), M_MTRACK, M_INTWAIT|M_ZERO);
5bd48c1d 150 spin_lock(&mbuf_track_spin);
e9fa4b60 151 mbt->m = m;
5bd48c1d
MD
152 if (mbuf_rb_tree_RB_INSERT(&mbuf_track_root, mbt)) {
153 spin_unlock(&mbuf_track_spin);
e9fa4b60 154 panic("mbuftrack: mbuf %p already being tracked\n", m);
5bd48c1d
MD
155 }
156 spin_unlock(&mbuf_track_spin);
e9fa4b60
MD
157}
158
159static void
160mbufuntrack(struct mbuf *m)
161{
162 struct mbtrack *mbt;
163
5bd48c1d 164 spin_lock(&mbuf_track_spin);
e9fa4b60
MD
165 mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
166 if (mbt == NULL) {
5bd48c1d
MD
167 spin_unlock(&mbuf_track_spin);
168 panic("mbufuntrack: mbuf %p was not tracked\n", m);
e9fa4b60
MD
169 } else {
170 mbuf_rb_tree_RB_REMOVE(&mbuf_track_root, mbt);
6cef7136 171 spin_unlock(&mbuf_track_spin);
e9fa4b60
MD
172 kfree(mbt, M_MTRACK);
173 }
e9fa4b60
MD
174}
175
176void
177mbuftrackid(struct mbuf *m, int trackid)
178{
179 struct mbtrack *mbt;
180 struct mbuf *n;
181
5bd48c1d 182 spin_lock(&mbuf_track_spin);
e9fa4b60
MD
183 while (m) {
184 n = m->m_nextpkt;
185 while (m) {
186 mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
5bd48c1d
MD
187 if (mbt == NULL) {
188 spin_unlock(&mbuf_track_spin);
189 panic("mbuftrackid: mbuf %p not tracked", m);
190 }
191 mbt->trackid = trackid;
e9fa4b60
MD
192 m = m->m_next;
193 }
194 m = n;
195 }
5bd48c1d 196 spin_unlock(&mbuf_track_spin);
e9fa4b60
MD
197}
198
199static int
200mbuftrack_callback(struct mbtrack *mbt, void *arg)
201{
202 struct sysctl_req *req = arg;
203 char buf[64];
204 int error;
205
206 ksnprintf(buf, sizeof(buf), "mbuf %p track %d\n", mbt->m, mbt->trackid);
207
5bd48c1d 208 spin_unlock(&mbuf_track_spin);
e9fa4b60 209 error = SYSCTL_OUT(req, buf, strlen(buf));
5bd48c1d 210 spin_lock(&mbuf_track_spin);
e9fa4b60
MD
211 if (error)
212 return(-error);
213 return(0);
214}
215
216static int
217mbuftrack_show(SYSCTL_HANDLER_ARGS)
218{
219 int error;
220
5bd48c1d 221 spin_lock(&mbuf_track_spin);
e9fa4b60
MD
222 error = mbuf_rb_tree_RB_SCAN(&mbuf_track_root, NULL,
223 mbuftrack_callback, req);
5bd48c1d 224 spin_unlock(&mbuf_track_spin);
e9fa4b60
MD
225 return (-error);
226}
227SYSCTL_PROC(_kern_ipc, OID_AUTO, showmbufs, CTLFLAG_RD|CTLTYPE_STRING,
228 0, 0, mbuftrack_show, "A", "Show all in-use mbufs");
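/*
 * Illustrative note: with a kernel built with 'options MBUF_DEBUG', the
 * tracking tree above can be dumped from userland through the sysctl
 * registered here, e.g.
 *
 *	sysctl kern.ipc.showmbufs
 *
 * Each output line names one in-use mbuf and the track id last assigned
 * to it with mbuftrackid().
 */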
229
230#else
231
232#define mbuftrack(m)
233#define mbufuntrack(m)
234
235#endif
236
7b6f875f 237static void mbinit(void *);
ba39e2e0 238SYSINIT(mbuf, SI_BOOT2_MACHDEP, SI_ORDER_FIRST, mbinit, NULL)
984263bc 239
4c1e2509 240static u_long mbtypes[SMP_MAXCPU][MT_NTYPES];
90775e29 241
4c1e2509 242static struct mbstat mbstat[SMP_MAXCPU];
984263bc
MD
243int max_linkhdr;
244int max_protohdr;
245int max_hdr;
246int max_datalen;
247int m_defragpackets;
248int m_defragbytes;
249int m_defraguseless;
250int m_defragfailure;
251#ifdef MBUF_STRESS_TEST
252int m_defragrandomfailures;
253#endif
254
7b6f875f 255struct objcache *mbuf_cache, *mbufphdr_cache;
33dbeae8 256struct objcache *mclmeta_cache, *mjclmeta_cache;
7b6f875f 257struct objcache *mbufcluster_cache, *mbufphdrcluster_cache;
33dbeae8 258struct objcache *mbufjcluster_cache, *mbufphdrjcluster_cache;
7b6f875f 259
984263bc
MD
260int nmbclusters;
261int nmbufs;
984263bc 262
984263bc 263SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
093e85dc 264 &max_linkhdr, 0, "Max size of a link-level header");
984263bc 265SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
093e85dc
SG
266 &max_protohdr, 0, "Max size of a protocol header");
267SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0,
268 "Max size of link+protocol headers");
984263bc 269SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
093e85dc 270 &max_datalen, 0, "Max data payload size without headers");
984263bc 271SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
093e85dc 272 &mbuf_wait, 0, "Time in ticks to sleep after failed mbuf allocations");
4c1e2509
JT
273static int do_mbstat(SYSCTL_HANDLER_ARGS);
274
275SYSCTL_PROC(_kern_ipc, KIPC_MBSTAT, mbstat, CTLTYPE_STRUCT|CTLFLAG_RD,
093e85dc 276 0, 0, do_mbstat, "S,mbstat", "mbuf usage statistics");
4c1e2509
JT
277
278static int do_mbtypes(SYSCTL_HANDLER_ARGS);
279
280SYSCTL_PROC(_kern_ipc, OID_AUTO, mbtypes, CTLTYPE_ULONG|CTLFLAG_RD,
281 0, 0, do_mbtypes, "LU", "");
282
283static int
284do_mbstat(SYSCTL_HANDLER_ARGS)
285{
286 struct mbstat mbstat_total;
287 struct mbstat *mbstat_totalp;
288 int i;
289
290 bzero(&mbstat_total, sizeof(mbstat_total));
291 mbstat_totalp = &mbstat_total;
292
293 for (i = 0; i < ncpus; i++)
294 {
295 mbstat_total.m_mbufs += mbstat[i].m_mbufs;
296 mbstat_total.m_clusters += mbstat[i].m_clusters;
297 mbstat_total.m_spare += mbstat[i].m_spare;
298 mbstat_total.m_clfree += mbstat[i].m_clfree;
299 mbstat_total.m_drops += mbstat[i].m_drops;
300 mbstat_total.m_wait += mbstat[i].m_wait;
301 mbstat_total.m_drain += mbstat[i].m_drain;
302 mbstat_total.m_mcfail += mbstat[i].m_mcfail;
303 mbstat_total.m_mpfail += mbstat[i].m_mpfail;
304
305 }
306 /*
307 * The following fields are not cumulative fields so just
308 * get their values once.
309 */
310 mbstat_total.m_msize = mbstat[0].m_msize;
311 mbstat_total.m_mclbytes = mbstat[0].m_mclbytes;
312 mbstat_total.m_minclsize = mbstat[0].m_minclsize;
313 mbstat_total.m_mlen = mbstat[0].m_mlen;
314 mbstat_total.m_mhlen = mbstat[0].m_mhlen;
315
316 return(sysctl_handle_opaque(oidp, mbstat_totalp, sizeof(mbstat_total), req));
317}
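/*
 * Illustrative example: userland normally consumes the aggregated
 * structure via netstat -m, but it can also be fetched directly with
 * sysctl(3).  The layout is the kernel's own struct mbstat:
 *
 *	struct mbstat mbs;
 *	size_t len = sizeof(mbs);
 *
 *	if (sysctlbyname("kern.ipc.mbstat", &mbs, &len, NULL, 0) == 0)
 *		printf("%lu mbufs in use\n", (u_long)mbs.m_mbufs);
 */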
318
319static int
320do_mbtypes(SYSCTL_HANDLER_ARGS)
321{
322 u_long totals[MT_NTYPES];
323 int i, j;
324
325 for (i = 0; i < MT_NTYPES; i++)
326 totals[i] = 0;
327
328 for (i = 0; i < ncpus; i++)
329 {
330 for (j = 0; j < MT_NTYPES; j++)
331 totals[j] += mbtypes[i][j];
332 }
333
334 return(sysctl_handle_opaque(oidp, totals, sizeof(totals), req));
335}
18c48b9c
MD
336
337/*
338 * These are read-only because we do not currently have any code
339 * to adjust the objcache limits after the fact. The variables
340 * may only be set as boot-time tunables.
341 */
342SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
984263bc 343 &nmbclusters, 0, "Maximum number of mbuf clusters available");
18c48b9c 344SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
984263bc 345 "Maximum number of mbufs available");
7b6f875f 346
984263bc 347SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
093e85dc 348 &m_defragpackets, 0, "Number of defragment packets");
984263bc 349SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
093e85dc 350 &m_defragbytes, 0, "Number of defragment bytes");
984263bc 351SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
093e85dc 352 &m_defraguseless, 0, "Number of useless defragment mbuf chain operations");
984263bc 353SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
093e85dc 354 &m_defragfailure, 0, "Number of failed defragment mbuf chain operations");
984263bc
MD
355#ifdef MBUF_STRESS_TEST
356SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
357 &m_defragrandomfailures, 0, "");
358#endif
359
90775e29
MD
360static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");
361static MALLOC_DEFINE(M_MBUFCL, "mbufcl", "mbufcl");
33dbeae8 362static MALLOC_DEFINE(M_MJBUFCL, "mjbufcl", "mjbufcl");
7b6f875f 363static MALLOC_DEFINE(M_MCLMETA, "mclmeta", "mclmeta");
33dbeae8 364static MALLOC_DEFINE(M_MJCLMETA, "mjclmeta", "mjclmeta");
90775e29
MD
365
366static void m_reclaim (void);
90775e29
MD
367static void m_mclref(void *arg);
368static void m_mclfree(void *arg);
984263bc 369
4e23f366
MD
370/*
371 * NOTE: Default NMBUFS must take into account a possible DOS attack
372 * using fd passing on unix domain sockets.
373 */
984263bc
MD
374#ifndef NMBCLUSTERS
375#define NMBCLUSTERS (512 + maxusers * 16)
376#endif
377#ifndef NMBUFS
4e23f366 378#define NMBUFS (nmbclusters * 2 + maxfiles)
984263bc
MD
379#endif
380
381/*
382 * Perform sanity checks of tunables declared above.
383 */
384static void
385tunable_mbinit(void *dummy)
386{
984263bc
MD
387 /*
388 * This has to be done before VM init.
389 */
390 nmbclusters = NMBCLUSTERS;
391 TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
392 nmbufs = NMBUFS;
393 TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
394 /* Sanity checks */
395 if (nmbufs < nmbclusters * 2)
396 nmbufs = nmbclusters * 2;
984263bc 397}
ba39e2e0
MD
398SYSINIT(tunable_mbinit, SI_BOOT1_TUNABLES, SI_ORDER_ANY,
399 tunable_mbinit, NULL);
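/*
 * Illustrative example: both limits are boot-time tunables and may be
 * raised from /boot/loader.conf (the values below are arbitrary):
 *
 *	kern.ipc.nmbclusters="131072"
 *	kern.ipc.nmbufs="262144"
 *
 * tunable_mbinit() still enforces nmbufs >= nmbclusters * 2 afterwards.
 */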
984263bc
MD
400
401/* "number of clusters of pages" */
402#define NCL_INIT 1
403
404#define NMB_INIT 16
405
7b6f875f
JH
406/*
407 * The mbuf object cache only guarantees that m_next and m_nextpkt are
408 * NULL and that m_data points to the beginning of the data area. In
409 * particular, m_len and m_pkthdr.len are uninitialized. It is the
410 * responsibility of the caller to initialize those fields before use.
411 */
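/*
 * Sketch of the implied contract (illustrative only): a consumer pulling
 * a raw mbuf straight out of the objcache must finish the initialization
 * itself, e.g.
 *
 *	struct mbuf *m = objcache_get(mbuf_cache, MBTOM(MB_WAIT));
 *	if (m != NULL) {
 *		m->m_len = 0;		(not set by mbuf_ctor())
 *		m->m_type = MT_DATA;	(type is also caller-supplied)
 *	}
 *
 * The allocators below (m_get(), m_gethdr(), m_getcl(), ...) do exactly
 * this before handing the mbuf out.
 */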
412
413static boolean_t __inline
414mbuf_ctor(void *obj, void *private, int ocflags)
984263bc 415{
7b6f875f 416 struct mbuf *m = obj;
984263bc 417
7b6f875f
JH
418 m->m_next = NULL;
419 m->m_nextpkt = NULL;
420 m->m_data = m->m_dat;
421 m->m_flags = 0;
422
423 return (TRUE);
984263bc
MD
424}
425
426/*
7b6f875f 427 * Initialize the mbuf and the packet header fields.
984263bc 428 */
7b6f875f
JH
429static boolean_t
430mbufphdr_ctor(void *obj, void *private, int ocflags)
984263bc 431{
7b6f875f 432 struct mbuf *m = obj;
984263bc 433
7b6f875f
JH
434 m->m_next = NULL;
435 m->m_nextpkt = NULL;
436 m->m_data = m->m_pktdat;
77e294a1 437 m->m_flags = M_PKTHDR | M_PHCACHE;
984263bc 438
7b6f875f
JH
439 m->m_pkthdr.rcvif = NULL; /* eliminate XXX JH */
440 SLIST_INIT(&m->m_pkthdr.tags);
441 m->m_pkthdr.csum_flags = 0; /* eliminate XXX JH */
442 m->m_pkthdr.fw_flags = 0; /* eliminate XXX JH */
443
444 return (TRUE);
984263bc
MD
445}
446
447/*
7b6f875f 448 * An mbcluster object consists of a 2K (MCLBYTES) cluster and a refcount.
984263bc 449 */
7b6f875f
JH
450static boolean_t
451mclmeta_ctor(void *obj, void *private, int ocflags)
984263bc 452{
7b6f875f
JH
453 struct mbcluster *cl = obj;
454 void *buf;
455
456 if (ocflags & M_NOWAIT)
efda3bd0 457 buf = kmalloc(MCLBYTES, M_MBUFCL, M_NOWAIT | M_ZERO);
7b6f875f 458 else
efda3bd0 459 buf = kmalloc(MCLBYTES, M_MBUFCL, M_INTWAIT | M_ZERO);
7b6f875f
JH
460 if (buf == NULL)
461 return (FALSE);
77e294a1 462 cl->mcl_refs = 0;
7b6f875f
JH
463 cl->mcl_data = buf;
464 return (TRUE);
465}
984263bc 466
33dbeae8
JT
467static boolean_t
468mjclmeta_ctor(void *obj, void *private, int ocflags)
469{
470 struct mbcluster *cl = obj;
471 void *buf;
472
473 if (ocflags & M_NOWAIT)
474 buf = kmalloc(MJUMPAGESIZE, M_MBUFCL, M_NOWAIT | M_ZERO);
475 else
476 buf = kmalloc(MJUMPAGESIZE, M_MBUFCL, M_INTWAIT | M_ZERO);
477 if (buf == NULL)
478 return (FALSE);
479 cl->mcl_refs = 0;
480 cl->mcl_data = buf;
481 return (TRUE);
482}
483
7b6f875f 484static void
c3ef87ca
MD
485mclmeta_dtor(void *obj, void *private)
486{
487 struct mbcluster *mcl = obj;
488
489 KKASSERT(mcl->mcl_refs == 0);
efda3bd0 490 kfree(mcl->mcl_data, M_MBUFCL);
c3ef87ca
MD
491}
492
493static void
33dbeae8 494linkjcluster(struct mbuf *m, struct mbcluster *cl, uint size)
7b6f875f 495{
984263bc 496 /*
7b6f875f
JH
497 * Add the cluster to the mbuf. The caller will detect that the
498 * mbuf now has an attached cluster.
984263bc 499 */
7b6f875f
JH
500 m->m_ext.ext_arg = cl;
501 m->m_ext.ext_buf = cl->mcl_data;
502 m->m_ext.ext_ref = m_mclref;
503 m->m_ext.ext_free = m_mclfree;
33dbeae8 504 m->m_ext.ext_size = size;
df8d1020 505 atomic_add_int(&cl->mcl_refs, 1);
984263bc 506
7b6f875f
JH
507 m->m_data = m->m_ext.ext_buf;
508 m->m_flags |= M_EXT | M_EXT_CLUSTER;
984263bc
MD
509}
510
33dbeae8
JT
511static void
512linkcluster(struct mbuf *m, struct mbcluster *cl)
513{
514 linkjcluster(m, cl, MCLBYTES);
515}
516
7b6f875f
JH
517static boolean_t
518mbufphdrcluster_ctor(void *obj, void *private, int ocflags)
519{
520 struct mbuf *m = obj;
521 struct mbcluster *cl;
522
523 mbufphdr_ctor(obj, private, ocflags);
524 cl = objcache_get(mclmeta_cache, ocflags);
a5955b15
MD
525 if (cl == NULL) {
526 ++mbstat[mycpu->gd_cpuid].m_drops;
7b6f875f 527 return (FALSE);
a5955b15 528 }
77e294a1 529 m->m_flags |= M_CLCACHE;
7b6f875f
JH
530 linkcluster(m, cl);
531 return (TRUE);
532}
984263bc 533
7b6f875f 534static boolean_t
33dbeae8
JT
535mbufphdrjcluster_ctor(void *obj, void *private, int ocflags)
536{
537 struct mbuf *m = obj;
538 struct mbcluster *cl;
539
540 mbufphdr_ctor(obj, private, ocflags);
541 cl = objcache_get(mjclmeta_cache, ocflags);
542 if (cl == NULL) {
543 ++mbstat[mycpu->gd_cpuid].m_drops;
544 return (FALSE);
545 }
546 m->m_flags |= M_CLCACHE;
547 linkjcluster(m, cl, MJUMPAGESIZE);
548 return (TRUE);
549}
550
551static boolean_t
7b6f875f 552mbufcluster_ctor(void *obj, void *private, int ocflags)
984263bc 553{
7b6f875f
JH
554 struct mbuf *m = obj;
555 struct mbcluster *cl;
556
557 mbuf_ctor(obj, private, ocflags);
558 cl = objcache_get(mclmeta_cache, ocflags);
a5955b15
MD
559 if (cl == NULL) {
560 ++mbstat[mycpu->gd_cpuid].m_drops;
7b6f875f 561 return (FALSE);
a5955b15 562 }
77e294a1 563 m->m_flags |= M_CLCACHE;
7b6f875f
JH
564 linkcluster(m, cl);
565 return (TRUE);
566}
984263bc 567
33dbeae8
JT
568static boolean_t
569mbufjcluster_ctor(void *obj, void *private, int ocflags)
570{
571 struct mbuf *m = obj;
572 struct mbcluster *cl;
573
574 mbuf_ctor(obj, private, ocflags);
575 cl = objcache_get(mjclmeta_cache, ocflags);
576 if (cl == NULL) {
577 ++mbstat[mycpu->gd_cpuid].m_drops;
578 return (FALSE);
579 }
580 m->m_flags |= M_CLCACHE;
581 linkjcluster(m, cl, MJUMPAGESIZE);
582 return (TRUE);
583}
584
77e294a1
MD
585/*
586 * Used for both the cluster and cluster PHDR caches.
587 *
588 * The mbuf may have lost its cluster due to sharing, deal
589 * with the situation by checking M_EXT.
590 */
7b6f875f
JH
591static void
592mbufcluster_dtor(void *obj, void *private)
984263bc 593{
7b6f875f 594 struct mbuf *m = obj;
77e294a1 595 struct mbcluster *mcl;
984263bc 596
77e294a1
MD
597 if (m->m_flags & M_EXT) {
598 KKASSERT((m->m_flags & M_EXT_CLUSTER) != 0);
599 mcl = m->m_ext.ext_arg;
600 KKASSERT(mcl->mcl_refs == 1);
601 mcl->mcl_refs = 0;
33dbeae8
JT
602 if (m->m_flags & M_EXT && m->m_ext.ext_size != MCLBYTES)
603 objcache_put(mjclmeta_cache, mcl);
604 else
605 objcache_put(mclmeta_cache, mcl);
77e294a1 606 }
984263bc
MD
607}
608
7b6f875f
JH
609struct objcache_malloc_args mbuf_malloc_args = { MSIZE, M_MBUF };
610struct objcache_malloc_args mclmeta_malloc_args =
611 { sizeof(struct mbcluster), M_MCLMETA };
612
613/* ARGSUSED*/
90775e29 614static void
7b6f875f 615mbinit(void *dummy)
984263bc 616{
6f21e2f4 617 int mb_limit, cl_limit;
0aa16b5d 618 int limit;
4c1e2509
JT
619 int i;
620
0aa16b5d
SZ
621 /*
622 * Initialize statistics
623 */
624 for (i = 0; i < ncpus; i++) {
4c1e2509
JT
625 atomic_set_long_nonlocked(&mbstat[i].m_msize, MSIZE);
626 atomic_set_long_nonlocked(&mbstat[i].m_mclbytes, MCLBYTES);
33dbeae8 627 atomic_set_long_nonlocked(&mbstat[i].m_mjumpagesize, MJUMPAGESIZE);
4c1e2509
JT
628 atomic_set_long_nonlocked(&mbstat[i].m_minclsize, MINCLSIZE);
629 atomic_set_long_nonlocked(&mbstat[i].m_mlen, MLEN);
630 atomic_set_long_nonlocked(&mbstat[i].m_mhlen, MHLEN);
631 }
984263bc 632
0aa16b5d
SZ
633 /*
634 * Create objtect caches and save cluster limits, which will
635 * be used to adjust backing kmalloc pools' limit later.
636 */
637
6f21e2f4 638 mb_limit = cl_limit = 0;
0aa16b5d
SZ
639
640 limit = nmbufs;
641 mbuf_cache = objcache_create("mbuf", &limit, 0,
5b7da64a 642 mbuf_ctor, NULL, NULL,
7b6f875f 643 objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
6f21e2f4 644 mb_limit += limit;
0aa16b5d
SZ
645
646 limit = nmbufs;
647 mbufphdr_cache = objcache_create("mbuf pkt hdr", &limit, 64,
5b7da64a 648 mbufphdr_ctor, NULL, NULL,
7b6f875f 649 objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
6f21e2f4 650 mb_limit += limit;
0aa16b5d
SZ
651
652 cl_limit = nmbclusters;
653 mclmeta_cache = objcache_create("cluster mbuf", &cl_limit, 0,
7b6f875f
JH
654 mclmeta_ctor, mclmeta_dtor, NULL,
655 objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args);
0aa16b5d 656
33dbeae8
JT
657 cl_limit = nmbclusters;
658 mjclmeta_cache = objcache_create("jcluster mbuf", &cl_limit, 0,
659 mjclmeta_ctor, mclmeta_dtor, NULL,
660 objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args);
661
0aa16b5d
SZ
662 limit = nmbclusters;
663 mbufcluster_cache = objcache_create("mbuf + cluster", &limit, 0,
7b6f875f
JH
664 mbufcluster_ctor, mbufcluster_dtor, NULL,
665 objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
6f21e2f4 666 mb_limit += limit;
0aa16b5d
SZ
667
668 limit = nmbclusters;
7b6f875f 669 mbufphdrcluster_cache = objcache_create("mbuf pkt hdr + cluster",
0aa16b5d 670 &limit, 64, mbufphdrcluster_ctor, mbufcluster_dtor, NULL,
7b6f875f 671 objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
6f21e2f4 672 mb_limit += limit;
0aa16b5d 673
33dbeae8
JT
674 limit = nmbclusters;
675 mbufjcluster_cache = objcache_create("mbuf + jcluster", &limit, 0,
676 mbufjcluster_ctor, mbufcluster_dtor, NULL,
677 objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
678 mb_limit += limit;
679
680 limit = nmbclusters;
681 mbufphdrjcluster_cache = objcache_create("mbuf pkt hdr + jcluster",
682 &limit, 64, mbufphdrjcluster_ctor, mbufcluster_dtor, NULL,
683 objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
684 mb_limit += limit;
685
0aa16b5d
SZ
686 /*
687 * Adjust backing kmalloc pools' limit
3f98f485
SZ
688 *
689 * NOTE: We raise the limit by another 1/8 to take the effect
690 * of loosememuse into account.
0aa16b5d 691 */
3f98f485 692 cl_limit += cl_limit / 8;
0aa16b5d
SZ
693 kmalloc_raise_limit(mclmeta_malloc_args.mtype,
694 mclmeta_malloc_args.objsize * cl_limit);
33dbeae8
JT
695 kmalloc_raise_limit(M_MBUFCL, MCLBYTES * cl_limit * 3/4 + MJUMPAGESIZE * cl_limit / 4);
696 /*kmalloc_raise_limit(M_MBUFCL, MCLBYTES * cl_limit);*/
0aa16b5d 697
3f98f485 698 mb_limit += mb_limit / 8;
0aa16b5d
SZ
699 kmalloc_raise_limit(mbuf_malloc_args.mtype,
700 mbuf_malloc_args.objsize * mb_limit);
90775e29 701}
984263bc 702
90775e29
MD
703/*
704 * Return the number of references to this mbuf's data. 0 is returned
705 * if the mbuf is not M_EXT, a reference count is returned if it is
7b6f875f 706 * M_EXT | M_EXT_CLUSTER, and 99 is returned if it is a special M_EXT.
90775e29
MD
707 */
708int
709m_sharecount(struct mbuf *m)
710{
7b6f875f
JH
711 switch (m->m_flags & (M_EXT | M_EXT_CLUSTER)) {
712 case 0:
713 return (0);
714 case M_EXT:
715 return (99);
716 case M_EXT | M_EXT_CLUSTER:
717 return (((struct mbcluster *)m->m_ext.ext_arg)->mcl_refs);
718 }
719 /* NOTREACHED */
720 return (0); /* to shut up compiler */
90775e29
MD
721}
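/*
 * Illustrative example: m_sharecount() is what m_free() uses to decide
 * whether a cluster-backed mbuf may be recycled as a unit:
 *
 *	if (m_sharecount(m) == 1) {
 *		(sole owner: mbuf + cluster can go back to the combined cache)
 *	} else {
 *		(shared, or a special M_EXT reporting 99: the cluster must be
 *		 disconnected before the mbuf itself is destroyed)
 *	}
 */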
722
723/*
724 * change mbuf to new type
725 */
726void
727m_chtype(struct mbuf *m, int type)
728{
4c1e2509
JT
729 struct globaldata *gd = mycpu;
730
731 atomic_add_long_nonlocked(&mbtypes[gd->gd_cpuid][type], 1);
732 atomic_subtract_long_nonlocked(&mbtypes[gd->gd_cpuid][m->m_type], 1);
733 atomic_set_short_nonlocked(&m->m_type, type);
984263bc
MD
734}
735
984263bc 736static void
8a3125c6 737m_reclaim(void)
984263bc 738{
1fd87d54
RG
739 struct domain *dp;
740 struct protosw *pr;
984263bc 741
5bd48c1d
MD
742 kprintf("Debug: m_reclaim() called\n");
743
9c70fe43 744 SLIST_FOREACH(dp, &domains, dom_next) {
8a3125c6 745 for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
984263bc
MD
746 if (pr->pr_drain)
747 (*pr->pr_drain)();
8a3125c6
MD
748 }
749 }
4c1e2509 750 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_drain, 1);
984263bc
MD
751}
752
7b6f875f
JH
753static void __inline
754updatestats(struct mbuf *m, int type)
755{
4c1e2509 756 struct globaldata *gd = mycpu;
7b6f875f 757
fcd1202a 758 m->m_type = type;
e9fa4b60 759 mbuftrack(m);
982f999d
MD
760#ifdef MBUF_DEBUG
761 KASSERT(m->m_next == NULL, ("mbuf %p: bad m_next in get", m));
762 KASSERT(m->m_nextpkt == NULL, ("mbuf %p: bad m_nextpkt in get", m));
763#endif
4c1e2509
JT
764
765 atomic_add_long_nonlocked(&mbtypes[gd->gd_cpuid][type], 1);
766 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mbufs, 1);
767
7b6f875f
JH
768}
769
984263bc 770/*
7b6f875f 771 * Allocate an mbuf.
984263bc
MD
772 */
773struct mbuf *
8a3125c6 774m_get(int how, int type)
984263bc 775{
12496bdf 776 struct mbuf *m;
7b6f875f
JH
777 int ntries = 0;
778 int ocf = MBTOM(how);
12496bdf 779
7b6f875f
JH
780retryonce:
781
782 m = objcache_get(mbuf_cache, ocf);
783
784 if (m == NULL) {
785 if ((how & MB_TRYWAIT) && ntries++ == 0) {
786 struct objcache *reclaimlist[] = {
787 mbufphdr_cache,
5bd48c1d 788 mbufcluster_cache,
33dbeae8
JT
789 mbufphdrcluster_cache,
790 mbufjcluster_cache,
791 mbufphdrjcluster_cache
7b6f875f 792 };
a3034532 793 const int nreclaims = NELEM(reclaimlist);
7b6f875f
JH
794
795 if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
796 m_reclaim();
797 goto retryonce;
c6339e39 798 }
a5955b15 799 ++mbstat[mycpu->gd_cpuid].m_drops;
7b6f875f 800 return (NULL);
12496bdf 801 }
982f999d
MD
802#ifdef MBUF_DEBUG
803 KASSERT(m->m_data == m->m_dat, ("mbuf %p: bad m_data in get", m));
804#endif
5bd08532 805 m->m_len = 0;
c6339e39 806
7b6f875f 807 updatestats(m, type);
984263bc
MD
808 return (m);
809}
810
811struct mbuf *
8a3125c6 812m_gethdr(int how, int type)
984263bc 813{
12496bdf 814 struct mbuf *m;
7b6f875f
JH
815 int ocf = MBTOM(how);
816 int ntries = 0;
12496bdf 817
7b6f875f
JH
818retryonce:
819
820 m = objcache_get(mbufphdr_cache, ocf);
821
822 if (m == NULL) {
823 if ((how & MB_TRYWAIT) && ntries++ == 0) {
824 struct objcache *reclaimlist[] = {
825 mbuf_cache,
33dbeae8
JT
826 mbufcluster_cache, mbufphdrcluster_cache,
827 mbufjcluster_cache, mbufphdrjcluster_cache
7b6f875f 828 };
a3034532 829 const int nreclaims = NELEM(reclaimlist);
7b6f875f
JH
830
831 if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
832 m_reclaim();
833 goto retryonce;
c6339e39 834 }
a5955b15 835 ++mbstat[mycpu->gd_cpuid].m_drops;
7b6f875f 836 return (NULL);
12496bdf 837 }
982f999d
MD
838#ifdef MBUF_DEBUG
839 KASSERT(m->m_data == m->m_pktdat, ("mbuf %p: bad m_data in get", m));
840#endif
5bd08532
MD
841 m->m_len = 0;
842 m->m_pkthdr.len = 0;
c6339e39 843
7b6f875f 844 updatestats(m, type);
984263bc
MD
845 return (m);
846}
847
7b6f875f
JH
848/*
 849 * Get an mbuf (not an mbuf cluster!) and zero it.
850 * Deprecated.
851 */
984263bc 852struct mbuf *
8a3125c6 853m_getclr(int how, int type)
984263bc 854{
1fd87d54 855 struct mbuf *m;
984263bc 856
7b6f875f
JH
857 m = m_get(how, type);
858 if (m != NULL)
859 bzero(m->m_data, MLEN);
984263bc
MD
860 return (m);
861}
862
33dbeae8
JT
863struct mbuf *
864m_getjcl(int how, short type, int flags, size_t size)
865{
866 struct mbuf *m = NULL;
867 int ocflags = MBTOM(how);
868 int ntries = 0;
869
870retryonce:
871
872 if (flags & M_PKTHDR)
873 m = objcache_get(mbufphdrjcluster_cache, ocflags);
874 else
875 m = objcache_get(mbufjcluster_cache, ocflags);
876
877 if (m == NULL) {
878 if ((how & MB_TRYWAIT) && ntries++ == 0) {
879 struct objcache *reclaimlist[1];
880
881 if (flags & M_PKTHDR)
882 reclaimlist[0] = mbufjcluster_cache;
883 else
884 reclaimlist[0] = mbufphdrjcluster_cache;
885 if (!objcache_reclaimlist(reclaimlist, 1, ocflags))
886 m_reclaim();
887 goto retryonce;
888 }
889 ++mbstat[mycpu->gd_cpuid].m_drops;
890 return (NULL);
891 }
892
893#ifdef MBUF_DEBUG
894 KASSERT(m->m_data == m->m_ext.ext_buf,
895 ("mbuf %p: bad m_data in get", m));
896#endif
897 m->m_type = type;
898 m->m_len = 0;
 899 m->m_pkthdr.len = 0; /* just do it unconditionally */
900
901 mbuftrack(m);
902
903 atomic_add_long_nonlocked(&mbtypes[mycpu->gd_cpuid][type], 1);
904 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
905 return (m);
906}
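/*
 * Illustrative usage: m_getjcl() is the jumbo analogue of m_getcl().  Only
 * MJUMPAGESIZE-backed caches exist here, so the 'size' argument is
 * effectively ignored:
 *
 *	m = m_getjcl(MB_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
 *	if (m != NULL)
 *		m->m_len = m->m_pkthdr.len = MJUMPAGESIZE;
 */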
907
984263bc 908/*
7b6f875f 909 * Returns an mbuf with an attached cluster.
984263bc
MD
 910 * Because many network drivers use this kind of buffer a lot, it is
911 * convenient to keep a small pool of free buffers of this kind.
912 * Even a small size such as 10 gives about 10% improvement in the
913 * forwarding rate in a bridge or router.
984263bc 914 */
984263bc
MD
915struct mbuf *
916m_getcl(int how, short type, int flags)
917{
7b6f875f
JH
918 struct mbuf *m;
919 int ocflags = MBTOM(how);
920 int ntries = 0;
984263bc 921
7b6f875f
JH
922retryonce:
923
924 if (flags & M_PKTHDR)
925 m = objcache_get(mbufphdrcluster_cache, ocflags);
926 else
927 m = objcache_get(mbufcluster_cache, ocflags);
928
929 if (m == NULL) {
930 if ((how & MB_TRYWAIT) && ntries++ == 0) {
931 struct objcache *reclaimlist[1];
932
933 if (flags & M_PKTHDR)
934 reclaimlist[0] = mbufcluster_cache;
935 else
936 reclaimlist[0] = mbufphdrcluster_cache;
937 if (!objcache_reclaimlist(reclaimlist, 1, ocflags))
938 m_reclaim();
939 goto retryonce;
984263bc 940 }
a5955b15 941 ++mbstat[mycpu->gd_cpuid].m_drops;
7b6f875f 942 return (NULL);
984263bc 943 }
7b6f875f 944
982f999d
MD
945#ifdef MBUF_DEBUG
946 KASSERT(m->m_data == m->m_ext.ext_buf,
947 ("mbuf %p: bad m_data in get", m));
948#endif
7b6f875f 949 m->m_type = type;
5bd08532
MD
950 m->m_len = 0;
 951 m->m_pkthdr.len = 0; /* just do it unconditionally */
7b6f875f 952
e9fa4b60 953 mbuftrack(m);
4c1e2509
JT
954
955 atomic_add_long_nonlocked(&mbtypes[mycpu->gd_cpuid][type], 1);
956 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
7b6f875f 957 return (m);
984263bc
MD
958}
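/*
 * Illustrative usage: receive-ring refill in a NIC driver typically wants
 * a packet header mbuf with a cluster already attached:
 *
 *	m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_len = m->m_pkthdr.len = MCLBYTES;
 */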
959
960/*
50503f0f
JH
961 * Allocate chain of requested length.
962 */
963struct mbuf *
964m_getc(int len, int how, int type)
965{
966 struct mbuf *n, *nfirst = NULL, **ntail = &nfirst;
967 int nsize;
968
969 while (len > 0) {
970 n = m_getl(len, how, type, 0, &nsize);
971 if (n == NULL)
972 goto failed;
973 n->m_len = 0;
974 *ntail = n;
975 ntail = &n->m_next;
976 len -= nsize;
977 }
978 return (nfirst);
979
980failed:
981 m_freem(nfirst);
982 return (NULL);
983}
984
985/*
986 * Allocate len-worth of mbufs and/or mbuf clusters (whatever fits best)
987 * and return a pointer to the head of the allocated chain. If m0 is
984263bc
MD
988 * non-null, then we assume that it is a single mbuf or an mbuf chain to
989 * which we want len bytes worth of mbufs and/or clusters attached, and so
50503f0f 990 * if we succeed in allocating it, we will just return a pointer to m0.
984263bc
MD
991 *
992 * If we happen to fail at any point during the allocation, we will free
993 * up everything we have already allocated and return NULL.
994 *
50503f0f 995 * Deprecated. Use m_getc() and m_cat() instead.
984263bc
MD
996 */
997struct mbuf *
dc14b0a9 998m_getm(struct mbuf *m0, int len, int type, int how)
984263bc 999{
50503f0f 1000 struct mbuf *nfirst;
984263bc 1001
50503f0f 1002 nfirst = m_getc(len, how, type);
984263bc 1003
50503f0f
JH
1004 if (m0 != NULL) {
1005 m_last(m0)->m_next = nfirst;
1006 return (m0);
984263bc
MD
1007 }
1008
50503f0f 1009 return (nfirst);
984263bc
MD
1010}
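/*
 * Illustrative replacement for the deprecated m_getm(): allocate the new
 * chain with m_getc() and splice it on with m_cat():
 *
 *	struct mbuf *tail;
 *
 *	tail = m_getc(len, MB_WAIT, MT_DATA);
 *	if (tail != NULL)
 *		m_cat(m0, tail);
 */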
1011
1012/*
7b6f875f
JH
1013 * Adds a cluster to a normal mbuf, M_EXT is set on success.
1014 * Deprecated. Use m_getcl() instead.
b6650ec0 1015 */
90775e29
MD
1016void
1017m_mclget(struct mbuf *m, int how)
b6650ec0 1018{
7b6f875f 1019 struct mbcluster *mcl;
b6650ec0 1020
77e294a1 1021 KKASSERT((m->m_flags & M_EXT) == 0);
7b6f875f 1022 mcl = objcache_get(mclmeta_cache, MBTOM(how));
c3ef87ca
MD
1023 if (mcl != NULL) {
1024 linkcluster(m, mcl);
5bd48c1d
MD
1025 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters,
1026 1);
a5955b15
MD
1027 } else {
1028 ++mbstat[mycpu->gd_cpuid].m_drops;
c3ef87ca 1029 }
b6650ec0
MD
1030}
1031
df8d1020
MD
1032/*
1033 * Updates to mbcluster must be MPSAFE. Only an entity which already has
1034 * a reference to the cluster can ref it, so we are in no danger of
1035 * racing an add with a subtract. But the operation must still be atomic
1036 * since multiple entities may have a reference on the cluster.
1037 *
1038 * m_mclfree() is almost the same but it must contend with two entities
5bd48c1d 1039 * freeing the cluster at the same time.
df8d1020 1040 */
90775e29 1041static void
7b6f875f 1042m_mclref(void *arg)
b6650ec0 1043{
7b6f875f 1044 struct mbcluster *mcl = arg;
90775e29 1045
7b6f875f 1046 atomic_add_int(&mcl->mcl_refs, 1);
b6650ec0
MD
1047}
1048
1d16b2b5
MD
1049/*
 1050 * When dereferencing a cluster we have to deal with an N->0 race, where
 1051 * N entities free their references simultaneously. To do this we use
dee87a60 1052 * atomic_fetchadd_int().
1d16b2b5 1053 */
90775e29 1054static void
7b6f875f 1055m_mclfree(void *arg)
b6650ec0 1056{
7b6f875f 1057 struct mbcluster *mcl = arg;
90775e29 1058
dee87a60 1059 if (atomic_fetchadd_int(&mcl->mcl_refs, -1) == 1)
77e294a1 1060 objcache_put(mclmeta_cache, mcl);
b6650ec0
MD
1061}
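/*
 * Illustrative note: atomic_fetchadd_int() returns the counter's value
 * from before the add, so of N concurrent callers exactly one sees
 *
 *	atomic_fetchadd_int(&mcl->mcl_refs, -1) == 1
 *
 * and it alone returns the cluster to its cache.
 */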
1062
7eccf245 1063/*
b6650ec0
MD
1064 * Free a single mbuf and any associated external storage. The successor,
1065 * if any, is returned.
984263bc 1066 *
b6650ec0 1067 * We do need to check non-first mbuf for m_aux, since some of existing
984263bc
MD
1068 * code does not call M_PREPEND properly.
1069 * (example: call to bpf_mtap from drivers)
1070 */
982f999d
MD
1071
1072#ifdef MBUF_DEBUG
1073
1074struct mbuf *
1075_m_free(struct mbuf *m, const char *func)
1076
1077#else
1078
984263bc 1079struct mbuf *
b6650ec0 1080m_free(struct mbuf *m)
982f999d
MD
1081
1082#endif
984263bc 1083{
b6650ec0 1084 struct mbuf *n;
4c1e2509 1085 struct globaldata *gd = mycpu;
b6650ec0 1086
361af367 1087 KASSERT(m->m_type != MT_FREE, ("freeing free mbuf %p", m));
f3f0fc49 1088 KASSERT(M_TRAILINGSPACE(m) >= 0, ("overflowed mbuf %p", m));
4c1e2509 1089 atomic_subtract_long_nonlocked(&mbtypes[gd->gd_cpuid][m->m_type], 1);
90775e29 1090
7b6f875f 1091 n = m->m_next;
90775e29
MD
1092
1093 /*
7b6f875f
JH
1094 * Make sure the mbuf is in constructed state before returning it
1095 * to the objcache.
90775e29 1096 */
90775e29 1097 m->m_next = NULL;
e9fa4b60 1098 mbufuntrack(m);
982f999d
MD
1099#ifdef MBUF_DEBUG
1100 m->m_hdr.mh_lastfunc = func;
1101#endif
7b6f875f
JH
1102#ifdef notyet
1103 KKASSERT(m->m_nextpkt == NULL);
1104#else
1105 if (m->m_nextpkt != NULL) {
7b6f875f
JH
1106 static int afewtimes = 10;
1107
1108 if (afewtimes-- > 0) {
6ea70f76 1109 kprintf("mfree: m->m_nextpkt != NULL\n");
7ce2998e 1110 print_backtrace(-1);
90775e29 1111 }
7b6f875f
JH
1112 m->m_nextpkt = NULL;
1113 }
1114#endif
1115 if (m->m_flags & M_PKTHDR) {
7b6f875f 1116 m_tag_delete_chain(m); /* eliminate XXX JH */
77e294a1
MD
1117 }
1118
1119 m->m_flags &= (M_EXT | M_EXT_CLUSTER | M_CLCACHE | M_PHCACHE);
1120
1121 /*
1122 * Clean the M_PKTHDR state so we can return the mbuf to its original
1123 * cache. This is based on the PHCACHE flag which tells us whether
1124 * the mbuf was originally allocated out of a packet-header cache
1125 * or a non-packet-header cache.
1126 */
1127 if (m->m_flags & M_PHCACHE) {
1128 m->m_flags |= M_PKTHDR;
1129 m->m_pkthdr.rcvif = NULL; /* eliminate XXX JH */
7b6f875f
JH
1130 m->m_pkthdr.csum_flags = 0; /* eliminate XXX JH */
1131 m->m_pkthdr.fw_flags = 0; /* eliminate XXX JH */
6b1d6bed 1132 SLIST_INIT(&m->m_pkthdr.tags);
90775e29 1133 }
7b6f875f 1134
77e294a1
MD
1135 /*
1136 * Handle remaining flags combinations. M_CLCACHE tells us whether
1137 * the mbuf was originally allocated from a cluster cache or not,
1138 * and is totally separate from whether the mbuf is currently
1139 * associated with a cluster.
1140 */
77e294a1
MD
1141 switch(m->m_flags & (M_CLCACHE | M_EXT | M_EXT_CLUSTER)) {
1142 case M_CLCACHE | M_EXT | M_EXT_CLUSTER:
1143 /*
1144 * mbuf+cluster cache case. The mbuf was allocated from the
1145 * combined mbuf_cluster cache and can be returned to the
1146 * cache if the cluster hasn't been shared.
1147 */
1148 if (m_sharecount(m) == 1) {
1149 /*
1150 * The cluster has not been shared, we can just
1151 * reset the data pointer and return the mbuf
1152 * to the cluster cache. Note that the reference
1153 * count is left intact (it is still associated with
1154 * an mbuf).
1155 */
1156 m->m_data = m->m_ext.ext_buf;
33dbeae8
JT
1157 if (m->m_flags & M_EXT && m->m_ext.ext_size != MCLBYTES) {
1158 if (m->m_flags & M_PHCACHE)
1159 objcache_put(mbufphdrjcluster_cache, m);
1160 else
1161 objcache_put(mbufjcluster_cache, m);
1162 } else {
1163 if (m->m_flags & M_PHCACHE)
1164 objcache_put(mbufphdrcluster_cache, m);
1165 else
1166 objcache_put(mbufcluster_cache, m);
1167 }
4c1e2509 1168 atomic_subtract_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
77e294a1
MD
1169 } else {
1170 /*
1171 * Hell. Someone else has a ref on this cluster,
1172 * we have to disconnect it which means we can't
1173 * put it back into the mbufcluster_cache, we
1174 * have to destroy the mbuf.
1175 *
cb086467
MD
1176 * Other mbuf references to the cluster will typically
1177 * be M_EXT | M_EXT_CLUSTER but without M_CLCACHE.
1178 *
77e294a1
MD
1179 * XXX we could try to connect another cluster to
1180 * it.
1181 */
33dbeae8 1182
7b6f875f
JH
1183 m->m_ext.ext_free(m->m_ext.ext_arg);
1184 m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
33dbeae8
JT
1185 if (m->m_ext.ext_size == MCLBYTES) {
1186 if (m->m_flags & M_PHCACHE)
1187 objcache_dtor(mbufphdrcluster_cache, m);
1188 else
1189 objcache_dtor(mbufcluster_cache, m);
1190 } else {
1191 if (m->m_flags & M_PHCACHE)
1192 objcache_dtor(mbufphdrjcluster_cache, m);
1193 else
1194 objcache_dtor(mbufjcluster_cache, m);
1195 }
7b6f875f 1196 }
77e294a1
MD
1197 break;
1198 case M_EXT | M_EXT_CLUSTER:
1199 /*
1200 * Normal cluster associated with an mbuf that was allocated
1201 * from the normal mbuf pool rather then the cluster pool.
1202 * The cluster has to be independantly disassociated from the
1203 * mbuf.
1204 */
cb086467 1205 if (m_sharecount(m) == 1)
4c1e2509 1206 atomic_subtract_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
77e294a1
MD
1207 /* fall through */
1208 case M_EXT:
1209 /*
1210 * Normal cluster association case, disconnect the cluster from
1211 * the mbuf. The cluster may or may not be custom.
1212 */
1213 m->m_ext.ext_free(m->m_ext.ext_arg);
1214 m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
1215 /* fall through */
1216 case 0:
1217 /*
1218 * return the mbuf to the mbuf cache.
1219 */
1220 if (m->m_flags & M_PHCACHE) {
7b6f875f
JH
1221 m->m_data = m->m_pktdat;
1222 objcache_put(mbufphdr_cache, m);
90775e29 1223 } else {
7b6f875f
JH
1224 m->m_data = m->m_dat;
1225 objcache_put(mbuf_cache, m);
90775e29 1226 }
4c1e2509 1227 atomic_subtract_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mbufs, 1);
77e294a1
MD
1228 break;
1229 default:
1230 if (!panicstr)
1231 panic("bad mbuf flags %p %08x\n", m, m->m_flags);
1232 break;
b6650ec0 1233 }
984263bc
MD
1234 return (n);
1235}
1236
982f999d
MD
1237#ifdef MBUF_DEBUG
1238
1239void
1240_m_freem(struct mbuf *m, const char *func)
1241{
1242 while (m)
1243 m = _m_free(m, func);
1244}
1245
1246#else
1247
984263bc 1248void
b6650ec0 1249m_freem(struct mbuf *m)
984263bc 1250{
90775e29
MD
1251 while (m)
1252 m = m_free(m);
984263bc
MD
1253}
1254
982f999d
MD
1255#endif
1256
984263bc 1257/*
df80f2ea 1258 * mbuf utility routines
984263bc
MD
1259 */
1260
1261/*
7b6f875f 1262 * Lesser-used path for M_PREPEND: allocate new mbuf to prepend to chain and
984263bc
MD
1263 * copy junk along.
1264 */
1265struct mbuf *
8a3125c6 1266m_prepend(struct mbuf *m, int len, int how)
984263bc
MD
1267{
1268 struct mbuf *mn;
1269
c3ef87ca
MD
1270 if (m->m_flags & M_PKTHDR)
1271 mn = m_gethdr(how, m->m_type);
1272 else
1273 mn = m_get(how, m->m_type);
7b6f875f 1274 if (mn == NULL) {
984263bc 1275 m_freem(m);
7b6f875f 1276 return (NULL);
984263bc
MD
1277 }
1278 if (m->m_flags & M_PKTHDR)
1279 M_MOVE_PKTHDR(mn, m);
1280 mn->m_next = m;
1281 m = mn;
1282 if (len < MHLEN)
1283 MH_ALIGN(m, len);
1284 m->m_len = len;
1285 return (m);
1286}
1287
1288/*
1289 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
1290 * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
74f1caca 1291 * The wait parameter is a choice of MB_WAIT/MB_DONTWAIT from caller.
984263bc
MD
1292 * Note that the copy is read-only, because clusters are not copied,
1293 * only their reference counts are incremented.
1294 */
984263bc 1295struct mbuf *
8a3125c6 1296m_copym(const struct mbuf *m, int off0, int len, int wait)
984263bc 1297{
1fd87d54
RG
1298 struct mbuf *n, **np;
1299 int off = off0;
984263bc
MD
1300 struct mbuf *top;
1301 int copyhdr = 0;
1302
1303 KASSERT(off >= 0, ("m_copym, negative off %d", off));
1304 KASSERT(len >= 0, ("m_copym, negative len %d", len));
5bd48c1d 1305 if (off == 0 && (m->m_flags & M_PKTHDR))
984263bc
MD
1306 copyhdr = 1;
1307 while (off > 0) {
1308 KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
1309 if (off < m->m_len)
1310 break;
1311 off -= m->m_len;
1312 m = m->m_next;
1313 }
1314 np = &top;
5bd48c1d 1315 top = NULL;
984263bc 1316 while (len > 0) {
7b6f875f 1317 if (m == NULL) {
984263bc
MD
1318 KASSERT(len == M_COPYALL,
1319 ("m_copym, length > size of mbuf chain"));
1320 break;
1321 }
c3ef87ca
MD
1322 /*
1323 * Because we are sharing any cluster attachment below,
1324 * be sure to get an mbuf that does not have a cluster
1325 * associated with it.
1326 */
1327 if (copyhdr)
1328 n = m_gethdr(wait, m->m_type);
1329 else
1330 n = m_get(wait, m->m_type);
984263bc 1331 *np = n;
7b6f875f 1332 if (n == NULL)
984263bc
MD
1333 goto nospace;
1334 if (copyhdr) {
1335 if (!m_dup_pkthdr(n, m, wait))
1336 goto nospace;
1337 if (len == M_COPYALL)
1338 n->m_pkthdr.len -= off0;
1339 else
1340 n->m_pkthdr.len = len;
1341 copyhdr = 0;
1342 }
1343 n->m_len = min(len, m->m_len - off);
1344 if (m->m_flags & M_EXT) {
c3ef87ca 1345 KKASSERT((n->m_flags & M_EXT) == 0);
984263bc 1346 n->m_data = m->m_data + off;
7b6f875f 1347 m->m_ext.ext_ref(m->m_ext.ext_arg);
984263bc 1348 n->m_ext = m->m_ext;
b542cd49 1349 n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
7eccf245 1350 } else {
984263bc
MD
1351 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
1352 (unsigned)n->m_len);
7eccf245 1353 }
984263bc
MD
1354 if (len != M_COPYALL)
1355 len -= n->m_len;
1356 off = 0;
1357 m = m->m_next;
1358 np = &n->m_next;
1359 }
7b6f875f 1360 if (top == NULL)
4c1e2509 1361 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
984263bc
MD
1362 return (top);
1363nospace:
1364 m_freem(top);
4c1e2509 1365 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
7b6f875f 1366 return (NULL);
984263bc
MD
1367}
1368
1369/*
1370 * Copy an entire packet, including header (which must be present).
1371 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
1372 * Note that the copy is read-only, because clusters are not copied,
1373 * only their reference counts are incremented.
1374 * Preserve alignment of the first mbuf so if the creator has left
1375 * some room at the beginning (e.g. for inserting protocol headers)
1376 * the copies also have the room available.
1377 */
1378struct mbuf *
8a3125c6 1379m_copypacket(struct mbuf *m, int how)
984263bc
MD
1380{
1381 struct mbuf *top, *n, *o;
1382
7f3602fe 1383 n = m_gethdr(how, m->m_type);
984263bc
MD
1384 top = n;
1385 if (!n)
1386 goto nospace;
1387
1388 if (!m_dup_pkthdr(n, m, how))
1389 goto nospace;
1390 n->m_len = m->m_len;
1391 if (m->m_flags & M_EXT) {
c3ef87ca 1392 KKASSERT((n->m_flags & M_EXT) == 0);
984263bc 1393 n->m_data = m->m_data;
7b6f875f 1394 m->m_ext.ext_ref(m->m_ext.ext_arg);
984263bc 1395 n->m_ext = m->m_ext;
b542cd49 1396 n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
984263bc
MD
1397 } else {
1398 n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat );
1399 bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
1400 }
1401
1402 m = m->m_next;
1403 while (m) {
7b6f875f 1404 o = m_get(how, m->m_type);
984263bc
MD
1405 if (!o)
1406 goto nospace;
1407
1408 n->m_next = o;
1409 n = n->m_next;
1410
1411 n->m_len = m->m_len;
1412 if (m->m_flags & M_EXT) {
c3ef87ca 1413 KKASSERT((n->m_flags & M_EXT) == 0);
984263bc 1414 n->m_data = m->m_data;
7b6f875f 1415 m->m_ext.ext_ref(m->m_ext.ext_arg);
984263bc 1416 n->m_ext = m->m_ext;
b542cd49 1417 n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
984263bc
MD
1418 } else {
1419 bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
1420 }
1421
1422 m = m->m_next;
1423 }
1424 return top;
1425nospace:
1426 m_freem(top);
4c1e2509 1427 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
7b6f875f 1428 return (NULL);
984263bc
MD
1429}
1430
1431/*
1432 * Copy data from an mbuf chain starting "off" bytes from the beginning,
1433 * continuing for "len" bytes, into the indicated buffer.
1434 */
1435void
8a3125c6 1436m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
984263bc 1437{
1fd87d54 1438 unsigned count;
984263bc
MD
1439
1440 KASSERT(off >= 0, ("m_copydata, negative off %d", off));
1441 KASSERT(len >= 0, ("m_copydata, negative len %d", len));
1442 while (off > 0) {
1443 KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
1444 if (off < m->m_len)
1445 break;
1446 off -= m->m_len;
1447 m = m->m_next;
1448 }
1449 while (len > 0) {
1450 KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
1451 count = min(m->m_len - off, len);
1452 bcopy(mtod(m, caddr_t) + off, cp, count);
1453 len -= count;
1454 cp += count;
1455 off = 0;
1456 m = m->m_next;
1457 }
1458}
1459
1460/*
1461 * Copy a packet header mbuf chain into a completely new chain, including
1462 * copying any mbuf clusters. Use this instead of m_copypacket() when
1463 * you need a writable copy of an mbuf chain.
1464 */
1465struct mbuf *
8a3125c6 1466m_dup(struct mbuf *m, int how)
984263bc
MD
1467{
1468 struct mbuf **p, *top = NULL;
1469 int remain, moff, nsize;
1470
1471 /* Sanity check */
1472 if (m == NULL)
50503f0f 1473 return (NULL);
5e2195bf 1474 KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __func__));
984263bc
MD
1475
1476 /* While there's more data, get a new mbuf, tack it on, and fill it */
1477 remain = m->m_pkthdr.len;
1478 moff = 0;
1479 p = &top;
1480 while (remain > 0 || top == NULL) { /* allow m->m_pkthdr.len == 0 */
1481 struct mbuf *n;
1482
1483 /* Get the next new mbuf */
50503f0f
JH
1484 n = m_getl(remain, how, m->m_type, top == NULL ? M_PKTHDR : 0,
1485 &nsize);
984263bc
MD
1486 if (n == NULL)
1487 goto nospace;
50503f0f 1488 if (top == NULL)
984263bc 1489 if (!m_dup_pkthdr(n, m, how))
50503f0f 1490 goto nospace0;
984263bc
MD
1491
1492 /* Link it into the new chain */
1493 *p = n;
1494 p = &n->m_next;
1495
1496 /* Copy data from original mbuf(s) into new mbuf */
50503f0f 1497 n->m_len = 0;
984263bc
MD
1498 while (n->m_len < nsize && m != NULL) {
1499 int chunk = min(nsize - n->m_len, m->m_len - moff);
1500
1501 bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
1502 moff += chunk;
1503 n->m_len += chunk;
1504 remain -= chunk;
1505 if (moff == m->m_len) {
1506 m = m->m_next;
1507 moff = 0;
1508 }
1509 }
1510
1511 /* Check correct total mbuf length */
1512 KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
50503f0f 1513 ("%s: bogus m_pkthdr.len", __func__));
984263bc
MD
1514 }
1515 return (top);
1516
1517nospace:
1518 m_freem(top);
50503f0f 1519nospace0:
4c1e2509 1520 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
50503f0f 1521 return (NULL);
984263bc
MD
1522}
1523
1524/*
3bf6fec3
MD
1525 * Copy the non-packet mbuf data chain into a new set of mbufs, including
1526 * copying any mbuf clusters. This is typically used to realign a data
1527 * chain by nfs_realign().
1528 *
1529 * The original chain is left intact. how should be MB_WAIT or MB_DONTWAIT
1530 * and NULL can be returned if MB_DONTWAIT is passed.
1531 *
 1532 * Be careful to use cluster mbufs: a large mbuf chain converted to
 1533 * non-cluster mbufs can exhaust our supply of mbufs.
1534 */
1535struct mbuf *
1536m_dup_data(struct mbuf *m, int how)
1537{
1538 struct mbuf **p, *n, *top = NULL;
1539 int mlen, moff, chunk, gsize, nsize;
1540
1541 /*
1542 * Degenerate case
1543 */
1544 if (m == NULL)
1545 return (NULL);
1546
1547 /*
1548 * Optimize the mbuf allocation but do not get too carried away.
1549 */
 1550 if (m->m_next || m->m_len > MLEN) {
 1551 if ((m->m_flags & M_EXT) && m->m_ext.ext_size == MCLBYTES)
 1552 gsize = MCLBYTES;
 1553 else
 1554 gsize = MJUMPAGESIZE;
 1555 } else
 1556 gsize = MLEN;
1557
1558 /* Chain control */
1559 p = &top;
1560 n = NULL;
1561 nsize = 0;
1562
1563 /*
1564 * Scan the mbuf chain until nothing is left, the new mbuf chain
1565 * will be allocated on the fly as needed.
1566 */
1567 while (m) {
1568 mlen = m->m_len;
1569 moff = 0;
1570
1571 while (mlen) {
1572 KKASSERT(m->m_type == MT_DATA);
1573 if (n == NULL) {
1574 n = m_getl(gsize, how, MT_DATA, 0, &nsize);
 1575 if (n == NULL)
 1576 goto nospace;
 1577 n->m_len = 0;
1578 *p = n;
1579 p = &n->m_next;
1580 }
1581 chunk = imin(mlen, nsize);
1582 bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
1583 mlen -= chunk;
1584 moff += chunk;
1585 n->m_len += chunk;
1586 nsize -= chunk;
1587 if (nsize == 0)
1588 n = NULL;
1589 }
1590 m = m->m_next;
1591 }
1592 *p = NULL;
1593 return(top);
1594nospace:
1595 *p = NULL;
1596 m_freem(top);
1597 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
1598 return (NULL);
1599}
1600
1601/*
984263bc
MD
1602 * Concatenate mbuf chain n to m.
1603 * Both chains must be of the same type (e.g. MT_DATA).
1604 * Any m_pkthdr is not updated.
1605 */
1606void
8a3125c6 1607m_cat(struct mbuf *m, struct mbuf *n)
984263bc 1608{
50503f0f 1609 m = m_last(m);
984263bc
MD
1610 while (n) {
1611 if (m->m_flags & M_EXT ||
1612 m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
1613 /* just join the two chains */
1614 m->m_next = n;
1615 return;
1616 }
1617 /* splat the data from one into the other */
1618 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
1619 (u_int)n->m_len);
1620 m->m_len += n->m_len;
1621 n = m_free(n);
1622 }
1623}
1624
1625void
8a3125c6 1626m_adj(struct mbuf *mp, int req_len)
984263bc 1627{
1fd87d54
RG
1628 int len = req_len;
1629 struct mbuf *m;
1630 int count;
984263bc
MD
1631
1632 if ((m = mp) == NULL)
1633 return;
1634 if (len >= 0) {
1635 /*
1636 * Trim from head.
1637 */
1638 while (m != NULL && len > 0) {
1639 if (m->m_len <= len) {
1640 len -= m->m_len;
1641 m->m_len = 0;
1642 m = m->m_next;
1643 } else {
1644 m->m_len -= len;
1645 m->m_data += len;
1646 len = 0;
1647 }
1648 }
1649 m = mp;
1650 if (mp->m_flags & M_PKTHDR)
1651 m->m_pkthdr.len -= (req_len - len);
1652 } else {
1653 /*
1654 * Trim from tail. Scan the mbuf chain,
1655 * calculating its length and finding the last mbuf.
1656 * If the adjustment only affects this mbuf, then just
1657 * adjust and return. Otherwise, rescan and truncate
1658 * after the remaining size.
1659 */
1660 len = -len;
1661 count = 0;
1662 for (;;) {
1663 count += m->m_len;
60233e58 1664 if (m->m_next == NULL)
984263bc
MD
1665 break;
1666 m = m->m_next;
1667 }
1668 if (m->m_len >= len) {
1669 m->m_len -= len;
1670 if (mp->m_flags & M_PKTHDR)
1671 mp->m_pkthdr.len -= len;
1672 return;
1673 }
1674 count -= len;
1675 if (count < 0)
1676 count = 0;
1677 /*
1678 * Correct length for chain is "count".
1679 * Find the mbuf with last data, adjust its length,
1680 * and toss data from remaining mbufs on chain.
1681 */
1682 m = mp;
1683 if (m->m_flags & M_PKTHDR)
1684 m->m_pkthdr.len = count;
1685 for (; m; m = m->m_next) {
1686 if (m->m_len >= count) {
1687 m->m_len = count;
1688 break;
1689 }
1690 count -= m->m_len;
1691 }
1692 while (m->m_next)
 1693 (m = m->m_next)->m_len = 0;
1694 }
1695}
1696
1697/*
a3768f58
RP
1698 * Set the m_data pointer of a newly-allocated mbuf
1699 * to place an object of the specified size at the
1700 * end of the mbuf, longword aligned.
1701 */
1702void
1703m_align(struct mbuf *m, int len)
1704{
1705 int adjust;
1706
1707 if (m->m_flags & M_EXT)
1708 adjust = m->m_ext.ext_size - len;
1709 else if (m->m_flags & M_PKTHDR)
1710 adjust = MHLEN - len;
1711 else
1712 adjust = MLEN - len;
1713 m->m_data += adjust &~ (sizeof(long)-1);
1714}
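/*
 * Illustrative usage ('struct reply' is a hypothetical payload type):
 *
 *	m = m_gethdr(MB_DONTWAIT, MT_DATA);
 *	if (m != NULL) {
 *		m_align(m, sizeof(struct reply));
 *		m->m_len = m->m_pkthdr.len = sizeof(struct reply);
 *	}
 *
 * which leaves the payload longword aligned at the end of the mbuf's
 * data area.
 */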
1715
1716/*
7b6f875f 1717 * Rearrange an mbuf chain so that len bytes are contiguous
9e4465af
MD
1718 * and in the data area of an mbuf (so that mtod will work for a structure
1719 * of size len). Returns the resulting mbuf chain on success, frees it and
1720 * returns null on failure. If there is room, it will add up to
1721 * max_protohdr-len extra bytes to the contiguous region in an attempt to
1722 * avoid being called next time.
984263bc 1723 */
984263bc 1724struct mbuf *
8a3125c6 1725m_pullup(struct mbuf *n, int len)
984263bc 1726{
1fd87d54
RG
1727 struct mbuf *m;
1728 int count;
984263bc
MD
1729 int space;
1730
1731 /*
1732 * If first mbuf has no cluster, and has room for len bytes
1733 * without shifting current data, pullup into it,
1734 * otherwise allocate a new mbuf to prepend to the chain.
1735 */
7b6f875f
JH
1736 if (!(n->m_flags & M_EXT) &&
1737 n->m_data + len < &n->m_dat[MLEN] &&
1738 n->m_next) {
984263bc
MD
1739 if (n->m_len >= len)
1740 return (n);
1741 m = n;
1742 n = n->m_next;
1743 len -= m->m_len;
1744 } else {
1745 if (len > MHLEN)
1746 goto bad;
c3ef87ca
MD
1747 if (n->m_flags & M_PKTHDR)
1748 m = m_gethdr(MB_DONTWAIT, n->m_type);
1749 else
1750 m = m_get(MB_DONTWAIT, n->m_type);
7b6f875f 1751 if (m == NULL)
984263bc
MD
1752 goto bad;
1753 m->m_len = 0;
1754 if (n->m_flags & M_PKTHDR)
1755 M_MOVE_PKTHDR(m, n);
1756 }
1757 space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
1758 do {
1759 count = min(min(max(len, max_protohdr), space), n->m_len);
1760 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
1761 (unsigned)count);
1762 len -= count;
1763 m->m_len += count;
1764 n->m_len -= count;
1765 space -= count;
1766 if (n->m_len)
1767 n->m_data += count;
1768 else
1769 n = m_free(n);
1770 } while (len > 0 && n);
1771 if (len > 0) {
7b6f875f 1772 m_free(m);
984263bc
MD
1773 goto bad;
1774 }
1775 m->m_next = n;
1776 return (m);
1777bad:
1778 m_freem(n);
4c1e2509 1779 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
7b6f875f 1780 return (NULL);
984263bc
MD
1781}
1782
1783/*
1784 * Partition an mbuf chain in two pieces, returning the tail --
1785 * all but the first len0 bytes. In case of failure, it returns NULL and
1786 * attempts to restore the chain to its original state.
1787 *
1788 * Note that the resulting mbufs might be read-only, because the new
1789 * mbuf can end up sharing an mbuf cluster with the original mbuf if
1790 * the "breaking point" happens to lie within a cluster mbuf. Use the
1791 * M_WRITABLE() macro to check for this case.
1792 */
1793struct mbuf *
8a3125c6 1794m_split(struct mbuf *m0, int len0, int wait)
984263bc 1795{
1fd87d54 1796 struct mbuf *m, *n;
984263bc
MD
1797 unsigned len = len0, remain;
1798
1799 for (m = m0; m && len > m->m_len; m = m->m_next)
1800 len -= m->m_len;
7b6f875f
JH
1801 if (m == NULL)
1802 return (NULL);
984263bc
MD
1803 remain = m->m_len - len;
1804 if (m0->m_flags & M_PKTHDR) {
7b6f875f
JH
1805 n = m_gethdr(wait, m0->m_type);
1806 if (n == NULL)
1807 return (NULL);
984263bc
MD
1808 n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
1809 n->m_pkthdr.len = m0->m_pkthdr.len - len0;
1810 m0->m_pkthdr.len = len0;
1811 if (m->m_flags & M_EXT)
1812 goto extpacket;
1813 if (remain > MHLEN) {
1814 /* m can't be the lead packet */
1815 MH_ALIGN(n, 0);
1816 n->m_next = m_split(m, len, wait);
7b6f875f
JH
1817 if (n->m_next == NULL) {
1818 m_free(n);
1819 return (NULL);
984263bc
MD
1820 } else {
1821 n->m_len = 0;
1822 return (n);
1823 }
1824 } else
1825 MH_ALIGN(n, remain);
1826 } else if (remain == 0) {
1827 n = m->m_next;
1828 m->m_next = 0;
1829 return (n);
1830 } else {
7b6f875f
JH
1831 n = m_get(wait, m->m_type);
1832 if (n == NULL)
1833 return (NULL);
984263bc
MD
1834 M_ALIGN(n, remain);
1835 }
1836extpacket:
1837 if (m->m_flags & M_EXT) {
c3ef87ca 1838 KKASSERT((n->m_flags & M_EXT) == 0);
984263bc 1839 n->m_data = m->m_data + len;
7b6f875f 1840 m->m_ext.ext_ref(m->m_ext.ext_arg);
7eccf245 1841 n->m_ext = m->m_ext;
b542cd49 1842 n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
984263bc
MD
1843 } else {
1844 bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
1845 }
1846 n->m_len = remain;
1847 m->m_len = len;
1848 n->m_next = m->m_next;
1849 m->m_next = 0;
1850 return (n);
1851}
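
/*
 * Illustrative usage sketch (not part of this file): cutting a packet into
 * a head of "hdrlen" bytes and a tail holding the rest.  "hdrlen" is a
 * hypothetical value; on failure the original chain is still valid and
 * still owned by the caller.
 *
 *     struct mbuf *tail;
 *
 *     tail = m_split(m, hdrlen, MB_DONTWAIT);
 *     if (tail == NULL)
 *             return (ENOBUFS);
 *
 * Because the tail may share a cluster with the head, check M_WRITABLE()
 * before modifying either piece in place.
 */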
50503f0f 1852
984263bc
MD
1853/*
1854 * Routine to copy from device local memory into mbufs.
50503f0f 1855 * Note: "offset" is ill-defined and always called as 0, so ignore it.
984263bc
MD
1856 */
1857struct mbuf *
50503f0f
JH
1858m_devget(char *buf, int len, int offset, struct ifnet *ifp,
1859 void (*copy)(volatile const void *from, volatile void *to, size_t length))
984263bc 1860{
50503f0f
JH
1861 struct mbuf *m, *mfirst = NULL, **mtail;
1862 int nsize, flags;
1863
1864 if (copy == NULL)
1865 copy = bcopy;
1866 mtail = &mfirst;
1867 flags = M_PKTHDR;
1868
1869 while (len > 0) {
1870 m = m_getl(len, MB_DONTWAIT, MT_DATA, flags, &nsize);
1871 if (m == NULL) {
1872 m_freem(mfirst);
1873 return (NULL);
984263bc 1874 }
50503f0f
JH
1875 m->m_len = min(len, nsize);
1876
1877 if (flags & M_PKTHDR) {
1878 if (len + max_linkhdr <= nsize)
1879 m->m_data += max_linkhdr;
1880 m->m_pkthdr.rcvif = ifp;
1881 m->m_pkthdr.len = len;
1882 flags = 0;
984263bc 1883 }
50503f0f
JH
1884
1885 copy(buf, m->m_data, (unsigned)m->m_len);
1886 buf += m->m_len;
1887 len -= m->m_len;
1888 *mtail = m;
1889 mtail = &m->m_next;
984263bc 1890 }
50503f0f
JH
1891
1892 return (mfirst);
984263bc
MD
1893}
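
/*
 * Illustrative usage sketch (not part of this file): a receive interrupt
 * handler copying a frame out of device-local memory and handing it to the
 * stack.  "sc", "sc->rx_buf" and "pktlen" are hypothetical driver state;
 * passing a NULL copy routine selects plain bcopy().
 *
 *     struct mbuf *m;
 *
 *     m = m_devget(sc->rx_buf, pktlen, 0, ifp, NULL);
 *     if (m != NULL)
 *             ifp->if_input(ifp, m);
 */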
1894
1895/*
cf12ba3c
SZ
1896 * Routine to pad mbuf to the specified length 'padto'.
1897 */
1898int
1899m_devpad(struct mbuf *m, int padto)
1900{
1901 struct mbuf *last = NULL;
1902 int padlen;
1903
1904 if (padto <= m->m_pkthdr.len)
1905 return 0;
1906
1907 padlen = padto - m->m_pkthdr.len;
1908
 1909	/* If there's only the packet-header mbuf and we can pad there, use it. */
1910 if (m->m_pkthdr.len == m->m_len && M_TRAILINGSPACE(m) >= padlen) {
1911 last = m;
1912 } else {
1913 /*
1914 * Walk packet chain to find last mbuf. We will either
1915 * pad there, or append a new mbuf and pad it
1916 */
1917 for (last = m; last->m_next != NULL; last = last->m_next)
1918 ; /* EMPTY */
1919
1920 /* `last' now points to last in chain. */
1921 if (M_TRAILINGSPACE(last) < padlen) {
1922 struct mbuf *n;
1923
1924 /* Allocate new empty mbuf, pad it. Compact later. */
1925 MGET(n, MB_DONTWAIT, MT_DATA);
1926 if (n == NULL)
1927 return ENOBUFS;
1928 n->m_len = 0;
1929 last->m_next = n;
1930 last = n;
1931 }
1932 }
1933 KKASSERT(M_TRAILINGSPACE(last) >= padlen);
1934 KKASSERT(M_WRITABLE(last));
1935
1936 /* Now zero the pad area */
1937 bzero(mtod(last, char *) + last->m_len, padlen);
1938 last->m_len += padlen;
1939 m->m_pkthdr.len += padlen;
1940 return 0;
1941}
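
/*
 * Illustrative usage sketch (not part of this file): an ethernet transmit
 * path padding runt frames up to the minimum wire size before programming
 * the hardware.  "m" is assumed to carry a packet header; ETHER_MIN_LEN and
 * ETHER_CRC_LEN come from <net/ethernet.h>.
 *
 *     if (m_devpad(m, ETHER_MIN_LEN - ETHER_CRC_LEN) != 0) {
 *             m_freem(m);
 *             return (ENOBUFS);
 *     }
 */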
1942
1943/*
984263bc
MD
1944 * Copy data from a buffer back into the indicated mbuf chain,
1945 * starting "off" bytes from the beginning, extending the mbuf
1946 * chain if necessary.
1947 */
1948void
8a3125c6 1949m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
984263bc 1950{
1fd87d54
RG
1951 int mlen;
1952 struct mbuf *m = m0, *n;
984263bc
MD
1953 int totlen = 0;
1954
7b6f875f 1955 if (m0 == NULL)
984263bc
MD
1956 return;
1957 while (off > (mlen = m->m_len)) {
1958 off -= mlen;
1959 totlen += mlen;
7b6f875f 1960 if (m->m_next == NULL) {
74f1caca 1961 n = m_getclr(MB_DONTWAIT, m->m_type);
7b6f875f 1962 if (n == NULL)
984263bc
MD
1963 goto out;
1964 n->m_len = min(MLEN, len + off);
1965 m->m_next = n;
1966 }
1967 m = m->m_next;
1968 }
1969 while (len > 0) {
1970 mlen = min (m->m_len - off, len);
1971 bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
1972 cp += mlen;
1973 len -= mlen;
1974 mlen += off;
1975 off = 0;
1976 totlen += mlen;
1977 if (len == 0)
1978 break;
7b6f875f 1979 if (m->m_next == NULL) {
74f1caca 1980 n = m_get(MB_DONTWAIT, m->m_type);
7b6f875f 1981 if (n == NULL)
984263bc
MD
1982 break;
1983 n->m_len = min(MLEN, len);
1984 m->m_next = n;
1985 }
1986 m = m->m_next;
1987 }
1988out: if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
1989 m->m_pkthdr.len = totlen;
1990}
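
/*
 * Illustrative usage sketch (not part of this file): overwriting a 32-bit
 * field "off" bytes into an already-built chain, e.g. patching a sequence
 * number after the fact.  "off", "seq" and "next_seq" are hypothetical.
 *
 *     u_int32_t seq = htonl(next_seq);
 *
 *     m_copyback(m, off, sizeof(seq), (caddr_t)&seq);
 */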
1991
920c9f10 1992/*
bf2cc98c
RP
1993 * Append the specified data to the indicated mbuf chain,
 1994 * extending the chain if the new data does not fit in
1995 * existing space.
1996 *
1997 * Return 1 if able to complete the job; otherwise 0.
1998 */
1999int
2000m_append(struct mbuf *m0, int len, c_caddr_t cp)
2001{
2002 struct mbuf *m, *n;
2003 int remainder, space;
2004
2005 for (m = m0; m->m_next != NULL; m = m->m_next)
2006 ;
2007 remainder = len;
2008 space = M_TRAILINGSPACE(m);
2009 if (space > 0) {
2010 /*
2011 * Copy into available space.
2012 */
2013 if (space > remainder)
2014 space = remainder;
2015 bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
2016 m->m_len += space;
2017 cp += space, remainder -= space;
2018 }
2019 while (remainder > 0) {
2020 /*
2021 * Allocate a new mbuf; could check space
2022 * and allocate a cluster instead.
2023 */
2024 n = m_get(MB_DONTWAIT, m->m_type);
2025 if (n == NULL)
2026 break;
2027 n->m_len = min(MLEN, remainder);
2028 bcopy(cp, mtod(n, caddr_t), n->m_len);
2029 cp += n->m_len, remainder -= n->m_len;
2030 m->m_next = n;
2031 m = n;
2032 }
2033 if (m0->m_flags & M_PKTHDR)
2034 m0->m_pkthdr.len += len - remainder;
2035 return (remainder == 0);
2036}
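
/*
 * Illustrative usage sketch (not part of this file): tacking a small option
 * blob onto the end of a packet.  "opt" and "optlen" are hypothetical; a
 * zero return means the data could only be partially appended.
 *
 *     if (m_append(m, optlen, (c_caddr_t)opt) == 0) {
 *             m_freem(m);
 *             return (ENOBUFS);
 *     }
 */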
2037
2038/*
920c9f10
AH
2039 * Apply function f to the data in an mbuf chain starting "off" bytes from
2040 * the beginning, continuing for "len" bytes.
2041 */
2042int
2043m_apply(struct mbuf *m, int off, int len,
2044 int (*f)(void *, void *, u_int), void *arg)
2045{
2046 u_int count;
2047 int rval;
2048
2049 KASSERT(off >= 0, ("m_apply, negative off %d", off));
2050 KASSERT(len >= 0, ("m_apply, negative len %d", len));
2051 while (off > 0) {
2052 KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
2053 if (off < m->m_len)
2054 break;
2055 off -= m->m_len;
2056 m = m->m_next;
2057 }
2058 while (len > 0) {
2059 KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
2060 count = min(m->m_len - off, len);
2061 rval = (*f)(arg, mtod(m, caddr_t) + off, count);
2062 if (rval)
2063 return (rval);
2064 len -= count;
2065 off = 0;
2066 m = m->m_next;
2067 }
2068 return (0);
2069}
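
/*
 * Illustrative usage sketch (not part of this file): m_apply() hides mbuf
 * boundaries from the caller, so a byte-wise accumulator only needs to
 * handle flat buffers.  "sum_bytes" is a hypothetical callback, defined in
 * the caller's file, matching the required (*f)(arg, data, len) signature.
 *
 *     static int
 *     sum_bytes(void *arg, void *data, u_int len)
 *     {
 *             u_int32_t *acc = arg;
 *             u_char *p = data;
 *
 *             while (len-- > 0)
 *                     *acc += *p++;
 *             return (0);
 *     }
 *
 *     u_int32_t acc = 0;
 *
 *     m_apply(m, off, len, sum_bytes, &acc);
 */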
2070
2071/*
 2072 * Return the mbuf, and the offset within it, of byte position 'loc' in the chain.
2073 */
2074struct mbuf *
2075m_getptr(struct mbuf *m, int loc, int *off)
2076{
2077
2078 while (loc >= 0) {
2079 /* Normal end of search. */
2080 if (m->m_len > loc) {
2081 *off = loc;
2082 return (m);
2083 } else {
2084 loc -= m->m_len;
2085 if (m->m_next == NULL) {
2086 if (loc == 0) {
2087 /* Point at the end of valid data. */
2088 *off = m->m_len;
2089 return (m);
2090 }
2091 return (NULL);
2092 }
2093 m = m->m_next;
2094 }
2095 }
2096 return (NULL);
2097}
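
/*
 * Illustrative usage sketch (not part of this file): locating the mbuf and
 * intra-mbuf offset that hold byte position "loc" of a chain.  "loc" and
 * "b" are hypothetical.
 *
 *     struct mbuf *n;
 *     int off;
 *
 *     n = m_getptr(m, loc, &off);
 *     if (n != NULL)
 *             b = *(mtod(n, u_char *) + off);
 */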
2098
984263bc
MD
2099void
2100m_print(const struct mbuf *m)
2101{
2102 int len;
2103 const struct mbuf *m2;
2104
2105 len = m->m_pkthdr.len;
2106 m2 = m;
2107 while (len) {
6ea70f76 2108 kprintf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
984263bc
MD
2109 len -= m2->m_len;
2110 m2 = m2->m_next;
2111 }
2112 return;
2113}
2114
2115/*
2116 * "Move" mbuf pkthdr from "from" to "to".
2117 * "from" must have M_PKTHDR set, and "to" must be empty.
2118 */
2119void
2120m_move_pkthdr(struct mbuf *to, struct mbuf *from)
2121{
e0d05288 2122 KASSERT((to->m_flags & M_PKTHDR), ("m_move_pkthdr: not packet header"));
984263bc 2123
77e294a1 2124 to->m_flags |= from->m_flags & M_COPYFLAGS;
984263bc
MD
2125 to->m_pkthdr = from->m_pkthdr; /* especially tags */
2126 SLIST_INIT(&from->m_pkthdr.tags); /* purge tags from src */
984263bc
MD
2127}
2128
2129/*
2130 * Duplicate "from"'s mbuf pkthdr in "to".
2131 * "from" must have M_PKTHDR set, and "to" must be empty.
2132 * In particular, this does a deep copy of the packet tags.
2133 */
2134int
f15db79e 2135m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
984263bc 2136{
7f3602fe
JH
2137 KASSERT((to->m_flags & M_PKTHDR), ("m_dup_pkthdr: not packet header"));
2138
4bac35fc 2139 to->m_flags = (from->m_flags & M_COPYFLAGS) |
c4da22e4 2140 (to->m_flags & ~M_COPYFLAGS);
984263bc
MD
2141 to->m_pkthdr = from->m_pkthdr;
2142 SLIST_INIT(&to->m_pkthdr.tags);
2143 return (m_tag_copy_chain(to, from, how));
2144}
2145
2146/*
 2147 * Defragment an mbuf chain, returning the shortest possible
2148 * chain of mbufs and clusters. If allocation fails and
2149 * this cannot be completed, NULL will be returned, but
2150 * the passed in chain will be unchanged. Upon success,
2151 * the original chain will be freed, and the new chain
2152 * will be returned.
2153 *
 2154 * If an mbuf without a packet header is passed in, the
 2155 * original mbuf chain is returned unchanged.
c8f5127a
JS
2156 *
2157 * m_defrag_nofree doesn't free the passed in mbuf.
984263bc
MD
2158 */
2159struct mbuf *
2160m_defrag(struct mbuf *m0, int how)
2161{
c8f5127a
JS
2162 struct mbuf *m_new;
2163
2164 if ((m_new = m_defrag_nofree(m0, how)) == NULL)
2165 return (NULL);
2166 if (m_new != m0)
2167 m_freem(m0);
2168 return (m_new);
2169}
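
/*
 * Illustrative usage sketch (not part of this file): a transmit path whose
 * DMA engine only accepts a limited number of segments commonly falls back
 * to m_defrag() when the initial mapping attempt fails.  On failure the
 * original chain is untouched and still owned by the caller.
 *
 *     struct mbuf *m_new;
 *
 *     m_new = m_defrag(m, MB_DONTWAIT);
 *     if (m_new == NULL) {
 *             m_freem(m);
 *             return (ENOBUFS);
 *     }
 *     m = m_new;
 */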
2170
2171struct mbuf *
2172m_defrag_nofree(struct mbuf *m0, int how)
2173{
984263bc 2174 struct mbuf *m_new = NULL, *m_final = NULL;
61721e90 2175 int progress = 0, length, nsize;
984263bc
MD
2176
2177 if (!(m0->m_flags & M_PKTHDR))
2178 return (m0);
2179
2180#ifdef MBUF_STRESS_TEST
2181 if (m_defragrandomfailures) {
0ced1954 2182 int temp = karc4random() & 0xff;
984263bc
MD
2183 if (temp == 0xba)
2184 goto nospace;
2185 }
2186#endif
2187
61721e90 2188 m_final = m_getl(m0->m_pkthdr.len, how, MT_DATA, M_PKTHDR, &nsize);
984263bc
MD
2189 if (m_final == NULL)
2190 goto nospace;
61721e90 2191 m_final->m_len = 0; /* in case m0->m_pkthdr.len is zero */
984263bc 2192
3641b7ca 2193 if (m_dup_pkthdr(m_final, m0, how) == 0)
984263bc
MD
2194 goto nospace;
2195
2196 m_new = m_final;
2197
2198 while (progress < m0->m_pkthdr.len) {
2199 length = m0->m_pkthdr.len - progress;
2200 if (length > MCLBYTES)
2201 length = MCLBYTES;
2202
2203 if (m_new == NULL) {
61721e90 2204 m_new = m_getl(length, how, MT_DATA, 0, &nsize);
984263bc
MD
2205 if (m_new == NULL)
2206 goto nospace;
2207 }
2208
2209 m_copydata(m0, progress, length, mtod(m_new, caddr_t));
2210 progress += length;
2211 m_new->m_len = length;
2212 if (m_new != m_final)
2213 m_cat(m_final, m_new);
2214 m_new = NULL;
2215 }
2216 if (m0->m_next == NULL)
2217 m_defraguseless++;
984263bc 2218 m_defragpackets++;
c8f5127a
JS
2219 m_defragbytes += m_final->m_pkthdr.len;
2220 return (m_final);
984263bc
MD
2221nospace:
2222 m_defragfailure++;
2223 if (m_new)
2224 m_free(m_new);
61721e90 2225 m_freem(m_final);
984263bc
MD
2226 return (NULL);
2227}
0c33f36d
JH
2228
2229/*
2230 * Move data from uio into mbufs.
0c33f36d
JH
2231 */
2232struct mbuf *
e12241e1 2233m_uiomove(struct uio *uio)
0c33f36d 2234{
0c33f36d 2235 struct mbuf *m; /* current working mbuf */
e12241e1
JH
2236 struct mbuf *head = NULL; /* result mbuf chain */
2237 struct mbuf **mp = &head;
e54488bb
MD
2238 int flags = M_PKTHDR;
2239 int nsize;
2240 int error;
2241 int resid;
0c33f36d 2242
0c33f36d 2243 do {
e54488bb
MD
2244 if (uio->uio_resid > INT_MAX)
2245 resid = INT_MAX;
2246 else
2247 resid = (int)uio->uio_resid;
e12241e1 2248 m = m_getl(resid, MB_WAIT, MT_DATA, flags, &nsize);
61721e90
JH
2249 if (flags) {
2250 m->m_pkthdr.len = 0;
2251 /* Leave room for protocol headers. */
2252 if (resid < MHLEN)
2253 MH_ALIGN(m, resid);
2254 flags = 0;
0c33f36d 2255 }
e54488bb 2256 m->m_len = imin(nsize, resid);
61721e90 2257 error = uiomove(mtod(m, caddr_t), m->m_len, uio);
0c33f36d
JH
2258 if (error) {
2259 m_free(m);
2260 goto failed;
2261 }
0c33f36d
JH
2262 *mp = m;
2263 mp = &m->m_next;
61721e90 2264 head->m_pkthdr.len += m->m_len;
e54488bb 2265 } while (uio->uio_resid > 0);
0c33f36d
JH
2266
2267 return (head);
2268
2269failed:
61721e90 2270 m_freem(head);
0c33f36d
JH
2271 return (NULL);
2272}
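
/*
 * Illustrative usage sketch (not part of this file): a write path turning
 * the remainder of a user I/O request into a packet-header mbuf chain that
 * can then be handed to a protocol or driver output routine.
 *
 *     struct mbuf *m;
 *
 *     m = m_uiomove(uio);
 *     if (m == NULL)
 *             return (ENOBUFS);
 */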
df80f2ea 2273
50503f0f
JH
2274struct mbuf *
2275m_last(struct mbuf *m)
2276{
2277 while (m->m_next)
2278 m = m->m_next;
2279 return (m);
2280}
2281
df80f2ea
JH
2282/*
2283 * Return the number of bytes in an mbuf chain.
2284 * If lastm is not NULL, also return the last mbuf.
2285 */
2286u_int
2287m_lengthm(struct mbuf *m, struct mbuf **lastm)
2288{
2289 u_int len = 0;
2290 struct mbuf *prev = m;
2291
2292 while (m) {
2293 len += m->m_len;
2294 prev = m;
2295 m = m->m_next;
2296 }
2297 if (lastm != NULL)
2298 *lastm = prev;
2299 return (len);
2300}
2301
2302/*
2303 * Like m_lengthm(), except also keep track of mbuf usage.
2304 */
2305u_int
2306m_countm(struct mbuf *m, struct mbuf **lastm, u_int *pmbcnt)
2307{
2308 u_int len = 0, mbcnt = 0;
2309 struct mbuf *prev = m;
2310
2311 while (m) {
2312 len += m->m_len;
2313 mbcnt += MSIZE;
2314 if (m->m_flags & M_EXT)
2315 mbcnt += m->m_ext.ext_size;
2316 prev = m;
2317 m = m->m_next;
2318 }
2319 if (lastm != NULL)
2320 *lastm = prev;
2321 *pmbcnt = mbcnt;
2322 return (len);
2323}