[dragonfly.git] / sys/kern/uipc_mbuf.c
/*
 * (MPSAFE)
 *
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
 */

#include "opt_param.h"
#include "opt_mbuf_stress_test.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/file.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/objcache.h>
#include <sys/tree.h>
#include <sys/protosw.h>
#include <sys/uio.h>
#include <sys/thread.h>
#include <sys/globaldata.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

#include <machine/atomic.h>
#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#ifdef INVARIANTS
#include <machine/cpu.h>
#endif

/*
 * mbuf cluster meta-data
 */
struct mbcluster {
	int32_t	mcl_refs;
	void	*mcl_data;
};

/*
 * mbuf tracking for debugging purposes
 */
#ifdef MBUF_DEBUG

static MALLOC_DEFINE(M_MTRACK, "mtrack", "mtrack");

struct mbtrack;
RB_HEAD(mbuf_rb_tree, mbtrack);
RB_PROTOTYPE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *);

struct mbtrack {
	RB_ENTRY(mbtrack) rb_node;
	int	trackid;
	struct mbuf *m;
};

static int
mbtrack_cmp(struct mbtrack *mb1, struct mbtrack *mb2)
{
	if (mb1->m < mb2->m)
		return(-1);
	if (mb1->m > mb2->m)
		return(1);
	return(0);
}

RB_GENERATE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *, m);

struct mbuf_rb_tree	mbuf_track_root;
static struct spinlock	mbuf_track_spin = SPINLOCK_INITIALIZER(mbuf_track_spin);

static void
mbuftrack(struct mbuf *m)
{
	struct mbtrack *mbt;

	mbt = kmalloc(sizeof(*mbt), M_MTRACK, M_INTWAIT|M_ZERO);
	spin_lock(&mbuf_track_spin);
	mbt->m = m;
	if (mbuf_rb_tree_RB_INSERT(&mbuf_track_root, mbt)) {
		spin_unlock(&mbuf_track_spin);
		panic("mbuftrack: mbuf %p already being tracked", m);
	}
	spin_unlock(&mbuf_track_spin);
}

static void
mbufuntrack(struct mbuf *m)
{
	struct mbtrack *mbt;

	spin_lock(&mbuf_track_spin);
	mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
	if (mbt == NULL) {
		spin_unlock(&mbuf_track_spin);
		panic("mbufuntrack: mbuf %p was not tracked", m);
	} else {
		mbuf_rb_tree_RB_REMOVE(&mbuf_track_root, mbt);
		spin_unlock(&mbuf_track_spin);
		kfree(mbt, M_MTRACK);
	}
}

void
mbuftrackid(struct mbuf *m, int trackid)
{
	struct mbtrack *mbt;
	struct mbuf *n;

	spin_lock(&mbuf_track_spin);
	while (m) {
		n = m->m_nextpkt;
		while (m) {
			mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
			if (mbt == NULL) {
				spin_unlock(&mbuf_track_spin);
				panic("mbuftrackid: mbuf %p not tracked", m);
			}
			mbt->trackid = trackid;
			m = m->m_next;
		}
		m = n;
	}
	spin_unlock(&mbuf_track_spin);
}

static int
mbuftrack_callback(struct mbtrack *mbt, void *arg)
{
	struct sysctl_req *req = arg;
	char buf[64];
	int error;

	ksnprintf(buf, sizeof(buf), "mbuf %p track %d\n", mbt->m, mbt->trackid);

	spin_unlock(&mbuf_track_spin);
	error = SYSCTL_OUT(req, buf, strlen(buf));
	spin_lock(&mbuf_track_spin);
	if (error)
		return(-error);
	return(0);
}

static int
mbuftrack_show(SYSCTL_HANDLER_ARGS)
{
	int error;

	spin_lock(&mbuf_track_spin);
	error = mbuf_rb_tree_RB_SCAN(&mbuf_track_root, NULL,
				     mbuftrack_callback, req);
	spin_unlock(&mbuf_track_spin);
	return (-error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, showmbufs, CTLFLAG_RD|CTLTYPE_STRING,
	    0, 0, mbuftrack_show, "A", "Show all in-use mbufs");

#else

#define mbuftrack(m)
#define mbufuntrack(m)

#endif

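/*
 * Example (illustrative, not part of the original source): with
 * MBUF_DEBUG compiled in, the tracked mbufs can be dumped from
 * userland through the sysctl defined above, e.g.
 *
 *	sysctl kern.ipc.showmbufs
 *
 * which emits one "mbuf %p track %d" line per tracked mbuf.
 */
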
static void mbinit(void *);
SYSINIT(mbuf, SI_BOOT2_MACHDEP, SI_ORDER_FIRST, mbinit, NULL)

static u_long	mbtypes[SMP_MAXCPU][MT_NTYPES];

static struct mbstat mbstat[SMP_MAXCPU];
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;
int	m_defragpackets;
int	m_defragbytes;
int	m_defraguseless;
int	m_defragfailure;
#ifdef MBUF_STRESS_TEST
int	m_defragrandomfailures;
#endif

struct objcache *mbuf_cache, *mbufphdr_cache;
struct objcache *mclmeta_cache, *mjclmeta_cache;
struct objcache *mbufcluster_cache, *mbufphdrcluster_cache;
struct objcache *mbufjcluster_cache, *mbufphdrjcluster_cache;

int	nmbclusters;
int	nmbufs;

SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
	   &max_linkhdr, 0, "Max size of a link-level header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
	   &max_protohdr, 0, "Max size of a protocol header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0,
	   "Max size of link+protocol headers");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
	   &max_datalen, 0, "Max data payload size without headers");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
	   &mbuf_wait, 0, "Time in ticks to sleep after failed mbuf allocations");
static int do_mbstat(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_kern_ipc, KIPC_MBSTAT, mbstat, CTLTYPE_STRUCT|CTLFLAG_RD,
	    0, 0, do_mbstat, "S,mbstat", "mbuf usage statistics");

static int do_mbtypes(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_kern_ipc, OID_AUTO, mbtypes, CTLTYPE_ULONG|CTLFLAG_RD,
	    0, 0, do_mbtypes, "LU", "");

static int
do_mbstat(SYSCTL_HANDLER_ARGS)
{
	struct mbstat mbstat_total;
	struct mbstat *mbstat_totalp;
	int i;

	bzero(&mbstat_total, sizeof(mbstat_total));
	mbstat_totalp = &mbstat_total;

	for (i = 0; i < ncpus; i++) {
		mbstat_total.m_mbufs += mbstat[i].m_mbufs;
		mbstat_total.m_clusters += mbstat[i].m_clusters;
		mbstat_total.m_spare += mbstat[i].m_spare;
		mbstat_total.m_clfree += mbstat[i].m_clfree;
		mbstat_total.m_drops += mbstat[i].m_drops;
		mbstat_total.m_wait += mbstat[i].m_wait;
		mbstat_total.m_drain += mbstat[i].m_drain;
		mbstat_total.m_mcfail += mbstat[i].m_mcfail;
		mbstat_total.m_mpfail += mbstat[i].m_mpfail;
	}
	/*
	 * The following fields are not cumulative fields so just
	 * get their values once.
	 */
	mbstat_total.m_msize = mbstat[0].m_msize;
	mbstat_total.m_mclbytes = mbstat[0].m_mclbytes;
	mbstat_total.m_minclsize = mbstat[0].m_minclsize;
	mbstat_total.m_mlen = mbstat[0].m_mlen;
	mbstat_total.m_mhlen = mbstat[0].m_mhlen;

	return (sysctl_handle_opaque(oidp, mbstat_totalp,
	    sizeof(mbstat_total), req));
}

static int
do_mbtypes(SYSCTL_HANDLER_ARGS)
{
	u_long totals[MT_NTYPES];
	int i, j;

	for (i = 0; i < MT_NTYPES; i++)
		totals[i] = 0;

	for (i = 0; i < ncpus; i++) {
		for (j = 0; j < MT_NTYPES; j++)
			totals[j] += mbtypes[i][j];
	}

	return (sysctl_handle_opaque(oidp, totals, sizeof(totals), req));
}

/*
 * These are read-only because we do not currently have any code
 * to adjust the objcache limits after the fact.  The variables
 * may only be set as boot-time tunables.
 */
SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
	   &nmbclusters, 0, "Maximum number of mbuf clusters available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
	   "Maximum number of mbufs available");

SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
	   &m_defragpackets, 0, "Number of defragment packets");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
	   &m_defragbytes, 0, "Number of defragment bytes");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
	   &m_defraguseless, 0, "Number of useless defragment mbuf chain operations");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
	   &m_defragfailure, 0, "Number of failed defragment mbuf chain operations");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
	   &m_defragrandomfailures, 0, "");
#endif

static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");
static MALLOC_DEFINE(M_MBUFCL, "mbufcl", "mbufcl");
static MALLOC_DEFINE(M_MJBUFCL, "mbufcl", "mbufcl");
static MALLOC_DEFINE(M_MCLMETA, "mclmeta", "mclmeta");
static MALLOC_DEFINE(M_MJCLMETA, "mjclmeta", "mjclmeta");

static void m_reclaim(void);
static void m_mclref(void *arg);
static void m_mclfree(void *arg);

/*
 * NOTE: Default NMBUFS must take into account a possible DOS attack
 *	 using fd passing on unix domain sockets.
 */
#ifndef NMBCLUSTERS
#define NMBCLUSTERS	(512 + maxusers * 16)
#endif
#ifndef NMBUFS
#define NMBUFS		(nmbclusters * 2 + maxfiles)
#endif

/*
 * Perform sanity checks of tunables declared above.
 */
static void
tunable_mbinit(void *dummy)
{
	/*
	 * This has to be done before VM init.
	 */
	nmbclusters = NMBCLUSTERS;
	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	nmbufs = NMBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
	/* Sanity checks */
	if (nmbufs < nmbclusters * 2)
		nmbufs = nmbclusters * 2;
}
SYSINIT(tunable_mbinit, SI_BOOT1_TUNABLES, SI_ORDER_ANY,
	tunable_mbinit, NULL);
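
/*
 * Example (illustrative, values are placeholders): the tunables
 * consumed above are normally set from the boot loader, e.g. in
 * /boot/loader.conf:
 *
 *	kern.ipc.nmbclusters="65536"
 *	kern.ipc.nmbufs="131072"
 *
 * The sanity check in tunable_mbinit() keeps nmbufs at no less than
 * twice nmbclusters regardless of what was specified.
 */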

/* "number of clusters of pages" */
#define NCL_INIT	1

#define NMB_INIT	16

/*
 * The mbuf object cache only guarantees that m_next and m_nextpkt are
 * NULL and that m_data points to the beginning of the data area.  In
 * particular, m_len and m_pkthdr.len are uninitialized.  It is the
 * responsibility of the caller to initialize those fields before use.
 */

static __inline boolean_t
mbuf_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;

	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_data = m->m_dat;
	m->m_flags = 0;

	return (TRUE);
}
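
/*
 * Example (illustrative): a caller obtaining an mbuf from this cache
 * must finish the initialization itself, exactly as m_get() does
 * further below:
 *
 *	m = objcache_get(mbuf_cache, ocf);
 *	if (m != NULL)
 *		m->m_len = 0;
 */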

/*
 * Initialize the mbuf and the packet header fields.
 */
static boolean_t
mbufphdr_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;

	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_data = m->m_pktdat;
	m->m_flags = M_PKTHDR | M_PHCACHE;

	m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
	SLIST_INIT(&m->m_pkthdr.tags);
	m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
	m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */

	return (TRUE);
}

/*
 * An mbcluster object consists of a 2K (MCLBYTES) cluster and a refcount.
 */
static boolean_t
mclmeta_ctor(void *obj, void *private, int ocflags)
{
	struct mbcluster *cl = obj;
	void *buf;

	if (ocflags & M_NOWAIT)
		buf = kmalloc(MCLBYTES, M_MBUFCL, M_NOWAIT | M_ZERO);
	else
		buf = kmalloc(MCLBYTES, M_MBUFCL, M_INTWAIT | M_ZERO);
	if (buf == NULL)
		return (FALSE);
	cl->mcl_refs = 0;
	cl->mcl_data = buf;
	return (TRUE);
}

static boolean_t
mjclmeta_ctor(void *obj, void *private, int ocflags)
{
	struct mbcluster *cl = obj;
	void *buf;

	if (ocflags & M_NOWAIT)
		buf = kmalloc(MJUMPAGESIZE, M_MBUFCL, M_NOWAIT | M_ZERO);
	else
		buf = kmalloc(MJUMPAGESIZE, M_MBUFCL, M_INTWAIT | M_ZERO);
	if (buf == NULL)
		return (FALSE);
	cl->mcl_refs = 0;
	cl->mcl_data = buf;
	return (TRUE);
}

static void
mclmeta_dtor(void *obj, void *private)
{
	struct mbcluster *mcl = obj;

	KKASSERT(mcl->mcl_refs == 0);
	kfree(mcl->mcl_data, M_MBUFCL);
}

static void
linkjcluster(struct mbuf *m, struct mbcluster *cl, uint size)
{
	/*
	 * Add the cluster to the mbuf.  The caller will detect that the
	 * mbuf now has an attached cluster.
	 */
	m->m_ext.ext_arg = cl;
	m->m_ext.ext_buf = cl->mcl_data;
	m->m_ext.ext_ref = m_mclref;
	m->m_ext.ext_free = m_mclfree;
	m->m_ext.ext_size = size;
	atomic_add_int(&cl->mcl_refs, 1);

	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT | M_EXT_CLUSTER;
}

static void
linkcluster(struct mbuf *m, struct mbcluster *cl)
{
	linkjcluster(m, cl, MCLBYTES);
}

static boolean_t
mbufphdrcluster_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbufphdr_ctor(obj, private, ocflags);
	cl = objcache_get(mclmeta_cache, ocflags);
	if (cl == NULL) {
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (FALSE);
	}
	m->m_flags |= M_CLCACHE;
	linkcluster(m, cl);
	return (TRUE);
}

static boolean_t
mbufphdrjcluster_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbufphdr_ctor(obj, private, ocflags);
	cl = objcache_get(mjclmeta_cache, ocflags);
	if (cl == NULL) {
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (FALSE);
	}
	m->m_flags |= M_CLCACHE;
	linkjcluster(m, cl, MJUMPAGESIZE);
	return (TRUE);
}

static boolean_t
mbufcluster_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbuf_ctor(obj, private, ocflags);
	cl = objcache_get(mclmeta_cache, ocflags);
	if (cl == NULL) {
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (FALSE);
	}
	m->m_flags |= M_CLCACHE;
	linkcluster(m, cl);
	return (TRUE);
}

static boolean_t
mbufjcluster_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbuf_ctor(obj, private, ocflags);
	cl = objcache_get(mjclmeta_cache, ocflags);
	if (cl == NULL) {
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (FALSE);
	}
	m->m_flags |= M_CLCACHE;
	linkjcluster(m, cl, MJUMPAGESIZE);
	return (TRUE);
}

/*
 * Used for both the cluster and cluster PHDR caches.
 *
 * The mbuf may have lost its cluster due to sharing, deal
 * with the situation by checking M_EXT.
 */
static void
mbufcluster_dtor(void *obj, void *private)
{
	struct mbuf *m = obj;
	struct mbcluster *mcl;

	if (m->m_flags & M_EXT) {
		KKASSERT((m->m_flags & M_EXT_CLUSTER) != 0);
		mcl = m->m_ext.ext_arg;
		KKASSERT(mcl->mcl_refs == 1);
		mcl->mcl_refs = 0;
		if (m->m_flags & M_EXT && m->m_ext.ext_size != MCLBYTES)
			objcache_put(mjclmeta_cache, mcl);
		else
			objcache_put(mclmeta_cache, mcl);
	}
}

struct objcache_malloc_args mbuf_malloc_args = { MSIZE, M_MBUF };
struct objcache_malloc_args mclmeta_malloc_args =
	{ sizeof(struct mbcluster), M_MCLMETA };

/* ARGSUSED*/
static void
mbinit(void *dummy)
{
	int mb_limit, cl_limit;
	int limit;
	int i;

	/*
	 * Initialize statistics
	 */
	for (i = 0; i < ncpus; i++) {
		atomic_set_long_nonlocked(&mbstat[i].m_msize, MSIZE);
		atomic_set_long_nonlocked(&mbstat[i].m_mclbytes, MCLBYTES);
		atomic_set_long_nonlocked(&mbstat[i].m_mjumpagesize,
					  MJUMPAGESIZE);
		atomic_set_long_nonlocked(&mbstat[i].m_minclsize, MINCLSIZE);
		atomic_set_long_nonlocked(&mbstat[i].m_mlen, MLEN);
		atomic_set_long_nonlocked(&mbstat[i].m_mhlen, MHLEN);
	}

	/*
	 * Create object caches and save cluster limits, which will
	 * be used to adjust backing kmalloc pools' limit later.
	 */

	mb_limit = cl_limit = 0;

	limit = nmbufs;
	mbuf_cache = objcache_create("mbuf",
	    &limit, 0,
	    mbuf_ctor, NULL, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	limit = nmbufs;
	mbufphdr_cache = objcache_create("mbuf pkt hdr",
	    &limit, nmbufs / 4,
	    mbufphdr_ctor, NULL, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	cl_limit = nmbclusters;
	mclmeta_cache = objcache_create("cluster mbuf",
	    &cl_limit, 0,
	    mclmeta_ctor, mclmeta_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args);

	cl_limit = nmbclusters;
	mjclmeta_cache = objcache_create("jcluster mbuf",
	    &cl_limit, 0,
	    mjclmeta_ctor, mclmeta_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args);

	limit = nmbclusters;
	mbufcluster_cache = objcache_create("mbuf + cluster",
	    &limit, 0,
	    mbufcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	limit = nmbclusters;
	mbufphdrcluster_cache = objcache_create("mbuf pkt hdr + cluster",
	    &limit, nmbclusters / 16,
	    mbufphdrcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	limit = nmbclusters;
	mbufjcluster_cache = objcache_create("mbuf + jcluster",
	    &limit, 0,
	    mbufjcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	limit = nmbclusters;
	mbufphdrjcluster_cache = objcache_create("mbuf pkt hdr + jcluster",
	    &limit, nmbclusters / 16,
	    mbufphdrjcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
	mb_limit += limit;

	/*
	 * Adjust backing kmalloc pools' limit
	 *
	 * NOTE: We raise the limit by another 1/8 to take the effect
	 * of loosememuse into account.
	 */
	cl_limit += cl_limit / 8;
	kmalloc_raise_limit(mclmeta_malloc_args.mtype,
			    mclmeta_malloc_args.objsize * cl_limit);
	kmalloc_raise_limit(M_MBUFCL, MCLBYTES * cl_limit * 3/4 +
			    MJUMPAGESIZE * cl_limit / 4);
	/*kmalloc_raise_limit(M_MBUFCL, MCLBYTES * cl_limit);*/

	mb_limit += mb_limit / 8;
	kmalloc_raise_limit(mbuf_malloc_args.mtype,
			    mbuf_malloc_args.objsize * mb_limit);
}

/*
 * Return the number of references to this mbuf's data.  0 is returned
 * if the mbuf is not M_EXT, a reference count is returned if it is
 * M_EXT | M_EXT_CLUSTER, and 99 is returned if it is a special M_EXT.
 */
int
m_sharecount(struct mbuf *m)
{
	switch (m->m_flags & (M_EXT | M_EXT_CLUSTER)) {
	case 0:
		return (0);
	case M_EXT:
		return (99);
	case M_EXT | M_EXT_CLUSTER:
		return (((struct mbcluster *)m->m_ext.ext_arg)->mcl_refs);
	}
	/* NOTREACHED */
	return (0);		/* to shut up compiler */
}
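
/*
 * Example (illustrative): m_free() below uses this to decide whether a
 * cluster may be recycled:
 *
 *	if (m_sharecount(m) == 1)
 *		... the cluster is exclusively ours and can be returned
 *		    to its cache ...
 */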

/*
 * change mbuf to new type
 */
void
m_chtype(struct mbuf *m, int type)
{
	struct globaldata *gd = mycpu;

	atomic_add_long_nonlocked(&mbtypes[gd->gd_cpuid][type], 1);
	atomic_subtract_long_nonlocked(&mbtypes[gd->gd_cpuid][m->m_type], 1);
	atomic_set_short_nonlocked(&m->m_type, type);
}

static void
m_reclaim(void)
{
	struct domain *dp;
	struct protosw *pr;

	kprintf("Debug: m_reclaim() called\n");

	SLIST_FOREACH(dp, &domains, dom_next) {
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
			if (pr->pr_drain)
				(*pr->pr_drain)();
		}
	}
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_drain, 1);
}

static __inline void
updatestats(struct mbuf *m, int type)
{
	struct globaldata *gd = mycpu;

	m->m_type = type;
	mbuftrack(m);
#ifdef MBUF_DEBUG
	KASSERT(m->m_next == NULL, ("mbuf %p: bad m_next in get", m));
	KASSERT(m->m_nextpkt == NULL, ("mbuf %p: bad m_nextpkt in get", m));
#endif

	atomic_add_long_nonlocked(&mbtypes[gd->gd_cpuid][type], 1);
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mbufs, 1);
}

/*
 * Allocate an mbuf.
 */
struct mbuf *
m_get(int how, int type)
{
	struct mbuf *m;
	int ntries = 0;
	int ocf = MBTOM(how);

retryonce:

	m = objcache_get(mbuf_cache, ocf);

	if (m == NULL) {
		if ((how & MB_TRYWAIT) && ntries++ == 0) {
			struct objcache *reclaimlist[] = {
				mbufphdr_cache,
				mbufcluster_cache,
				mbufphdrcluster_cache,
				mbufjcluster_cache,
				mbufphdrjcluster_cache
			};
			const int nreclaims = NELEM(reclaimlist);

			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
				m_reclaim();
			goto retryonce;
		}
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (NULL);
	}
#ifdef MBUF_DEBUG
	KASSERT(m->m_data == m->m_dat, ("mbuf %p: bad m_data in get", m));
#endif
	m->m_len = 0;

	updatestats(m, type);
	return (m);
}
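
/*
 * Example (illustrative) of the common allocation pattern:
 *
 *	struct mbuf *m;
 *
 *	m = m_get(MB_DONTWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 */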

struct mbuf *
m_gethdr(int how, int type)
{
	struct mbuf *m;
	int ocf = MBTOM(how);
	int ntries = 0;

retryonce:

	m = objcache_get(mbufphdr_cache, ocf);

	if (m == NULL) {
		if ((how & MB_TRYWAIT) && ntries++ == 0) {
			struct objcache *reclaimlist[] = {
				mbuf_cache,
				mbufcluster_cache, mbufphdrcluster_cache,
				mbufjcluster_cache, mbufphdrjcluster_cache
			};
			const int nreclaims = NELEM(reclaimlist);

			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
				m_reclaim();
			goto retryonce;
		}
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (NULL);
	}
#ifdef MBUF_DEBUG
	KASSERT(m->m_data == m->m_pktdat, ("mbuf %p: bad m_data in get", m));
#endif
	m->m_len = 0;
	m->m_pkthdr.len = 0;

	updatestats(m, type);
	return (m);
}

/*
 * Get an mbuf (not an mbuf cluster!) and zero it.
 * Deprecated.
 */
struct mbuf *
m_getclr(int how, int type)
{
	struct mbuf *m;

	m = m_get(how, type);
	if (m != NULL)
		bzero(m->m_data, MLEN);
	return (m);
}

struct mbuf *
m_getjcl(int how, short type, int flags, size_t size)
{
	struct mbuf *m = NULL;
	struct objcache *mbclc, *mbphclc;
	int ocflags = MBTOM(how);
	int ntries = 0;

	switch (size) {
	case MCLBYTES:
		mbclc = mbufcluster_cache;
		mbphclc = mbufphdrcluster_cache;
		break;
	default:
		mbclc = mbufjcluster_cache;
		mbphclc = mbufphdrjcluster_cache;
		break;
	}

retryonce:

	if (flags & M_PKTHDR)
		m = objcache_get(mbphclc, ocflags);
	else
		m = objcache_get(mbclc, ocflags);

	if (m == NULL) {
		if ((how & MB_TRYWAIT) && ntries++ == 0) {
			struct objcache *reclaimlist[1];

			if (flags & M_PKTHDR)
				reclaimlist[0] = mbclc;
			else
				reclaimlist[0] = mbphclc;
			if (!objcache_reclaimlist(reclaimlist, 1, ocflags))
				m_reclaim();
			goto retryonce;
		}
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (NULL);
	}

#ifdef MBUF_DEBUG
	KASSERT(m->m_data == m->m_ext.ext_buf,
		("mbuf %p: bad m_data in get", m));
#endif
	m->m_type = type;
	m->m_len = 0;
	m->m_pkthdr.len = 0;	/* just do it unconditionally */

	mbuftrack(m);

	atomic_add_long_nonlocked(&mbtypes[mycpu->gd_cpuid][type], 1);
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
	return (m);
}

/*
 * Returns an mbuf with an attached cluster.
 * Because many network drivers use this kind of buffer a lot, it is
 * convenient to keep a small pool of free buffers of this kind.
 * Even a small size such as 10 gives about 10% improvement in the
 * forwarding rate in a bridge or router.
 */
struct mbuf *
m_getcl(int how, short type, int flags)
{
	return (m_getjcl(how, type, flags, MCLBYTES));
}
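
/*
 * Example (illustrative): a typical driver receive-ring refill using
 * m_getcl():
 *
 *	m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_len = m->m_pkthdr.len = MCLBYTES;
 */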

/*
 * Allocate chain of requested length.
 */
struct mbuf *
m_getc(int len, int how, int type)
{
	struct mbuf *n, *nfirst = NULL, **ntail = &nfirst;
	int nsize;

	while (len > 0) {
		n = m_getl(len, how, type, 0, &nsize);
		if (n == NULL)
			goto failed;
		n->m_len = 0;
		*ntail = n;
		ntail = &n->m_next;
		len -= nsize;
	}
	return (nfirst);

failed:
	m_freem(nfirst);
	return (NULL);
}

/*
 * Allocate len-worth of mbufs and/or mbuf clusters (whatever fits best)
 * and return a pointer to the head of the allocated chain.  If m0 is
 * non-null, then we assume that it is a single mbuf or an mbuf chain to
 * which we want len bytes worth of mbufs and/or clusters attached, and so
 * if we succeed in allocating it, we will just return a pointer to m0.
 *
 * If we happen to fail at any point during the allocation, we will free
 * up everything we have already allocated and return NULL.
 *
 * Deprecated.  Use m_getc() and m_cat() instead.
 */
struct mbuf *
m_getm(struct mbuf *m0, int len, int type, int how)
{
	struct mbuf *nfirst;

	nfirst = m_getc(len, how, type);

	if (m0 != NULL) {
		m_last(m0)->m_next = nfirst;
		return (m0);
	}

	return (nfirst);
}

/*
 * Adds a cluster to a normal mbuf, M_EXT is set on success.
 * Deprecated.  Use m_getcl() instead.
 */
void
m_mclget(struct mbuf *m, int how)
{
	struct mbcluster *mcl;

	KKASSERT((m->m_flags & M_EXT) == 0);
	mcl = objcache_get(mclmeta_cache, MBTOM(how));
	if (mcl != NULL) {
		linkcluster(m, mcl);
		atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters,
					  1);
	} else {
		++mbstat[mycpu->gd_cpuid].m_drops;
	}
}

/*
 * Updates to mbcluster must be MPSAFE.  Only an entity which already has
 * a reference to the cluster can ref it, so we are in no danger of
 * racing an add with a subtract.  But the operation must still be atomic
 * since multiple entities may have a reference on the cluster.
 *
 * m_mclfree() is almost the same but it must contend with two entities
 * freeing the cluster at the same time.
 */
static void
m_mclref(void *arg)
{
	struct mbcluster *mcl = arg;

	atomic_add_int(&mcl->mcl_refs, 1);
}

/*
 * When dereferencing a cluster we have to deal with an N->0 race, where
 * N entities free their references simultaneously.  To do this we use
 * atomic_fetchadd_int().
 */
static void
m_mclfree(void *arg)
{
	struct mbcluster *mcl = arg;

	if (atomic_fetchadd_int(&mcl->mcl_refs, -1) == 1)
		objcache_put(mclmeta_cache, mcl);
}
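
/*
 * Example (illustrative): with two entities racing to drop the last
 * two references, atomic_fetchadd_int() returns the pre-decrement
 * value, so exactly one of them frees the cluster:
 *
 *	CPU A: fetchadd(&refs, -1) sees 2 -> does not free
 *	CPU B: fetchadd(&refs, -1) sees 1 -> objcache_put()
 */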

/*
 * Free a single mbuf and any associated external storage.  The successor,
 * if any, is returned.
 *
 * We do need to check non-first mbuf for m_aux, since some existing
 * code does not call M_PREPEND properly.
 * (example: call to bpf_mtap from drivers)
 */

#ifdef MBUF_DEBUG

struct mbuf *
_m_free(struct mbuf *m, const char *func)

#else

struct mbuf *
m_free(struct mbuf *m)

#endif
{
	struct mbuf *n;
	struct globaldata *gd = mycpu;

	KASSERT(m->m_type != MT_FREE, ("freeing free mbuf %p", m));
	KASSERT(M_TRAILINGSPACE(m) >= 0, ("overflowed mbuf %p", m));
	atomic_subtract_long_nonlocked(&mbtypes[gd->gd_cpuid][m->m_type], 1);

	n = m->m_next;

	/*
	 * Make sure the mbuf is in constructed state before returning it
	 * to the objcache.
	 */
	m->m_next = NULL;
	mbufuntrack(m);
#ifdef MBUF_DEBUG
	m->m_hdr.mh_lastfunc = func;
#endif
#ifdef notyet
	KKASSERT(m->m_nextpkt == NULL);
#else
	if (m->m_nextpkt != NULL) {
		static int afewtimes = 10;

		if (afewtimes-- > 0) {
			kprintf("mfree: m->m_nextpkt != NULL\n");
			print_backtrace(-1);
		}
		m->m_nextpkt = NULL;
	}
#endif
	if (m->m_flags & M_PKTHDR) {
		m_tag_delete_chain(m);		/* eliminate XXX JH */
	}

	m->m_flags &= (M_EXT | M_EXT_CLUSTER | M_CLCACHE | M_PHCACHE);

	/*
	 * Clean the M_PKTHDR state so we can return the mbuf to its original
	 * cache.  This is based on the PHCACHE flag which tells us whether
	 * the mbuf was originally allocated out of a packet-header cache
	 * or a non-packet-header cache.
	 */
	if (m->m_flags & M_PHCACHE) {
		m->m_flags |= M_PKTHDR;
		m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
		m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
		m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */
		SLIST_INIT(&m->m_pkthdr.tags);
	}

	/*
	 * Handle remaining flags combinations.  M_CLCACHE tells us whether
	 * the mbuf was originally allocated from a cluster cache or not,
	 * and is totally separate from whether the mbuf is currently
	 * associated with a cluster.
	 */
	switch(m->m_flags & (M_CLCACHE | M_EXT | M_EXT_CLUSTER)) {
	case M_CLCACHE | M_EXT | M_EXT_CLUSTER:
		/*
		 * mbuf+cluster cache case.  The mbuf was allocated from the
		 * combined mbuf_cluster cache and can be returned to the
		 * cache if the cluster hasn't been shared.
		 */
		if (m_sharecount(m) == 1) {
			/*
			 * The cluster has not been shared, we can just
			 * reset the data pointer and return the mbuf
			 * to the cluster cache.  Note that the reference
			 * count is left intact (it is still associated with
			 * an mbuf).
			 */
			m->m_data = m->m_ext.ext_buf;
			if (m->m_flags & M_EXT &&
			    m->m_ext.ext_size != MCLBYTES) {
				if (m->m_flags & M_PHCACHE)
					objcache_put(mbufphdrjcluster_cache, m);
				else
					objcache_put(mbufjcluster_cache, m);
			} else {
				if (m->m_flags & M_PHCACHE)
					objcache_put(mbufphdrcluster_cache, m);
				else
					objcache_put(mbufcluster_cache, m);
			}
			atomic_subtract_long_nonlocked(
			    &mbstat[mycpu->gd_cpuid].m_clusters, 1);
		} else {
			/*
			 * Hell.  Someone else has a ref on this cluster,
			 * we have to disconnect it which means we can't
			 * put it back into the mbufcluster_cache, we
			 * have to destroy the mbuf.
			 *
			 * Other mbuf references to the cluster will typically
			 * be M_EXT | M_EXT_CLUSTER but without M_CLCACHE.
			 *
			 * XXX we could try to connect another cluster to
			 * it.
			 */
			m->m_ext.ext_free(m->m_ext.ext_arg);
			m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
			if (m->m_ext.ext_size == MCLBYTES) {
				if (m->m_flags & M_PHCACHE)
					objcache_dtor(mbufphdrcluster_cache, m);
				else
					objcache_dtor(mbufcluster_cache, m);
			} else {
				if (m->m_flags & M_PHCACHE)
					objcache_dtor(mbufphdrjcluster_cache, m);
				else
					objcache_dtor(mbufjcluster_cache, m);
			}
		}
		break;
	case M_EXT | M_EXT_CLUSTER:
		/*
		 * Normal cluster associated with an mbuf that was allocated
		 * from the normal mbuf pool rather than the cluster pool.
		 * The cluster has to be independently disassociated from the
		 * mbuf.
		 */
		if (m_sharecount(m) == 1)
			atomic_subtract_long_nonlocked(
			    &mbstat[mycpu->gd_cpuid].m_clusters, 1);
		/* fall through */
	case M_EXT:
		/*
		 * Normal cluster association case, disconnect the cluster from
		 * the mbuf.  The cluster may or may not be custom.
		 */
		m->m_ext.ext_free(m->m_ext.ext_arg);
		m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
		/* fall through */
	case 0:
		/*
		 * return the mbuf to the mbuf cache.
		 */
		if (m->m_flags & M_PHCACHE) {
			m->m_data = m->m_pktdat;
			objcache_put(mbufphdr_cache, m);
		} else {
			m->m_data = m->m_dat;
			objcache_put(mbuf_cache, m);
		}
		atomic_subtract_long_nonlocked(
		    &mbstat[mycpu->gd_cpuid].m_mbufs, 1);
		break;
	default:
		if (!panicstr)
			panic("bad mbuf flags %p %08x", m, m->m_flags);
		break;
	}
	return (n);
}

#ifdef MBUF_DEBUG

void
_m_freem(struct mbuf *m, const char *func)
{
	while (m)
		m = _m_free(m, func);
}

#else

void
m_freem(struct mbuf *m)
{
	while (m)
		m = m_free(m);
}

#endif

void
m_extadd(struct mbuf *m, caddr_t buf, u_int size, void (*reff)(void *),
	 void (*freef)(void *), void *arg)
{
	m->m_ext.ext_arg = arg;
	m->m_ext.ext_buf = buf;
	m->m_ext.ext_ref = reff;
	m->m_ext.ext_free = freef;
	m->m_ext.ext_size = size;
	reff(arg);
	m->m_data = buf;
	m->m_flags |= M_EXT;
}

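/*
 * Example (illustrative, hypothetical helpers): a driver with its own
 * buffer pool could attach external storage like this, where
 * mypool_ref() and mypool_free() are placeholder reference helpers:
 *
 *	m_extadd(m, (caddr_t)drvbuf, DRVBUF_SIZE,
 *		 mypool_ref, mypool_free, pool);
 */
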
/*
 * mbuf utility routines
 */

/*
 * Lesser-used path for M_PREPEND: allocate new mbuf to prepend to chain and
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	if (m->m_flags & M_PKTHDR)
		mn = m_gethdr(how, m->m_type);
	else
		mn = m_get(how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR)
		M_MOVE_PKTHDR(mn, m);
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of MB_WAIT/MB_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copym(const struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	if (off == 0 && (m->m_flags & M_PKTHDR))
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = NULL;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		/*
		 * Because we are sharing any cluster attachment below,
		 * be sure to get an mbuf that does not have a cluster
		 * associated with it.
		 */
		if (copyhdr)
			n = m_gethdr(wait, m->m_type);
		else
			n = m_get(wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, wait))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			KKASSERT((n->m_flags & M_EXT) == 0);
			n->m_data = m->m_data + off;
			m->m_ext.ext_ref(m->m_ext.ext_arg);
			n->m_ext = m->m_ext;
			n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
		} else {
			bcopy(mtod(m, caddr_t) + off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		}
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == NULL)
		atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
	return (top);
nospace:
	m_freem(top);
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
	return (NULL);
}

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies also have the room available.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	n = m_gethdr(how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	if (!m_dup_pkthdr(n, m, how))
		goto nospace;
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		KKASSERT((n->m_flags & M_EXT) == 0);
		n->m_data = m->m_data;
		m->m_ext.ext_ref(m->m_ext.ext_arg);
		n->m_ext = m->m_ext;
		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
	} else {
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		o = m_get(how, m->m_type);
		if (!o)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			KKASSERT((n->m_flags & M_EXT) == 0);
			n->m_data = m->m_data;
			m->m_ext.ext_ref(m->m_ext.ext_arg);
			n->m_ext = m->m_ext;
			n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
	return (NULL);
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
	unsigned count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
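
/*
 * Example (illustrative): linearizing a protocol header out of a chain
 * into a local buffer:
 *
 *	struct ip iphdr;
 *
 *	m_copydata(m, 0, sizeof(iphdr), (caddr_t)&iphdr);
 */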

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	/* Sanity check */
	if (m == NULL)
		return (NULL);
	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __func__));

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		n = m_getl(remain, how, m->m_type, top == NULL ? M_PKTHDR : 0,
			   &nsize);
		if (n == NULL)
			goto nospace;
		if (top == NULL)
			if (!m_dup_pkthdr(n, m, how))
				goto nospace0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		n->m_len = 0;
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
			("%s: bogus m_pkthdr.len", __func__));
	}
	return (top);

nospace:
	m_freem(top);
nospace0:
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
	return (NULL);
}

/*
 * Copy the non-packet mbuf data chain into a new set of mbufs, including
 * copying any mbuf clusters.  This is typically used to realign a data
 * chain by nfs_realign().
 *
 * The original chain is left intact.  how should be MB_WAIT or MB_DONTWAIT
 * and NULL can be returned if MB_DONTWAIT is passed.
 *
 * Be careful to use cluster mbufs; a large mbuf chain converted to
 * non-cluster mbufs can exhaust our supply of mbufs.
 */
struct mbuf *
m_dup_data(struct mbuf *m, int how)
{
	struct mbuf **p, *n, *top = NULL;
	int mlen, moff, chunk, gsize, nsize;

	/*
	 * Degenerate case
	 */
	if (m == NULL)
		return (NULL);

	/*
	 * Optimize the mbuf allocation but do not get too carried away.
	 */
	if (m->m_next || m->m_len > MLEN) {
		if (m->m_flags & M_EXT && m->m_ext.ext_size == MCLBYTES)
			gsize = MCLBYTES;
		else
			gsize = MJUMPAGESIZE;
	} else {
		gsize = MLEN;
	}

	/* Chain control */
	p = &top;
	n = NULL;
	nsize = 0;

	/*
	 * Scan the mbuf chain until nothing is left, the new mbuf chain
	 * will be allocated on the fly as needed.
	 */
	while (m) {
		mlen = m->m_len;
		moff = 0;

		while (mlen) {
			KKASSERT(m->m_type == MT_DATA);
			if (n == NULL) {
				n = m_getl(gsize, how, MT_DATA, 0, &nsize);
				/* check the allocation before touching n */
				if (n == NULL)
					goto nospace;
				n->m_len = 0;
				*p = n;
				p = &n->m_next;
			}
			chunk = imin(mlen, nsize);
			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			mlen -= chunk;
			moff += chunk;
			n->m_len += chunk;
			nsize -= chunk;
			if (nsize == 0)
				n = NULL;
		}
		m = m->m_next;
	}
	*p = NULL;
	return(top);
nospace:
	*p = NULL;
	m_freem(top);
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
	return (NULL);
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	m = m_last(m);
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == NULL)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}
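
/*
 * Example (illustrative): a positive length trims from the head, a
 * negative length from the tail:
 *
 *	m_adj(m, ETHER_HDR_LEN);	trim link header from the front
 *	m_adj(m, -ETHER_CRC_LEN);	trim the CRC from the tail
 */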

/*
 * Set the m_data pointer of a newly-allocated mbuf
 * to place an object of the specified size at the
 * end of the mbuf, longword aligned.
 */
void
m_align(struct mbuf *m, int len)
{
	int adjust;

	if (m->m_flags & M_EXT)
		adjust = m->m_ext.ext_size - len;
	else if (m->m_flags & M_PKTHDR)
		adjust = MHLEN - len;
	else
		adjust = MLEN - len;
	m->m_data += adjust &~ (sizeof(long)-1);
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod will work for a structure
 * of size len).  Returns the resulting mbuf chain on success, frees it and
 * returns null on failure.  If there is room, it will add up to
 * max_protohdr-len extra bytes to the contiguous region in an attempt to
 * avoid being called next time.
 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if (!(n->m_flags & M_EXT) &&
	    n->m_data + len < &n->m_dat[MLEN] &&
	    n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		if (n->m_flags & M_PKTHDR)
			m = m_gethdr(MB_DONTWAIT, n->m_type);
		else
			m = m_get(MB_DONTWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR)
			M_MOVE_PKTHDR(m, n);
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
	return (NULL);
}
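
/*
 * Example (illustrative): the classic input-path idiom; on failure
 * m_pullup() has already freed the chain:
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;
 *	ip = mtod(m, struct ip *);
 */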

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf.  Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		n = m_gethdr(wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				m_free(n);
				return (NULL);
			} else {
				n->m_len = 0;
				return (n);
			}
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		n = m_get(wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		KKASSERT((n->m_flags & M_EXT) == 0);
		n->m_data = m->m_data + len;
		m->m_ext.ext_ref(m->m_ext.ext_arg);
		n->m_ext = m->m_ext;
		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}
50503f0f 1846
984263bc
MD
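/*
 * Sketch of m_split() usage (illustrative; example_split_at() is a
 * hypothetical helper).  Splitting at "hdrlen" yields the payload as a
 * separate chain; the result may share a cluster with the original, so
 * check M_WRITABLE() before modifying it in place.
 */
static struct mbuf *
example_split_at(struct mbuf *m0, int hdrlen)
{
	struct mbuf *tail;

	tail = m_split(m0, hdrlen, MB_DONTWAIT);
	if (tail == NULL)
		return (NULL);	/* m0 restored to its original state */
	/* m0 now holds the first hdrlen bytes, tail holds the rest. */
	return (tail);
}
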
/*
 * Routine to copy from device local memory into mbufs.
 * Note: "offset" is ill-defined and always called as 0, so ignore it.
 */
struct mbuf *
m_devget(char *buf, int len, int offset, struct ifnet *ifp,
    void (*copy)(volatile const void *from, volatile void *to, size_t length))
{
	struct mbuf *m, *mfirst = NULL, **mtail;
	int nsize, flags;

	if (copy == NULL)
		copy = bcopy;
	mtail = &mfirst;
	flags = M_PKTHDR;

	while (len > 0) {
		m = m_getl(len, MB_DONTWAIT, MT_DATA, flags, &nsize);
		if (m == NULL) {
			m_freem(mfirst);
			return (NULL);
		}
		m->m_len = min(len, nsize);

		if (flags & M_PKTHDR) {
			if (len + max_linkhdr <= nsize)
				m->m_data += max_linkhdr;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = len;
			flags = 0;
		}

		copy(buf, m->m_data, (unsigned)m->m_len);
		buf += m->m_len;
		len -= m->m_len;
		*mtail = m;
		mtail = &m->m_next;
	}

	return (mfirst);
}

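/*
 * Sketch of a driver receive path built on m_devget() (illustrative;
 * example_rxeof() and its arguments are hypothetical).  Passing NULL
 * for the copy callback selects plain bcopy, as the code above shows.
 */
static struct mbuf *
example_rxeof(char *rx_buf, int pktlen, struct ifnet *ifp)
{
	struct mbuf *m;

	m = m_devget(rx_buf, pktlen, 0, ifp, NULL);
	if (m == NULL)
		return (NULL);	/* allocation failed, nothing queued */
	/* m is a complete packet-header chain ready for the stack. */
	return (m);
}
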
/*
 * Routine to pad mbuf to the specified length 'padto'.
 */
int
m_devpad(struct mbuf *m, int padto)
{
	struct mbuf *last = NULL;
	int padlen;

	if (padto <= m->m_pkthdr.len)
		return 0;

	padlen = padto - m->m_pkthdr.len;

	/* if there's only the packet-header and we can pad there, use it. */
	if (m->m_pkthdr.len == m->m_len && M_TRAILINGSPACE(m) >= padlen) {
		last = m;
	} else {
		/*
		 * Walk packet chain to find last mbuf.  We will either
		 * pad there, or append a new mbuf and pad it
		 */
		for (last = m; last->m_next != NULL; last = last->m_next)
			; /* EMPTY */

		/* `last' now points to last in chain. */
		if (M_TRAILINGSPACE(last) < padlen) {
			struct mbuf *n;

			/* Allocate new empty mbuf, pad it.  Compact later. */
			MGET(n, MB_DONTWAIT, MT_DATA);
			if (n == NULL)
				return ENOBUFS;
			n->m_len = 0;
			last->m_next = n;
			last = n;
		}
	}
	KKASSERT(M_TRAILINGSPACE(last) >= padlen);
	KKASSERT(M_WRITABLE(last));

	/* Now zero the pad area */
	bzero(mtod(last, char *) + last->m_len, padlen);
	last->m_len += padlen;
	m->m_pkthdr.len += padlen;
	return 0;
}

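/*
 * Sketch of m_devpad() in a transmit path (illustrative).  Short
 * Ethernet frames must be padded to the minimum frame size; the
 * constant 60 stands in for the minimum frame length minus the CRC,
 * an assumption rather than a value taken from this file.
 */
static int
example_pad_runt(struct mbuf *m)
{
	if (m->m_pkthdr.len < 60)
		return (m_devpad(m, 60));	/* zero-fills the tail */
	return (0);
}
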
/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == NULL)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			n = m_getclr(MB_DONTWAIT, m->m_type);
			if (n == NULL)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			n = m_get(MB_DONTWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}

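/*
 * Sketch of m_copyback() overwriting a field in place (illustrative;
 * the 16-bit checksum and its offset are made-up values).  Note that
 * m_copyback() extends the chain when "off + len" runs past the end,
 * but an allocation failure there is silent, so callers should verify
 * m_pkthdr.len afterward when that matters.
 */
static void
example_store_cksum(struct mbuf *m, int off, u_int16_t sum)
{
	m_copyback(m, off, sizeof(sum), (caddr_t)&sum);
}
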
/*
 * Append the specified data to the indicated mbuf chain.
 * Extend the mbuf chain if the new data does not fit in
 * existing space.
 *
 * Return 1 if able to complete the job; otherwise 0.
 */
int
m_append(struct mbuf *m0, int len, c_caddr_t cp)
{
	struct mbuf *m, *n;
	int remainder, space;

	for (m = m0; m->m_next != NULL; m = m->m_next)
		;
	remainder = len;
	space = M_TRAILINGSPACE(m);
	if (space > 0) {
		/*
		 * Copy into available space.
		 */
		if (space > remainder)
			space = remainder;
		bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
		m->m_len += space;
		cp += space, remainder -= space;
	}
	while (remainder > 0) {
		/*
		 * Allocate a new mbuf; could check space
		 * and allocate a cluster instead.
		 */
		n = m_get(MB_DONTWAIT, m->m_type);
		if (n == NULL)
			break;
		n->m_len = min(MLEN, remainder);
		bcopy(cp, mtod(n, caddr_t), n->m_len);
		cp += n->m_len, remainder -= n->m_len;
		m->m_next = n;
		m = n;
	}
	if (m0->m_flags & M_PKTHDR)
		m0->m_pkthdr.len += len - remainder;
	return (remainder == 0);
}

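/*
 * Sketch of m_append() adding a fixed trailer (illustrative; the
 * trailer contents are arbitrary).  The return value must be checked:
 * 0 means a partial append because an mbuf allocation failed.
 */
static int
example_add_trailer(struct mbuf *m0)
{
	static const u_char trailer[4] = { 0xde, 0xad, 0xbe, 0xef };

	return (m_append(m0, (int)sizeof(trailer), (c_caddr_t)trailer));
}
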
/*
 * Apply function f to the data in an mbuf chain starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
int
m_apply(struct mbuf *m, int off, int len,
    int (*f)(void *, void *, u_int), void *arg)
{
	u_int count;
	int rval;

	KASSERT(off >= 0, ("m_apply, negative off %d", off));
	KASSERT(len >= 0, ("m_apply, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
		count = min(m->m_len - off, len);
		rval = (*f)(arg, mtod(m, caddr_t) + off, count);
		if (rval)
			return (rval);
		len -= count;
		off = 0;
		m = m->m_next;
	}
	return (0);
}

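/*
 * Sketch of m_apply() driving a callback over a byte range
 * (illustrative; example_sum() and example_chain_sum() are
 * hypothetical).  The walk stops early if the callback returns
 * nonzero, which is how errors propagate out.
 */
static int
example_sum(void *arg, void *data, u_int len)
{
	u_long *sump = arg;
	u_char *p = data;
	u_int i;

	for (i = 0; i < len; ++i)
		*sump += p[i];
	return (0);	/* keep going */
}

static u_long
example_chain_sum(struct mbuf *m, int off, int len)
{
	u_long sum = 0;

	m_apply(m, off, len, example_sum, &sum);
	return (sum);
}
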
/*
 * Return a pointer to mbuf/offset of location in mbuf chain.
 */
struct mbuf *
m_getptr(struct mbuf *m, int loc, int *off)
{
	while (loc >= 0) {
		/* Normal end of search. */
		if (m->m_len > loc) {
			*off = loc;
			return (m);
		} else {
			loc -= m->m_len;
			if (m->m_next == NULL) {
				if (loc == 0) {
					/* Point at the end of valid data. */
					*off = m->m_len;
					return (m);
				}
				return (NULL);
			}
			m = m->m_next;
		}
	}
	return (NULL);
}

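/*
 * Sketch of m_getptr() resolving a chain offset to an mbuf/offset pair
 * (illustrative; example_byte_at() is a hypothetical helper).  Unlike
 * m_pullup(), this does not make data contiguous; it only locates it.
 */
static u_char *
example_byte_at(struct mbuf *m, int loc)
{
	int off;

	m = m_getptr(m, loc, &off);
	if (m == NULL)
		return (NULL);	/* offset past end of chain */
	return (mtod(m, u_char *) + off);
}
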
void
m_print(const struct mbuf *m)
{
	int len;
	const struct mbuf *m2;

	len = m->m_pkthdr.len;
	m2 = m;
	while (len) {
		kprintf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
		len -= m2->m_len;
		m2 = m2->m_next;
	}
}

/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{
	KASSERT((to->m_flags & M_PKTHDR), ("m_move_pkthdr: not packet header"));

	to->m_flags |= from->m_flags & M_COPYFLAGS;
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
}

/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
{
	KASSERT((to->m_flags & M_PKTHDR), ("m_dup_pkthdr: not packet header"));

	to->m_flags = (from->m_flags & M_COPYFLAGS) |
	    (to->m_flags & ~M_COPYFLAGS);
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, how));
}

/*
 * Defragment a mbuf chain, returning the shortest possible
 * chain of mbufs and clusters.  If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed in chain will be unchanged.  Upon success,
 * the original chain will be freed, and the new chain
 * will be returned.
 *
 * If a non-packet header is passed in, the original
 * mbuf (chain?) will be returned unharmed.
 *
 * m_defrag_nofree doesn't free the passed in mbuf.
 */
struct mbuf *
m_defrag(struct mbuf *m0, int how)
{
	struct mbuf *m_new;

	if ((m_new = m_defrag_nofree(m0, how)) == NULL)
		return (NULL);
	if (m_new != m0)
		m_freem(m0);
	return (m_new);
}

struct mbuf *
m_defrag_nofree(struct mbuf *m0, int how)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, length, nsize;

	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

#ifdef MBUF_STRESS_TEST
	if (m_defragrandomfailures) {
		int temp = karc4random() & 0xff;
		if (temp == 0xba)
			goto nospace;
	}
#endif

	m_final = m_getl(m0->m_pkthdr.len, how, MT_DATA, M_PKTHDR, &nsize);
	if (m_final == NULL)
		goto nospace;
	m_final->m_len = 0;	/* in case m0->m_pkthdr.len is zero */

	if (m_dup_pkthdr(m_final, m0, how) == 0)
		goto nospace;

	m_new = m_final;

	while (progress < m0->m_pkthdr.len) {
		length = m0->m_pkthdr.len - progress;
		if (length > MCLBYTES)
			length = MCLBYTES;

		if (m_new == NULL) {
			m_new = m_getl(length, how, MT_DATA, 0, &nsize);
			if (m_new == NULL)
				goto nospace;
		}

		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
		progress += length;
		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
	if (m0->m_next == NULL)
		m_defraguseless++;
	m_defragpackets++;
	m_defragbytes += m_final->m_pkthdr.len;
	return (m_final);
nospace:
	m_defragfailure++;
	if (m_new)
		m_free(m_new);
	m_freem(m_final);
	return (NULL);
}

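/*
 * Sketch of m_defrag() in a transmit path (illustrative; the segment
 * count check and the names example_tx_collapse()/maxscatter are
 * assumptions, not taken from this file).  When a chain has more
 * fragments than a device's DMA engine accepts, collapse it into the
 * shortest equivalent chain; on failure the original chain is left
 * intact for the caller to free or requeue.
 */
static struct mbuf *
example_tx_collapse(struct mbuf *m, int nsegs, int maxscatter)
{
	struct mbuf *m_new;

	if (nsegs <= maxscatter)
		return (m);		/* already fits */
	m_new = m_defrag(m, MB_DONTWAIT);
	if (m_new == NULL)
		return (NULL);		/* m unchanged; caller decides */
	return (m_new);			/* old chain freed as needed */
}
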
/*
 * Move data from uio into mbufs.
 */
struct mbuf *
m_uiomove(struct uio *uio)
{
	struct mbuf *m;			/* current working mbuf */
	struct mbuf *head = NULL;	/* result mbuf chain */
	struct mbuf **mp = &head;
	int flags = M_PKTHDR;
	int nsize;
	int error;
	int resid;

	do {
		if (uio->uio_resid > INT_MAX)
			resid = INT_MAX;
		else
			resid = (int)uio->uio_resid;
		m = m_getl(resid, MB_WAIT, MT_DATA, flags, &nsize);
		if (flags) {
			m->m_pkthdr.len = 0;
			/* Leave room for protocol headers. */
			if (resid < MHLEN)
				MH_ALIGN(m, resid);
			flags = 0;
		}
		m->m_len = imin(nsize, resid);
		error = uiomove(mtod(m, caddr_t), m->m_len, uio);
		if (error) {
			m_free(m);
			goto failed;
		}
		*mp = m;
		mp = &m->m_next;
		head->m_pkthdr.len += m->m_len;
	} while (uio->uio_resid > 0);

	return (head);

failed:
	m_freem(head);
	return (NULL);
}

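/*
 * Sketch of m_uiomove() in a send path (illustrative; the resid guard
 * is an assumption, added because the loop above always allocates at
 * least one mbuf even for an empty uio).  On success, m_pkthdr.len of
 * the returned chain equals the number of bytes consumed from the uio.
 */
static struct mbuf *
example_uio_to_chain(struct uio *uio)
{
	if (uio->uio_resid == 0)
		return (NULL);		/* nothing to move */
	return (m_uiomove(uio));	/* NULL if uiomove() failed */
}
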
/*
 * Return the last mbuf in a chain.
 */
struct mbuf *
m_last(struct mbuf *m)
{
	while (m->m_next)
		m = m->m_next;
	return (m);
}

/*
 * Return the number of bytes in an mbuf chain.
 * If lastm is not NULL, also return the last mbuf.
 */
u_int
m_lengthm(struct mbuf *m, struct mbuf **lastm)
{
	u_int len = 0;
	struct mbuf *prev = m;

	while (m) {
		len += m->m_len;
		prev = m;
		m = m->m_next;
	}
	if (lastm != NULL)
		*lastm = prev;
	return (len);
}

/*
 * Like m_lengthm(), except also keep track of mbuf usage.
 */
u_int
m_countm(struct mbuf *m, struct mbuf **lastm, u_int *pmbcnt)
{
	u_int len = 0, mbcnt = 0;
	struct mbuf *prev = m;

	while (m) {
		len += m->m_len;
		mbcnt += MSIZE;
		if (m->m_flags & M_EXT)
			mbcnt += m->m_ext.ext_size;
		prev = m;
		m = m->m_next;
	}
	if (lastm != NULL)
		*lastm = prev;
	*pmbcnt = mbcnt;
	return (len);
}
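
/*
 * Sketch pairing m_lengthm() and m_countm() (illustrative;
 * example_account() is a hypothetical helper).  Both walk the chain
 * once; m_countm() additionally reports the storage footprint, MSIZE
 * per mbuf plus any external cluster size, which is the quantity
 * socket buffer accounting tracks.
 */
static void
example_account(struct mbuf *m)
{
	struct mbuf *last;
	u_int bytes, mbcnt;

	bytes = m_countm(m, &last, &mbcnt);
	kprintf("chain %p: %u data bytes, %u storage bytes, last %p\n",
	    m, bytes, mbcnt, last);
}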