984263bc 1/*
2 * (MPSAFE)
3 *
0c33f36d 4 * Copyright (c) 2004 Jeffrey M. Hsu. All rights reserved.
5 * Copyright (c) 2004 The DragonFly Project. All rights reserved.
6 *
7 * This code is derived from software contributed to The DragonFly Project
8 * by Jeffrey M. Hsu.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of The DragonFly Project nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific, prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 */
35
66d6c637 36/*
37 * Copyright (c) 1982, 1986, 1988, 1991, 1993
38 * The Regents of the University of California. All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by the University of
51 * California, Berkeley and its contributors.
52 * 4. Neither the name of the University nor the names of its contributors
53 * may be used to endorse or promote products derived from this software
54 * without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
57 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
58 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
59 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
60 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
61 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
62 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
64 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
65 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66 * SUCH DAMAGE.
67 *
8a3125c6 68 * @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94
984263bc 69 * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
3f98f485 70 * $DragonFly: src/sys/kern/uipc_mbuf.c,v 1.70 2008/11/20 14:21:01 sephe Exp $
71 */
72
73#include "opt_param.h"
74#include "opt_mbuf_stress_test.h"
75#include <sys/param.h>
76#include <sys/systm.h>
77#include <sys/malloc.h>
78#include <sys/mbuf.h>
79#include <sys/kernel.h>
80#include <sys/sysctl.h>
81#include <sys/domain.h>
7b6f875f 82#include <sys/objcache.h>
e9fa4b60 83#include <sys/tree.h>
984263bc 84#include <sys/protosw.h>
0c33f36d 85#include <sys/uio.h>
ef0fdad1 86#include <sys/thread.h>
a2a5ad0d 87#include <sys/globaldata.h>
5bd48c1d 88
90775e29 89#include <sys/thread2.h>
5bd48c1d 90#include <sys/spinlock2.h>
984263bc 91
1d16b2b5 92#include <machine/atomic.h>
e54488bb 93#include <machine/limits.h>
1d16b2b5 94
95#include <vm/vm.h>
96#include <vm/vm_kern.h>
97#include <vm/vm_extern.h>
98
99#ifdef INVARIANTS
100#include <machine/cpu.h>
101#endif
102
103/*
104 * mbuf cluster meta-data
105 */
7b6f875f 106struct mbcluster {
107 int32_t mcl_refs;
108 void *mcl_data;
7b6f875f 109};
90775e29 110
111/*
112 * mbuf tracking for debugging purposes
113 */
114#ifdef MBUF_DEBUG
115
116static MALLOC_DEFINE(M_MTRACK, "mtrack", "mtrack");
117
 118struct mbtrack;
119RB_HEAD(mbuf_rb_tree, mbtrack);
120RB_PROTOTYPE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *);
121
122struct mbtrack {
123 RB_ENTRY(mbtrack) rb_node;
124 int trackid;
125 struct mbuf *m;
126};
127
128static int
129mbtrack_cmp(struct mbtrack *mb1, struct mbtrack *mb2)
130{
131 if (mb1->m < mb2->m)
132 return(-1);
133 if (mb1->m > mb2->m)
134 return(1);
135 return(0);
136}
137
138RB_GENERATE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *, m);
139
140struct mbuf_rb_tree mbuf_track_root;
5bd48c1d 141static struct spinlock mbuf_track_spin = SPINLOCK_INITIALIZER(mbuf_track_spin);
142
143static void
144mbuftrack(struct mbuf *m)
145{
146 struct mbtrack *mbt;
147
e9fa4b60 148 mbt = kmalloc(sizeof(*mbt), M_MTRACK, M_INTWAIT|M_ZERO);
5bd48c1d 149 spin_lock(&mbuf_track_spin);
e9fa4b60 150 mbt->m = m;
151 if (mbuf_rb_tree_RB_INSERT(&mbuf_track_root, mbt)) {
152 spin_unlock(&mbuf_track_spin);
e9fa4b60 153 panic("mbuftrack: mbuf %p already being tracked\n", m);
154 }
155 spin_unlock(&mbuf_track_spin);
156}
157
158static void
159mbufuntrack(struct mbuf *m)
160{
161 struct mbtrack *mbt;
162
5bd48c1d 163 spin_lock(&mbuf_track_spin);
164 mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
165 if (mbt == NULL) {
166 spin_unlock(&mbuf_track_spin);
167 panic("mbufuntrack: mbuf %p was not tracked\n", m);
168 } else {
169 mbuf_rb_tree_RB_REMOVE(&mbuf_track_root, mbt);
6cef7136 170 spin_unlock(&mbuf_track_spin);
171 kfree(mbt, M_MTRACK);
172 }
173}
174
175void
176mbuftrackid(struct mbuf *m, int trackid)
177{
178 struct mbtrack *mbt;
179 struct mbuf *n;
180
5bd48c1d 181 spin_lock(&mbuf_track_spin);
182 while (m) {
183 n = m->m_nextpkt;
184 while (m) {
185 mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
186 if (mbt == NULL) {
187 spin_unlock(&mbuf_track_spin);
188 panic("mbuftrackid: mbuf %p not tracked", m);
189 }
190 mbt->trackid = trackid;
191 m = m->m_next;
192 }
193 m = n;
194 }
5bd48c1d 195 spin_unlock(&mbuf_track_spin);
196}
197
198static int
199mbuftrack_callback(struct mbtrack *mbt, void *arg)
200{
201 struct sysctl_req *req = arg;
202 char buf[64];
203 int error;
204
205 ksnprintf(buf, sizeof(buf), "mbuf %p track %d\n", mbt->m, mbt->trackid);
206
5bd48c1d 207 spin_unlock(&mbuf_track_spin);
e9fa4b60 208 error = SYSCTL_OUT(req, buf, strlen(buf));
5bd48c1d 209 spin_lock(&mbuf_track_spin);
210 if (error)
211 return(-error);
212 return(0);
213}
214
215static int
216mbuftrack_show(SYSCTL_HANDLER_ARGS)
217{
218 int error;
219
5bd48c1d 220 spin_lock(&mbuf_track_spin);
221 error = mbuf_rb_tree_RB_SCAN(&mbuf_track_root, NULL,
222 mbuftrack_callback, req);
5bd48c1d 223 spin_unlock(&mbuf_track_spin);
224 return (-error);
225}
226SYSCTL_PROC(_kern_ipc, OID_AUTO, showmbufs, CTLFLAG_RD|CTLTYPE_STRING,
227 0, 0, mbuftrack_show, "A", "Show all in-use mbufs");
228
229#else
230
231#define mbuftrack(m)
232#define mbufuntrack(m)
233
234#endif
235
7b6f875f 236static void mbinit(void *);
ba39e2e0 237SYSINIT(mbuf, SI_BOOT2_MACHDEP, SI_ORDER_FIRST, mbinit, NULL)
984263bc 238
4c1e2509 239static u_long mbtypes[SMP_MAXCPU][MT_NTYPES];
90775e29 240
4c1e2509 241static struct mbstat mbstat[SMP_MAXCPU];
242int max_linkhdr;
243int max_protohdr;
244int max_hdr;
245int max_datalen;
246int m_defragpackets;
247int m_defragbytes;
248int m_defraguseless;
249int m_defragfailure;
250#ifdef MBUF_STRESS_TEST
251int m_defragrandomfailures;
252#endif
253
254struct objcache *mbuf_cache, *mbufphdr_cache;
255struct objcache *mclmeta_cache;
256struct objcache *mbufcluster_cache, *mbufphdrcluster_cache;
257
258int nmbclusters;
259int nmbufs;
984263bc 260
261SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
262 &max_linkhdr, 0, "");
263SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
264 &max_protohdr, 0, "");
265SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
266SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
267 &max_datalen, 0, "");
268SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
269 &mbuf_wait, 0, "");
270static int do_mbstat(SYSCTL_HANDLER_ARGS);
271
272SYSCTL_PROC(_kern_ipc, KIPC_MBSTAT, mbstat, CTLTYPE_STRUCT|CTLFLAG_RD,
273 0, 0, do_mbstat, "S,mbstat", "");
274
275static int do_mbtypes(SYSCTL_HANDLER_ARGS);
276
277SYSCTL_PROC(_kern_ipc, OID_AUTO, mbtypes, CTLTYPE_ULONG|CTLFLAG_RD,
278 0, 0, do_mbtypes, "LU", "");
279
280static int
281do_mbstat(SYSCTL_HANDLER_ARGS)
282{
283 struct mbstat mbstat_total;
284 struct mbstat *mbstat_totalp;
285 int i;
286
287 bzero(&mbstat_total, sizeof(mbstat_total));
288 mbstat_totalp = &mbstat_total;
289
290 for (i = 0; i < ncpus; i++)
291 {
292 mbstat_total.m_mbufs += mbstat[i].m_mbufs;
293 mbstat_total.m_clusters += mbstat[i].m_clusters;
294 mbstat_total.m_spare += mbstat[i].m_spare;
295 mbstat_total.m_clfree += mbstat[i].m_clfree;
296 mbstat_total.m_drops += mbstat[i].m_drops;
297 mbstat_total.m_wait += mbstat[i].m_wait;
298 mbstat_total.m_drain += mbstat[i].m_drain;
299 mbstat_total.m_mcfail += mbstat[i].m_mcfail;
300 mbstat_total.m_mpfail += mbstat[i].m_mpfail;
301
302 }
303 /*
304 * The following fields are not cumulative fields so just
305 * get their values once.
306 */
307 mbstat_total.m_msize = mbstat[0].m_msize;
308 mbstat_total.m_mclbytes = mbstat[0].m_mclbytes;
309 mbstat_total.m_minclsize = mbstat[0].m_minclsize;
310 mbstat_total.m_mlen = mbstat[0].m_mlen;
311 mbstat_total.m_mhlen = mbstat[0].m_mhlen;
312
313 return(sysctl_handle_opaque(oidp, mbstat_totalp, sizeof(mbstat_total), req));
314}
315
316static int
317do_mbtypes(SYSCTL_HANDLER_ARGS)
318{
319 u_long totals[MT_NTYPES];
320 int i, j;
321
322 for (i = 0; i < MT_NTYPES; i++)
323 totals[i] = 0;
324
325 for (i = 0; i < ncpus; i++)
326 {
327 for (j = 0; j < MT_NTYPES; j++)
328 totals[j] += mbtypes[i][j];
329 }
330
331 return(sysctl_handle_opaque(oidp, totals, sizeof(totals), req));
332}
333
334/*
335 * These are read-only because we do not currently have any code
336 * to adjust the objcache limits after the fact. The variables
337 * may only be set as boot-time tunables.
338 */
339SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
984263bc 340 &nmbclusters, 0, "Maximum number of mbuf clusters available");
18c48b9c 341SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
984263bc 342 "Maximum number of mbufs available");
7b6f875f 343
344SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
345 &m_defragpackets, 0, "");
346SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
347 &m_defragbytes, 0, "");
348SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
349 &m_defraguseless, 0, "");
350SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
351 &m_defragfailure, 0, "");
352#ifdef MBUF_STRESS_TEST
353SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
354 &m_defragrandomfailures, 0, "");
355#endif
356
357static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");
358static MALLOC_DEFINE(M_MBUFCL, "mbufcl", "mbufcl");
7b6f875f 359static MALLOC_DEFINE(M_MCLMETA, "mclmeta", "mclmeta");
360
361static void m_reclaim (void);
362static void m_mclref(void *arg);
363static void m_mclfree(void *arg);
364
365#ifndef NMBCLUSTERS
366#define NMBCLUSTERS (512 + maxusers * 16)
367#endif
368#ifndef NMBUFS
7b6f875f 369#define NMBUFS (nmbclusters * 2)
370#endif
371
372/*
373 * Perform sanity checks of tunables declared above.
374 */
375static void
376tunable_mbinit(void *dummy)
377{
378 /*
379 * This has to be done before VM init.
380 */
381 nmbclusters = NMBCLUSTERS;
382 TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
383 nmbufs = NMBUFS;
384 TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
385 /* Sanity checks */
386 if (nmbufs < nmbclusters * 2)
387 nmbufs = nmbclusters * 2;
984263bc 388}
389SYSINIT(tunable_mbinit, SI_BOOT1_TUNABLES, SI_ORDER_ANY,
390 tunable_mbinit, NULL);
391
392/* "number of clusters of pages" */
393#define NCL_INIT 1
394
395#define NMB_INIT 16
396
397/*
398 * The mbuf object cache only guarantees that m_next and m_nextpkt are
399 * NULL and that m_data points to the beginning of the data area. In
400 * particular, m_len and m_pkthdr.len are uninitialized. It is the
401 * responsibility of the caller to initialize those fields before use.
402 */
403
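/*
 * Illustrative sketch (not part of the original code): a minimal caller
 * honoring the contract above.  m_len is not initialized by the cache and
 * must be set before use; the function name and its parameters are
 * hypothetical.
 */
#if 0
static struct mbuf *
example_small_copyin(const void *src, int len)
{
	struct mbuf *m;

	KKASSERT(len <= MLEN);
	m = m_get(MB_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	bcopy(src, mtod(m, caddr_t), len);
	m->m_len = len;			/* caller-initialized, see above */
	return (m);
}
#endif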
404static boolean_t __inline
405mbuf_ctor(void *obj, void *private, int ocflags)
984263bc 406{
7b6f875f 407 struct mbuf *m = obj;
984263bc 408
409 m->m_next = NULL;
410 m->m_nextpkt = NULL;
411 m->m_data = m->m_dat;
412 m->m_flags = 0;
413
414 return (TRUE);
415}
416
417/*
7b6f875f 418 * Initialize the mbuf and the packet header fields.
984263bc 419 */
420static boolean_t
421mbufphdr_ctor(void *obj, void *private, int ocflags)
984263bc 422{
7b6f875f 423 struct mbuf *m = obj;
984263bc 424
425 m->m_next = NULL;
426 m->m_nextpkt = NULL;
427 m->m_data = m->m_pktdat;
77e294a1 428 m->m_flags = M_PKTHDR | M_PHCACHE;
984263bc 429
430 m->m_pkthdr.rcvif = NULL; /* eliminate XXX JH */
431 SLIST_INIT(&m->m_pkthdr.tags);
432 m->m_pkthdr.csum_flags = 0; /* eliminate XXX JH */
433 m->m_pkthdr.fw_flags = 0; /* eliminate XXX JH */
434
435 return (TRUE);
436}
437
438/*
7b6f875f 439 * An mbcluster object consists of a 2K (MCLBYTES) cluster and a refcount.
984263bc 440 */
441static boolean_t
442mclmeta_ctor(void *obj, void *private, int ocflags)
984263bc 443{
444 struct mbcluster *cl = obj;
445 void *buf;
446
447 if (ocflags & M_NOWAIT)
efda3bd0 448 buf = kmalloc(MCLBYTES, M_MBUFCL, M_NOWAIT | M_ZERO);
7b6f875f 449 else
efda3bd0 450 buf = kmalloc(MCLBYTES, M_MBUFCL, M_INTWAIT | M_ZERO);
451 if (buf == NULL)
452 return (FALSE);
77e294a1 453 cl->mcl_refs = 0;
7b6f875f
JH
454 cl->mcl_data = buf;
455 return (TRUE);
456}
984263bc 457
458static void
459mclmeta_dtor(void *obj, void *private)
460{
461 struct mbcluster *mcl = obj;
462
463 KKASSERT(mcl->mcl_refs == 0);
efda3bd0 464 kfree(mcl->mcl_data, M_MBUFCL);
465}
466
467static void
468linkcluster(struct mbuf *m, struct mbcluster *cl)
469{
984263bc 470 /*
471 * Add the cluster to the mbuf. The caller will detect that the
472 * mbuf now has an attached cluster.
984263bc 473 */
474 m->m_ext.ext_arg = cl;
475 m->m_ext.ext_buf = cl->mcl_data;
476 m->m_ext.ext_ref = m_mclref;
477 m->m_ext.ext_free = m_mclfree;
478 m->m_ext.ext_size = MCLBYTES;
df8d1020 479 atomic_add_int(&cl->mcl_refs, 1);
984263bc 480
481 m->m_data = m->m_ext.ext_buf;
482 m->m_flags |= M_EXT | M_EXT_CLUSTER;
483}
484
485static boolean_t
486mbufphdrcluster_ctor(void *obj, void *private, int ocflags)
487{
488 struct mbuf *m = obj;
489 struct mbcluster *cl;
490
491 mbufphdr_ctor(obj, private, ocflags);
492 cl = objcache_get(mclmeta_cache, ocflags);
493 if (cl == NULL) {
494 ++mbstat[mycpu->gd_cpuid].m_drops;
7b6f875f 495 return (FALSE);
a5955b15 496 }
77e294a1 497 m->m_flags |= M_CLCACHE;
498 linkcluster(m, cl);
499 return (TRUE);
500}
984263bc 501
502static boolean_t
503mbufcluster_ctor(void *obj, void *private, int ocflags)
984263bc 504{
505 struct mbuf *m = obj;
506 struct mbcluster *cl;
507
508 mbuf_ctor(obj, private, ocflags);
509 cl = objcache_get(mclmeta_cache, ocflags);
510 if (cl == NULL) {
511 ++mbstat[mycpu->gd_cpuid].m_drops;
7b6f875f 512 return (FALSE);
a5955b15 513 }
77e294a1 514 m->m_flags |= M_CLCACHE;
515 linkcluster(m, cl);
516 return (TRUE);
517}
984263bc 518
519/*
520 * Used for both the cluster and cluster PHDR caches.
521 *
522 * The mbuf may have lost its cluster due to sharing, deal
523 * with the situation by checking M_EXT.
524 */
525static void
526mbufcluster_dtor(void *obj, void *private)
984263bc 527{
7b6f875f 528 struct mbuf *m = obj;
77e294a1 529 struct mbcluster *mcl;
984263bc 530
531 if (m->m_flags & M_EXT) {
532 KKASSERT((m->m_flags & M_EXT_CLUSTER) != 0);
533 mcl = m->m_ext.ext_arg;
534 KKASSERT(mcl->mcl_refs == 1);
535 mcl->mcl_refs = 0;
536 objcache_put(mclmeta_cache, mcl);
537 }
538}
539
540struct objcache_malloc_args mbuf_malloc_args = { MSIZE, M_MBUF };
541struct objcache_malloc_args mclmeta_malloc_args =
542 { sizeof(struct mbcluster), M_MCLMETA };
543
544/* ARGSUSED*/
90775e29 545static void
7b6f875f 546mbinit(void *dummy)
984263bc 547{
6f21e2f4 548 int mb_limit, cl_limit;
0aa16b5d 549 int limit;
550 int i;
551
552 /*
553 * Initialize statistics
554 */
555 for (i = 0; i < ncpus; i++) {
556 atomic_set_long_nonlocked(&mbstat[i].m_msize, MSIZE);
557 atomic_set_long_nonlocked(&mbstat[i].m_mclbytes, MCLBYTES);
558 atomic_set_long_nonlocked(&mbstat[i].m_minclsize, MINCLSIZE);
559 atomic_set_long_nonlocked(&mbstat[i].m_mlen, MLEN);
560 atomic_set_long_nonlocked(&mbstat[i].m_mhlen, MHLEN);
561 }
984263bc 562
563 /*
 564 * Create object caches and save cluster limits, which will
565 * be used to adjust backing kmalloc pools' limit later.
566 */
567
6f21e2f4 568 mb_limit = cl_limit = 0;
569
570 limit = nmbufs;
571 mbuf_cache = objcache_create("mbuf", &limit, 0,
5b7da64a 572 mbuf_ctor, NULL, NULL,
7b6f875f 573 objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
6f21e2f4 574 mb_limit += limit;
575
576 limit = nmbufs;
577 mbufphdr_cache = objcache_create("mbuf pkt hdr", &limit, 64,
5b7da64a 578 mbufphdr_ctor, NULL, NULL,
7b6f875f 579 objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
6f21e2f4 580 mb_limit += limit;
581
582 cl_limit = nmbclusters;
583 mclmeta_cache = objcache_create("cluster mbuf", &cl_limit, 0,
7b6f875f
JH
584 mclmeta_ctor, mclmeta_dtor, NULL,
585 objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args);
586
587 limit = nmbclusters;
588 mbufcluster_cache = objcache_create("mbuf + cluster", &limit, 0,
589 mbufcluster_ctor, mbufcluster_dtor, NULL,
590 objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
6f21e2f4 591 mb_limit += limit;
592
593 limit = nmbclusters;
7b6f875f 594 mbufphdrcluster_cache = objcache_create("mbuf pkt hdr + cluster",
0aa16b5d 595 &limit, 64, mbufphdrcluster_ctor, mbufcluster_dtor, NULL,
7b6f875f 596 objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
6f21e2f4 597 mb_limit += limit;
598
599 /*
600 * Adjust backing kmalloc pools' limit
601 *
602 * NOTE: We raise the limit by another 1/8 to take the effect
603 * of loosememuse into account.
0aa16b5d 604 */
3f98f485 605 cl_limit += cl_limit / 8;
606 kmalloc_raise_limit(mclmeta_malloc_args.mtype,
607 mclmeta_malloc_args.objsize * cl_limit);
608 kmalloc_raise_limit(M_MBUFCL, MCLBYTES * cl_limit);
609
3f98f485 610 mb_limit += mb_limit / 8;
611 kmalloc_raise_limit(mbuf_malloc_args.mtype,
612 mbuf_malloc_args.objsize * mb_limit);
90775e29 613}
984263bc 614
615/*
616 * Return the number of references to this mbuf's data. 0 is returned
617 * if the mbuf is not M_EXT, a reference count is returned if it is
7b6f875f 618 * M_EXT | M_EXT_CLUSTER, and 99 is returned if it is a special M_EXT.
619 */
620int
621m_sharecount(struct mbuf *m)
622{
623 switch (m->m_flags & (M_EXT | M_EXT_CLUSTER)) {
624 case 0:
625 return (0);
626 case M_EXT:
627 return (99);
628 case M_EXT | M_EXT_CLUSTER:
629 return (((struct mbcluster *)m->m_ext.ext_arg)->mcl_refs);
630 }
631 /* NOTREACHED */
632 return (0); /* to shut up compiler */
633}
634
635/*
636 * change mbuf to new type
637 */
638void
639m_chtype(struct mbuf *m, int type)
640{
641 struct globaldata *gd = mycpu;
642
643 atomic_add_long_nonlocked(&mbtypes[gd->gd_cpuid][type], 1);
644 atomic_subtract_long_nonlocked(&mbtypes[gd->gd_cpuid][m->m_type], 1);
645 atomic_set_short_nonlocked(&m->m_type, type);
646}
647
984263bc 648static void
8a3125c6 649m_reclaim(void)
984263bc 650{
651 struct domain *dp;
652 struct protosw *pr;
984263bc 653
654 kprintf("Debug: m_reclaim() called\n");
655
9c70fe43 656 SLIST_FOREACH(dp, &domains, dom_next) {
8a3125c6 657 for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
658 if (pr->pr_drain)
659 (*pr->pr_drain)();
660 }
661 }
4c1e2509 662 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_drain, 1);
663}
664
665static void __inline
666updatestats(struct mbuf *m, int type)
667{
4c1e2509 668 struct globaldata *gd = mycpu;
7b6f875f 669
fcd1202a 670 m->m_type = type;
e9fa4b60 671 mbuftrack(m);
672#ifdef MBUF_DEBUG
673 KASSERT(m->m_next == NULL, ("mbuf %p: bad m_next in get", m));
674 KASSERT(m->m_nextpkt == NULL, ("mbuf %p: bad m_nextpkt in get", m));
675#endif
676
677 atomic_add_long_nonlocked(&mbtypes[gd->gd_cpuid][type], 1);
678 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mbufs, 1);
679
680}
681
984263bc 682/*
7b6f875f 683 * Allocate an mbuf.
684 */
685struct mbuf *
8a3125c6 686m_get(int how, int type)
984263bc 687{
12496bdf 688 struct mbuf *m;
689 int ntries = 0;
690 int ocf = MBTOM(how);
12496bdf 691
692retryonce:
693
694 m = objcache_get(mbuf_cache, ocf);
695
696 if (m == NULL) {
697 if ((how & MB_TRYWAIT) && ntries++ == 0) {
698 struct objcache *reclaimlist[] = {
699 mbufphdr_cache,
700 mbufcluster_cache,
701 mbufphdrcluster_cache
702 };
703 const int nreclaims = __arysize(reclaimlist);
704
705 if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
706 m_reclaim();
707 goto retryonce;
c6339e39 708 }
a5955b15 709 ++mbstat[mycpu->gd_cpuid].m_drops;
7b6f875f 710 return (NULL);
12496bdf 711 }
712#ifdef MBUF_DEBUG
713 KASSERT(m->m_data == m->m_dat, ("mbuf %p: bad m_data in get", m));
714#endif
c6339e39 715
7b6f875f 716 updatestats(m, type);
984263bc
MD
717 return (m);
718}
719
720struct mbuf *
8a3125c6 721m_gethdr(int how, int type)
984263bc 722{
12496bdf 723 struct mbuf *m;
724 int ocf = MBTOM(how);
725 int ntries = 0;
12496bdf 726
7b6f875f
JH
727retryonce:
728
729 m = objcache_get(mbufphdr_cache, ocf);
730
731 if (m == NULL) {
732 if ((how & MB_TRYWAIT) && ntries++ == 0) {
733 struct objcache *reclaimlist[] = {
734 mbuf_cache,
735 mbufcluster_cache, mbufphdrcluster_cache
736 };
737 const int nreclaims = __arysize(reclaimlist);
738
739 if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
740 m_reclaim();
741 goto retryonce;
c6339e39 742 }
a5955b15 743 ++mbstat[mycpu->gd_cpuid].m_drops;
7b6f875f 744 return (NULL);
12496bdf 745 }
746#ifdef MBUF_DEBUG
747 KASSERT(m->m_data == m->m_pktdat, ("mbuf %p: bad m_data in get", m));
748#endif
c6339e39 749
7b6f875f 750 updatestats(m, type);
984263bc
MD
751 return (m);
752}
753
754/*
755 * Get a mbuf (not a mbuf cluster!) and zero it.
756 * Deprecated.
757 */
984263bc 758struct mbuf *
8a3125c6 759m_getclr(int how, int type)
984263bc 760{
1fd87d54 761 struct mbuf *m;
984263bc 762
763 m = m_get(how, type);
764 if (m != NULL)
765 bzero(m->m_data, MLEN);
766 return (m);
767}
768
769/*
7b6f875f 770 * Returns an mbuf with an attached cluster.
771 * Because many network drivers use this kind of buffers a lot, it is
772 * convenient to keep a small pool of free buffers of this kind.
773 * Even a small size such as 10 gives about 10% improvement in the
774 * forwarding rate in a bridge or router.
984263bc 775 */
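/*
 * Illustrative sketch (not part of the original code): the usual
 * driver-style allocation of a packet header mbuf with a cluster already
 * attached; "rxlen" is a hypothetical receive length (<= MCLBYTES).
 */
#if 0
	struct mbuf *m;

	m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return;				/* drop the frame */
	m->m_len = m->m_pkthdr.len = rxlen;	/* caller-initialized */
#endif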
776struct mbuf *
777m_getcl(int how, short type, int flags)
778{
779 struct mbuf *m;
780 int ocflags = MBTOM(how);
781 int ntries = 0;
984263bc 782
783retryonce:
784
785 if (flags & M_PKTHDR)
786 m = objcache_get(mbufphdrcluster_cache, ocflags);
787 else
788 m = objcache_get(mbufcluster_cache, ocflags);
789
790 if (m == NULL) {
791 if ((how & MB_TRYWAIT) && ntries++ == 0) {
792 struct objcache *reclaimlist[1];
793
794 if (flags & M_PKTHDR)
795 reclaimlist[0] = mbufcluster_cache;
796 else
797 reclaimlist[0] = mbufphdrcluster_cache;
798 if (!objcache_reclaimlist(reclaimlist, 1, ocflags))
799 m_reclaim();
800 goto retryonce;
984263bc 801 }
a5955b15 802 ++mbstat[mycpu->gd_cpuid].m_drops;
7b6f875f 803 return (NULL);
984263bc 804 }
7b6f875f 805
806#ifdef MBUF_DEBUG
807 KASSERT(m->m_data == m->m_ext.ext_buf,
808 ("mbuf %p: bad m_data in get", m));
809#endif
810 m->m_type = type;
811
e9fa4b60 812 mbuftrack(m);
813
814 atomic_add_long_nonlocked(&mbtypes[mycpu->gd_cpuid][type], 1);
815 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
7b6f875f 816 return (m);
817}
818
819/*
820 * Allocate chain of requested length.
821 */
822struct mbuf *
823m_getc(int len, int how, int type)
824{
825 struct mbuf *n, *nfirst = NULL, **ntail = &nfirst;
826 int nsize;
827
828 while (len > 0) {
829 n = m_getl(len, how, type, 0, &nsize);
830 if (n == NULL)
831 goto failed;
832 n->m_len = 0;
833 *ntail = n;
834 ntail = &n->m_next;
835 len -= nsize;
836 }
837 return (nfirst);
838
839failed:
840 m_freem(nfirst);
841 return (NULL);
842}
843
844/*
845 * Allocate len-worth of mbufs and/or mbuf clusters (whatever fits best)
846 * and return a pointer to the head of the allocated chain. If m0 is
847 * non-null, then we assume that it is a single mbuf or an mbuf chain to
848 * which we want len bytes worth of mbufs and/or clusters attached, and so
50503f0f 849 * if we succeed in allocating it, we will just return a pointer to m0.
850 *
851 * If we happen to fail at any point during the allocation, we will free
852 * up everything we have already allocated and return NULL.
853 *
50503f0f 854 * Deprecated. Use m_getc() and m_cat() instead.
855 */
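/*
 * Illustrative sketch (not part of the original code): the m_getc()/m_cat()
 * combination recommended above in place of m_getm(); "m0" and "len" are
 * hypothetical.
 */
#if 0
	struct mbuf *n;

	n = m_getc(len, MB_WAIT, MT_DATA);	/* new chain covering len bytes */
	if (n != NULL)
		m_cat(m0, n);			/* append it to the existing chain */
#endif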
856struct mbuf *
dc14b0a9 857m_getm(struct mbuf *m0, int len, int type, int how)
984263bc 858{
50503f0f 859 struct mbuf *nfirst;
984263bc 860
50503f0f 861 nfirst = m_getc(len, how, type);
984263bc 862
863 if (m0 != NULL) {
864 m_last(m0)->m_next = nfirst;
865 return (m0);
866 }
867
50503f0f 868 return (nfirst);
869}
870
871/*
872 * Adds a cluster to a normal mbuf, M_EXT is set on success.
873 * Deprecated. Use m_getcl() instead.
b6650ec0 874 */
875void
876m_mclget(struct mbuf *m, int how)
b6650ec0 877{
7b6f875f 878 struct mbcluster *mcl;
b6650ec0 879
77e294a1 880 KKASSERT((m->m_flags & M_EXT) == 0);
7b6f875f 881 mcl = objcache_get(mclmeta_cache, MBTOM(how));
882 if (mcl != NULL) {
883 linkcluster(m, mcl);
884 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters,
885 1);
886 } else {
887 ++mbstat[mycpu->gd_cpuid].m_drops;
c3ef87ca 888 }
889}
890
891/*
892 * Updates to mbcluster must be MPSAFE. Only an entity which already has
893 * a reference to the cluster can ref it, so we are in no danger of
894 * racing an add with a subtract. But the operation must still be atomic
895 * since multiple entities may have a reference on the cluster.
896 *
897 * m_mclfree() is almost the same but it must contend with two entities
5bd48c1d 898 * freeing the cluster at the same time.
df8d1020 899 */
90775e29 900static void
7b6f875f 901m_mclref(void *arg)
b6650ec0 902{
7b6f875f 903 struct mbcluster *mcl = arg;
90775e29 904
7b6f875f 905 atomic_add_int(&mcl->mcl_refs, 1);
906}
907
908/*
 909 * When dereferencing a cluster we have to deal with an N->0 race, where
 910 * N entities free their references simultaneously. To do this we use
dee87a60 911 * atomic_fetchadd_int().
1d16b2b5 912 */
90775e29 913static void
7b6f875f 914m_mclfree(void *arg)
b6650ec0 915{
7b6f875f 916 struct mbcluster *mcl = arg;
90775e29 917
dee87a60 918 if (atomic_fetchadd_int(&mcl->mcl_refs, -1) == 1)
77e294a1 919 objcache_put(mclmeta_cache, mcl);
920}
921
922/*
923 * Free a single mbuf and any associated external storage. The successor,
924 * if any, is returned.
984263bc 925 *
b6650ec0 926 * We do need to check non-first mbufs for m_aux, since some existing
927 * code does not call M_PREPEND properly.
928 * (example: call to bpf_mtap from drivers)
929 */
930
931#ifdef MBUF_DEBUG
932
933struct mbuf *
934_m_free(struct mbuf *m, const char *func)
935
936#else
937
984263bc 938struct mbuf *
b6650ec0 939m_free(struct mbuf *m)
940
941#endif
984263bc 942{
b6650ec0 943 struct mbuf *n;
4c1e2509 944 struct globaldata *gd = mycpu;
b6650ec0 945
361af367 946 KASSERT(m->m_type != MT_FREE, ("freeing free mbuf %p", m));
f3f0fc49 947 KASSERT(M_TRAILINGSPACE(m) >= 0, ("overflowed mbuf %p", m));
4c1e2509 948 atomic_subtract_long_nonlocked(&mbtypes[gd->gd_cpuid][m->m_type], 1);
90775e29 949
7b6f875f 950 n = m->m_next;
951
 952 /*
953 * Make sure the mbuf is in constructed state before returning it
954 * to the objcache.
90775e29 955 */
90775e29 956 m->m_next = NULL;
e9fa4b60 957 mbufuntrack(m);
958#ifdef MBUF_DEBUG
959 m->m_hdr.mh_lastfunc = func;
960#endif
961#ifdef notyet
962 KKASSERT(m->m_nextpkt == NULL);
963#else
964 if (m->m_nextpkt != NULL) {
965 static int afewtimes = 10;
966
967 if (afewtimes-- > 0) {
6ea70f76 968 kprintf("mfree: m->m_nextpkt != NULL\n");
7ce2998e 969 print_backtrace(-1);
90775e29 970 }
971 m->m_nextpkt = NULL;
972 }
973#endif
974 if (m->m_flags & M_PKTHDR) {
7b6f875f 975 m_tag_delete_chain(m); /* eliminate XXX JH */
976 }
977
978 m->m_flags &= (M_EXT | M_EXT_CLUSTER | M_CLCACHE | M_PHCACHE);
979
980 /*
981 * Clean the M_PKTHDR state so we can return the mbuf to its original
982 * cache. This is based on the PHCACHE flag which tells us whether
983 * the mbuf was originally allocated out of a packet-header cache
984 * or a non-packet-header cache.
985 */
986 if (m->m_flags & M_PHCACHE) {
987 m->m_flags |= M_PKTHDR;
988 m->m_pkthdr.rcvif = NULL; /* eliminate XXX JH */
989 m->m_pkthdr.csum_flags = 0; /* eliminate XXX JH */
990 m->m_pkthdr.fw_flags = 0; /* eliminate XXX JH */
6b1d6bed 991 SLIST_INIT(&m->m_pkthdr.tags);
90775e29 992 }
7b6f875f 993
994 /*
995 * Handle remaining flags combinations. M_CLCACHE tells us whether
996 * the mbuf was originally allocated from a cluster cache or not,
997 * and is totally separate from whether the mbuf is currently
998 * associated with a cluster.
999 */
1000 switch(m->m_flags & (M_CLCACHE | M_EXT | M_EXT_CLUSTER)) {
1001 case M_CLCACHE | M_EXT | M_EXT_CLUSTER:
1002 /*
1003 * mbuf+cluster cache case. The mbuf was allocated from the
1004 * combined mbuf_cluster cache and can be returned to the
1005 * cache if the cluster hasn't been shared.
1006 */
1007 if (m_sharecount(m) == 1) {
1008 /*
1009 * The cluster has not been shared, we can just
1010 * reset the data pointer and return the mbuf
1011 * to the cluster cache. Note that the reference
1012 * count is left intact (it is still associated with
1013 * an mbuf).
1014 */
1015 m->m_data = m->m_ext.ext_buf;
1016 if (m->m_flags & M_PHCACHE)
1017 objcache_put(mbufphdrcluster_cache, m);
1018 else
1019 objcache_put(mbufcluster_cache, m);
4c1e2509 1020 atomic_subtract_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
1021 } else {
1022 /*
1023 * Hell. Someone else has a ref on this cluster,
1024 * we have to disconnect it which means we can't
1025 * put it back into the mbufcluster_cache, we
1026 * have to destroy the mbuf.
1027 *
1028 * Other mbuf references to the cluster will typically
1029 * be M_EXT | M_EXT_CLUSTER but without M_CLCACHE.
1030 *
1031 * XXX we could try to connect another cluster to
1032 * it.
1033 */
1034 m->m_ext.ext_free(m->m_ext.ext_arg);
1035 m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
1036 if (m->m_flags & M_PHCACHE)
1037 objcache_dtor(mbufphdrcluster_cache, m);
1038 else
1039 objcache_dtor(mbufcluster_cache, m);
7b6f875f 1040 }
1041 break;
1042 case M_EXT | M_EXT_CLUSTER:
1043 /*
1044 * Normal cluster associated with an mbuf that was allocated
 1045 * from the normal mbuf pool rather than the cluster pool.
 1046 * The cluster has to be independently disassociated from the
1047 * mbuf.
1048 */
cb086467 1049 if (m_sharecount(m) == 1)
4c1e2509 1050 atomic_subtract_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
1051 /* fall through */
1052 case M_EXT:
1053 /*
1054 * Normal cluster association case, disconnect the cluster from
1055 * the mbuf. The cluster may or may not be custom.
1056 */
1057 m->m_ext.ext_free(m->m_ext.ext_arg);
1058 m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
1059 /* fall through */
1060 case 0:
1061 /*
1062 * return the mbuf to the mbuf cache.
1063 */
1064 if (m->m_flags & M_PHCACHE) {
1065 m->m_data = m->m_pktdat;
1066 objcache_put(mbufphdr_cache, m);
90775e29 1067 } else {
1068 m->m_data = m->m_dat;
1069 objcache_put(mbuf_cache, m);
90775e29 1070 }
4c1e2509 1071 atomic_subtract_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mbufs, 1);
1072 break;
1073 default:
1074 if (!panicstr)
1075 panic("bad mbuf flags %p %08x\n", m, m->m_flags);
1076 break;
b6650ec0 1077 }
1078 return (n);
1079}
1080
1081#ifdef MBUF_DEBUG
1082
1083void
1084_m_freem(struct mbuf *m, const char *func)
1085{
1086 while (m)
1087 m = _m_free(m, func);
1088}
1089
1090#else
1091
984263bc 1092void
b6650ec0 1093m_freem(struct mbuf *m)
984263bc 1094{
1095 while (m)
1096 m = m_free(m);
1097}
1098
1099#endif
1100
984263bc 1101/*
df80f2ea 1102 * mbuf utility routines
1103 */
1104
1105/*
7b6f875f 1106 * Lesser-used path for M_PREPEND: allocate new mbuf to prepend to chain and
1107 * copy junk along.
1108 */
1109struct mbuf *
8a3125c6 1110m_prepend(struct mbuf *m, int len, int how)
1111{
1112 struct mbuf *mn;
1113
1114 if (m->m_flags & M_PKTHDR)
1115 mn = m_gethdr(how, m->m_type);
1116 else
1117 mn = m_get(how, m->m_type);
7b6f875f 1118 if (mn == NULL) {
984263bc 1119 m_freem(m);
7b6f875f 1120 return (NULL);
1121 }
1122 if (m->m_flags & M_PKTHDR)
1123 M_MOVE_PKTHDR(mn, m);
1124 mn->m_next = m;
1125 m = mn;
1126 if (len < MHLEN)
1127 MH_ALIGN(m, len);
1128 m->m_len = len;
1129 return (m);
1130}
1131
1132/*
1133 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
1134 * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
74f1caca 1135 * The wait parameter is a choice of MB_WAIT/MB_DONTWAIT from caller.
1136 * Note that the copy is read-only, because clusters are not copied,
1137 * only their reference counts are incremented.
1138 */
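/*
 * Illustrative sketch (not part of the original code): taking a read-only,
 * reference-counted copy of an entire chain; "m" is hypothetical and the
 * copy must not be written to without first checking M_WRITABLE().
 */
#if 0
	struct mbuf *copy;

	copy = m_copym(m, 0, M_COPYALL, MB_DONTWAIT);	/* NULL on failure */
#endif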
984263bc 1139struct mbuf *
8a3125c6 1140m_copym(const struct mbuf *m, int off0, int len, int wait)
984263bc 1141{
1142 struct mbuf *n, **np;
1143 int off = off0;
1144 struct mbuf *top;
1145 int copyhdr = 0;
1146
1147 KASSERT(off >= 0, ("m_copym, negative off %d", off));
1148 KASSERT(len >= 0, ("m_copym, negative len %d", len));
5bd48c1d 1149 if (off == 0 && (m->m_flags & M_PKTHDR))
1150 copyhdr = 1;
1151 while (off > 0) {
1152 KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
1153 if (off < m->m_len)
1154 break;
1155 off -= m->m_len;
1156 m = m->m_next;
1157 }
1158 np = &top;
5bd48c1d 1159 top = NULL;
984263bc 1160 while (len > 0) {
7b6f875f 1161 if (m == NULL) {
1162 KASSERT(len == M_COPYALL,
1163 ("m_copym, length > size of mbuf chain"));
1164 break;
1165 }
1166 /*
1167 * Because we are sharing any cluster attachment below,
1168 * be sure to get an mbuf that does not have a cluster
1169 * associated with it.
1170 */
1171 if (copyhdr)
1172 n = m_gethdr(wait, m->m_type);
1173 else
1174 n = m_get(wait, m->m_type);
984263bc 1175 *np = n;
7b6f875f 1176 if (n == NULL)
1177 goto nospace;
1178 if (copyhdr) {
1179 if (!m_dup_pkthdr(n, m, wait))
1180 goto nospace;
1181 if (len == M_COPYALL)
1182 n->m_pkthdr.len -= off0;
1183 else
1184 n->m_pkthdr.len = len;
1185 copyhdr = 0;
1186 }
1187 n->m_len = min(len, m->m_len - off);
1188 if (m->m_flags & M_EXT) {
c3ef87ca 1189 KKASSERT((n->m_flags & M_EXT) == 0);
984263bc 1190 n->m_data = m->m_data + off;
7b6f875f 1191 m->m_ext.ext_ref(m->m_ext.ext_arg);
984263bc 1192 n->m_ext = m->m_ext;
b542cd49 1193 n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
7eccf245 1194 } else {
1195 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
1196 (unsigned)n->m_len);
7eccf245 1197 }
1198 if (len != M_COPYALL)
1199 len -= n->m_len;
1200 off = 0;
1201 m = m->m_next;
1202 np = &n->m_next;
1203 }
7b6f875f 1204 if (top == NULL)
4c1e2509 1205 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
1206 return (top);
1207nospace:
1208 m_freem(top);
4c1e2509 1209 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
7b6f875f 1210 return (NULL);
1211}
1212
1213/*
1214 * Copy an entire packet, including header (which must be present).
1215 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
1216 * Note that the copy is read-only, because clusters are not copied,
1217 * only their reference counts are incremented.
1218 * Preserve alignment of the first mbuf so if the creator has left
1219 * some room at the beginning (e.g. for inserting protocol headers)
1220 * the copies also have the room available.
1221 */
1222struct mbuf *
8a3125c6 1223m_copypacket(struct mbuf *m, int how)
1224{
1225 struct mbuf *top, *n, *o;
1226
7f3602fe 1227 n = m_gethdr(how, m->m_type);
1228 top = n;
1229 if (!n)
1230 goto nospace;
1231
1232 if (!m_dup_pkthdr(n, m, how))
1233 goto nospace;
1234 n->m_len = m->m_len;
1235 if (m->m_flags & M_EXT) {
c3ef87ca 1236 KKASSERT((n->m_flags & M_EXT) == 0);
984263bc 1237 n->m_data = m->m_data;
7b6f875f 1238 m->m_ext.ext_ref(m->m_ext.ext_arg);
984263bc 1239 n->m_ext = m->m_ext;
b542cd49 1240 n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
1241 } else {
1242 n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat );
1243 bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
1244 }
1245
1246 m = m->m_next;
1247 while (m) {
7b6f875f 1248 o = m_get(how, m->m_type);
1249 if (!o)
1250 goto nospace;
1251
1252 n->m_next = o;
1253 n = n->m_next;
1254
1255 n->m_len = m->m_len;
1256 if (m->m_flags & M_EXT) {
c3ef87ca 1257 KKASSERT((n->m_flags & M_EXT) == 0);
984263bc 1258 n->m_data = m->m_data;
7b6f875f 1259 m->m_ext.ext_ref(m->m_ext.ext_arg);
984263bc 1260 n->m_ext = m->m_ext;
b542cd49 1261 n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
1262 } else {
1263 bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
1264 }
1265
1266 m = m->m_next;
1267 }
1268 return top;
1269nospace:
1270 m_freem(top);
4c1e2509 1271 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
7b6f875f 1272 return (NULL);
1273}
1274
1275/*
1276 * Copy data from an mbuf chain starting "off" bytes from the beginning,
1277 * continuing for "len" bytes, into the indicated buffer.
1278 */
1279void
8a3125c6 1280m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
984263bc 1281{
1fd87d54 1282 unsigned count;
1283
1284 KASSERT(off >= 0, ("m_copydata, negative off %d", off));
1285 KASSERT(len >= 0, ("m_copydata, negative len %d", len));
1286 while (off > 0) {
1287 KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
1288 if (off < m->m_len)
1289 break;
1290 off -= m->m_len;
1291 m = m->m_next;
1292 }
1293 while (len > 0) {
1294 KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
1295 count = min(m->m_len - off, len);
1296 bcopy(mtod(m, caddr_t) + off, cp, count);
1297 len -= count;
1298 cp += count;
1299 off = 0;
1300 m = m->m_next;
1301 }
1302}
1303
1304/*
1305 * Copy a packet header mbuf chain into a completely new chain, including
1306 * copying any mbuf clusters. Use this instead of m_copypacket() when
1307 * you need a writable copy of an mbuf chain.
1308 */
1309struct mbuf *
8a3125c6 1310m_dup(struct mbuf *m, int how)
1311{
1312 struct mbuf **p, *top = NULL;
1313 int remain, moff, nsize;
1314
1315 /* Sanity check */
1316 if (m == NULL)
50503f0f 1317 return (NULL);
5e2195bf 1318 KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __func__));
1319
1320 /* While there's more data, get a new mbuf, tack it on, and fill it */
1321 remain = m->m_pkthdr.len;
1322 moff = 0;
1323 p = &top;
1324 while (remain > 0 || top == NULL) { /* allow m->m_pkthdr.len == 0 */
1325 struct mbuf *n;
1326
1327 /* Get the next new mbuf */
1328 n = m_getl(remain, how, m->m_type, top == NULL ? M_PKTHDR : 0,
1329 &nsize);
1330 if (n == NULL)
1331 goto nospace;
50503f0f 1332 if (top == NULL)
984263bc 1333 if (!m_dup_pkthdr(n, m, how))
50503f0f 1334 goto nospace0;
1335
1336 /* Link it into the new chain */
1337 *p = n;
1338 p = &n->m_next;
1339
1340 /* Copy data from original mbuf(s) into new mbuf */
50503f0f 1341 n->m_len = 0;
1342 while (n->m_len < nsize && m != NULL) {
1343 int chunk = min(nsize - n->m_len, m->m_len - moff);
1344
1345 bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
1346 moff += chunk;
1347 n->m_len += chunk;
1348 remain -= chunk;
1349 if (moff == m->m_len) {
1350 m = m->m_next;
1351 moff = 0;
1352 }
1353 }
1354
1355 /* Check correct total mbuf length */
1356 KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
50503f0f 1357 ("%s: bogus m_pkthdr.len", __func__));
1358 }
1359 return (top);
1360
1361nospace:
1362 m_freem(top);
50503f0f 1363nospace0:
4c1e2509 1364 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
50503f0f 1365 return (NULL);
1366}
1367
1368/*
1369 * Copy the non-packet mbuf data chain into a new set of mbufs, including
1370 * copying any mbuf clusters. This is typically used to realign a data
1371 * chain by nfs_realign().
1372 *
1373 * The original chain is left intact. how should be MB_WAIT or MB_DONTWAIT
1374 * and NULL can be returned if MB_DONTWAIT is passed.
1375 *
 1376 * Be careful to use cluster mbufs: a large mbuf chain converted to
 1377 * non-cluster mbufs can exhaust our supply of mbufs.
1378 */
1379struct mbuf *
1380m_dup_data(struct mbuf *m, int how)
1381{
1382 struct mbuf **p, *n, *top = NULL;
1383 int mlen, moff, chunk, gsize, nsize;
1384
1385 /*
1386 * Degenerate case
1387 */
1388 if (m == NULL)
1389 return (NULL);
1390
1391 /*
1392 * Optimize the mbuf allocation but do not get too carried away.
1393 */
1394 if (m->m_next || m->m_len > MLEN)
1395 gsize = MCLBYTES;
1396 else
1397 gsize = MLEN;
1398
1399 /* Chain control */
1400 p = &top;
1401 n = NULL;
1402 nsize = 0;
1403
1404 /*
1405 * Scan the mbuf chain until nothing is left, the new mbuf chain
1406 * will be allocated on the fly as needed.
1407 */
1408 while (m) {
1409 mlen = m->m_len;
1410 moff = 0;
1411
1412 while (mlen) {
1413 KKASSERT(m->m_type == MT_DATA);
 1414 if (n == NULL) {
 1415 n = m_getl(gsize, how, MT_DATA, 0, &nsize);
 1416 if (n == NULL)
 1417 goto nospace;
 1418 n->m_len = 0;
1419 *p = n;
1420 p = &n->m_next;
1421 }
1422 chunk = imin(mlen, nsize);
1423 bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
1424 mlen -= chunk;
1425 moff += chunk;
1426 n->m_len += chunk;
1427 nsize -= chunk;
1428 if (nsize == 0)
1429 n = NULL;
1430 }
1431 m = m->m_next;
1432 }
1433 *p = NULL;
1434 return(top);
1435nospace:
1436 *p = NULL;
1437 m_freem(top);
1438 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
1439 return (NULL);
1440}
1441
1442/*
1443 * Concatenate mbuf chain n to m.
1444 * Both chains must be of the same type (e.g. MT_DATA).
1445 * Any m_pkthdr is not updated.
1446 */
1447void
8a3125c6 1448m_cat(struct mbuf *m, struct mbuf *n)
984263bc 1449{
50503f0f 1450 m = m_last(m);
1451 while (n) {
1452 if (m->m_flags & M_EXT ||
1453 m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
1454 /* just join the two chains */
1455 m->m_next = n;
1456 return;
1457 }
1458 /* splat the data from one into the other */
1459 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
1460 (u_int)n->m_len);
1461 m->m_len += n->m_len;
1462 n = m_free(n);
1463 }
1464}
1465
1466void
8a3125c6 1467m_adj(struct mbuf *mp, int req_len)
984263bc 1468{
1469 int len = req_len;
1470 struct mbuf *m;
1471 int count;
1472
1473 if ((m = mp) == NULL)
1474 return;
1475 if (len >= 0) {
1476 /*
1477 * Trim from head.
1478 */
1479 while (m != NULL && len > 0) {
1480 if (m->m_len <= len) {
1481 len -= m->m_len;
1482 m->m_len = 0;
1483 m = m->m_next;
1484 } else {
1485 m->m_len -= len;
1486 m->m_data += len;
1487 len = 0;
1488 }
1489 }
1490 m = mp;
1491 if (mp->m_flags & M_PKTHDR)
1492 m->m_pkthdr.len -= (req_len - len);
1493 } else {
1494 /*
1495 * Trim from tail. Scan the mbuf chain,
1496 * calculating its length and finding the last mbuf.
1497 * If the adjustment only affects this mbuf, then just
1498 * adjust and return. Otherwise, rescan and truncate
1499 * after the remaining size.
1500 */
1501 len = -len;
1502 count = 0;
1503 for (;;) {
1504 count += m->m_len;
60233e58 1505 if (m->m_next == NULL)
984263bc
MD
1506 break;
1507 m = m->m_next;
1508 }
1509 if (m->m_len >= len) {
1510 m->m_len -= len;
1511 if (mp->m_flags & M_PKTHDR)
1512 mp->m_pkthdr.len -= len;
1513 return;
1514 }
1515 count -= len;
1516 if (count < 0)
1517 count = 0;
1518 /*
1519 * Correct length for chain is "count".
1520 * Find the mbuf with last data, adjust its length,
1521 * and toss data from remaining mbufs on chain.
1522 */
1523 m = mp;
1524 if (m->m_flags & M_PKTHDR)
1525 m->m_pkthdr.len = count;
1526 for (; m; m = m->m_next) {
1527 if (m->m_len >= count) {
1528 m->m_len = count;
1529 break;
1530 }
1531 count -= m->m_len;
1532 }
1533 while (m->m_next)
1534 (m = m->m_next) ->m_len = 0;
1535 }
1536}
1537
1538/*
1539 * Set the m_data pointer of a newly-allocated mbuf
1540 * to place an object of the specified size at the
1541 * end of the mbuf, longword aligned.
1542 */
1543void
1544m_align(struct mbuf *m, int len)
1545{
1546 int adjust;
1547
1548 if (m->m_flags & M_EXT)
1549 adjust = m->m_ext.ext_size - len;
1550 else if (m->m_flags & M_PKTHDR)
1551 adjust = MHLEN - len;
1552 else
1553 adjust = MLEN - len;
1554 m->m_data += adjust &~ (sizeof(long)-1);
1555}
1556
984263bc 1557/*
7b6f875f 1558 * Rearrange an mbuf chain so that len bytes are contiguous
1559 * and in the data area of an mbuf (so that mtod will work for a structure
1560 * of size len). Returns the resulting mbuf chain on success, frees it and
1561 * returns null on failure. If there is room, it will add up to
1562 * max_protohdr-len extra bytes to the contiguous region in an attempt to
1563 * avoid being called next time.
984263bc 1564 */
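/*
 * Illustrative sketch (not part of the original code): the common pattern of
 * making a header contiguous before dereferencing it with mtod();
 * "struct example_hdr" stands in for a real protocol header.
 */
#if 0
	struct example_hdr *eh;

	if (m->m_len < sizeof(*eh)) {
		m = m_pullup(m, sizeof(*eh));
		if (m == NULL)
			return;		/* chain was freed by m_pullup() */
	}
	eh = mtod(m, struct example_hdr *);
#endif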
984263bc 1565struct mbuf *
8a3125c6 1566m_pullup(struct mbuf *n, int len)
984263bc 1567{
1568 struct mbuf *m;
1569 int count;
1570 int space;
1571
1572 /*
1573 * If first mbuf has no cluster, and has room for len bytes
1574 * without shifting current data, pullup into it,
1575 * otherwise allocate a new mbuf to prepend to the chain.
1576 */
1577 if (!(n->m_flags & M_EXT) &&
1578 n->m_data + len < &n->m_dat[MLEN] &&
1579 n->m_next) {
1580 if (n->m_len >= len)
1581 return (n);
1582 m = n;
1583 n = n->m_next;
1584 len -= m->m_len;
1585 } else {
1586 if (len > MHLEN)
1587 goto bad;
1588 if (n->m_flags & M_PKTHDR)
1589 m = m_gethdr(MB_DONTWAIT, n->m_type);
1590 else
1591 m = m_get(MB_DONTWAIT, n->m_type);
7b6f875f 1592 if (m == NULL)
1593 goto bad;
1594 m->m_len = 0;
1595 if (n->m_flags & M_PKTHDR)
1596 M_MOVE_PKTHDR(m, n);
1597 }
1598 space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
1599 do {
1600 count = min(min(max(len, max_protohdr), space), n->m_len);
1601 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
1602 (unsigned)count);
1603 len -= count;
1604 m->m_len += count;
1605 n->m_len -= count;
1606 space -= count;
1607 if (n->m_len)
1608 n->m_data += count;
1609 else
1610 n = m_free(n);
1611 } while (len > 0 && n);
1612 if (len > 0) {
7b6f875f 1613 m_free(m);
1614 goto bad;
1615 }
1616 m->m_next = n;
1617 return (m);
1618bad:
1619 m_freem(n);
4c1e2509 1620 atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
7b6f875f 1621 return (NULL);
1622}
1623
1624/*
1625 * Partition an mbuf chain in two pieces, returning the tail --
1626 * all but the first len0 bytes. In case of failure, it returns NULL and
1627 * attempts to restore the chain to its original state.
1628 *
1629 * Note that the resulting mbufs might be read-only, because the new
1630 * mbuf can end up sharing an mbuf cluster with the original mbuf if
1631 * the "breaking point" happens to lie within a cluster mbuf. Use the
1632 * M_WRITABLE() macro to check for this case.
1633 */
1634struct mbuf *
8a3125c6 1635m_split(struct mbuf *m0, int len0, int wait)
984263bc 1636{
1fd87d54 1637 struct mbuf *m, *n;
1638 unsigned len = len0, remain;
1639
1640 for (m = m0; m && len > m->m_len; m = m->m_next)
1641 len -= m->m_len;
1642 if (m == NULL)
1643 return (NULL);
1644 remain = m->m_len - len;
1645 if (m0->m_flags & M_PKTHDR) {
1646 n = m_gethdr(wait, m0->m_type);
1647 if (n == NULL)
1648 return (NULL);
1649 n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
1650 n->m_pkthdr.len = m0->m_pkthdr.len - len0;
1651 m0->m_pkthdr.len = len0;
1652 if (m->m_flags & M_EXT)
1653 goto extpacket;
1654 if (remain > MHLEN) {
1655 /* m can't be the lead packet */
1656 MH_ALIGN(n, 0);
1657 n->m_next = m_split(m, len, wait);
1658 if (n->m_next == NULL) {
1659 m_free(n);
1660 return (NULL);
1661 } else {
1662 n->m_len = 0;
1663 return (n);
1664 }
1665 } else
1666 MH_ALIGN(n, remain);
1667 } else if (remain == 0) {
1668 n = m->m_next;
1669 m->m_next = 0;
1670 return (n);
1671 } else {
1672 n = m_get(wait, m->m_type);
1673 if (n == NULL)
1674 return (NULL);
1675 M_ALIGN(n, remain);
1676 }
1677extpacket:
1678 if (m->m_flags & M_EXT) {
c3ef87ca 1679 KKASSERT((n->m_flags & M_EXT) == 0);
984263bc 1680 n->m_data = m->m_data + len;
7b6f875f 1681 m->m_ext.ext_ref(m->m_ext.ext_arg);
7eccf245 1682 n->m_ext = m->m_ext;
b542cd49 1683 n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
1684 } else {
1685 bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
1686 }
1687 n->m_len = remain;
1688 m->m_len = len;
1689 n->m_next = m->m_next;
1690 m->m_next = 0;
1691 return (n);
1692}
50503f0f 1693
1694/*
1695 * Routine to copy from device local memory into mbufs.
50503f0f 1696 * Note: "offset" is ill-defined and always called as 0, so ignore it.
1697 */
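/*
 * Illustrative sketch (not part of the original code): copying a received
 * frame out of device-local memory; a NULL copy routine makes m_devget()
 * fall back to bcopy().  "devbuf", "framelen" and "ifp" are hypothetical.
 */
#if 0
	struct mbuf *m;

	m = m_devget(devbuf, framelen, 0, ifp, NULL);
	if (m == NULL)
		return;			/* out of mbufs */
#endif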
1698struct mbuf *
1699m_devget(char *buf, int len, int offset, struct ifnet *ifp,
1700 void (*copy)(volatile const void *from, volatile void *to, size_t length))
984263bc 1701{
1702 struct mbuf *m, *mfirst = NULL, **mtail;
1703 int nsize, flags;
1704
1705 if (copy == NULL)
1706 copy = bcopy;
1707 mtail = &mfirst;
1708 flags = M_PKTHDR;
1709
1710 while (len > 0) {
1711 m = m_getl(len, MB_DONTWAIT, MT_DATA, flags, &nsize);
1712 if (m == NULL) {
1713 m_freem(mfirst);
1714 return (NULL);
984263bc 1715 }
1716 m->m_len = min(len, nsize);
1717
1718 if (flags & M_PKTHDR) {
1719 if (len + max_linkhdr <= nsize)
1720 m->m_data += max_linkhdr;
1721 m->m_pkthdr.rcvif = ifp;
1722 m->m_pkthdr.len = len;
1723 flags = 0;
984263bc 1724 }
1725
1726 copy(buf, m->m_data, (unsigned)m->m_len);
1727 buf += m->m_len;
1728 len -= m->m_len;
1729 *mtail = m;
1730 mtail = &m->m_next;
984263bc 1731 }
1732
1733 return (mfirst);
1734}
1735
1736/*
1737 * Routine to pad mbuf to the specified length 'padto'.
1738 */
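/*
 * Illustrative sketch (not part of the original code): zero-padding a short
 * frame to a fixed minimum length before handing it to hardware; the
 * 60-byte minimum is only an example value.
 */
#if 0
	if (m->m_pkthdr.len < 60 && m_devpad(m, 60) != 0) {
		m_freem(m);		/* ENOBUFS from m_devpad() */
		return;
	}
#endif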
1739int
1740m_devpad(struct mbuf *m, int padto)
1741{
1742 struct mbuf *last = NULL;
1743 int padlen;
1744
1745 if (padto <= m->m_pkthdr.len)
1746 return 0;
1747
1748 padlen = padto - m->m_pkthdr.len;
1749
1750 /* if there's only the packet-header and we can pad there, use it. */
1751 if (m->m_pkthdr.len == m->m_len && M_TRAILINGSPACE(m) >= padlen) {
1752 last = m;
1753 } else {
1754 /*
1755 * Walk packet chain to find last mbuf. We will either
1756 * pad there, or append a new mbuf and pad it
1757 */
1758 for (last = m; last->m_next != NULL; last = last->m_next)
1759 ; /* EMPTY */
1760
1761 /* `last' now points to last in chain. */
1762 if (M_TRAILINGSPACE(last) < padlen) {
1763 struct mbuf *n;
1764
1765 /* Allocate new empty mbuf, pad it. Compact later. */
1766 MGET(n, MB_DONTWAIT, MT_DATA);
1767 if (n == NULL)
1768 return ENOBUFS;
1769 n->m_len = 0;
1770 last->m_next = n;
1771 last = n;
1772 }
1773 }
1774 KKASSERT(M_TRAILINGSPACE(last) >= padlen);
1775 KKASSERT(M_WRITABLE(last));
1776
1777 /* Now zero the pad area */
1778 bzero(mtod(last, char *) + last->m_len, padlen);
1779 last->m_len += padlen;
1780 m->m_pkthdr.len += padlen;
1781 return 0;
1782}
1783
1784/*
1785 * Copy data from a buffer back into the indicated mbuf chain,
1786 * starting "off" bytes from the beginning, extending the mbuf
1787 * chain if necessary.
1788 */
1789void
8a3125c6 1790m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
984263bc 1791{
1792 int mlen;
1793 struct mbuf *m = m0, *n;
1794 int totlen = 0;
1795
7b6f875f 1796 if (m0 == NULL)
1797 return;
1798 while (off > (mlen = m->m_len)) {
1799 off -= mlen;
1800 totlen += mlen;
7b6f875f 1801 if (m->m_next == NULL) {
74f1caca 1802 n = m_getclr(MB_DONTWAIT, m->m_type);
7b6f875f 1803 if (n == NULL)
984263bc
MD
1804 goto out;
1805 n->m_len = min(MLEN, len + off);
1806 m->m_next = n;
1807 }
1808 m = m->m_next;
1809 }
1810 while (len > 0) {
1811 mlen = min (m->m_len - off, len);
1812 bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
1813 cp += mlen;
1814 len -= mlen;
1815 mlen += off;
1816 off = 0;
1817 totlen += mlen;
1818 if (len == 0)
1819 break;
7b6f875f 1820 if (m->m_next == NULL) {
74f1caca 1821 n = m_get(MB_DONTWAIT, m->m_type);
7b6f875f 1822 if (n == NULL)
984263bc
MD
1823 break;
1824 n->m_len = min(MLEN, len);
1825 m->m_next = n;
1826 }
1827 m = m->m_next;
1828 }
1829out: if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
1830 m->m_pkthdr.len = totlen;
1831}
1832
1833/*
1834 * Append the specified data to the indicated mbuf chain,
1835 * Extend the mbuf chain if the new data does not fit in
1836 * existing space.
1837 *
1838 * Return 1 if able to complete the job; otherwise 0.
1839 */
1840int
1841m_append(struct mbuf *m0, int len, c_caddr_t cp)
1842{
1843 struct mbuf *m, *n;
1844 int remainder, space;
1845
1846 for (m = m0; m->m_next != NULL; m = m->m_next)
1847 ;
1848 remainder = len;
1849 space = M_TRAILINGSPACE(m);
1850 if (space > 0) {
1851 /*
1852 * Copy into available space.
1853 */
1854 if (space > remainder)
1855 space = remainder;
1856 bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
1857 m->m_len += space;
1858 cp += space, remainder -= space;
1859 }
1860 while (remainder > 0) {
1861 /*
1862 * Allocate a new mbuf; could check space
1863 * and allocate a cluster instead.
1864 */
1865 n = m_get(MB_DONTWAIT, m->m_type);
1866 if (n == NULL)
1867 break;
1868 n->m_len = min(MLEN, remainder);
1869 bcopy(cp, mtod(n, caddr_t), n->m_len);
1870 cp += n->m_len, remainder -= n->m_len;
1871 m->m_next = n;
1872 m = n;
1873 }
1874 if (m0->m_flags & M_PKTHDR)
1875 m0->m_pkthdr.len += len - remainder;
1876 return (remainder == 0);
1877}
1878
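/*
 * Editorial usage sketch (not part of the original source): append a
 * small trailer to a packet and drop it if no mbuf could be allocated.
 * 'trailer' is a hypothetical fixed-size buffer.
 */
#if 0	/* illustrative only */
	if (m_append(m, sizeof(trailer), (c_caddr_t)trailer) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	/* on success m_append() has already updated m->m_pkthdr.len */
#endif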
920c9f10
AH
1879/*
1880 * Apply function f to the data in an mbuf chain starting "off" bytes from
1881 * the beginning, continuing for "len" bytes.
1882 */
1883int
1884m_apply(struct mbuf *m, int off, int len,
1885 int (*f)(void *, void *, u_int), void *arg)
1886{
1887 u_int count;
1888 int rval;
1889
1890 KASSERT(off >= 0, ("m_apply, negative off %d", off));
1891 KASSERT(len >= 0, ("m_apply, negative len %d", len));
1892 while (off > 0) {
1893 KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
1894 if (off < m->m_len)
1895 break;
1896 off -= m->m_len;
1897 m = m->m_next;
1898 }
1899 while (len > 0) {
1900 KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
1901 count = min(m->m_len - off, len);
1902 rval = (*f)(arg, mtod(m, caddr_t) + off, count);
1903 if (rval)
1904 return (rval);
1905 len -= count;
1906 off = 0;
1907 m = m->m_next;
1908 }
1909 return (0);
1910}
1911
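/*
 * Editorial usage sketch (not part of the original source): m_apply()
 * visits a byte range without linearizing the chain, which makes it a
 * natural fit for checksum-style walks.  'sum_bytes' and 'sum_payload'
 * are hypothetical helpers.
 */
#if 0	/* illustrative only */
static int
sum_bytes(void *arg, void *data, u_int len)
{
	uint32_t *sump = arg;
	const uint8_t *p = data;

	while (len-- > 0)
		*sump += *p++;
	return (0);		/* returning non-zero aborts the walk */
}

static uint32_t
sum_payload(struct mbuf *m, int hdrlen)
{
	uint32_t sum = 0;

	m_apply(m, hdrlen, m->m_pkthdr.len - hdrlen, sum_bytes, &sum);
	return (sum);
}
#endif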
1912/*
 1913 * Return the mbuf and intra-mbuf offset of byte location 'loc' in the chain.
1914 */
1915struct mbuf *
1916m_getptr(struct mbuf *m, int loc, int *off)
1917{
1918
1919 while (loc >= 0) {
1920 /* Normal end of search. */
1921 if (m->m_len > loc) {
1922 *off = loc;
1923 return (m);
1924 } else {
1925 loc -= m->m_len;
1926 if (m->m_next == NULL) {
1927 if (loc == 0) {
1928 /* Point at the end of valid data. */
1929 *off = m->m_len;
1930 return (m);
1931 }
1932 return (NULL);
1933 }
1934 m = m->m_next;
1935 }
1936 }
1937 return (NULL);
1938}
1939
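/*
 * Editorial usage sketch (not part of the original source): find the
 * mbuf and intra-mbuf offset of byte position 'loc' before patching a
 * single byte in place.  'loc' is hypothetical.
 */
#if 0	/* illustrative only */
	struct mbuf *n;
	int off;

	n = m_getptr(m, loc, &off);
	if (n != NULL && off < n->m_len && M_WRITABLE(n))
		*(mtod(n, uint8_t *) + off) = 0xff;
#endif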
984263bc
MD
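/*
 * Debugging aid: dump each mbuf of a packet (its address followed by a
 * hex dump of its data) to the console.
 */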
1940void
1941m_print(const struct mbuf *m)
1942{
1943 int len;
1944 const struct mbuf *m2;
1945
1946 len = m->m_pkthdr.len;
1947 m2 = m;
1948 while (len) {
6ea70f76 1949 kprintf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
984263bc
MD
1950 len -= m2->m_len;
1951 m2 = m2->m_next;
1952 }
1953 return;
1954}
1955
1956/*
1957 * "Move" mbuf pkthdr from "from" to "to".
1958 * "from" must have M_PKTHDR set, and "to" must be empty.
1959 */
1960void
1961m_move_pkthdr(struct mbuf *to, struct mbuf *from)
1962{
e0d05288 1963 KASSERT((to->m_flags & M_PKTHDR), ("m_move_pkthdr: not packet header"));
984263bc 1964
77e294a1 1965 to->m_flags |= from->m_flags & M_COPYFLAGS;
984263bc
MD
1966 to->m_pkthdr = from->m_pkthdr; /* especially tags */
1967 SLIST_INIT(&from->m_pkthdr.tags); /* purge tags from src */
984263bc
MD
1968}
1969
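/*
 * Editorial usage sketch (not part of the original source): hand the
 * packet header (including its m_tag chain) over to a freshly
 * allocated header mbuf that is about to be prepended to the chain.
 */
#if 0	/* illustrative only */
	struct mbuf *n;

	MGETHDR(n, MB_DONTWAIT, m->m_type);
	if (n == NULL)
		return (ENOBUFS);
	n->m_len = 0;
	m_move_pkthdr(n, m);	/* 'n' must not already carry data or tags */
	n->m_next = m;
	m->m_flags &= ~M_PKTHDR;
	m = n;
#endif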
1970/*
1971 * Duplicate "from"'s mbuf pkthdr in "to".
1972 * "from" must have M_PKTHDR set, and "to" must be empty.
1973 * In particular, this does a deep copy of the packet tags.
1974 */
1975int
f15db79e 1976m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
984263bc 1977{
7f3602fe
JH
1978 KASSERT((to->m_flags & M_PKTHDR), ("m_dup_pkthdr: not packet header"));
1979
4bac35fc 1980 to->m_flags = (from->m_flags & M_COPYFLAGS) |
c4da22e4 1981 (to->m_flags & ~M_COPYFLAGS);
984263bc
MD
1982 to->m_pkthdr = from->m_pkthdr;
1983 SLIST_INIT(&to->m_pkthdr.tags);
1984 return (m_tag_copy_chain(to, from, how));
1985}
1986
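/*
 * Editorial usage sketch (not part of the original source): the same
 * pattern m_defrag_nofree() uses below -- copy the header and packet
 * tags of an existing packet onto a new chain and give up if the tag
 * copy fails.  'm_new' and 'm_orig' are hypothetical.
 */
#if 0	/* illustrative only */
	if (m_dup_pkthdr(m_new, m_orig, MB_DONTWAIT) == 0) {
		m_freem(m_new);
		return (NULL);
	}
#endif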
1987/*
 1988 * Defragment an mbuf chain, returning the shortest possible
 1989 * chain of mbufs and clusters. If allocation fails and
 1990 * this cannot be completed, NULL will be returned, but
 1991 * the passed-in chain will be unchanged. Upon success,
 1992 * the original chain will be freed and the new chain
 1993 * will be returned.
 1994 *
 1995 * If a non-packet-header mbuf is passed in, the original
 1996 * mbuf (or chain) will be returned unharmed.
c8f5127a
JS
1997 *
 1998 * m_defrag_nofree() does not free the passed-in mbuf.
984263bc
MD
1999 */
2000struct mbuf *
2001m_defrag(struct mbuf *m0, int how)
c8f5127a
JS
2002{
2003 struct mbuf *m_new;
2004
2005 if ((m_new = m_defrag_nofree(m0, how)) == NULL)
2006 return (NULL);
2007 if (m_new != m0)
2008 m_freem(m0);
2009 return (m_new);
2010}
2011
2012struct mbuf *
2013m_defrag_nofree(struct mbuf *m0, int how)
984263bc
MD
2014{
2015 struct mbuf *m_new = NULL, *m_final = NULL;
61721e90 2016 int progress = 0, length, nsize;
984263bc
MD
2017
2018 if (!(m0->m_flags & M_PKTHDR))
2019 return (m0);
2020
2021#ifdef MBUF_STRESS_TEST
2022 if (m_defragrandomfailures) {
0ced1954 2023 int temp = karc4random() & 0xff;
984263bc
MD
2024 if (temp == 0xba)
2025 goto nospace;
2026 }
2027#endif
2028
61721e90 2029 m_final = m_getl(m0->m_pkthdr.len, how, MT_DATA, M_PKTHDR, &nsize);
984263bc
MD
2030 if (m_final == NULL)
2031 goto nospace;
61721e90 2032 m_final->m_len = 0; /* in case m0->m_pkthdr.len is zero */
984263bc 2033
3641b7ca 2034 if (m_dup_pkthdr(m_final, m0, how) == 0)
984263bc
MD
2035 goto nospace;
2036
2037 m_new = m_final;
2038
2039 while (progress < m0->m_pkthdr.len) {
2040 length = m0->m_pkthdr.len - progress;
2041 if (length > MCLBYTES)
2042 length = MCLBYTES;
2043
2044 if (m_new == NULL) {
61721e90 2045 m_new = m_getl(length, how, MT_DATA, 0, &nsize);
984263bc
MD
2046 if (m_new == NULL)
2047 goto nospace;
2048 }
2049
2050 m_copydata(m0, progress, length, mtod(m_new, caddr_t));
2051 progress += length;
2052 m_new->m_len = length;
2053 if (m_new != m_final)
2054 m_cat(m_final, m_new);
2055 m_new = NULL;
2056 }
2057 if (m0->m_next == NULL)
2058 m_defraguseless++;
984263bc 2059 m_defragpackets++;
c8f5127a
JS
2060 m_defragbytes += m_final->m_pkthdr.len;
2061 return (m_final);
984263bc
MD
2062nospace:
2063 m_defragfailure++;
2064 if (m_new)
2065 m_free(m_new);
61721e90 2066 m_freem(m_final);
984263bc
MD
2067 return (NULL);
2068}
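/*
 * Editorial usage sketch (not part of the original source): a driver
 * whose DMA engine supports only a few segments can try to collapse a
 * long transmit chain; on failure the original chain is still intact
 * and can be dropped by the caller.
 */
#if 0	/* illustrative only */
	struct mbuf *m_new;

	m_new = m_defrag(m, MB_DONTWAIT);
	if (m_new == NULL) {
		m_freem(m);	/* m_defrag() left the original untouched */
		return (ENOBUFS);
	}
	m = m_new;		/* the original chain has been freed */
#endif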
0c33f36d
JH
2069
2070/*
 2071 * Move data from a uio into a newly allocated mbuf chain.
0c33f36d
JH
2072 */
2073struct mbuf *
e12241e1 2074m_uiomove(struct uio *uio)
0c33f36d 2075{
0c33f36d 2076 struct mbuf *m; /* current working mbuf */
e12241e1
JH
2077 struct mbuf *head = NULL; /* result mbuf chain */
2078 struct mbuf **mp = &head;
e54488bb
MD
2079 int flags = M_PKTHDR;
2080 int nsize;
2081 int error;
2082 int resid;
0c33f36d 2083
0c33f36d 2084 do {
e54488bb
MD
2085 if (uio->uio_resid > INT_MAX)
2086 resid = INT_MAX;
2087 else
2088 resid = (int)uio->uio_resid;
e12241e1 2089 m = m_getl(resid, MB_WAIT, MT_DATA, flags, &nsize);
61721e90
JH
2090 if (flags) {
2091 m->m_pkthdr.len = 0;
2092 /* Leave room for protocol headers. */
2093 if (resid < MHLEN)
2094 MH_ALIGN(m, resid);
2095 flags = 0;
0c33f36d 2096 }
e54488bb 2097 m->m_len = imin(nsize, resid);
61721e90 2098 error = uiomove(mtod(m, caddr_t), m->m_len, uio);
0c33f36d
JH
2099 if (error) {
2100 m_free(m);
2101 goto failed;
2102 }
0c33f36d
JH
2103 *mp = m;
2104 mp = &m->m_next;
61721e90 2105 head->m_pkthdr.len += m->m_len;
e54488bb 2106 } while (uio->uio_resid > 0);
0c33f36d
JH
2107
2108 return (head);
2109
2110failed:
61721e90 2111 m_freem(head);
0c33f36d
JH
2112 return (NULL);
2113}
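/*
 * Editorial usage sketch (not part of the original source): gather the
 * contents of a user I/O request into a fresh packet on a send path.
 * m_uiomove() waits for mbufs (MB_WAIT) and returns NULL if the copy
 * from the uio fails.
 */
#if 0	/* illustrative only */
	struct mbuf *m;

	m = m_uiomove(uio);
	if (m == NULL)
		return (EFAULT);	/* treat a failed copy as a fault */
	/* m->m_pkthdr.len now reflects the number of bytes gathered */
#endif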
df80f2ea 2114
50503f0f
JH
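/*
 * Return the last mbuf in a chain.
 */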
2115struct mbuf *
2116m_last(struct mbuf *m)
2117{
2118 while (m->m_next)
2119 m = m->m_next;
2120 return (m);
2121}
2122
df80f2ea
JH
2123/*
2124 * Return the number of bytes in an mbuf chain.
2125 * If lastm is not NULL, also return the last mbuf.
2126 */
2127u_int
2128m_lengthm(struct mbuf *m, struct mbuf **lastm)
2129{
2130 u_int len = 0;
2131 struct mbuf *prev = m;
2132
2133 while (m) {
2134 len += m->m_len;
2135 prev = m;
2136 m = m->m_next;
2137 }
2138 if (lastm != NULL)
2139 *lastm = prev;
2140 return (len);
2141}
2142
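/*
 * Editorial usage sketch (not part of the original source): obtain the
 * total byte count of a chain and a pointer to its tail mbuf in a
 * single pass.
 */
#if 0	/* illustrative only */
	struct mbuf *last;
	u_int len;

	len = m_lengthm(m, &last);
	kprintf("chain %p: %u bytes, tail mbuf %p\n", m, len, last);
#endif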
2143/*
 2144 * Like m_lengthm(), except it also returns the mbuf memory usage via '*pmbcnt'.
2145 */
2146u_int
2147m_countm(struct mbuf *m, struct mbuf **lastm, u_int *pmbcnt)
2148{
2149 u_int len = 0, mbcnt = 0;
2150 struct mbuf *prev = m;
2151
2152 while (m) {
2153 len += m->m_len;
2154 mbcnt += MSIZE;
2155 if (m->m_flags & M_EXT)
2156 mbcnt += m->m_ext.ext_size;
2157 prev = m;
2158 m = m->m_next;
2159 }
2160 if (lastm != NULL)
2161 *lastm = prev;
2162 *pmbcnt = mbcnt;
2163 return (len);
2164}
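/*
 * Editorial usage sketch (not part of the original source): socket
 * buffer style accounting -- one pass yields both the payload byte
 * count and the amount of mbuf memory backing the chain.
 */
#if 0	/* illustrative only */
	struct mbuf *last;
	u_int bytes, mbcnt;

	bytes = m_countm(m, &last, &mbcnt);
	kprintf("%u payload bytes in %u bytes of mbuf storage\n",
	    bytes, mbcnt);
#endif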