Change sendfile() to use the new m_ext callback scheme for cleaning up after
[dragonfly.git] / sys / kern / uipc_mbuf.c

/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 *
 * License terms: all terms for the DragonFly license above plus the following:
 *
 * 4. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *
 *	This product includes software developed by Jeffrey M. Hsu
 *	for the DragonFly Project.
 *
 *    This requirement may be waived with permission from Jeffrey Hsu.
 *    This requirement will sunset and may be removed on July 8 2005,
 *    after which the standard DragonFly license (as shown above) will
 *    apply.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
 * $DragonFly: src/sys/kern/uipc_mbuf.c,v 1.22 2004/07/29 08:46:21 dillon Exp $
 */

#include "opt_param.h"
#include "opt_mbuf_stress_test.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/uio.h>
#include <sys/thread.h>
#include <sys/globaldata.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#ifdef INVARIANTS
#include <machine/cpu.h>
#endif

static void mbinit (void *);
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)

struct mbuf *mbutl;
struct mbuf *mbute;
char *mclrefcnt;
struct mbstat mbstat;
u_long mbtypes[MT_NTYPES];
struct mbuf *mmbfree;
union mcluster *mclfree;
int max_linkhdr;
int max_protohdr;
int max_hdr;
int max_datalen;
int m_defragpackets;
int m_defragbytes;
int m_defraguseless;
int m_defragfailure;
#ifdef MBUF_STRESS_TEST
int m_defragrandomfailures;
#endif

int nmbclusters;
int nmbufs;
u_int m_mballoc_wid = 0;
u_int m_clalloc_wid = 0;

SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
	&max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
	&max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
	&max_datalen, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
	&mbuf_wait, 0, "");
SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RW, &mbstat, mbstat, "");
SYSCTL_OPAQUE(_kern_ipc, OID_AUTO, mbtypes, CTLFLAG_RD, mbtypes,
	sizeof(mbtypes), "LU", "");
SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
	&nmbclusters, 0, "Maximum number of mbuf clusters available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
	"Maximum number of mbufs available");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
	&m_defragpackets, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
	&m_defragbytes, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
	&m_defraguseless, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
	&m_defragfailure, 0, "");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
	&m_defragrandomfailures, 0, "");
#endif

static void m_reclaim (void);

#ifndef NMBCLUSTERS
#define NMBCLUSTERS	(512 + maxusers * 16)
#endif
#ifndef NMBUFS
#define NMBUFS		(nmbclusters * 4)
#endif

/*
 * Perform sanity checks of tunables declared above.
 */
static void
tunable_mbinit(void *dummy)
{

	/*
	 * This has to be done before VM init.
	 */
	nmbclusters = NMBCLUSTERS;
	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	nmbufs = NMBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
	/* Sanity checks */
	if (nmbufs < nmbclusters * 2)
		nmbufs = nmbclusters * 2;

	return;
}
SYSINIT(tunable_mbinit, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_mbinit, NULL);

/* "number of clusters of pages" */
#define NCL_INIT	1

#define NMB_INIT	16

/* ARGSUSED */
static void
mbinit(void *dummy)
{
	int s;

	mmbfree = NULL;
	mclfree = NULL;
	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;

	s = splimp();
	if (m_mballoc(NMB_INIT, MB_DONTWAIT) == 0)
		goto bad;
#if MCLBYTES <= PAGE_SIZE
	if (m_clalloc(NCL_INIT, MB_DONTWAIT) == 0)
		goto bad;
#else
	/* It's OK to call contigmalloc in this context. */
	if (m_clalloc(16, MB_WAIT) == 0)
		goto bad;
#endif
	splx(s);
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate at least nmb mbufs and place on mbuf free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_mballoc(int nmb, int how)
{
	caddr_t p;
	int i;
	int nbytes;

	/*
	 * If we've hit the mbuf limit, stop allocating from mb_map
	 * (or trying to) in order to avoid dipping into the section of
	 * mb_map which we've "reserved" for clusters.
	 */
	if ((nmb + mbstat.m_mbufs) > nmbufs)
		return (0);

	/*
	 * Once we run out of map space, it will be impossible to get
	 * any more (nothing is ever freed back to the map)
	 * -- however you are not dead as m_reclaim might
	 * still be able to free a substantial amount of space.
	 *
	 * XXX Furthermore, we can also work with "recycled" mbufs (when
	 * we're calling with MB_WAIT the sleep procedure will be woken
	 * up when an mbuf is freed.  See m_mballoc_wait()).
	 */
	if (mb_map_full)
		return (0);

	nbytes = round_page(nmb * MSIZE);
	p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT);
	if (p == 0 && how == MB_WAIT) {
		mbstat.m_wait++;
		p = (caddr_t)kmem_malloc(mb_map, nbytes, M_WAITOK);
	}

	/*
	 * Either the map is now full, or `how' is MB_DONTWAIT and there
	 * are no pages left.
	 */
	if (p == NULL)
		return (0);

	nmb = nbytes / MSIZE;
	for (i = 0; i < nmb; i++) {
		((struct mbuf *)p)->m_next = mmbfree;
		mmbfree = (struct mbuf *)p;
		p += MSIZE;
	}
	mbstat.m_mbufs += nmb;
	mbtypes[MT_FREE] += nmb;
	return (1);
}

/*
 * Once the mb_map has been exhausted and if the call to the allocation macros
 * (or, in some cases, functions) is with MB_WAIT, then it is necessary to rely
 * solely on reclaimed mbufs.  Here we wait for an mbuf to be freed for a
 * designated (mbuf_wait) time.
 */
struct mbuf *
m_mballoc_wait(int caller, int type)
{
	struct mbuf *p;
	int s;

	s = splimp();
	m_mballoc_wid++;
	if ((tsleep(&m_mballoc_wid, 0, "mballc", mbuf_wait)) == EWOULDBLOCK)
		m_mballoc_wid--;
	splx(s);

	/*
	 * Now that we (think) that we've got something, we will redo an
	 * MGET, but avoid getting into another instance of m_mballoc_wait().
	 * XXX: We retry to fetch _even_ if the sleep timed out.  This is left
	 *	this way, purposely, in the [unlikely] case that an mbuf was
	 *	freed but the sleep was not awakened in time.
	 */
	p = NULL;
	switch (caller) {
	case MGET_C:
		MGET(p, MB_DONTWAIT, type);
		break;
	case MGETHDR_C:
		MGETHDR(p, MB_DONTWAIT, type);
		break;
	default:
		panic("m_mballoc_wait: invalid caller (%d)", caller);
	}

	s = splimp();
	if (p != NULL) {		/* We waited and got something... */
		mbstat.m_wait++;
		/* Wake up another if we have more free. */
		if (mmbfree != NULL)
			MMBWAKEUP();
	}
	splx(s);
	return (p);
}

#if MCLBYTES > PAGE_SIZE
static int i_want_my_mcl;

static void
kproc_mclalloc(void)
{
	int status;

	while (1) {
		tsleep(&i_want_my_mcl, 0, "mclalloc", 0);

		for (; i_want_my_mcl; i_want_my_mcl--) {
			if (m_clalloc(1, MB_WAIT) == 0)
				printf("m_clalloc failed even in process context!\n");
		}
	}
}

static struct thread *mclallocthread;
static struct kproc_desc mclalloc_kp = {
	"mclalloc",
	kproc_mclalloc,
	&mclallocthread
};
SYSINIT(mclallocthread, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start,
	&mclalloc_kp);
#endif

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_clalloc(int ncl, int how)
{
	caddr_t p;
	int i;
	int npg;

	/*
	 * If we've hit the mcluster number limit, stop allocating from
	 * mb_map (or trying to) in order to avoid dipping into the section
	 * of mb_map which we've "reserved" for mbufs.
	 */
	if ((ncl + mbstat.m_clusters) > nmbclusters)
		goto m_clalloc_fail;

	/*
	 * Once we run out of map space, it will be impossible
	 * to get any more (nothing is ever freed back to the
	 * map).  From this point on, we solely rely on freed
	 * mclusters.
	 */
	if (mb_map_full)
		goto m_clalloc_fail;

#if MCLBYTES > PAGE_SIZE
	if (how != MB_WAIT) {
		i_want_my_mcl += ncl;
		wakeup(&i_want_my_mcl);
		mbstat.m_wait++;
		p = 0;
	} else {
		p = contigmalloc_map(MCLBYTES * ncl, M_DEVBUF, M_WAITOK, 0ul,
		    ~0ul, PAGE_SIZE, 0, mb_map);
	}
#else
	npg = ncl;
	p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
	    how != MB_WAIT ? M_NOWAIT : M_WAITOK);
	ncl = ncl * PAGE_SIZE / MCLBYTES;
#endif
	/*
	 * Either the map is now full, or `how' is MB_DONTWAIT and there
	 * are no pages left.
	 */
	if (p == NULL) {
		static int last_report;	/* when we did that (in ticks) */

m_clalloc_fail:
		mbstat.m_drops++;
		if (ticks < last_report || (ticks - last_report) >= hz) {
			last_report = ticks;
			printf("All mbuf clusters exhausted, please see tuning(7).\n");
		}
		return (0);
	}

	for (i = 0; i < ncl; i++) {
		((union mcluster *)p)->mcl_next = mclfree;
		mclfree = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}

/*
 * Once the mb_map submap has been exhausted and the allocation is called with
 * MB_WAIT, we rely on the mclfree union pointers.  If nothing is free, we will
 * sleep for a designated amount of time (mbuf_wait) or until we're woken up
 * due to sudden mcluster availability.
 */
caddr_t
m_clalloc_wait(void)
{
	caddr_t p;
	int s;

	/* If in interrupt context, and INVARIANTS, maintain sanity and die. */
	KASSERT(mycpu->gd_intr_nesting_level == 0,
	    ("CLALLOC: CANNOT WAIT IN INTERRUPT"));

	/* Sleep until something's available or until we expire. */
	m_clalloc_wid++;
	if ((tsleep(&m_clalloc_wid, 0, "mclalc", mbuf_wait)) == EWOULDBLOCK)
		m_clalloc_wid--;

	/*
	 * Now that we (think) that we've got something, we will redo the
	 * allocation, but avoid getting into another instance of
	 * m_clalloc_wait().
	 */
	p = m_mclalloc(MB_DONTWAIT);

	s = splimp();
	if (p != NULL) {	/* We waited and got something... */
		mbstat.m_wait++;
		/* Wake up another if we have more free. */
		if (mclfree != NULL)
			MCLWAKEUP();
	}

	splx(s);
	return (p);
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(int i, int t)
{
	struct mbuf *m;
	int ms;

	/*
	 * Must only do the reclaim if not in an interrupt context.
	 */
	if (i == MB_WAIT) {
		KASSERT(mycpu->gd_intr_nesting_level == 0,
		    ("MBALLOC: CANNOT WAIT IN INTERRUPT"));
		m_reclaim();
	}

	ms = splimp();
	if (mmbfree == NULL)
		(void)m_mballoc(1, i);
	m = mmbfree;
	if (m != NULL) {
		mmbfree = m->m_next;
		mbtypes[MT_FREE]--;
		m->m_type = t;
		mbtypes[t]++;
		m->m_next = NULL;
		m->m_nextpkt = NULL;
		m->m_data = m->m_dat;
		m->m_flags = 0;
		splx(ms);
		mbstat.m_wait++;
	} else {
		static int last_report;	/* when we did that (in ticks) */

		splx(ms);
		mbstat.m_drops++;
		if (ticks < last_report || (ticks - last_report) >= hz) {
			last_report = ticks;
			printf("All mbufs exhausted, please see tuning(7).\n");
		}
	}

	return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(int i, int t)
{
	struct mbuf *m;
	int ms;

	/*
	 * Must only do the reclaim if not in an interrupt context.
	 */
	if (i == MB_WAIT) {
		KASSERT(mycpu->gd_intr_nesting_level == 0,
		    ("MBALLOC: CANNOT WAIT IN INTERRUPT"));
		m_reclaim();
	}

	ms = splimp();
	if (mmbfree == NULL)
		(void)m_mballoc(1, i);
	m = mmbfree;
	if (m != NULL) {
		mmbfree = m->m_next;
		mbtypes[MT_FREE]--;
		m->m_type = t;
		mbtypes[t]++;
		m->m_next = NULL;
		m->m_nextpkt = NULL;
		m->m_data = m->m_pktdat;
		m->m_flags = M_PKTHDR;
		m->m_pkthdr.rcvif = NULL;
		SLIST_INIT(&m->m_pkthdr.tags);
		m->m_pkthdr.csum_flags = 0;
		splx(ms);
		mbstat.m_wait++;
	} else {
		static int last_report;	/* when we did that (in ticks) */

		splx(ms);
		mbstat.m_drops++;
		if (ticks < last_report || (ticks - last_report) >= hz) {
			last_report = ticks;
			printf("All mbufs exhausted, please see tuning(7).\n");
		}
	}

	return (m);
}

static void
m_reclaim(void)
{
	struct domain *dp;
	struct protosw *pr;
	int s = splimp();

	for (dp = domains; dp; dp = dp->dom_next) {
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
			if (pr->pr_drain)
				(*pr->pr_drain)();
		}
	}
	splx(s);
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(int how, int type)
{
	struct mbuf *m;
	int ms;

	ms = splimp();
	if (mmbfree == NULL)
		(void)m_mballoc(1, how);
	m = mmbfree;
	if (m != NULL) {
		mmbfree = m->m_next;
		mbtypes[MT_FREE]--;
		m->m_type = type;
		mbtypes[type]++;
		m->m_next = NULL;
		m->m_nextpkt = NULL;
		m->m_data = m->m_dat;
		m->m_flags = 0;
		splx(ms);
	} else {
		splx(ms);
		m = m_retry(how, type);
		if (m == NULL && how == MB_WAIT)
			m = m_mballoc_wait(MGET_C, type);
	}
	return (m);
}

struct mbuf *
m_gethdr(int how, int type)
{
	struct mbuf *m;
	int ms;

	ms = splimp();
	if (mmbfree == NULL)
		(void)m_mballoc(1, how);
	m = mmbfree;
	if (m != NULL) {
		mmbfree = m->m_next;
		mbtypes[MT_FREE]--;
		m->m_type = type;
		mbtypes[type]++;
		m->m_next = NULL;
		m->m_nextpkt = NULL;
		m->m_data = m->m_pktdat;
		m->m_flags = M_PKTHDR;
		m->m_pkthdr.rcvif = NULL;
		SLIST_INIT(&m->m_pkthdr.tags);
		m->m_pkthdr.csum_flags = 0;
		splx(ms);
	} else {
		splx(ms);
		m = m_retryhdr(how, type);
		if (m == NULL && how == MB_WAIT)
			m = m_mballoc_wait(MGETHDR_C, type);
	}
	return (m);
}
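
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * allocates a packet header mbuf and bails out cleanly when memory is
 * exhausted.  The ENOBUFS error handling is an assumption for
 * illustration.
 *
 *	struct mbuf *m;
 *
 *	m = m_gethdr(MB_DONTWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_len = 0;
 *	m->m_pkthdr.len = 0;
 *	(fill in data, then pass the mbuf on or release it with m_freem())
 */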

struct mbuf *
m_getclr(int how, int type)
{
	struct mbuf *m;

	MGET(m, how, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

/*
 * m_getcl() returns an mbuf with an attached cluster.
 * Because many network drivers use buffers of this kind a lot, it is
 * convenient to keep a small pool of free buffers of this kind.
 * Even a small size such as 10 gives about 10% improvement in the
 * forwarding rate in a bridge or router.
 * The size of this free list is controlled by the sysctl variable
 * mcl_pool_max.  The list is populated on m_freem(), and used in
 * m_getcl() if elements are available.
 */
static struct mbuf *mcl_pool;
static int mcl_pool_now;
static int mcl_pool_max = 10;

SYSCTL_INT(_kern_ipc, OID_AUTO, mcl_pool_max, CTLFLAG_RW, &mcl_pool_max, 0,
	"Maximum number of mbufs+cluster in free list");
SYSCTL_INT(_kern_ipc, OID_AUTO, mcl_pool_now, CTLFLAG_RD, &mcl_pool_now, 0,
	"Current number of mbufs+cluster in free list");

struct mbuf *
m_getcl(int how, short type, int flags)
{
	int s = splimp();
	struct mbuf *mp;

	if (flags & M_PKTHDR) {
		if (type == MT_DATA && mcl_pool) {
			mp = mcl_pool;
			mcl_pool = mp->m_nextpkt;
			mcl_pool_now--;
			splx(s);
			mp->m_nextpkt = NULL;
			mp->m_data = mp->m_ext.ext_buf;
			mp->m_flags = M_PKTHDR|M_EXT;
			mp->m_pkthdr.rcvif = NULL;
			mp->m_pkthdr.csum_flags = 0;
			return mp;
		} else
			MGETHDR(mp, how, type);
	} else
		MGET(mp, how, type);
	if (mp) {
		MCLGET(mp, how);
		if ((mp->m_flags & M_EXT) == 0) {
			m_free(mp);
			mp = NULL;
		}
	}
	splx(s);
	return mp;
}
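
/*
 * Illustrative sketch (not part of the original file): a receive path
 * that wants an mbuf with a cluster already attached can use m_getcl()
 * and benefit from the small free pool above.  Setting the lengths to
 * MCLBYTES before trimming is an assumption for illustration.
 *
 *	struct mbuf *m;
 *
 *	m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
 *	if (m == NULL)
 *		return;			(drop the frame)
 *	m->m_len = m->m_pkthdr.len = MCLBYTES;
 *	(DMA or copy the frame into mtod(m, caddr_t), trim with m_adj())
 */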

/*
 * struct mbuf *
 * m_getm(m, len, how, type)
 *
 * This will allocate len-worth of mbufs and/or mbuf clusters (whatever fits
 * best) and return a pointer to the top of the allocated chain.  If m is
 * non-null, then we assume that it is a single mbuf or an mbuf chain to
 * which we want len bytes worth of mbufs and/or clusters attached, and so
 * if we succeed in allocating it, we will just return a pointer to m.
 *
 * If we happen to fail at any point during the allocation, we will free
 * up everything we have already allocated and return NULL.
 */
struct mbuf *
m_getm(struct mbuf *m, int len, int how, int type)
{
	struct mbuf *top, *tail, *mp, *mtail = NULL;

	KASSERT(len >= 0, ("len is < 0 in m_getm"));

	MGET(mp, how, type);
	if (mp == NULL)
		return (NULL);
	else if (len > MINCLSIZE) {
		MCLGET(mp, how);
		if ((mp->m_flags & M_EXT) == 0) {
			m_free(mp);
			return (NULL);
		}
	}
	mp->m_len = 0;
	len -= M_TRAILINGSPACE(mp);

	if (m != NULL)
		for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next)
			;
	else
		m = mp;

	top = tail = mp;
	while (len > 0) {
		MGET(mp, how, type);
		if (mp == NULL)
			goto failed;

		tail->m_next = mp;
		tail = mp;
		if (len > MINCLSIZE) {
			MCLGET(mp, how);
			if ((mp->m_flags & M_EXT) == 0)
				goto failed;
		}

		mp->m_len = 0;
		len -= M_TRAILINGSPACE(mp);
	}

	if (mtail != NULL)
		mtail->m_next = top;
	return (m);

failed:
	m_freem(top);
	return (NULL);
}
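
/*
 * Illustrative sketch (not part of the original file): m_getm() can be
 * used to grow an existing chain "m0" by "need" bytes before a bulk
 * copy; on failure the original chain is left untouched.
 *
 *	if (m_getm(m0, need, MB_WAIT, MT_DATA) == NULL)
 *		return (ENOBUFS);
 *	(m0 now has at least "need" bytes of additional buffer space)
 */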

/*
 * m_mclalloc() - Allocates an mbuf cluster.
 */
caddr_t
m_mclalloc(int how)
{
	caddr_t mp;
	int s;

	s = splimp();

	if (mclfree == NULL)
		m_clalloc(1, how);
	mp = (caddr_t)mclfree;
	if (mp != NULL) {
		KKASSERT((struct mbuf *)mp >= mbutl &&
			 (struct mbuf *)mp < mbute);
		mclrefcnt[mtocl(mp)]++;
		mbstat.m_clfree--;
		mclfree = ((union mcluster *)mp)->mcl_next;
		splx(s);
		return(mp);
	}
	splx(s);
	if (how == MB_WAIT)
		return(m_clalloc_wait());
	return(NULL);
}

/*
 * m_mclget() - Adds a cluster to a normal mbuf, M_EXT is set on success.
 */
void
m_mclget(struct mbuf *m, int how)
{
	m->m_ext.ext_buf = m_mclalloc(how);
	if (m->m_ext.ext_buf != NULL) {
		m->m_data = m->m_ext.ext_buf;
		m->m_flags |= M_EXT;
		KKASSERT((m->m_flags & M_EXT_OLD) == 0);
		m->m_ext.ext_nfree.any = NULL;
		m->m_ext.ext_nref.any = NULL;
		m->m_ext.ext_size = MCLBYTES;
	}
}
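
/*
 * Illustrative sketch (not part of the original file): the classic
 * two-step pattern for attaching a cluster to a separately allocated
 * mbuf; M_EXT must be re-checked because m_mclget() fails silently.
 *
 *	MGET(m, MB_DONTWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m_mclget(m, MB_DONTWAIT);
 *	if ((m->m_flags & M_EXT) == 0) {
 *		m_free(m);
 *		return (ENOBUFS);
 *	}
 */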

static __inline void
_m_mclfree(caddr_t data)
{
	union mcluster *mp = (union mcluster *)data;

	KASSERT(mclrefcnt[mtocl(mp)] > 0, ("freeing free cluster"));
	KKASSERT((struct mbuf *)mp >= mbutl &&
		 (struct mbuf *)mp < mbute);
	if (--mclrefcnt[mtocl(mp)] == 0) {
		mp->mcl_next = mclfree;
		mclfree = mp;
		mbstat.m_clfree++;
		MCLWAKEUP();
	}
}

void
m_mclfree(caddr_t mp)
{
	int s = splimp();
	_m_mclfree(mp);
	splx(s);
}

/*
 * Helper routines for M_EXT reference/free
 */
static __inline void
m_extref(const struct mbuf *m)
{
	if (m->m_flags & M_EXT_OLD) {
		if (m->m_ext.ext_nref.old == NULL) {
			atomic_add_char(&mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);
		} else {
			int s = splimp();
			(*m->m_ext.ext_nref.old)(m->m_ext.ext_buf,
						 m->m_ext.ext_size);
			splx(s);
		}
	} else {
		if (m->m_ext.ext_nref.new == NULL) {
			atomic_add_char(&mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);
		} else {
			int s = splimp();
			(*m->m_ext.ext_nref.new)(m->m_ext.ext_arg);
			splx(s);
		}
	}
}

static __inline void
m_extfree(struct mbuf *m)
{
	if (m->m_flags & M_EXT_OLD) {
		if (m->m_ext.ext_nfree.old != NULL) {
			m->m_ext.ext_nfree.old(m->m_ext.ext_buf,
					       m->m_ext.ext_size);
		} else {
			_m_mclfree(m->m_ext.ext_buf);
		}
	} else {
		if (m->m_ext.ext_nfree.new != NULL) {
			m->m_ext.ext_nfree.new(m->m_ext.ext_arg);
		} else {
			_m_mclfree(m->m_ext.ext_buf);
		}
	}
}
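
/*
 * Illustrative sketch (not part of the original file): attaching
 * caller-owned storage using the new-style callbacks, as inferred from
 * m_extref()/m_extfree() above.  Because the new scheme passes only
 * ext_arg to the callbacks, a caller supplying non-cluster storage must
 * install both the reference and the free callback; my_ref(), my_free()
 * and my_arg are hypothetical.
 *
 *	m->m_ext.ext_buf = buffer;
 *	m->m_ext.ext_size = size;
 *	m->m_ext.ext_arg = my_arg;
 *	m->m_ext.ext_nref.new = my_ref;
 *	m->m_ext.ext_nfree.new = my_free;
 *	m->m_data = m->m_ext.ext_buf;
 *	m->m_flags |= M_EXT;		(M_EXT_OLD stays clear)
 */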


/*
 * m_free()
 *
 * Free a single mbuf and any associated external storage.  The successor,
 * if any, is returned.
 *
 * We do need to check non-first mbufs for m_aux, since some existing
 * code does not call M_PREPEND properly.
 * (example: call to bpf_mtap from drivers)
 */
struct mbuf *
m_free(struct mbuf *m)
{
	int s;
	struct mbuf *n;

	s = splimp();
	KASSERT(m->m_type != MT_FREE, ("freeing free mbuf"));
	mbtypes[m->m_type]--;
	if ((m->m_flags & M_PKTHDR) != 0)
		m_tag_delete_chain(m, NULL);
	if (m->m_flags & M_EXT)
		m_extfree(m);
	n = m->m_next;
	m->m_type = MT_FREE;
	mbtypes[MT_FREE]++;
	m->m_next = mmbfree;
	mmbfree = m;
	MMBWAKEUP();
	splx(s);

	return (n);
}

void
m_freem(struct mbuf *m)
{
	int s = splimp();

	/*
	 * Try to keep a small pool of mbuf+cluster for quick use in
	 * device drivers.  A good candidate is a M_PKTHDR buffer with
	 * only one cluster attached.  Other mbufs, or those exceeding
	 * the pool size, are just m_free'd in the usual way.
	 * The following code makes sure that m_next, m_type,
	 * m_pkthdr.aux and m_ext.* are properly initialized.
	 * Other fields in the mbuf are initialized in m_getcl()
	 * upon allocation.
	 */
	if (mcl_pool_now < mcl_pool_max && m && m->m_next == NULL &&
	    (m->m_flags & (M_PKTHDR|M_EXT)) == (M_PKTHDR|M_EXT) &&
	    m->m_type == MT_DATA && M_EXT_WRITABLE(m)) {
		m_tag_delete_chain(m, NULL);
		m->m_nextpkt = mcl_pool;
		mcl_pool = m;
		mcl_pool_now++;
	} else {
		while (m)
			m = m_free(m);
	}
	splx(s);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR)
		M_MOVE_PKTHDR(mn, m);
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of MB_WAIT/MB_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
#define MCFail (mbstat.m_mcfail)

struct mbuf *
m_copym(const struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, wait))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			m_extref(m);
			n->m_ext = m->m_ext;
			n->m_flags |= m->m_flags & (M_EXT | M_EXT_OLD);
		} else {
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		}
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}
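
/*
 * Illustrative sketch (not part of the original file): a
 * retransmission-style caller takes a read-only reference copy of part
 * of a queued chain; "off" and "len" are assumptions for illustration.
 *
 *	struct mbuf *n;
 *
 *	n = m_copym(m, off, len, MB_DONTWAIT);
 *	if (n == NULL)
 *		return (ENOBUFS);
 *	(n shares any clusters with m; do not write through n)
 */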

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies also have the room available.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	if (!m_dup_pkthdr(n, m, how))
		goto nospace;
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		m_extref(m);
		n->m_ext = m->m_ext;
		n->m_flags |= m->m_flags & (M_EXT | M_EXT_OLD);
	} else {
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (!o)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			m_extref(m);
			n->m_ext = m->m_ext;
			n->m_flags |= m->m_flags & (M_EXT | M_EXT_OLD);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	MCFail++;
	return 0;
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
	unsigned count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
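
/*
 * Illustrative sketch (not part of the original file): linearizing a
 * fixed-size header out of a chain before examining fields that may
 * span mbufs; struct myhdr is hypothetical.
 *
 *	struct myhdr hdr;
 *
 *	if (m->m_pkthdr.len < sizeof(hdr))
 *		return (EINVAL);
 *	m_copydata(m, 0, sizeof(hdr), (caddr_t)&hdr);
 */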

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	/* Sanity check */
	if (m == NULL)
		return (0);
	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __FUNCTION__));

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		MGET(n, how, m->m_type);
		if (n == NULL)
			goto nospace;
		if (top == NULL) {		/* first one, must be PKTHDR */
			if (!m_dup_pkthdr(n, m, how))
				goto nospace;
			nsize = MHLEN;
		} else				/* not the first one */
			nsize = MLEN;
		if (remain >= MINCLSIZE) {
			MCLGET(n, how);
			if ((n->m_flags & M_EXT) == 0) {
				(void)m_free(n);
				goto nospace;
			}
			nsize = MCLBYTES;
		}
		n->m_len = 0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __FUNCTION__));
	}
	return (top);

nospace:
	m_freem(top);
	MCFail++;
	return (0);
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}
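
/*
 * Illustrative sketch (not part of the original file): positive lengths
 * trim from the head, negative lengths from the tail.  Stripping a
 * link-level header and a trailing CRC might look like:
 *
 *	m_adj(m, ETHER_HDR_LEN);	(drop the ethernet header)
 *	m_adj(m, -ETHER_CRC_LEN);	(drop the trailing CRC)
 */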

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod will work for a structure
 * of size len).  Returns the resulting mbuf chain on success, frees it and
 * returns null on failure.  If there is room, it will add up to
 * max_protohdr-len extra bytes to the contiguous region in an attempt to
 * avoid being called next time.
 */
#define MPFail (mbstat.m_mpfail)

struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, MB_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR)
			M_MOVE_PKTHDR(m, n);
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}
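
/*
 * Illustrative sketch (not part of the original file): the canonical
 * m_pullup() idiom before dereferencing a header through mtod().  Note
 * that the old chain pointer must not be used after a failure, since
 * m_pullup() frees the chain.
 *
 *	if (m->m_len < sizeof(struct ip)) {
 *		m = m_pullup(m, sizeof(struct ip));
 *		if (m == NULL)
 *			return;		(chain already freed)
 *	}
 *	ip = mtod(m, struct ip *);
 */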

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf.  Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				return (0);
			} else {
				n->m_len = 0;
				return (n);
			}
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data + len;
		m_extref(m);
		n->m_ext = m->m_ext;
		n->m_flags |= m->m_flags & (M_EXT | M_EXT_OLD);
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}
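
/*
 * Illustrative sketch (not part of the original file): a
 * segmentation-style caller splits a packet after "mss" bytes; the
 * remainder comes back as a new chain and the original is truncated in
 * place.
 *
 *	struct mbuf *rest;
 *
 *	rest = m_split(m, mss, MB_DONTWAIT);
 *	if (rest == NULL)
 *		return (ENOBUFS);	(m is unchanged on failure)
 */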

/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(char *buf, int totlen, int off0, struct ifnet *ifp,
    void (*copy) (char *from, caddr_t to, u_int len))
{
	struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	int off = off0, len;
	char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		cp += off + 2 * sizeof(u_short);
		totlen -= 2 * sizeof(u_short);
	}
	MGETHDR(m, MB_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, MB_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, MB_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(MB_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(MB_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:
	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}

void
m_print(const struct mbuf *m)
{
	int len;
	const struct mbuf *m2;

	len = m->m_pkthdr.len;
	m2 = m;
	while (len) {
		printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
		len -= m2->m_len;
		m2 = m2->m_next;
	}
	return;
}

/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{
	KASSERT((to->m_flags & M_EXT) == 0, ("m_move_pkthdr: to has cluster"));

	to->m_flags = from->m_flags & M_COPYFLAGS;
	to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
	from->m_flags &= ~M_PKTHDR;
}

/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
{
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, how));
}

/*
 * Defragment a mbuf chain, returning the shortest possible
 * chain of mbufs and clusters.  If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed in chain will be unchanged.  Upon success,
 * the original chain will be freed, and the new chain
 * will be returned.
 *
 * If a non-packet header is passed in, the original
 * mbuf (chain?) will be returned unharmed.
 */
struct mbuf *
m_defrag(struct mbuf *m0, int how)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, length;

	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

#ifdef MBUF_STRESS_TEST
	if (m_defragrandomfailures) {
		int temp = arc4random() & 0xff;
		if (temp == 0xba)
			goto nospace;
	}
#endif

	if (m0->m_pkthdr.len > MHLEN)
		m_final = m_getcl(how, MT_DATA, M_PKTHDR);
	else
		m_final = m_gethdr(how, MT_DATA);

	if (m_final == NULL)
		goto nospace;

	if (!m_dup_pkthdr(m_final, m0, how))
		goto nospace;

	m_new = m_final;

	while (progress < m0->m_pkthdr.len) {
		length = m0->m_pkthdr.len - progress;
		if (length > MCLBYTES)
			length = MCLBYTES;

		if (m_new == NULL) {
			if (length > MLEN)
				m_new = m_getcl(how, MT_DATA, 0);
			else
				m_new = m_get(how, MT_DATA);
			if (m_new == NULL)
				goto nospace;
		}

		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
		progress += length;
		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
	if (m0->m_next == NULL)
		m_defraguseless++;
	m_freem(m0);
	m0 = m_final;
	m_defragpackets++;
	m_defragbytes += m0->m_pkthdr.len;
	return (m0);
nospace:
	m_defragfailure++;
	if (m_new)
		m_free(m_new);
	if (m_final)
		m_freem(m_final);
	return (NULL);
}
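
/*
 * Illustrative sketch (not part of the original file): a transmit path
 * whose DMA engine supports only a few segments can compact a long
 * chain before giving up on the packet.
 *
 *	struct mbuf *m2;
 *
 *	m2 = m_defrag(m, MB_DONTWAIT);
 *	if (m2 == NULL)
 *		(keep using m; the original chain is unchanged)
 *	else
 *		m = m2;		(m_defrag() freed the original chain)
 */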

/*
 * Move data from uio into mbufs.
 * A length of zero means copy the whole uio.
 */
struct mbuf *
m_uiomove(struct uio *uio, int wait, int len0)
{
	struct mbuf *head;		/* result mbuf chain */
	struct mbuf *m;			/* current working mbuf */
	struct mbuf **mp;
	int resid, datalen, error;

	resid = (len0 == 0) ? uio->uio_resid : min(len0, uio->uio_resid);

	head = NULL;
	mp = &head;
	do {
		if (resid > MHLEN) {
			m = m_getcl(wait, MT_DATA, head == NULL ? M_PKTHDR : 0);
			if (m == NULL)
				goto failed;
			if (m->m_flags & M_PKTHDR)
				m->m_pkthdr.len = 0;
		} else {
			if (head == NULL) {
				MGETHDR(m, wait, MT_DATA);
				if (m == NULL)
					goto failed;
				m->m_pkthdr.len = 0;
				/* Leave room for protocol headers. */
				if (resid < MHLEN)
					MH_ALIGN(m, resid);
			} else {
				MGET(m, wait, MT_DATA);
				if (m == NULL)
					goto failed;
			}
		}
		datalen = min(MCLBYTES, resid);
		error = uiomove(mtod(m, caddr_t), datalen, uio);
		if (error) {
			m_free(m);
			goto failed;
		}
		m->m_len = datalen;
		*mp = m;
		mp = &m->m_next;
		head->m_pkthdr.len += datalen;
		resid -= datalen;
	} while (resid > 0);

	return (head);

failed:
	if (head)
		m_freem(head);
	return (NULL);
}
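
/*
 * Illustrative sketch (not part of the original file): a write(2)-style
 * path turns the caller's uio directly into a packet chain.
 *
 *	struct mbuf *m;
 *
 *	m = m_uiomove(uio, MB_WAIT, 0);	(0 = copy the whole uio)
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	(m->m_pkthdr.len now reflects the bytes consumed from uio)
 */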