/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
 * $DragonFly: src/sys/kern/uipc_mbuf.c,v 1.19 2004/06/06 11:49:54 hmp Exp $
 */

#include "opt_param.h"
#include "opt_mbuf_stress_test.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/uio.h>
#include <sys/thread.h>
#include <sys/globaldata.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#ifdef INVARIANTS
#include <machine/cpu.h>
#endif

static void mbinit (void *);
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)

struct mbuf *mbutl;
struct mbuf *mbute;
char *mclrefcnt;
struct mbstat mbstat;
u_long mbtypes[MT_NTYPES];
struct mbuf *mmbfree;
union mcluster *mclfree;
int max_linkhdr;
int max_protohdr;
int max_hdr;
int max_datalen;
int m_defragpackets;
int m_defragbytes;
int m_defraguseless;
int m_defragfailure;
#ifdef MBUF_STRESS_TEST
int m_defragrandomfailures;
#endif

int nmbclusters;
int nmbufs;
u_int m_mballoc_wid = 0;
u_int m_clalloc_wid = 0;

SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
        &max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
        &max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
        &max_datalen, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
        &mbuf_wait, 0, "");
SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RW, &mbstat, mbstat, "");
SYSCTL_OPAQUE(_kern_ipc, OID_AUTO, mbtypes, CTLFLAG_RD, mbtypes,
        sizeof(mbtypes), "LU", "");
SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
        &nmbclusters, 0, "Maximum number of mbuf clusters available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
        "Maximum number of mbufs available");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
        &m_defragpackets, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
        &m_defragbytes, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
        &m_defraguseless, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
        &m_defragfailure, 0, "");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
        &m_defragrandomfailures, 0, "");
#endif

static void m_reclaim (void);

#ifndef NMBCLUSTERS
#define NMBCLUSTERS     (512 + maxusers * 16)
#endif
#ifndef NMBUFS
#define NMBUFS          (nmbclusters * 4)
#endif

/*
 * Perform sanity checks of tunables declared above.
 */
static void
tunable_mbinit(void *dummy)
{

        /*
         * This has to be done before VM init.
         */
        nmbclusters = NMBCLUSTERS;
        TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
        nmbufs = NMBUFS;
        TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
        /* Sanity checks */
        if (nmbufs < nmbclusters * 2)
                nmbufs = nmbclusters * 2;

        return;
}
SYSINIT(tunable_mbinit, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_mbinit, NULL);
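
/*
 * A note on the TUNABLE_INT_FETCH() calls above: both limits are plain
 * loader tunables, so they can be raised from /boot/loader.conf before
 * boot.  The values below are arbitrary examples, not recommendations:
 *
 *	kern.ipc.nmbclusters="65536"
 *	kern.ipc.nmbufs="131072"
 *
 * tunable_mbinit() then lifts nmbufs to at least twice nmbclusters if
 * the configured value is lower.
 */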

/* "number of clusters of pages" */
#define NCL_INIT        1

#define NMB_INIT        16

/* ARGSUSED*/
static void
mbinit(void *dummy)
{
        int s;

        mmbfree = NULL; mclfree = NULL;
        mbstat.m_msize = MSIZE;
        mbstat.m_mclbytes = MCLBYTES;
        mbstat.m_minclsize = MINCLSIZE;
        mbstat.m_mlen = MLEN;
        mbstat.m_mhlen = MHLEN;

        s = splimp();
        if (m_mballoc(NMB_INIT, MB_DONTWAIT) == 0)
                goto bad;
#if MCLBYTES <= PAGE_SIZE
        if (m_clalloc(NCL_INIT, MB_DONTWAIT) == 0)
                goto bad;
#else
        /* It's OK to call contigmalloc in this context. */
        if (m_clalloc(16, MB_WAIT) == 0)
                goto bad;
#endif
        splx(s);
        return;
bad:
        panic("mbinit");
}

/*
 * Allocate at least nmb mbufs and place on mbuf free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_mballoc(int nmb, int how)
{
        caddr_t p;
        int i;
        int nbytes;

        /*
         * If we've hit the mbuf limit, stop allocating from mb_map
         * (or trying to) in order to avoid dipping into the section of
         * mb_map which we've "reserved" for clusters.
         */
        if ((nmb + mbstat.m_mbufs) > nmbufs)
                return (0);

        /*
         * Once we run out of map space, it will be impossible to get
         * any more (nothing is ever freed back to the map)
         * -- however you are not dead as m_reclaim might
         * still be able to free a substantial amount of space.
         *
         * XXX Furthermore, we can also work with "recycled" mbufs (when
         * we're calling with MB_WAIT the sleep procedure will be woken
         * up when an mbuf is freed.  See m_mballoc_wait()).
         */
        if (mb_map_full)
                return (0);

        nbytes = round_page(nmb * MSIZE);
        p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT);
        if (p == 0 && how == MB_WAIT) {
                mbstat.m_wait++;
                p = (caddr_t)kmem_malloc(mb_map, nbytes, M_WAITOK);
        }

        /*
         * Either the map is now full, or `how' is MB_DONTWAIT and there
         * are no pages left.
         */
        if (p == NULL)
                return (0);

        nmb = nbytes / MSIZE;
        for (i = 0; i < nmb; i++) {
                ((struct mbuf *)p)->m_next = mmbfree;
                mmbfree = (struct mbuf *)p;
                p += MSIZE;
        }
        mbstat.m_mbufs += nmb;
        mbtypes[MT_FREE] += nmb;
        return (1);
}

/*
 * Once the mb_map has been exhausted and if the call to the allocation macros
 * (or, in some cases, functions) is with MB_WAIT, then it is necessary to rely
 * solely on reclaimed mbufs.  Here we wait for an mbuf to be freed for a
 * designated (mbuf_wait) time.
 */
struct mbuf *
m_mballoc_wait(int caller, int type)
{
        struct mbuf *p;
        int s;

        s = splimp();
        m_mballoc_wid++;
        if ((tsleep(&m_mballoc_wid, 0, "mballc", mbuf_wait)) == EWOULDBLOCK)
                m_mballoc_wid--;
        splx(s);

        /*
         * Now that we (think) that we've got something, we will redo an
         * MGET, but avoid getting into another instance of m_mballoc_wait()
         * XXX: We retry to fetch _even_ if the sleep timed out.  This is left
         * this way, purposely, in the [unlikely] case that an mbuf was
         * freed but the sleep was not awakened in time.
         */
        p = NULL;
        switch (caller) {
        case MGET_C:
                MGET(p, MB_DONTWAIT, type);
                break;
        case MGETHDR_C:
                MGETHDR(p, MB_DONTWAIT, type);
                break;
        default:
                panic("m_mballoc_wait: invalid caller (%d)", caller);
        }

        s = splimp();
        if (p != NULL) {                /* We waited and got something... */
                mbstat.m_wait++;
                /* Wake up another if we have more free. */
                if (mmbfree != NULL)
                        MMBWAKEUP();
        }
        splx(s);
        return (p);
}

#if MCLBYTES > PAGE_SIZE
static int i_want_my_mcl;

static void
kproc_mclalloc(void)
{
        int status;

        while (1) {
                tsleep(&i_want_my_mcl, 0, "mclalloc", 0);

                for (; i_want_my_mcl; i_want_my_mcl--) {
                        if (m_clalloc(1, MB_WAIT) == 0)
                                printf("m_clalloc failed even in process context!\n");
                }
        }
}

static struct thread *mclallocthread;
static struct kproc_desc mclalloc_kp = {
        "mclalloc",
        kproc_mclalloc,
        &mclallocthread
};
SYSINIT(mclallocthread, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start,
        &mclalloc_kp);
#endif

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_clalloc(int ncl, int how)
{
        caddr_t p;
        int i;
        int npg;

        /*
         * If we've hit the mcluster number limit, stop allocating from
         * mb_map (or trying to) in order to avoid dipping into the section
         * of mb_map which we've "reserved" for mbufs.
         */
        if ((ncl + mbstat.m_clusters) > nmbclusters)
                goto m_clalloc_fail;

        /*
         * Once we run out of map space, it will be impossible
         * to get any more (nothing is ever freed back to the
         * map).  From this point on, we solely rely on freed
         * mclusters.
         */
        if (mb_map_full)
                goto m_clalloc_fail;

#if MCLBYTES > PAGE_SIZE
        if (how != MB_WAIT) {
                i_want_my_mcl += ncl;
                wakeup(&i_want_my_mcl);
                mbstat.m_wait++;
                p = 0;
        } else {
                p = contigmalloc_map(MCLBYTES * ncl, M_DEVBUF, M_WAITOK, 0ul,
                                  ~0ul, PAGE_SIZE, 0, mb_map);
        }
#else
        npg = ncl;
        p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
                                 how != MB_WAIT ? M_NOWAIT : M_WAITOK);
        ncl = ncl * PAGE_SIZE / MCLBYTES;
#endif
        /*
         * Either the map is now full, or `how' is MB_DONTWAIT and there
         * are no pages left.
         */
        if (p == NULL) {
                static int last_report;	/* when we did that (in ticks) */
m_clalloc_fail:
                mbstat.m_drops++;
                if (ticks < last_report || (ticks - last_report) >= hz) {
                        last_report = ticks;
                        printf("All mbuf clusters exhausted, please see tuning(7).\n");
                }
                return (0);
        }

        for (i = 0; i < ncl; i++) {
                ((union mcluster *)p)->mcl_next = mclfree;
                mclfree = (union mcluster *)p;
                p += MCLBYTES;
                mbstat.m_clfree++;
        }
        mbstat.m_clusters += ncl;
        return (1);
}

/*
 * Once the mb_map submap has been exhausted and the allocation is called with
 * MB_WAIT, we rely on the mclfree union pointers.  If nothing is free, we will
 * sleep for a designated amount of time (mbuf_wait) or until we're woken up
 * due to sudden mcluster availability.
 */
caddr_t
m_clalloc_wait(void)
{
        caddr_t p;
        int s;

        /* If in interrupt context, and INVARIANTS, maintain sanity and die. */
        KASSERT(mycpu->gd_intr_nesting_level == 0, ("CLALLOC: CANNOT WAIT IN INTERRUPT"));

        /* Sleep until something's available or until we expire. */
        m_clalloc_wid++;
        if ((tsleep(&m_clalloc_wid, 0, "mclalc", mbuf_wait)) == EWOULDBLOCK)
                m_clalloc_wid--;

        /*
         * Now that we (think) that we've got something, we will redo an
         * MCLALLOC, but avoid getting into another instance of m_clalloc_wait()
         */
        p = m_mclalloc(MB_DONTWAIT);

        s = splimp();
        if (p != NULL) {        /* We waited and got something... */
                mbstat.m_wait++;
                /* Wake up another if we have more free. */
                if (mclfree != NULL)
                        MCLWAKEUP();
        }

        splx(s);
        return (p);
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(int i, int t)
{
        struct mbuf *m;
        int ms;

        /*
         * Must only do the reclaim if not in an interrupt context.
         */
        if (i == MB_WAIT) {
                KASSERT(mycpu->gd_intr_nesting_level == 0,
                    ("MBALLOC: CANNOT WAIT IN INTERRUPT"));
                m_reclaim();
        }

        ms = splimp();
        if (mmbfree == NULL)
                (void)m_mballoc(1, i);
        m = mmbfree;
        if (m != NULL) {
                mmbfree = m->m_next;
                mbtypes[MT_FREE]--;
                m->m_type = t;
                mbtypes[t]++;
                m->m_next = NULL;
                m->m_nextpkt = NULL;
                m->m_data = m->m_dat;
                m->m_flags = 0;
                splx(ms);
                mbstat.m_wait++;
        } else {
                static int last_report;	/* when we did that (in ticks) */

                splx(ms);
                mbstat.m_drops++;
                if (ticks < last_report || (ticks - last_report) >= hz) {
                        last_report = ticks;
                        printf("All mbufs exhausted, please see tuning(7).\n");
                }
        }

        return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(int i, int t)
{
        struct mbuf *m;
        int ms;

        /*
         * Must only do the reclaim if not in an interrupt context.
         */
        if (i == MB_WAIT) {
                KASSERT(mycpu->gd_intr_nesting_level == 0,
                    ("MBALLOC: CANNOT WAIT IN INTERRUPT"));
                m_reclaim();
        }

        ms = splimp();
        if (mmbfree == NULL)
                (void)m_mballoc(1, i);
        m = mmbfree;
        if (m != NULL) {
                mmbfree = m->m_next;
                mbtypes[MT_FREE]--;
                m->m_type = t;
                mbtypes[t]++;
                m->m_next = NULL;
                m->m_nextpkt = NULL;
                m->m_data = m->m_pktdat;
                m->m_flags = M_PKTHDR;
                m->m_pkthdr.rcvif = NULL;
                SLIST_INIT(&m->m_pkthdr.tags);
                m->m_pkthdr.csum_flags = 0;
                splx(ms);
                mbstat.m_wait++;
        } else {
                static int last_report;	/* when we did that (in ticks) */

                splx(ms);
                mbstat.m_drops++;
                if (ticks < last_report || (ticks - last_report) >= hz) {
                        last_report = ticks;
                        printf("All mbufs exhausted, please see tuning(7).\n");
                }
        }

        return (m);
}

static void
m_reclaim(void)
{
        struct domain *dp;
        struct protosw *pr;
        int s = splimp();

        for (dp = domains; dp; dp = dp->dom_next) {
                for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
                        if (pr->pr_drain)
                                (*pr->pr_drain)();
                }
        }
        splx(s);
        mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(int how, int type)
{
        struct mbuf *m;
        int ms;

        ms = splimp();
        if (mmbfree == NULL)
                (void)m_mballoc(1, how);
        m = mmbfree;
        if (m != NULL) {
                mmbfree = m->m_next;
                mbtypes[MT_FREE]--;
                m->m_type = type;
                mbtypes[type]++;
                m->m_next = NULL;
                m->m_nextpkt = NULL;
                m->m_data = m->m_dat;
                m->m_flags = 0;
                splx(ms);
        } else {
                splx(ms);
                m = m_retry(how, type);
                if (m == NULL && how == MB_WAIT)
                        m = m_mballoc_wait(MGET_C, type);
        }
        return (m);
}

struct mbuf *
m_gethdr(int how, int type)
{
        struct mbuf *m;
        int ms;

        ms = splimp();
        if (mmbfree == NULL)
                (void)m_mballoc(1, how);
        m = mmbfree;
        if (m != NULL) {
                mmbfree = m->m_next;
                mbtypes[MT_FREE]--;
                m->m_type = type;
                mbtypes[type]++;
                m->m_next = NULL;
                m->m_nextpkt = NULL;
                m->m_data = m->m_pktdat;
                m->m_flags = M_PKTHDR;
                m->m_pkthdr.rcvif = NULL;
                SLIST_INIT(&m->m_pkthdr.tags);
                m->m_pkthdr.csum_flags = 0;
                splx(ms);
        } else {
                splx(ms);
                m = m_retryhdr(how, type);
                if (m == NULL && how == MB_WAIT)
                        m = m_mballoc_wait(MGETHDR_C, type);
        }
        return (m);
}

struct mbuf *
m_getclr(int how, int type)
{
        struct mbuf *m;

        MGET(m, how, type);
        if (m == 0)
                return (0);
        bzero(mtod(m, caddr_t), MLEN);
        return (m);
}

/*
 * m_getcl() returns an mbuf with an attached cluster.
 * Because many network drivers use this kind of buffer a lot, it is
 * convenient to keep a small pool of free buffers of this kind.
 * Even a small size such as 10 gives about 10% improvement in the
 * forwarding rate in a bridge or router.
 * The size of this free list is controlled by the sysctl variable
 * mcl_pool_max.  The list is populated on m_freem(), and used in
 * m_getcl() if elements are available.
 */
static struct mbuf *mcl_pool;
static int mcl_pool_now;
static int mcl_pool_max = 10;

SYSCTL_INT(_kern_ipc, OID_AUTO, mcl_pool_max, CTLFLAG_RW, &mcl_pool_max, 0,
        "Maximum number of mbufs+cluster in free list");
SYSCTL_INT(_kern_ipc, OID_AUTO, mcl_pool_now, CTLFLAG_RD, &mcl_pool_now, 0,
        "Current number of mbufs+cluster in free list");

struct mbuf *
m_getcl(int how, short type, int flags)
{
        int s = splimp();
        struct mbuf *mp;

        if (flags & M_PKTHDR) {
                if (type == MT_DATA && mcl_pool) {
                        mp = mcl_pool;
                        mcl_pool = mp->m_nextpkt;
                        mcl_pool_now--;
                        splx(s);
                        mp->m_nextpkt = NULL;
                        mp->m_data = mp->m_ext.ext_buf;
                        mp->m_flags = M_PKTHDR|M_EXT;
                        mp->m_pkthdr.rcvif = NULL;
                        mp->m_pkthdr.csum_flags = 0;
                        return mp;
                } else
                        MGETHDR(mp, how, type);
        } else
                MGET(mp, how, type);
        if (mp) {
                MCLGET(mp, how);
                if ((mp->m_flags & M_EXT) == 0) {
                        m_free(mp);
                        mp = NULL;
                }
        }
        splx(s);
        return mp;
}
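
/*
 * A minimal usage sketch of m_getcl(), the driver-style allocation the
 * pool above exists for.  The guard EXAMPLE_SKETCHES and the name
 * example_rx_alloc are hypothetical; this is illustration, not part of
 * the kernel proper.
 */
#ifdef EXAMPLE_SKETCHES
static struct mbuf *
example_rx_alloc(int paylen)
{
        struct mbuf *m;

        /* One packet-header mbuf with a cluster attached, or NULL. */
        m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
        if (m == NULL)
                return (NULL);
        m->m_len = m->m_pkthdr.len = paylen;    /* caller fills the data */
        /* A later m_freem() may recycle this buffer into mcl_pool. */
        return (m);
}
#endif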

/*
 * struct mbuf *
 * m_getm(m, len, how, type)
 *
 * This will allocate len-worth of mbufs and/or mbuf clusters (whatever fits
 * best) and return a pointer to the top of the allocated chain.  If m is
 * non-null, then we assume that it is a single mbuf or an mbuf chain to
 * which we want len bytes worth of mbufs and/or clusters attached, and so
 * if we succeed in allocating it, we will just return a pointer to m.
 *
 * If we happen to fail at any point during the allocation, we will free
 * up everything we have already allocated and return NULL.
 *
 */
struct mbuf *
m_getm(struct mbuf *m, int len, int how, int type)
{
        struct mbuf *top, *tail, *mp, *mtail = NULL;

        KASSERT(len >= 0, ("len is < 0 in m_getm"));

        MGET(mp, how, type);
        if (mp == NULL)
                return (NULL);
        else if (len > MINCLSIZE) {
                MCLGET(mp, how);
                if ((mp->m_flags & M_EXT) == 0) {
                        m_free(mp);
                        return (NULL);
                }
        }
        mp->m_len = 0;
        len -= M_TRAILINGSPACE(mp);

        if (m != NULL)
                for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next);
        else
                m = mp;

        top = tail = mp;
        while (len > 0) {
                MGET(mp, how, type);
                if (mp == NULL)
                        goto failed;

                tail->m_next = mp;
                tail = mp;
                if (len > MINCLSIZE) {
                        MCLGET(mp, how);
                        if ((mp->m_flags & M_EXT) == 0)
                                goto failed;
                }

                mp->m_len = 0;
                len -= M_TRAILINGSPACE(mp);
        }

        if (mtail != NULL)
                mtail->m_next = top;
        return (m);

failed:
        m_freem(top);
        return (NULL);
}
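
/*
 * A minimal usage sketch of m_getm(): extend an existing chain so it can
 * hold "need" more bytes of storage.  The name example_grow is
 * hypothetical, illustration only.
 */
#ifdef EXAMPLE_SKETCHES
static struct mbuf *
example_grow(struct mbuf *m, int need)
{
        /* On failure the new allocations are freed and m is untouched. */
        return (m_getm(m, need, MB_DONTWAIT, MT_DATA));
}
#endif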

/*
 * m_mclalloc() -	Allocates an mbuf cluster.
 */
caddr_t
m_mclalloc(int how)
{
        caddr_t mp;
        int s;

        s = splimp();

        if (mclfree == NULL)
                m_clalloc(1, how);
        mp = (caddr_t)mclfree;
        if (mp != NULL) {
                KKASSERT((struct mbuf *)mp >= mbutl &&
                         (struct mbuf *)mp < mbute);
                mclrefcnt[mtocl(mp)]++;
                mbstat.m_clfree--;
                mclfree = ((union mcluster *)mp)->mcl_next;
                splx(s);
                return(mp);
        }
        splx(s);
        if (how == MB_WAIT)
                return(m_clalloc_wait());
        return(NULL);
}

/*
 * m_mclget() -	Adds a cluster to a normal mbuf, M_EXT is set on success.
 */
void
m_mclget(struct mbuf *m, int how)
{
        m->m_ext.ext_buf = m_mclalloc(how);
        if (m->m_ext.ext_buf != NULL) {
                m->m_data = m->m_ext.ext_buf;
                m->m_flags |= M_EXT;
                m->m_ext.ext_free = NULL;
                m->m_ext.ext_ref = NULL;
                m->m_ext.ext_size = MCLBYTES;
        }
}

static __inline void
_m_mclfree(caddr_t data)
{
        union mcluster *mp = (union mcluster *)data;

        KASSERT(mclrefcnt[mtocl(mp)] > 0, ("freeing free cluster"));
        KKASSERT((struct mbuf *)mp >= mbutl &&
                 (struct mbuf *)mp < mbute);
        if (--mclrefcnt[mtocl(mp)] == 0) {
                mp->mcl_next = mclfree;
                mclfree = mp;
                mbstat.m_clfree++;
                MCLWAKEUP();
        }
}

void
m_mclfree(caddr_t mp)
{
        int s = splimp();
        _m_mclfree(mp);
        splx(s);
}

/*
 * m_free()
 *
 * Free a single mbuf and any associated external storage.  The successor,
 * if any, is returned.
 *
 * We do need to check non-first mbufs for m_aux, since some existing
 * code does not call M_PREPEND properly.
 * (example: call to bpf_mtap from drivers)
 */
struct mbuf *
m_free(struct mbuf *m)
{
        int s;
        struct mbuf *n;

        s = splimp();
        KASSERT(m->m_type != MT_FREE, ("freeing free mbuf"));
        mbtypes[m->m_type]--;
        if ((m->m_flags & M_PKTHDR) != 0)
                m_tag_delete_chain(m, NULL);
        if (m->m_flags & M_EXT) {
                if (m->m_ext.ext_free != NULL) {
                        m->m_ext.ext_free(m->m_ext.ext_buf, m->m_ext.ext_size);
                } else {
                        _m_mclfree(m->m_ext.ext_buf); /* inlined */
                }
        }
        n = m->m_next;
        m->m_type = MT_FREE;
        mbtypes[MT_FREE]++;
        m->m_next = mmbfree;
        mmbfree = m;
        MMBWAKEUP();
        splx(s);

        return (n);
}

void
m_freem(struct mbuf *m)
{
        int s = splimp();

        /*
         * Try to keep a small pool of mbuf+cluster for quick use in
         * device drivers.  A good candidate is a M_PKTHDR buffer with
         * only one cluster attached.  Other mbufs, or those exceeding
         * the pool size, are just m_free'd in the usual way.
         * The following code makes sure that m_next, m_type,
         * m_pkthdr.aux and m_ext.* are properly initialized.
         * Other fields in the mbuf are initialized in m_getcl()
         * upon allocation.
         */
        if (mcl_pool_now < mcl_pool_max && m && m->m_next == NULL &&
            (m->m_flags & (M_PKTHDR|M_EXT)) == (M_PKTHDR|M_EXT) &&
            m->m_type == MT_DATA && M_EXT_WRITABLE(m) ) {
                m_tag_delete_chain(m, NULL);
                m->m_nextpkt = mcl_pool;
                mcl_pool = m;
                mcl_pool_now++;
        } else {
                while (m)
                        m = m_free(m);
        }
        splx(s);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
        struct mbuf *mn;

        MGET(mn, how, m->m_type);
        if (mn == (struct mbuf *)NULL) {
                m_freem(m);
                return ((struct mbuf *)NULL);
        }
        if (m->m_flags & M_PKTHDR)
                M_MOVE_PKTHDR(mn, m);
        mn->m_next = m;
        m = mn;
        if (len < MHLEN)
                MH_ALIGN(m, len);
        m->m_len = len;
        return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of MB_WAIT/MB_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
#define MCFail (mbstat.m_mcfail)

struct mbuf *
m_copym(const struct mbuf *m, int off0, int len, int wait)
{
        struct mbuf *n, **np;
        int off = off0;
        struct mbuf *top;
        int copyhdr = 0;

        KASSERT(off >= 0, ("m_copym, negative off %d", off));
        KASSERT(len >= 0, ("m_copym, negative len %d", len));
        if (off == 0 && m->m_flags & M_PKTHDR)
                copyhdr = 1;
        while (off > 0) {
                KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
                if (off < m->m_len)
                        break;
                off -= m->m_len;
                m = m->m_next;
        }
        np = &top;
        top = 0;
        while (len > 0) {
                if (m == 0) {
                        KASSERT(len == M_COPYALL,
                            ("m_copym, length > size of mbuf chain"));
                        break;
                }
                MGET(n, wait, m->m_type);
                *np = n;
                if (n == 0)
                        goto nospace;
                if (copyhdr) {
                        if (!m_dup_pkthdr(n, m, wait))
                                goto nospace;
                        if (len == M_COPYALL)
                                n->m_pkthdr.len -= off0;
                        else
                                n->m_pkthdr.len = len;
                        copyhdr = 0;
                }
                n->m_len = min(len, m->m_len - off);
                if (m->m_flags & M_EXT) {
                        n->m_data = m->m_data + off;
                        if (m->m_ext.ext_ref == NULL) {
                                atomic_add_char(
                                    &mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);
                        } else {
                                int s = splimp();

                                (*m->m_ext.ext_ref)(m->m_ext.ext_buf,
                                    m->m_ext.ext_size);
                                splx(s);
                        }
                        n->m_ext = m->m_ext;
                        n->m_flags |= M_EXT;
                } else
                        bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
                            (unsigned)n->m_len);
                if (len != M_COPYALL)
                        len -= n->m_len;
                off = 0;
                m = m->m_next;
                np = &n->m_next;
        }
        if (top == 0)
                MCFail++;
        return (top);
nospace:
        m_freem(top);
        MCFail++;
        return (0);
}
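
/*
 * A minimal usage sketch of m_copym(): a cheap "tee" of a whole packet.
 * Clusters are shared by reference, not copied, so the result must be
 * treated as read-only.  The name example_tee is hypothetical.
 */
#ifdef EXAMPLE_SKETCHES
static struct mbuf *
example_tee(struct mbuf *m)
{
        return (m_copym(m, 0, M_COPYALL, MB_DONTWAIT));
}
#endif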

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies also have the room available.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
        struct mbuf *top, *n, *o;

        MGET(n, how, m->m_type);
        top = n;
        if (!n)
                goto nospace;

        if (!m_dup_pkthdr(n, m, how))
                goto nospace;
        n->m_len = m->m_len;
        if (m->m_flags & M_EXT) {
                n->m_data = m->m_data;
                if (m->m_ext.ext_ref == NULL)
                        atomic_add_char(&mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);
                else {
                        int s = splimp();

                        (*m->m_ext.ext_ref)(m->m_ext.ext_buf,
                            m->m_ext.ext_size);
                        splx(s);
                }
                n->m_ext = m->m_ext;
                n->m_flags |= M_EXT;
        } else {
                n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
                bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
        }

        m = m->m_next;
        while (m) {
                MGET(o, how, m->m_type);
                if (!o)
                        goto nospace;

                n->m_next = o;
                n = n->m_next;

                n->m_len = m->m_len;
                if (m->m_flags & M_EXT) {
                        n->m_data = m->m_data;
                        if (m->m_ext.ext_ref == NULL) {
                                atomic_add_char(
                                    &mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);
                        } else {
                                int s = splimp();

                                (*m->m_ext.ext_ref)(m->m_ext.ext_buf,
                                    m->m_ext.ext_size);
                                splx(s);
                        }
                        n->m_ext = m->m_ext;
                        n->m_flags |= M_EXT;
                } else {
                        bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
                }

                m = m->m_next;
        }
        return top;
nospace:
        m_freem(top);
        MCFail++;
        return 0;
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
        unsigned count;

        KASSERT(off >= 0, ("m_copydata, negative off %d", off));
        KASSERT(len >= 0, ("m_copydata, negative len %d", len));
        while (off > 0) {
                KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
                if (off < m->m_len)
                        break;
                off -= m->m_len;
                m = m->m_next;
        }
        while (len > 0) {
                KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
                count = min(m->m_len - off, len);
                bcopy(mtod(m, caddr_t) + off, cp, count);
                len -= count;
                cp += count;
                off = 0;
                m = m->m_next;
        }
}

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(struct mbuf *m, int how)
{
        struct mbuf **p, *top = NULL;
        int remain, moff, nsize;

        /* Sanity check */
        if (m == NULL)
                return (0);
        KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __FUNCTION__));

        /* While there's more data, get a new mbuf, tack it on, and fill it */
        remain = m->m_pkthdr.len;
        moff = 0;
        p = &top;
        while (remain > 0 || top == NULL) {     /* allow m->m_pkthdr.len == 0 */
                struct mbuf *n;

                /* Get the next new mbuf */
                MGET(n, how, m->m_type);
                if (n == NULL)
                        goto nospace;
                if (top == NULL) {              /* first one, must be PKTHDR */
                        if (!m_dup_pkthdr(n, m, how))
                                goto nospace;
                        nsize = MHLEN;
                } else                          /* not the first one */
                        nsize = MLEN;
                if (remain >= MINCLSIZE) {
                        MCLGET(n, how);
                        if ((n->m_flags & M_EXT) == 0) {
                                (void)m_free(n);
                                goto nospace;
                        }
                        nsize = MCLBYTES;
                }
                n->m_len = 0;

                /* Link it into the new chain */
                *p = n;
                p = &n->m_next;

                /* Copy data from original mbuf(s) into new mbuf */
                while (n->m_len < nsize && m != NULL) {
                        int chunk = min(nsize - n->m_len, m->m_len - moff);

                        bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
                        moff += chunk;
                        n->m_len += chunk;
                        remain -= chunk;
                        if (moff == m->m_len) {
                                m = m->m_next;
                                moff = 0;
                        }
                }

                /* Check correct total mbuf length */
                KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
                    ("%s: bogus m_pkthdr.len", __FUNCTION__));
        }
        return (top);

nospace:
        m_freem(top);
        MCFail++;
        return (0);
}
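
/*
 * A minimal sketch of the choice between the two packet-copy routines:
 * m_copypacket() shares clusters (cheap, read-only), m_dup() deep-copies
 * them (writable).  The name example_writable_copy is hypothetical.
 */
#ifdef EXAMPLE_SKETCHES
static struct mbuf *
example_writable_copy(struct mbuf *m)
{
        return (m_dup(m, MB_DONTWAIT)); /* safe to modify afterwards */
}
#endif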

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
        while (m->m_next)
                m = m->m_next;
        while (n) {
                if (m->m_flags & M_EXT ||
                    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
                        /* just join the two chains */
                        m->m_next = n;
                        return;
                }
                /* splat the data from one into the other */
                bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
                    (u_int)n->m_len);
                m->m_len += n->m_len;
                n = m_free(n);
        }
}

void
m_adj(struct mbuf *mp, int req_len)
{
        int len = req_len;
        struct mbuf *m;
        int count;

        if ((m = mp) == NULL)
                return;
        if (len >= 0) {
                /*
                 * Trim from head.
                 */
                while (m != NULL && len > 0) {
                        if (m->m_len <= len) {
                                len -= m->m_len;
                                m->m_len = 0;
                                m = m->m_next;
                        } else {
                                m->m_len -= len;
                                m->m_data += len;
                                len = 0;
                        }
                }
                m = mp;
                if (mp->m_flags & M_PKTHDR)
                        m->m_pkthdr.len -= (req_len - len);
        } else {
                /*
                 * Trim from tail.  Scan the mbuf chain,
                 * calculating its length and finding the last mbuf.
                 * If the adjustment only affects this mbuf, then just
                 * adjust and return.  Otherwise, rescan and truncate
                 * after the remaining size.
                 */
                len = -len;
                count = 0;
                for (;;) {
                        count += m->m_len;
                        if (m->m_next == (struct mbuf *)0)
                                break;
                        m = m->m_next;
                }
                if (m->m_len >= len) {
                        m->m_len -= len;
                        if (mp->m_flags & M_PKTHDR)
                                mp->m_pkthdr.len -= len;
                        return;
                }
                count -= len;
                if (count < 0)
                        count = 0;
                /*
                 * Correct length for chain is "count".
                 * Find the mbuf with last data, adjust its length,
                 * and toss data from remaining mbufs on chain.
                 */
                m = mp;
                if (m->m_flags & M_PKTHDR)
                        m->m_pkthdr.len = count;
                for (; m; m = m->m_next) {
                        if (m->m_len >= count) {
                                m->m_len = count;
                                break;
                        }
                        count -= m->m_len;
                }
                while (m->m_next)
                        (m = m->m_next)->m_len = 0;
        }
}
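
/*
 * A minimal usage sketch of m_adj(): a positive count trims from the
 * head, a negative count trims from the tail, e.g. stripping a 14-byte
 * link header and a 4-byte trailing CRC.  example_strip is hypothetical.
 */
#ifdef EXAMPLE_SKETCHES
static void
example_strip(struct mbuf *m)
{
        m_adj(m, 14);   /* drop the link-level header from the front */
        m_adj(m, -4);   /* drop the trailing CRC from the back */
}
#endif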

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
#define MPFail (mbstat.m_mpfail)

struct mbuf *
m_pullup(struct mbuf *n, int len)
{
        struct mbuf *m;
        int count;
        int space;

        /*
         * If first mbuf has no cluster, and has room for len bytes
         * without shifting current data, pullup into it,
         * otherwise allocate a new mbuf to prepend to the chain.
         */
        if ((n->m_flags & M_EXT) == 0 &&
            n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
                if (n->m_len >= len)
                        return (n);
                m = n;
                n = n->m_next;
                len -= m->m_len;
        } else {
                if (len > MHLEN)
                        goto bad;
                MGET(m, MB_DONTWAIT, n->m_type);
                if (m == 0)
                        goto bad;
                m->m_len = 0;
                if (n->m_flags & M_PKTHDR)
                        M_MOVE_PKTHDR(m, n);
        }
        space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
        do {
                count = min(min(max(len, max_protohdr), space), n->m_len);
                bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
                    (unsigned)count);
                len -= count;
                m->m_len += count;
                n->m_len -= count;
                space -= count;
                if (n->m_len)
                        n->m_data += count;
                else
                        n = m_free(n);
        } while (len > 0 && n);
        if (len > 0) {
                (void) m_free(m);
                goto bad;
        }
        m->m_next = n;
        return (m);
bad:
        m_freem(n);
        MPFail++;
        return (0);
}
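
/*
 * A minimal sketch of the classic m_pullup() idiom used by protocol
 * input routines: make the header contiguous before casting via mtod().
 * struct example_hdr (a hypothetical 20-byte header) and example_parse
 * are illustration only.
 */
#ifdef EXAMPLE_SKETCHES
struct example_hdr { u_char eh_data[20]; };

static struct example_hdr *
example_parse(struct mbuf **mp)
{
        if ((*mp)->m_len < sizeof(struct example_hdr) &&
            (*mp = m_pullup(*mp, sizeof(struct example_hdr))) == NULL)
                return (NULL);          /* chain was freed by m_pullup() */
        return (mtod(*mp, struct example_hdr *));
}
#endif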

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf. Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
        struct mbuf *m, *n;
        unsigned len = len0, remain;

        for (m = m0; m && len > m->m_len; m = m->m_next)
                len -= m->m_len;
        if (m == 0)
                return (0);
        remain = m->m_len - len;
        if (m0->m_flags & M_PKTHDR) {
                MGETHDR(n, wait, m0->m_type);
                if (n == 0)
                        return (0);
                n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
                n->m_pkthdr.len = m0->m_pkthdr.len - len0;
                m0->m_pkthdr.len = len0;
                if (m->m_flags & M_EXT)
                        goto extpacket;
                if (remain > MHLEN) {
                        /* m can't be the lead packet */
                        MH_ALIGN(n, 0);
                        n->m_next = m_split(m, len, wait);
                        if (n->m_next == 0) {
                                (void) m_free(n);
                                return (0);
                        } else {
                                n->m_len = 0;
                                return (n);
                        }
                } else
                        MH_ALIGN(n, remain);
        } else if (remain == 0) {
                n = m->m_next;
                m->m_next = 0;
                return (n);
        } else {
                MGET(n, wait, m->m_type);
                if (n == 0)
                        return (0);
                M_ALIGN(n, remain);
        }
extpacket:
        if (m->m_flags & M_EXT) {
                n->m_flags |= M_EXT;
                n->m_ext = m->m_ext;
                if (m->m_ext.ext_ref == NULL)
                        atomic_add_char(&mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);
                else {
                        int s = splimp();

                        (*m->m_ext.ext_ref)(m->m_ext.ext_buf,
                            m->m_ext.ext_size);
                        splx(s);
                }
                n->m_data = m->m_data + len;
        } else {
                bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
        }
        n->m_len = remain;
        m->m_len = len;
        n->m_next = m->m_next;
        m->m_next = 0;
        return (n);
}
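
/*
 * A minimal usage sketch of m_split(): peel one record off the front of
 * a chain; "m" keeps the first reclen bytes and the returned tail holds
 * the rest.  Both halves may share a cluster afterwards, hence be
 * read-only.  example_split_record is a hypothetical name.
 */
#ifdef EXAMPLE_SKETCHES
static struct mbuf *
example_split_record(struct mbuf *m, int reclen)
{
        /* NULL means allocation failed and m was left intact. */
        return (m_split(m, reclen, MB_DONTWAIT));
}
#endif
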
/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(char *buf, int totlen, int off0, struct ifnet *ifp,
    void (*copy) (char *from, caddr_t to, u_int len))
{
        struct mbuf *m;
        struct mbuf *top = 0, **mp = &top;
        int off = off0, len;
        char *cp;
        char *epkt;

        cp = buf;
        epkt = cp + totlen;
        if (off) {
                cp += off + 2 * sizeof(u_short);
                totlen -= 2 * sizeof(u_short);
        }
        MGETHDR(m, MB_DONTWAIT, MT_DATA);
        if (m == 0)
                return (0);
        m->m_pkthdr.rcvif = ifp;
        m->m_pkthdr.len = totlen;
        m->m_len = MHLEN;

        while (totlen > 0) {
                if (top) {
                        MGET(m, MB_DONTWAIT, MT_DATA);
                        if (m == 0) {
                                m_freem(top);
                                return (0);
                        }
                        m->m_len = MLEN;
                }
                len = min(totlen, epkt - cp);
                if (len >= MINCLSIZE) {
                        MCLGET(m, MB_DONTWAIT);
                        if (m->m_flags & M_EXT)
                                m->m_len = len = min(len, MCLBYTES);
                        else
                                len = m->m_len;
                } else {
                        /*
                         * Place initial small packet/header at end of mbuf.
                         */
                        if (len < m->m_len) {
                                if (top == 0 && len + max_linkhdr <= m->m_len)
                                        m->m_data += max_linkhdr;
                                m->m_len = len;
                        } else
                                len = m->m_len;
                }
                if (copy)
                        copy(cp, mtod(m, caddr_t), (unsigned)len);
                else
                        bcopy(cp, mtod(m, caddr_t), (unsigned)len);
                cp += len;
                *mp = m;
                mp = &m->m_next;
                totlen -= len;
                if (cp == epkt)
                        cp = buf;
        }
        return (top);
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
{
        int mlen;
        struct mbuf *m = m0, *n;
        int totlen = 0;

        if (m0 == 0)
                return;
        while (off > (mlen = m->m_len)) {
                off -= mlen;
                totlen += mlen;
                if (m->m_next == 0) {
                        n = m_getclr(MB_DONTWAIT, m->m_type);
                        if (n == 0)
                                goto out;
                        n->m_len = min(MLEN, len + off);
                        m->m_next = n;
                }
                m = m->m_next;
        }
        while (len > 0) {
                mlen = min (m->m_len - off, len);
                bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
                cp += mlen;
                len -= mlen;
                mlen += off;
                off = 0;
                totlen += mlen;
                if (len == 0)
                        break;
                if (m->m_next == 0) {
                        n = m_get(MB_DONTWAIT, m->m_type);
                        if (n == 0)
                                break;
                        n->m_len = min(MLEN, len);
                        m->m_next = n;
                }
                m = m->m_next;
        }
out:    if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
                m->m_pkthdr.len = totlen;
}
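
/*
 * A minimal usage sketch of m_copyback(): patch a two-byte field at a
 * given offset, letting m_copyback() extend the chain if the offset lies
 * past its current end.  example_patch16 is a hypothetical name.
 */
#ifdef EXAMPLE_SKETCHES
static void
example_patch16(struct mbuf *m, int off, u_short val)
{
        m_copyback(m, off, sizeof(val), (caddr_t)&val);
}
#endif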

void
m_print(const struct mbuf *m)
{
        int len;
        const struct mbuf *m2;

        len = m->m_pkthdr.len;
        m2 = m;
        while (len) {
                printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
                len -= m2->m_len;
                m2 = m2->m_next;
        }
        return;
}

/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{
        KASSERT((to->m_flags & M_EXT) == 0, ("m_move_pkthdr: to has cluster"));

        to->m_flags = from->m_flags & M_COPYFLAGS;
        to->m_data = to->m_pktdat;
        to->m_pkthdr = from->m_pkthdr;          /* especially tags */
        SLIST_INIT(&from->m_pkthdr.tags);       /* purge tags from src */
        from->m_flags &= ~M_PKTHDR;
}

/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
{
        to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
        if ((to->m_flags & M_EXT) == 0)
                to->m_data = to->m_pktdat;
        to->m_pkthdr = from->m_pkthdr;
        SLIST_INIT(&to->m_pkthdr.tags);
        return (m_tag_copy_chain(to, from, how));
}

/*
 * Defragment a mbuf chain, returning the shortest possible
 * chain of mbufs and clusters.  If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed in chain will be unchanged.  Upon success,
 * the original chain will be freed, and the new chain
 * will be returned.
 *
 * If a non-packet header is passed in, the original
 * mbuf (chain?) will be returned unharmed.
 */
struct mbuf *
m_defrag(struct mbuf *m0, int how)
{
        struct mbuf *m_new = NULL, *m_final = NULL;
        int progress = 0, length;

        if (!(m0->m_flags & M_PKTHDR))
                return (m0);

#ifdef MBUF_STRESS_TEST
        if (m_defragrandomfailures) {
                int temp = arc4random() & 0xff;
                if (temp == 0xba)
                        goto nospace;
        }
#endif

        if (m0->m_pkthdr.len > MHLEN)
                m_final = m_getcl(how, MT_DATA, M_PKTHDR);
        else
                m_final = m_gethdr(how, MT_DATA);

        if (m_final == NULL)
                goto nospace;

        if (m_dup_pkthdr(m_final, m0, how) == 0)
                goto nospace;

        m_new = m_final;

        while (progress < m0->m_pkthdr.len) {
                length = m0->m_pkthdr.len - progress;
                if (length > MCLBYTES)
                        length = MCLBYTES;

                if (m_new == NULL) {
                        if (length > MLEN)
                                m_new = m_getcl(how, MT_DATA, 0);
                        else
                                m_new = m_get(how, MT_DATA);
                        if (m_new == NULL)
                                goto nospace;
                }

                m_copydata(m0, progress, length, mtod(m_new, caddr_t));
                progress += length;
                m_new->m_len = length;
                if (m_new != m_final)
                        m_cat(m_final, m_new);
                m_new = NULL;
        }
        if (m0->m_next == NULL)
                m_defraguseless++;
        m_freem(m0);
        m0 = m_final;
        m_defragpackets++;
        m_defragbytes += m0->m_pkthdr.len;
        return (m0);
nospace:
        m_defragfailure++;
        if (m_new)
                m_free(m_new);
        if (m_final)
                m_freem(m_final);
        return (NULL);
}
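
/*
 * A minimal usage sketch of m_defrag(): the usual transmit-path pattern,
 * compacting a chain that has too many segments for a DMA engine.  On
 * failure the original chain is intact, so the caller can still decide
 * what to do with it.  example_compact is a hypothetical name.
 */
#ifdef EXAMPLE_SKETCHES
static struct mbuf *
example_compact(struct mbuf *m, int maxsegs)
{
        struct mbuf *n;
        int segs = 0;

        for (n = m; n != NULL; n = n->m_next)
                segs++;
        if (segs <= maxsegs)
                return (m);
        n = m_defrag(m, MB_DONTWAIT);
        return (n != NULL ? n : m);
}
#endif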

/*
 * Move data from uio into mbufs.
 * A length of zero means copy the whole uio.
 */
struct mbuf *
m_uiomove(struct uio *uio, int wait, int len0)
{
        struct mbuf *head;              /* result mbuf chain */
        struct mbuf *m;                 /* current working mbuf */
        struct mbuf **mp;
        int resid, datalen, error;

        resid = (len0 == 0) ? uio->uio_resid : min(len0, uio->uio_resid);

        head = NULL;
        mp = &head;
        do {
                if (resid > MHLEN) {
                        m = m_getcl(wait, MT_DATA, head == NULL ? M_PKTHDR : 0);
                        if (m == NULL)
                                goto failed;
                        if (m->m_flags & M_PKTHDR)
                                m->m_pkthdr.len = 0;
                } else {
                        if (head == NULL) {
                                MGETHDR(m, wait, MT_DATA);
                                if (m == NULL)
                                        goto failed;
                                m->m_pkthdr.len = 0;
                                /* Leave room for protocol headers. */
                                if (resid < MHLEN)
                                        MH_ALIGN(m, resid);
                        } else {
                                MGET(m, wait, MT_DATA);
                                if (m == NULL)
                                        goto failed;
                        }
                }
                datalen = min(MCLBYTES, resid);
                error = uiomove(mtod(m, caddr_t), datalen, uio);
                if (error) {
                        m_free(m);
                        goto failed;
                }
                m->m_len = datalen;
                *mp = m;
                mp = &m->m_next;
                head->m_pkthdr.len += datalen;
                resid -= datalen;
        } while (resid > 0);

        return (head);

failed:
        if (head)
                m_freem(head);
        return (NULL);
}