ms_cmd has changed to a union, update the test code.
[dragonfly.git] / sys / kern / uipc_mbuf.c
/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
 * $DragonFly: src/sys/kern/uipc_mbuf.c,v 1.17 2004/06/02 14:42:57 eirikn Exp $
 */

#include "opt_param.h"
#include "opt_mbuf_stress_test.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/uio.h>
#include <sys/thread.h>
#include <sys/globaldata.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#ifdef INVARIANTS
#include <machine/cpu.h>
#endif

static void mbinit (void *);
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)

struct mbuf *mbutl;
struct mbuf *mbute;
char *mclrefcnt;
struct mbstat mbstat;
u_long mbtypes[MT_NTYPES];
struct mbuf *mmbfree;
union mcluster *mclfree;
int max_linkhdr;
int max_protohdr;
int max_hdr;
int max_datalen;
int m_defragpackets;
int m_defragbytes;
int m_defraguseless;
int m_defragfailure;
#ifdef MBUF_STRESS_TEST
int m_defragrandomfailures;
#endif

int nmbclusters;
int nmbufs;
u_int m_mballoc_wid = 0;
u_int m_clalloc_wid = 0;

SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
	&max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
	&max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
	&max_datalen, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
	&mbuf_wait, 0, "");
SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RW, &mbstat, mbstat, "");
SYSCTL_OPAQUE(_kern_ipc, OID_AUTO, mbtypes, CTLFLAG_RD, mbtypes,
	sizeof(mbtypes), "LU", "");
SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
	&nmbclusters, 0, "Maximum number of mbuf clusters available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
	"Maximum number of mbufs available");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
	&m_defragpackets, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
	&m_defragbytes, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
	&m_defraguseless, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
	&m_defragfailure, 0, "");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
	&m_defragrandomfailures, 0, "");
#endif

static void m_reclaim (void);

#ifndef NMBCLUSTERS
#define NMBCLUSTERS	(512 + maxusers * 16)
#endif
#ifndef NMBUFS
#define NMBUFS		(nmbclusters * 4)
#endif

/*
 * Perform sanity checks of tunables declared above.
 */
static void
tunable_mbinit(void *dummy)
{

	/*
	 * This has to be done before VM init.
	 */
	nmbclusters = NMBCLUSTERS;
	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	nmbufs = NMBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
	/* Sanity checks */
	if (nmbufs < nmbclusters * 2)
		nmbufs = nmbclusters * 2;

	return;
}
SYSINIT(tunable_mbinit, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_mbinit, NULL);
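
/*
 * Illustrative usage, not part of the original file: both limits can be
 * overridden from the boot loader, e.g. in /boot/loader.conf (the values
 * below are examples only):
 *
 *	kern.ipc.nmbclusters="4096"
 *	kern.ipc.nmbufs="16384"
 *
 * tunable_mbinit() will still raise nmbufs to at least twice nmbclusters.
 */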

/* "number of clusters of pages" */
#define NCL_INIT	1

#define NMB_INIT	16

/* ARGSUSED*/
static void
mbinit(dummy)
	void *dummy;
{
	int s;

	mmbfree = NULL; mclfree = NULL;
	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;

	s = splimp();
	if (m_mballoc(NMB_INIT, MB_DONTWAIT) == 0)
		goto bad;
#if MCLBYTES <= PAGE_SIZE
	if (m_clalloc(NCL_INIT, MB_DONTWAIT) == 0)
		goto bad;
#else
	/* It's OK to call contigmalloc in this context. */
	if (m_clalloc(16, MB_WAIT) == 0)
		goto bad;
#endif
	splx(s);
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate at least nmb mbufs and place on mbuf free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_mballoc(nmb, how)
	int nmb;
	int how;
{
	caddr_t p;
	int i;
	int nbytes;

	/*
	 * If we've hit the mbuf limit, stop allocating from mb_map,
	 * (or trying to) in order to avoid dipping into the section of
	 * mb_map which we've "reserved" for clusters.
	 */
	if ((nmb + mbstat.m_mbufs) > nmbufs)
		return (0);

	/*
	 * Once we run out of map space, it will be impossible to get
	 * any more (nothing is ever freed back to the map) -- however,
	 * you are not dead, as m_reclaim() might still be able to free
	 * a substantial amount of space.
	 *
	 * XXX Furthermore, we can also work with "recycled" mbufs: when
	 * we're called with MB_WAIT, the sleeper will be woken up when
	 * an mbuf is freed.  See m_mballoc_wait().
	 */
	if (mb_map_full)
		return (0);

	nbytes = round_page(nmb * MSIZE);
	p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT);
	if (p == 0 && how == MB_WAIT) {
		mbstat.m_wait++;
		p = (caddr_t)kmem_malloc(mb_map, nbytes, M_WAITOK);
	}

	/*
	 * Either the map is now full, or `how' is MB_DONTWAIT and there
	 * are no pages left.
	 */
	if (p == NULL)
		return (0);

	nmb = nbytes / MSIZE;
	for (i = 0; i < nmb; i++) {
		((struct mbuf *)p)->m_next = mmbfree;
		mmbfree = (struct mbuf *)p;
		p += MSIZE;
	}
	mbstat.m_mbufs += nmb;
	mbtypes[MT_FREE] += nmb;
	return (1);
}

/*
 * Once the mb_map has been exhausted and if the call to the allocation macros
 * (or, in some cases, functions) is with MB_WAIT, then it is necessary to rely
 * solely on reclaimed mbufs.  Here we wait for an mbuf to be freed for a
 * designated (mbuf_wait) time.
 */
struct mbuf *
m_mballoc_wait(int caller, int type)
{
	struct mbuf *p;
	int s;

	s = splimp();
	m_mballoc_wid++;
	if ((tsleep(&m_mballoc_wid, 0, "mballc", mbuf_wait)) == EWOULDBLOCK)
		m_mballoc_wid--;
	splx(s);

	/*
	 * Now that we (think) we've got something, we will redo an
	 * MGET, but avoid getting into another instance of m_mballoc_wait().
	 * XXX: We retry to fetch _even_ if the sleep timed out.  This is left
	 * this way, purposely, in the [unlikely] case that an mbuf was
	 * freed but the sleep was not awakened in time.
	 */
	p = NULL;
	switch (caller) {
	case MGET_C:
		MGET(p, MB_DONTWAIT, type);
		break;
	case MGETHDR_C:
		MGETHDR(p, MB_DONTWAIT, type);
		break;
	default:
		panic("m_mballoc_wait: invalid caller (%d)", caller);
	}

	s = splimp();
	if (p != NULL) {		/* We waited and got something... */
		mbstat.m_wait++;
		/* Wake up another if we have more free. */
		if (mmbfree != NULL)
			MMBWAKEUP();
	}
	splx(s);
	return (p);
}

#if MCLBYTES > PAGE_SIZE
static int i_want_my_mcl;

static void
kproc_mclalloc(void)
{
	int status;

	while (1) {
		tsleep(&i_want_my_mcl, 0, "mclalloc", 0);

		for (; i_want_my_mcl; i_want_my_mcl--) {
			if (m_clalloc(1, MB_WAIT) == 0)
				printf("m_clalloc failed even in process context!\n");
		}
	}
}

static struct thread *mclallocthread;
static struct kproc_desc mclalloc_kp = {
	"mclalloc",
	kproc_mclalloc,
	&mclallocthread
};
SYSINIT(mclallocthread, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start,
	&mclalloc_kp);
#endif

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_clalloc(ncl, how)
	int ncl;
	int how;
{
	caddr_t p;
	int i;
	int npg;

	/*
	 * If we've hit the mcluster number limit, stop allocating from
	 * mb_map, (or trying to) in order to avoid dipping into the section
	 * of mb_map which we've "reserved" for mbufs.
	 */
	if ((ncl + mbstat.m_clusters) > nmbclusters)
		goto m_clalloc_fail;

	/*
	 * Once we run out of map space, it will be impossible
	 * to get any more (nothing is ever freed back to the
	 * map).  From this point on, we solely rely on freed
	 * mclusters.
	 */
	if (mb_map_full)
		goto m_clalloc_fail;

#if MCLBYTES > PAGE_SIZE
	if (how != MB_WAIT) {
		i_want_my_mcl += ncl;
		wakeup(&i_want_my_mcl);
		mbstat.m_wait++;
		p = 0;
	} else {
		p = contigmalloc_map(MCLBYTES * ncl, M_DEVBUF, M_WAITOK, 0ul,
		    ~0ul, PAGE_SIZE, 0, mb_map);
	}
#else
	npg = ncl;
	p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
	    how != MB_WAIT ? M_NOWAIT : M_WAITOK);
	ncl = ncl * PAGE_SIZE / MCLBYTES;
#endif
	/*
	 * Either the map is now full, or `how' is MB_DONTWAIT and there
	 * are no pages left.
	 */
	if (p == NULL) {
		static int last_report;	/* when we did that (in ticks) */
m_clalloc_fail:
		mbstat.m_drops++;
		if (ticks < last_report || (ticks - last_report) >= hz) {
			last_report = ticks;
			printf("All mbuf clusters exhausted, please see tuning(7).\n");
		}
		return (0);
	}

	for (i = 0; i < ncl; i++) {
		((union mcluster *)p)->mcl_next = mclfree;
		mclfree = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}

/*
 * Once the mb_map submap has been exhausted and the allocation is called with
 * MB_WAIT, we rely on the mclfree union pointers.  If nothing is free, we will
 * sleep for a designated amount of time (mbuf_wait) or until we're woken up
 * due to sudden mcluster availability.
 */
caddr_t
m_clalloc_wait(void)
{
	caddr_t p;
	int s;

	/* If in interrupt context, and INVARIANTS, maintain sanity and die. */
	KASSERT(mycpu->gd_intr_nesting_level == 0, ("CLALLOC: CANNOT WAIT IN INTERRUPT"));

	/* Sleep until something's available or until we expire. */
	m_clalloc_wid++;
	if ((tsleep(&m_clalloc_wid, 0, "mclalc", mbuf_wait)) == EWOULDBLOCK)
		m_clalloc_wid--;

	/*
	 * Now that we (think) we've got something, we will redo an
	 * m_mclalloc(), but avoid getting into another instance of
	 * m_clalloc_wait().
	 */
	p = m_mclalloc(MB_DONTWAIT);

	s = splimp();
	if (p != NULL) {	/* We waited and got something... */
		mbstat.m_wait++;
		/* Wake up another if we have more free. */
		if (mclfree != NULL)
			MCLWAKEUP();
	}

	splx(s);
	return (p);
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	struct mbuf *m;
	int ms;

	/*
	 * Must only do the reclaim if not in an interrupt context.
	 */
	if (i == MB_WAIT) {
		KASSERT(mycpu->gd_intr_nesting_level == 0,
		    ("MBALLOC: CANNOT WAIT IN INTERRUPT"));
		m_reclaim();
	}

	ms = splimp();
	if (mmbfree == NULL)
		(void)m_mballoc(1, i);
	m = mmbfree;
	if (m != NULL) {
		mmbfree = m->m_next;
		mbtypes[MT_FREE]--;
		m->m_type = t;
		mbtypes[t]++;
		m->m_next = NULL;
		m->m_nextpkt = NULL;
		m->m_data = m->m_dat;
		m->m_flags = 0;
		splx(ms);
		mbstat.m_wait++;
	} else {
		static int last_report;	/* when we did that (in ticks) */

		splx(ms);
		mbstat.m_drops++;
		if (ticks < last_report || (ticks - last_report) >= hz) {
			last_report = ticks;
			printf("All mbufs exhausted, please see tuning(7).\n");
		}
	}

	return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	struct mbuf *m;
	int ms;

	/*
	 * Must only do the reclaim if not in an interrupt context.
	 */
	if (i == MB_WAIT) {
		KASSERT(mycpu->gd_intr_nesting_level == 0,
		    ("MBALLOC: CANNOT WAIT IN INTERRUPT"));
		m_reclaim();
	}

	ms = splimp();
	if (mmbfree == NULL)
		(void)m_mballoc(1, i);
	m = mmbfree;
	if (m != NULL) {
		mmbfree = m->m_next;
		mbtypes[MT_FREE]--;
		m->m_type = t;
		mbtypes[t]++;
		m->m_next = NULL;
		m->m_nextpkt = NULL;
		m->m_data = m->m_pktdat;
		m->m_flags = M_PKTHDR;
		m->m_pkthdr.rcvif = NULL;
		SLIST_INIT(&m->m_pkthdr.tags);
		m->m_pkthdr.csum_flags = 0;
		splx(ms);
		mbstat.m_wait++;
	} else {
		static int last_report;	/* when we did that (in ticks) */

		splx(ms);
		mbstat.m_drops++;
		if (ticks < last_report || (ticks - last_report) >= hz) {
			last_report = ticks;
			printf("All mbufs exhausted, please see tuning(7).\n");
		}
	}

	return (m);
}

static void
m_reclaim()
{
	struct domain *dp;
	struct protosw *pr;
	int s = splimp();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	splx(s);
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(how, type)
	int how, type;
{
	struct mbuf *m;
	int ms;

	ms = splimp();
	if (mmbfree == NULL)
		(void)m_mballoc(1, how);
	m = mmbfree;
	if (m != NULL) {
		mmbfree = m->m_next;
		mbtypes[MT_FREE]--;
		m->m_type = type;
		mbtypes[type]++;
		m->m_next = NULL;
		m->m_nextpkt = NULL;
		m->m_data = m->m_dat;
		m->m_flags = 0;
		splx(ms);
	} else {
		splx(ms);
		m = m_retry(how, type);
		if (m == NULL && how == MB_WAIT)
			m = m_mballoc_wait(MGET_C, type);
	}
	return (m);
}
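
/*
 * Illustrative caller sketch (not from the original source); the macro
 * forms are preferred in critical paths, but a minimal function-form
 * allocation looks like:
 *
 *	struct mbuf *m;
 *
 *	m = m_get(MB_DONTWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 */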

struct mbuf *
m_gethdr(how, type)
	int how, type;
{
	struct mbuf *m;
	int ms;

	ms = splimp();
	if (mmbfree == NULL)
		(void)m_mballoc(1, how);
	m = mmbfree;
	if (m != NULL) {
		mmbfree = m->m_next;
		mbtypes[MT_FREE]--;
		m->m_type = type;
		mbtypes[type]++;
		m->m_next = NULL;
		m->m_nextpkt = NULL;
		m->m_data = m->m_pktdat;
		m->m_flags = M_PKTHDR;
		m->m_pkthdr.rcvif = NULL;
		SLIST_INIT(&m->m_pkthdr.tags);
		m->m_pkthdr.csum_flags = 0;
		splx(ms);
	} else {
		splx(ms);
		m = m_retryhdr(how, type);
		if (m == NULL && how == MB_WAIT)
			m = m_mballoc_wait(MGETHDR_C, type);
	}
	return (m);
}

struct mbuf *
m_getclr(how, type)
	int how, type;
{
	struct mbuf *m;

	MGET(m, how, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

/*
 * m_getcl() returns an mbuf with an attached cluster.
 * Because many network drivers use this kind of buffer a lot, it is
 * convenient to keep a small pool of free buffers of this kind.
 * Even a small size such as 10 gives about 10% improvement in the
 * forwarding rate in a bridge or router.
 * The size of this free list is controlled by the sysctl variable
 * mcl_pool_max.  The list is populated on m_freem(), and used in
 * m_getcl() if elements are available.
 */
static struct mbuf *mcl_pool;
static int mcl_pool_now;
static int mcl_pool_max = 0;

SYSCTL_INT(_kern_ipc, OID_AUTO, mcl_pool_max, CTLFLAG_RW, &mcl_pool_max, 0,
	"Maximum number of mbufs+cluster in free list");
SYSCTL_INT(_kern_ipc, OID_AUTO, mcl_pool_now, CTLFLAG_RD, &mcl_pool_now, 0,
	"Current number of mbufs+cluster in free list");

struct mbuf *
m_getcl(int how, short type, int flags)
{
	int s = splimp();
	struct mbuf *mp;

	if (flags & M_PKTHDR) {
		if (type == MT_DATA && mcl_pool) {
			mp = mcl_pool;
			mcl_pool = mp->m_nextpkt;
			mcl_pool_now--;
			splx(s);
			mp->m_nextpkt = NULL;
			mp->m_data = mp->m_ext.ext_buf;
			mp->m_flags = M_PKTHDR|M_EXT;
			mp->m_pkthdr.rcvif = NULL;
			mp->m_pkthdr.csum_flags = 0;
			return mp;
		} else
			MGETHDR(mp, how, type);
	} else
		MGET(mp, how, type);
	if (mp) {
		MCLGET(mp, how);
		if ((mp->m_flags & M_EXT) == 0) {
			m_free(mp);
			mp = NULL;
		}
	}
	splx(s);
	return mp;
}
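
/*
 * Illustrative sketch of a driver receive path using m_getcl()
 * (hypothetical caller, not part of this file):
 *
 *	struct mbuf *m;
 *
 *	m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_len = m->m_pkthdr.len = MCLBYTES;
 *	(hand m to the receive ring)
 */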

/*
 * struct mbuf *
 * m_getm(m, len, how, type)
 *
 * This will allocate len-worth of mbufs and/or mbuf clusters (whatever fits
 * best) and return a pointer to the top of the allocated chain.  If m is
 * non-null, then we assume that it is a single mbuf or an mbuf chain to
 * which we want len bytes worth of mbufs and/or clusters attached, and so
 * if we succeed in allocating it, we will just return a pointer to m.
 *
 * If we happen to fail at any point during the allocation, we will free
 * up everything we have already allocated and return NULL.
 */
struct mbuf *
m_getm(struct mbuf *m, int len, int how, int type)
{
	struct mbuf *top, *tail, *mp, *mtail = NULL;

	KASSERT(len >= 0, ("len is < 0 in m_getm"));

	MGET(mp, how, type);
	if (mp == NULL)
		return (NULL);
	else if (len > MINCLSIZE) {
		MCLGET(mp, how);
		if ((mp->m_flags & M_EXT) == 0) {
			m_free(mp);
			return (NULL);
		}
	}
	mp->m_len = 0;
	len -= M_TRAILINGSPACE(mp);

	if (m != NULL)
		for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next)
			;
	else
		m = mp;

	top = tail = mp;
	while (len > 0) {
		MGET(mp, how, type);
		if (mp == NULL)
			goto failed;

		tail->m_next = mp;
		tail = mp;
		if (len > MINCLSIZE) {
			MCLGET(mp, how);
			if ((mp->m_flags & M_EXT) == 0)
				goto failed;
		}

		mp->m_len = 0;
		len -= M_TRAILINGSPACE(mp);
	}

	if (mtail != NULL)
		mtail->m_next = top;
	return (m);

failed:
	m_freem(top);
	return (NULL);
}
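
/*
 * Illustrative use of m_getm() (hypothetical caller): extend an existing
 * chain m so that at least len more bytes of buffer space are available.
 * On failure the original chain is left untouched:
 *
 *	if (m_getm(m, len, MB_DONTWAIT, MT_DATA) == NULL)
 *		return (ENOBUFS);
 */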

/*
 * m_mclalloc() - Allocates an mbuf cluster.
 */
caddr_t
m_mclalloc(int how)
{
	caddr_t mp;
	int s;

	s = splimp();

	if (mclfree == NULL)
		m_clalloc(1, how);
	mp = (caddr_t)mclfree;
	if (mp != NULL) {
		KKASSERT((struct mbuf *)mp >= mbutl &&
			 (struct mbuf *)mp < mbute);
		mclrefcnt[mtocl(mp)]++;
		mbstat.m_clfree--;
		mclfree = ((union mcluster *)mp)->mcl_next;
		splx(s);
		return(mp);
	}
	splx(s);
	if (how == MB_WAIT)
		return(m_clalloc_wait());
	return(NULL);
}

/*
 * m_mclget() - Adds a cluster to a normal mbuf, M_EXT is set on success.
 */
void
m_mclget(struct mbuf *m, int how)
{
	m->m_ext.ext_buf = m_mclalloc(how);
	if (m->m_ext.ext_buf != NULL) {
		m->m_data = m->m_ext.ext_buf;
		m->m_flags |= M_EXT;
		m->m_ext.ext_free = NULL;
		m->m_ext.ext_ref = NULL;
		m->m_ext.ext_size = MCLBYTES;
	}
}
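
/*
 * Typical MCLGET()/m_mclget() usage (illustrative): attach a cluster and
 * test M_EXT afterwards, since failure is signalled only through the flag:
 *
 *	MGETHDR(m, MB_DONTWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	MCLGET(m, MB_DONTWAIT);
 *	if ((m->m_flags & M_EXT) == 0) {
 *		m_free(m);
 *		return (ENOBUFS);
 *	}
 */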

static __inline void
_m_mclfree(caddr_t data)
{
	union mcluster *mp = (union mcluster *)data;

	KASSERT(mclrefcnt[mtocl(mp)] > 0, ("freeing free cluster"));
	KKASSERT((struct mbuf *)mp >= mbutl &&
		 (struct mbuf *)mp < mbute);
	if (--mclrefcnt[mtocl(mp)] == 0) {
		mp->mcl_next = mclfree;
		mclfree = mp;
		mbstat.m_clfree++;
		MCLWAKEUP();
	}
}

void
m_mclfree(caddr_t mp)
{
	int s = splimp();
	_m_mclfree(mp);
	splx(s);
}

/*
 * m_free()
 *
 * Free a single mbuf and any associated external storage.  The successor,
 * if any, is returned.
 *
 * We do need to check the non-first mbuf for m_aux, since some existing
 * code does not call M_PREPEND properly.
 * (example: call to bpf_mtap from drivers)
 */
struct mbuf *
m_free(struct mbuf *m)
{
	int s;
	struct mbuf *n;

	s = splimp();
	KASSERT(m->m_type != MT_FREE, ("freeing free mbuf"));
	mbtypes[m->m_type]--;
	if ((m->m_flags & M_PKTHDR) != 0)
		m_tag_delete_chain(m, NULL);
	if (m->m_flags & M_EXT) {
		if (m->m_ext.ext_free != NULL) {
			m->m_ext.ext_free(m->m_ext.ext_buf, m->m_ext.ext_size);
		} else {
			_m_mclfree(m->m_ext.ext_buf);	/* inlined */
		}
	}
	n = m->m_next;
	m->m_type = MT_FREE;
	mbtypes[MT_FREE]++;
	m->m_next = mmbfree;
	mmbfree = m;
	MMBWAKEUP();
	splx(s);

	return (n);
}

void
m_freem(struct mbuf *m)
{
	int s = splimp();

	/*
	 * Try to keep a small pool of mbuf+cluster for quick use in
	 * device drivers.  A good candidate is a M_PKTHDR buffer with
	 * only one cluster attached.  Other mbufs, or those exceeding
	 * the pool size, are just m_free'd in the usual way.
	 * The following code makes sure that m_next, m_type,
	 * m_pkthdr.aux and m_ext.* are properly initialized.
	 * Other fields in the mbuf are initialized in m_getcl()
	 * upon allocation.
	 */
	if (mcl_pool_now < mcl_pool_max && m && m->m_next == NULL &&
	    (m->m_flags & (M_PKTHDR|M_EXT)) == (M_PKTHDR|M_EXT) &&
	    m->m_type == MT_DATA && M_EXT_WRITABLE(m)) {
		m_tag_delete_chain(m, NULL);
		m->m_nextpkt = mcl_pool;
		mcl_pool = m;
		mcl_pool_now++;
	} else {
		while (m)
			m = m_free(m);
	}
	splx(s);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, how)
	struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR)
		M_MOVE_PKTHDR(mn, m);
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
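
/*
 * Callers normally go through the M_PREPEND() macro, which falls back to
 * m_prepend() only when the chain has no leading space.  Illustrative
 * sketch (the IP header is just an example):
 *
 *	M_PREPEND(m, sizeof(struct ip), MB_DONTWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);
 */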

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of MB_WAIT/MB_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
#define MCFail (mbstat.m_mcfail)

struct mbuf *
m_copym(m, off0, len, wait)
	const struct mbuf *m;
	int off0, wait;
	int len;
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, wait))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			if (m->m_ext.ext_ref == NULL) {
				atomic_add_char(
				    &mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);
			} else {
				int s = splimp();

				(*m->m_ext.ext_ref)(m->m_ext.ext_buf,
				    m->m_ext.ext_size);
				splx(s);
			}
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}
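
/*
 * Illustrative use of m_copym(): take a reference copy of a whole packet,
 * e.g. before handing the original to a consumer that may free it:
 *
 *	n = m_copym(m, 0, M_COPYALL, MB_DONTWAIT);
 *
 * On failure n is NULL and the original chain is intact.  Because clusters
 * are shared, the copy must not be written to unless M_WRITABLE() says so.
 */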

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies also have the room available.
 */
struct mbuf *
m_copypacket(m, how)
	struct mbuf *m;
	int how;
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	if (!m_dup_pkthdr(n, m, how))
		goto nospace;
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		if (m->m_ext.ext_ref == NULL)
			atomic_add_char(&mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);
		else {
			int s = splimp();

			(*m->m_ext.ext_ref)(m->m_ext.ext_buf,
			    m->m_ext.ext_size);
			splx(s);
		}
		n->m_ext = m->m_ext;
		n->m_flags |= M_EXT;
	} else {
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (!o)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			if (m->m_ext.ext_ref == NULL) {
				atomic_add_char(
				    &mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);
			} else {
				int s = splimp();

				(*m->m_ext.ext_ref)(m->m_ext.ext_buf,
				    m->m_ext.ext_size);
				splx(s);
			}
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	MCFail++;
	return 0;
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(m, off, len, cp)
	const struct mbuf *m;
	int off;
	int len;
	caddr_t cp;
{
	unsigned count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
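
/*
 * Illustrative use of m_copydata(): pull a fixed-size header out of a chain
 * into local storage, regardless of how the chain is fragmented:
 *
 *	struct ip ip;
 *
 *	m_copydata(m, 0, sizeof(ip), (caddr_t)&ip);
 *
 * The chain must be known to hold at least that many bytes past the offset;
 * the KASSERTs above fire only with INVARIANTS.
 */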

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(m, how)
	struct mbuf *m;
	int how;
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	/* Sanity check */
	if (m == NULL)
		return (0);
	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __FUNCTION__));

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		MGET(n, how, m->m_type);
		if (n == NULL)
			goto nospace;
		if (top == NULL) {		/* first one, must be PKTHDR */
			if (!m_dup_pkthdr(n, m, how))
				goto nospace;
			nsize = MHLEN;
		} else				/* not the first one */
			nsize = MLEN;
		if (remain >= MINCLSIZE) {
			MCLGET(n, how);
			if ((n->m_flags & M_EXT) == 0) {
				(void)m_free(n);
				goto nospace;
			}
			nsize = MCLBYTES;
		}
		n->m_len = 0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __FUNCTION__));
	}
	return (top);

nospace:
	m_freem(top);
	MCFail++;
	return (0);
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(m, n)
	struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

void
m_adj(mp, req_len)
	struct mbuf *mp;
	int req_len;
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}
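
/*
 * m_adj() trims req_len bytes from the head of the chain when positive and
 * from the tail when negative.  Illustrative sketch (names are examples):
 *
 *	m_adj(m, sizeof(struct ether_header));	(strip the link header)
 *	m_adj(m, -ETHER_CRC_LEN);		(strip a trailing CRC)
 */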

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
#define MPFail (mbstat.m_mpfail)

struct mbuf *
m_pullup(n, len)
	struct mbuf *n;
	int len;
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, MB_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR)
			M_MOVE_PKTHDR(m, n);
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}
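
/*
 * The classic m_pullup() idiom (illustrative): make sure a header is
 * contiguous before casting with mtod().  On failure the chain has already
 * been freed:
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;
 *	ip = mtod(m, struct ip *);
 */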

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf.  Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(m0, len0, wait)
	struct mbuf *m0;
	int len0, wait;
{
	struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				return (0);
			} else {
				n->m_len = 0;
				return (n);
			}
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		if (m->m_ext.ext_ref == NULL)
			atomic_add_char(&mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);
		else {
			int s = splimp();

			(*m->m_ext.ext_ref)(m->m_ext.ext_buf,
			    m->m_ext.ext_size);
			splx(s);
		}
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}
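
/*
 * Illustrative use of m_split() (hypothetical caller): chop a record of
 * reclen bytes off the front of a chain, keeping the remainder:
 *
 *	rest = m_split(m, reclen, MB_DONTWAIT);
 *
 * On success m holds the first reclen bytes and rest the tail; on failure
 * rest is NULL and m is restored.  Both halves may share a cluster, so
 * check M_WRITABLE() before writing.
 */
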
/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy) (char *from, caddr_t to, u_int len);
{
	struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	int off = off0, len;
	char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		cp += off + 2 * sizeof(u_short);
		totlen -= 2 * sizeof(u_short);
	}
	MGETHDR(m, MB_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, MB_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, MB_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}
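
/*
 * Hypothetical driver sketch using m_devget(): copy a received frame of
 * totlen bytes out of board memory into a fresh chain.  sc_buf and sc_ifp
 * are placeholders for the driver's buffer and ifnet; a NULL copy function
 * selects plain bcopy():
 *
 *	m = m_devget(sc_buf, totlen, 0, sc_ifp, NULL);
 *	if (m == NULL)
 *		(drop the frame)
 */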

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(m0, off, len, cp)
	struct mbuf *m0;
	int off;
	int len;
	caddr_t cp;
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(MB_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(MB_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}

void
m_print(const struct mbuf *m)
{
	int len;
	const struct mbuf *m2;

	len = m->m_pkthdr.len;
	m2 = m;
	while (len) {
		printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
		len -= m2->m_len;
		m2 = m2->m_next;
	}
	return;
}

/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{
	KASSERT((to->m_flags & M_EXT) == 0, ("m_move_pkthdr: to has cluster"));

	to->m_flags = from->m_flags & M_COPYFLAGS;
	to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
	from->m_flags &= ~M_PKTHDR;
}

/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
{
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, how));
}

/*
 * Defragment a mbuf chain, returning the shortest possible
 * chain of mbufs and clusters.  If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed in chain will be unchanged.  Upon success,
 * the original chain will be freed, and the new chain
 * will be returned.
 *
 * If a non-packet header is passed in, the original
 * mbuf (chain?) will be returned unharmed.
 */
struct mbuf *
m_defrag(struct mbuf *m0, int how)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, length;

	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

#ifdef MBUF_STRESS_TEST
	if (m_defragrandomfailures) {
		int temp = arc4random() & 0xff;
		if (temp == 0xba)
			goto nospace;
	}
#endif

	if (m0->m_pkthdr.len > MHLEN)
		m_final = m_getcl(how, MT_DATA, M_PKTHDR);
	else
		m_final = m_gethdr(how, MT_DATA);

	if (m_final == NULL)
		goto nospace;

	if (m_dup_pkthdr(m_final, m0, how) == 0)
		goto nospace;

	m_new = m_final;

	while (progress < m0->m_pkthdr.len) {
		length = m0->m_pkthdr.len - progress;
		if (length > MCLBYTES)
			length = MCLBYTES;

		if (m_new == NULL) {
			if (length > MLEN)
				m_new = m_getcl(how, MT_DATA, 0);
			else
				m_new = m_get(how, MT_DATA);
			if (m_new == NULL)
				goto nospace;
		}

		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
		progress += length;
		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
	if (m0->m_next == NULL)
		m_defraguseless++;
	m_freem(m0);
	m0 = m_final;
	m_defragpackets++;
	m_defragbytes += m0->m_pkthdr.len;
	return (m0);
nospace:
	m_defragfailure++;
	if (m_new)
		m_free(m_new);
	if (m_final)
		m_freem(m_final);
	return (NULL);
}
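
/*
 * Illustrative m_defrag() caller: a transmit path that ran out of DMA
 * segments can compact the chain and retry (sketch only; the condition is
 * a placeholder):
 *
 *	if (too_many_segments) {
 *		n = m_defrag(m, MB_DONTWAIT);
 *		if (n == NULL)
 *			return (ENOBUFS);	(m is unchanged)
 *		m = n;
 *	}
 */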

/*
 * Move data from uio into mbufs.
 * A length of zero means copy the whole uio.
 */
struct mbuf *
m_uiomove(struct uio *uio, int wait, int len0)
{
	struct mbuf *head;		/* result mbuf chain */
	struct mbuf *m;			/* current working mbuf */
	struct mbuf **mp;
	int resid, datalen, error;

	resid = (len0 == 0) ? uio->uio_resid : min(len0, uio->uio_resid);

	head = NULL;
	mp = &head;
	do {
		if (resid > MHLEN) {
			m = m_getcl(wait, MT_DATA, head == NULL ? M_PKTHDR : 0);
			if (m == NULL)
				goto failed;
			if (m->m_flags & M_PKTHDR)
				m->m_pkthdr.len = 0;
		} else {
			if (head == NULL) {
				MGETHDR(m, wait, MT_DATA);
				if (m == NULL)
					goto failed;
				m->m_pkthdr.len = 0;
				/* Leave room for protocol headers. */
				if (resid < MHLEN)
					MH_ALIGN(m, resid);
			} else {
				MGET(m, wait, MT_DATA);
				if (m == NULL)
					goto failed;
			}
		}
		datalen = min(MCLBYTES, resid);
		error = uiomove(mtod(m, caddr_t), datalen, uio);
		if (error) {
			m_free(m);
			goto failed;
		}
		m->m_len = datalen;
		*mp = m;
		mp = &m->m_next;
		head->m_pkthdr.len += datalen;
		resid -= datalen;
	} while (resid > 0);

	return (head);

failed:
	if (head)
		m_freem(head);
	return (NULL);
}