Remove an unnecessary cli that was causing 'trap 12 with interrupts disabled'
[dragonfly.git] / sys / kern / uipc_mbuf.c
1/*
2 * Copyright (c) 1982, 1986, 1988, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94
34 * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
35 * $DragonFly: src/sys/kern/uipc_mbuf.c,v 1.5 2003/07/10 04:47:54 dillon Exp $
36 */
37
38#include "opt_param.h"
39#include "opt_mbuf_stress_test.h"
40#include <sys/param.h>
41#include <sys/systm.h>
42#include <sys/malloc.h>
43#include <sys/mbuf.h>
44#include <sys/kernel.h>
45#include <sys/sysctl.h>
46#include <sys/domain.h>
47#include <sys/protosw.h>
48#include <sys/thread.h>
49#include <sys/globaldata.h>
50
51#include <vm/vm.h>
52#include <vm/vm_kern.h>
53#include <vm/vm_extern.h>
54
55#ifdef INVARIANTS
56#include <machine/cpu.h>
57#endif
58
59static void mbinit __P((void *));
60SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)
61
62struct mbuf *mbutl;
63char *mclrefcnt;
64struct mbstat mbstat;
65u_long mbtypes[MT_NTYPES];
66struct mbuf *mmbfree;
67union mcluster *mclfree;
68int max_linkhdr;
69int max_protohdr;
70int max_hdr;
71int max_datalen;
72int m_defragpackets;
73int m_defragbytes;
74int m_defraguseless;
75int m_defragfailure;
76#ifdef MBUF_STRESS_TEST
77int m_defragrandomfailures;
78#endif
79
80int nmbclusters;
81int nmbufs;
82u_int m_mballoc_wid = 0;
83u_int m_clalloc_wid = 0;
84
85SYSCTL_DECL(_kern_ipc);
86SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
87 &max_linkhdr, 0, "");
88SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
89 &max_protohdr, 0, "");
90SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
91SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
92 &max_datalen, 0, "");
93SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
94 &mbuf_wait, 0, "");
95SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RW, &mbstat, mbstat, "");
96SYSCTL_OPAQUE(_kern_ipc, OID_AUTO, mbtypes, CTLFLAG_RD, mbtypes,
97 sizeof(mbtypes), "LU", "");
98SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
99 &nmbclusters, 0, "Maximum number of mbuf clusters available");
100SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
101 "Maximum number of mbufs available");
102SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
103 &m_defragpackets, 0, "");
104SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
105 &m_defragbytes, 0, "");
106SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
107 &m_defraguseless, 0, "");
108SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
109 &m_defragfailure, 0, "");
110#ifdef MBUF_STRESS_TEST
111SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
112 &m_defragrandomfailures, 0, "");
113#endif
114
115static void m_reclaim __P((void));
116
117#ifndef NMBCLUSTERS
118#define NMBCLUSTERS (512 + maxusers * 16)
119#endif
120#ifndef NMBUFS
121#define NMBUFS (nmbclusters * 4)
122#endif
123
124/*
125 * Perform sanity checks of tunables declared above.
126 */
127static void
128tunable_mbinit(void *dummy)
129{
130
131 /*
132 * This has to be done before VM init.
133 */
134 nmbclusters = NMBCLUSTERS;
135 TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
136 nmbufs = NMBUFS;
137 TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
138 /* Sanity checks */
139 if (nmbufs < nmbclusters * 2)
140 nmbufs = nmbclusters * 2;
141
142 return;
143}
144SYSINIT(tunable_mbinit, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_mbinit, NULL);
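/*
 * Illustrative note (not part of the original file): the limits above can
 * be raised at boot time through the loader tunables fetched in
 * tunable_mbinit(), e.g. in /boot/loader.conf (the values shown are
 * hypothetical):
 *
 *	kern.ipc.nmbclusters="65536"
 *	kern.ipc.nmbufs="131072"
 *
 * tunable_mbinit() only enforces nmbufs >= 2 * nmbclusters.
 */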
145
146/* "number of clusters of pages" */
147#define NCL_INIT 1
148
149#define NMB_INIT 16
150
151/* ARGSUSED*/
152static void
153mbinit(dummy)
154 void *dummy;
155{
156 int s;
157
158 mmbfree = NULL; mclfree = NULL;
159 mbstat.m_msize = MSIZE;
160 mbstat.m_mclbytes = MCLBYTES;
161 mbstat.m_minclsize = MINCLSIZE;
162 mbstat.m_mlen = MLEN;
163 mbstat.m_mhlen = MHLEN;
164
165 s = splimp();
166 if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
167 goto bad;
168#if MCLBYTES <= PAGE_SIZE
169 if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
170 goto bad;
171#else
172 /* It's OK to call contigmalloc in this context. */
173 if (m_clalloc(16, M_WAIT) == 0)
174 goto bad;
175#endif
176 splx(s);
177 return;
178bad:
179 panic("mbinit");
180}
181
182/*
183 * Allocate at least nmb mbufs and place on mbuf free list.
184 * Must be called at splimp.
185 */
186/* ARGSUSED */
187int
188m_mballoc(nmb, how)
189 register int nmb;
190 int how;
191{
192 register caddr_t p;
193 register int i;
194 int nbytes;
195
196 /*
197 * If we've hit the mbuf limit, stop allocating from mb_map,
198 * (or trying to) in order to avoid dipping into the section of
199 * mb_map which we've "reserved" for clusters.
200 */
201 if ((nmb + mbstat.m_mbufs) > nmbufs)
202 return (0);
203
204 /*
205 * Once we run out of map space, it will be impossible to get
206 * any more (nothing is ever freed back to the map)
207 * -- however you are not dead as m_reclaim might
208 * still be able to free a substantial amount of space.
209 *
210 * XXX Furthermore, we can also work with "recycled" mbufs (when
211 * we're calling with M_WAIT the sleep procedure will be woken
212 * up when an mbuf is freed. See m_mballoc_wait()).
213 */
214 if (mb_map_full)
215 return (0);
216
217 nbytes = round_page(nmb * MSIZE);
218 p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT);
219 if (p == 0 && how == M_WAIT) {
220 mbstat.m_wait++;
221 p = (caddr_t)kmem_malloc(mb_map, nbytes, M_WAITOK);
222 }
223
224 /*
225 * Either the map is now full, or `how' is M_NOWAIT and there
226 * are no pages left.
227 */
228 if (p == NULL)
229 return (0);
230
231 nmb = nbytes / MSIZE;
232 for (i = 0; i < nmb; i++) {
233 ((struct mbuf *)p)->m_next = mmbfree;
234 mmbfree = (struct mbuf *)p;
235 p += MSIZE;
236 }
237 mbstat.m_mbufs += nmb;
238 mbtypes[MT_FREE] += nmb;
239 return (1);
240}
241
242/*
243 * Once the mb_map has been exhausted and if the call to the allocation macros
244 * (or, in some cases, functions) is with M_WAIT, then it is necessary to rely
245 * solely on reclaimed mbufs. Here we wait for an mbuf to be freed for a
246 * designated (mbuf_wait) time.
247 */
248struct mbuf *
249m_mballoc_wait(int caller, int type)
250{
251 struct mbuf *p;
252 int s;
253
254 s = splimp();
255 m_mballoc_wid++;
256 if ((tsleep(&m_mballoc_wid, PVM, "mballc", mbuf_wait)) == EWOULDBLOCK)
257 m_mballoc_wid--;
258 splx(s);
259
260 /*
261	 * Now that we (think) we've got something, we will redo an
262	 * MGET, but avoid getting into another instance of m_mballoc_wait().
263	 * XXX: We retry the fetch _even_ if the sleep timed out. This is done
264	 * purposely, in the [unlikely] case that an mbuf was freed but the
265	 * sleep was not awakened in time.
266 */
267 p = NULL;
268 switch (caller) {
269 case MGET_C:
270 MGET(p, M_DONTWAIT, type);
271 break;
272 case MGETHDR_C:
273 MGETHDR(p, M_DONTWAIT, type);
274 break;
275 default:
276 panic("m_mballoc_wait: invalid caller (%d)", caller);
277 }
278
279 s = splimp();
280 if (p != NULL) { /* We waited and got something... */
281 mbstat.m_wait++;
282 /* Wake up another if we have more free. */
283 if (mmbfree != NULL)
284 MMBWAKEUP();
285 }
286 splx(s);
287 return (p);
288}
289
290#if MCLBYTES > PAGE_SIZE
291static int i_want_my_mcl;
292
293static void
294kproc_mclalloc(void)
295{
296 int status;
297
298 while (1) {
299 tsleep(&i_want_my_mcl, PVM, "mclalloc", 0);
300
301 for (; i_want_my_mcl; i_want_my_mcl--) {
302 if (m_clalloc(1, M_WAIT) == 0)
303 printf("m_clalloc failed even in process context!\n");
304 }
305 }
306}
307
308static struct thread *mclallocthread;
309static struct kproc_desc mclalloc_kp = {
310 "mclalloc",
311 kproc_mclalloc,
312	&mclallocthread
313};
314SYSINIT(mclallocthread, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start,
315 &mclalloc_kp);
316#endif
317
318/*
319 * Allocate some number of mbuf clusters
320 * and place on cluster free list.
321 * Must be called at splimp.
322 */
323/* ARGSUSED */
324int
325m_clalloc(ncl, how)
326 register int ncl;
327 int how;
328{
329 register caddr_t p;
330 register int i;
331 int npg;
332
333 /*
334 * If we've hit the mcluster number limit, stop allocating from
335 * mb_map, (or trying to) in order to avoid dipping into the section
336 * of mb_map which we've "reserved" for mbufs.
337 */
338 if ((ncl + mbstat.m_clusters) > nmbclusters)
339 goto m_clalloc_fail;
340
341 /*
342 * Once we run out of map space, it will be impossible
343 * to get any more (nothing is ever freed back to the
344 * map). From this point on, we solely rely on freed
345 * mclusters.
346 */
347 if (mb_map_full)
348 goto m_clalloc_fail;
349
350#if MCLBYTES > PAGE_SIZE
351 if (how != M_WAIT) {
352 i_want_my_mcl += ncl;
353 wakeup(&i_want_my_mcl);
354 mbstat.m_wait++;
355 p = 0;
356 } else {
357 p = contigmalloc1(MCLBYTES * ncl, M_DEVBUF, M_WAITOK, 0ul,
358 ~0ul, PAGE_SIZE, 0, mb_map);
359 }
360#else
361 npg = ncl;
362 p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
363 how != M_WAIT ? M_NOWAIT : M_WAITOK);
364 ncl = ncl * PAGE_SIZE / MCLBYTES;
365#endif
366 /*
367 * Either the map is now full, or `how' is M_NOWAIT and there
368 * are no pages left.
369 */
370 if (p == NULL) {
371 static int last_report ; /* when we did that (in ticks) */
372m_clalloc_fail:
373 mbstat.m_drops++;
374 if (ticks < last_report || (ticks - last_report) >= hz) {
375 last_report = ticks;
376 printf("All mbuf clusters exhausted, please see tuning(7).\n");
377 }
378 return (0);
379 }
380
381 for (i = 0; i < ncl; i++) {
382 ((union mcluster *)p)->mcl_next = mclfree;
383 mclfree = (union mcluster *)p;
384 p += MCLBYTES;
385 mbstat.m_clfree++;
386 }
387 mbstat.m_clusters += ncl;
388 return (1);
389}
390
391/*
392 * Once the mb_map submap has been exhausted and the allocation is called with
393 * M_WAIT, we rely on the mclfree union pointers. If nothing is free, we will
394 * sleep for a designated amount of time (mbuf_wait) or until we're woken up
395 * due to sudden mcluster availability.
396 */
397caddr_t
398m_clalloc_wait(void)
399{
400 caddr_t p;
401 int s;
402
403#ifdef __i386__
404 /* If in interrupt context, and INVARIANTS, maintain sanity and die. */
405	KASSERT(mycpu->gd_intr_nesting_level == 0, ("CLALLOC: CANNOT WAIT IN INTERRUPT"));
406#endif
407
408 /* Sleep until something's available or until we expire. */
409 m_clalloc_wid++;
410 if ((tsleep(&m_clalloc_wid, PVM, "mclalc", mbuf_wait)) == EWOULDBLOCK)
411 m_clalloc_wid--;
412
413 /*
414	 * Now that we (think) we've got something, we will redo an
415	 * MCLALLOC, but avoid getting into another instance of m_clalloc_wait().
416 */
417 p = NULL;
418 MCLALLOC(p, M_DONTWAIT);
419
420 s = splimp();
421 if (p != NULL) { /* We waited and got something... */
422 mbstat.m_wait++;
423 /* Wake up another if we have more free. */
424 if (mclfree != NULL)
425 MCLWAKEUP();
426 }
427
428 splx(s);
429 return (p);
430}
431
432/*
433 * When MGET fails, ask protocols to free space when short of memory,
434 * then re-attempt to allocate an mbuf.
435 */
436struct mbuf *
437m_retry(i, t)
438 int i, t;
439{
440 register struct mbuf *m;
441
442 /*
443 * Must only do the reclaim if not in an interrupt context.
444 */
445 if (i == M_WAIT) {
446#ifdef __i386__
447	KASSERT(mycpu->gd_intr_nesting_level == 0,
448 ("MBALLOC: CANNOT WAIT IN INTERRUPT"));
449#endif
450 m_reclaim();
451 }
452
453 /*
454	 * Both m_mballoc_wait and m_retry must be nulled out because
455	 * when the MGET macro is run from here, we definitely do _not_
456 * want to enter an instance of m_mballoc_wait() or m_retry() (again!)
457 */
458#define m_mballoc_wait(caller,type) (struct mbuf *)0
459#define m_retry(i, t) (struct mbuf *)0
460 MGET(m, i, t);
461#undef m_retry
462#undef m_mballoc_wait
463
464 if (m != NULL)
465 mbstat.m_wait++;
466 else {
467 static int last_report ; /* when we did that (in ticks) */
468 mbstat.m_drops++;
469 if (ticks < last_report || (ticks - last_report) >= hz) {
470 last_report = ticks;
471 printf("All mbufs exhausted, please see tuning(7).\n");
472 }
473 }
474
475 return (m);
476}
477
478/*
479 * As above; retry an MGETHDR.
480 */
481struct mbuf *
482m_retryhdr(i, t)
483 int i, t;
484{
485 register struct mbuf *m;
486
487 /*
488 * Must only do the reclaim if not in an interrupt context.
489 */
490 if (i == M_WAIT) {
491#ifdef __i386__
492	KASSERT(mycpu->gd_intr_nesting_level == 0,
493 ("MBALLOC: CANNOT WAIT IN INTERRUPT"));
494#endif
495 m_reclaim();
496 }
497
498#define m_mballoc_wait(caller,type) (struct mbuf *)0
499#define m_retryhdr(i, t) (struct mbuf *)0
500 MGETHDR(m, i, t);
501#undef m_retryhdr
502#undef m_mballoc_wait
503
504 if (m != NULL)
505 mbstat.m_wait++;
506 else {
507 static int last_report ; /* when we did that (in ticks) */
508 mbstat.m_drops++;
509 if (ticks < last_report || (ticks - last_report) >= hz) {
510 last_report = ticks;
511 printf("All mbufs exhausted, please see tuning(7).\n");
512 }
513 }
514
515 return (m);
516}
517
518static void
519m_reclaim()
520{
521 register struct domain *dp;
522 register struct protosw *pr;
523 int s = splimp();
524
525 for (dp = domains; dp; dp = dp->dom_next)
526 for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
527 if (pr->pr_drain)
528 (*pr->pr_drain)();
529 splx(s);
530 mbstat.m_drain++;
531}
532
533/*
534 * Space allocation routines.
535 * These are also available as macros
536 * for critical paths.
537 */
538struct mbuf *
539m_get(how, type)
540 int how, type;
541{
542 register struct mbuf *m;
543
544 MGET(m, how, type);
545 return (m);
546}
547
548struct mbuf *
549m_gethdr(how, type)
550 int how, type;
551{
552 register struct mbuf *m;
553
554 MGETHDR(m, how, type);
555 return (m);
556}
557
558struct mbuf *
559m_getclr(how, type)
560 int how, type;
561{
562 register struct mbuf *m;
563
564 MGET(m, how, type);
565 if (m == 0)
566 return (0);
567 bzero(mtod(m, caddr_t), MLEN);
568 return (m);
569}
570
571/*
572 * m_getcl() returns an mbuf with an attached cluster.
573 * Because many network drivers use this kind of buffer a lot, it is
574 * convenient to keep a small pool of free buffers of this kind.
575 * Even a small size such as 10 gives about 10% improvement in the
576 * forwarding rate in a bridge or router.
577 * The size of this free list is controlled by the sysctl variable
578 * mcl_pool_max. The list is populated on m_freem(), and used in
579 * m_getcl() if elements are available.
580 */
581static struct mbuf *mcl_pool;
582static int mcl_pool_now;
583static int mcl_pool_max = 0;
584
585SYSCTL_INT(_kern_ipc, OID_AUTO, mcl_pool_max, CTLFLAG_RW, &mcl_pool_max, 0,
586 "Maximum number of mbufs+cluster in free list");
587SYSCTL_INT(_kern_ipc, OID_AUTO, mcl_pool_now, CTLFLAG_RD, &mcl_pool_now, 0,
588 "Current number of mbufs+cluster in free list");
589
590struct mbuf *
591m_getcl(int how, short type, int flags)
592{
593 int s = splimp();
594 struct mbuf *mp;
595
596 if (flags & M_PKTHDR) {
597 if (type == MT_DATA && mcl_pool) {
598 mp = mcl_pool;
599 mcl_pool = mp->m_nextpkt;
600 mcl_pool_now--;
601 splx(s);
602 mp->m_nextpkt = NULL;
603 mp->m_data = mp->m_ext.ext_buf;
604 mp->m_flags = M_PKTHDR|M_EXT;
605 mp->m_pkthdr.rcvif = NULL;
606 mp->m_pkthdr.csum_flags = 0;
607 return mp;
608 } else
609 MGETHDR(mp, how, type);
610 } else
611 MGET(mp, how, type);
612 if (mp) {
613 MCLGET(mp, how);
614 if ( (mp->m_flags & M_EXT) == 0) {
615 m_free(mp);
616 mp = NULL;
617 }
618 }
619 splx(s);
620 return mp;
621}
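/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * receive path grabbing a packet-header mbuf with a cluster attached in a
 * single call.  Freeing such a buffer with m_freem() later refills the
 * mcl_pool free list maintained above.
 */
static struct mbuf *
example_rxbuf_alloc(void)
{
	struct mbuf *m;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (NULL);			/* mbufs or clusters exhausted */
	m->m_len = m->m_pkthdr.len = MCLBYTES;	/* full cluster available */
	return (m);
}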
622
623/*
624 * struct mbuf *
625 * m_getm(m, len, how, type)
626 *
627 * This will allocate len-worth of mbufs and/or mbuf clusters (whatever fits
628 * best) and return a pointer to the top of the allocated chain. If m is
629 * non-null, then we assume that it is a single mbuf or an mbuf chain to
630 * which we want len bytes worth of mbufs and/or clusters attached, and so
631 * if we succeed in allocating it, we will just return a pointer to m.
632 *
633 * If we happen to fail at any point during the allocation, we will free
634 * up everything we have already allocated and return NULL.
635 *
636 */
637struct mbuf *
638m_getm(struct mbuf *m, int len, int how, int type)
639{
640 struct mbuf *top, *tail, *mp, *mtail = NULL;
641
642 KASSERT(len >= 0, ("len is < 0 in m_getm"));
643
644 MGET(mp, how, type);
645 if (mp == NULL)
646 return (NULL);
647 else if (len > MINCLSIZE) {
648 MCLGET(mp, how);
649 if ((mp->m_flags & M_EXT) == 0) {
650 m_free(mp);
651 return (NULL);
652 }
653 }
654 mp->m_len = 0;
655 len -= M_TRAILINGSPACE(mp);
656
657 if (m != NULL)
658 for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next);
659 else
660 m = mp;
661
662 top = tail = mp;
663 while (len > 0) {
664 MGET(mp, how, type);
665 if (mp == NULL)
666 goto failed;
667
668 tail->m_next = mp;
669 tail = mp;
670 if (len > MINCLSIZE) {
671 MCLGET(mp, how);
672 if ((mp->m_flags & M_EXT) == 0)
673 goto failed;
674 }
675
676 mp->m_len = 0;
677 len -= M_TRAILINGSPACE(mp);
678 }
679
680 if (mtail != NULL)
681 mtail->m_next = top;
682 return (m);
683
684failed:
685 m_freem(top);
686 return (NULL);
687}
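/*
 * Illustrative sketch (not part of the original file): appending "len"
 * bytes worth of buffer space to an existing chain with m_getm().  The
 * helper name is hypothetical.
 */
static struct mbuf *
example_chain_extend(struct mbuf *m, int len)
{
	/*
	 * m_getm() returns the head of the resulting chain (m itself when
	 * m != NULL); on failure it frees only what it allocated, leaving
	 * the caller's chain intact.
	 */
	return (m_getm(m, len, M_DONTWAIT, MT_DATA));
}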
688
689/*
690 * MFREE(struct mbuf *m, struct mbuf *n)
691 * Free a single mbuf and associated external storage.
692 * Place the successor, if any, in n.
693 *
694 * We do need to check non-first mbufs for m_aux, since some existing
695 * code does not call M_PREPEND properly.
696 * (example: call to bpf_mtap from drivers)
697 */
698#define MFREE(m, n) MBUFLOCK( \
699 struct mbuf *_mm = (m); \
700 \
701 KASSERT(_mm->m_type != MT_FREE, ("freeing free mbuf")); \
702 mbtypes[_mm->m_type]--; \
703 if ((_mm->m_flags & M_PKTHDR) != 0) \
704 m_tag_delete_chain(_mm, NULL); \
705 if (_mm->m_flags & M_EXT) \
706 MEXTFREE1(m); \
707 (n) = _mm->m_next; \
708 _mm->m_type = MT_FREE; \
709 mbtypes[MT_FREE]++; \
710 _mm->m_next = mmbfree; \
711 mmbfree = _mm; \
712 MMBWAKEUP(); \
713)
714
715struct mbuf *
716m_free(m)
717 struct mbuf *m;
718{
719 register struct mbuf *n;
720
721 MFREE(m, n);
722 return (n);
723}
724
725void
726m_freem(m)
727 struct mbuf *m;
728{
729 int s = splimp();
730
731 /*
732 * Try to keep a small pool of mbuf+cluster for quick use in
733 * device drivers. A good candidate is a M_PKTHDR buffer with
734 * only one cluster attached. Other mbufs, or those exceeding
735 * the pool size, are just m_free'd in the usual way.
736 * The following code makes sure that m_next, m_type,
737 * m_pkthdr.aux and m_ext.* are properly initialized.
738 * Other fields in the mbuf are initialized in m_getcl()
739 * upon allocation.
740 */
741 if (mcl_pool_now < mcl_pool_max && m && m->m_next == NULL &&
742 (m->m_flags & (M_PKTHDR|M_EXT)) == (M_PKTHDR|M_EXT) &&
743 m->m_type == MT_DATA && M_EXT_WRITABLE(m) ) {
744 m_tag_delete_chain(m, NULL);
745 m->m_nextpkt = mcl_pool;
746 mcl_pool = m;
747 mcl_pool_now++;
748 } else {
749 while (m)
750 m = m_free(m);
751 }
752 splx(s);
753}
754
755/*
756 * Mbuffer utility routines.
757 */
758
759/*
760 * Lesser-used path for M_PREPEND:
761 * allocate new mbuf to prepend to chain,
762 * copy junk along.
763 */
764struct mbuf *
765m_prepend(m, len, how)
766 register struct mbuf *m;
767 int len, how;
768{
769 struct mbuf *mn;
770
771 MGET(mn, how, m->m_type);
772 if (mn == (struct mbuf *)NULL) {
773 m_freem(m);
774 return ((struct mbuf *)NULL);
775 }
776 if (m->m_flags & M_PKTHDR)
777 M_MOVE_PKTHDR(mn, m);
778 mn->m_next = m;
779 m = mn;
780 if (len < MHLEN)
781 MH_ALIGN(m, len);
782 m->m_len = len;
783 return (m);
784}
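/*
 * Illustrative sketch (not part of the original file): callers normally go
 * through the M_PREPEND() macro, which uses leading space in the first mbuf
 * when possible and falls back to m_prepend() above otherwise.  "hdrlen" is
 * a hypothetical header size and must fit within a single mbuf.
 */
static struct mbuf *
example_prepend_header(struct mbuf *m, int hdrlen)
{
	M_PREPEND(m, hdrlen, M_DONTWAIT);
	if (m == NULL)
		return (NULL);		/* the chain was freed on failure */
	/* mtod(m, caddr_t) now points at hdrlen bytes of fresh header space */
	return (m);
}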
785
786/*
787 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
788 * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
789 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
790 * Note that the copy is read-only, because clusters are not copied,
791 * only their reference counts are incremented.
792 */
793#define MCFail (mbstat.m_mcfail)
794
795struct mbuf *
796m_copym(m, off0, len, wait)
797 register struct mbuf *m;
798 int off0, wait;
799 register int len;
800{
801 register struct mbuf *n, **np;
802 register int off = off0;
803 struct mbuf *top;
804 int copyhdr = 0;
805
806 KASSERT(off >= 0, ("m_copym, negative off %d", off));
807 KASSERT(len >= 0, ("m_copym, negative len %d", len));
808 if (off == 0 && m->m_flags & M_PKTHDR)
809 copyhdr = 1;
810 while (off > 0) {
811 KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
812 if (off < m->m_len)
813 break;
814 off -= m->m_len;
815 m = m->m_next;
816 }
817 np = &top;
818 top = 0;
819 while (len > 0) {
820 if (m == 0) {
821 KASSERT(len == M_COPYALL,
822 ("m_copym, length > size of mbuf chain"));
823 break;
824 }
825 MGET(n, wait, m->m_type);
826 *np = n;
827 if (n == 0)
828 goto nospace;
829 if (copyhdr) {
830 if (!m_dup_pkthdr(n, m, wait))
831 goto nospace;
832 if (len == M_COPYALL)
833 n->m_pkthdr.len -= off0;
834 else
835 n->m_pkthdr.len = len;
836 copyhdr = 0;
837 }
838 n->m_len = min(len, m->m_len - off);
839 if (m->m_flags & M_EXT) {
840 n->m_data = m->m_data + off;
841 if (m->m_ext.ext_ref == NULL) {
842 atomic_add_char(
843 &mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);
844 } else {
845 int s = splimp();
846
847 (*m->m_ext.ext_ref)(m->m_ext.ext_buf,
848 m->m_ext.ext_size);
849 splx(s);
850 }
851 n->m_ext = m->m_ext;
852 n->m_flags |= M_EXT;
853 } else
854 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
855 (unsigned)n->m_len);
856 if (len != M_COPYALL)
857 len -= n->m_len;
858 off = 0;
859 m = m->m_next;
860 np = &n->m_next;
861 }
862 if (top == 0)
863 MCFail++;
864 return (top);
865nospace:
866 m_freem(top);
867 MCFail++;
868 return (0);
869}
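/*
 * Illustrative sketch (not part of the original file): taking a read-only,
 * reference-counted copy of an entire packet, e.g. before handing one copy
 * to a tap.  For the whole-packet case, m_copypacket() below is the
 * optimized equivalent.
 */
static struct mbuf *
example_readonly_copy(struct mbuf *m)
{
	/* Clusters are shared, so neither copy may be written afterwards. */
	return (m_copym(m, 0, M_COPYALL, M_DONTWAIT));
}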
870
871/*
872 * Copy an entire packet, including header (which must be present).
873 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
874 * Note that the copy is read-only, because clusters are not copied,
875 * only their reference counts are incremented.
876 * Preserve alignment of the first mbuf so if the creator has left
877 * some room at the beginning (e.g. for inserting protocol headers)
878 * the copies also have the room available.
879 */
880struct mbuf *
881m_copypacket(m, how)
882 struct mbuf *m;
883 int how;
884{
885 struct mbuf *top, *n, *o;
886
887 MGET(n, how, m->m_type);
888 top = n;
889 if (!n)
890 goto nospace;
891
892 if (!m_dup_pkthdr(n, m, how))
893 goto nospace;
894 n->m_len = m->m_len;
895 if (m->m_flags & M_EXT) {
896 n->m_data = m->m_data;
897 if (m->m_ext.ext_ref == NULL)
898 atomic_add_char(&mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);
899 else {
900 int s = splimp();
901
902 (*m->m_ext.ext_ref)(m->m_ext.ext_buf,
903 m->m_ext.ext_size);
904 splx(s);
905 }
906 n->m_ext = m->m_ext;
907 n->m_flags |= M_EXT;
908 } else {
909 n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat );
910 bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
911 }
912
913 m = m->m_next;
914 while (m) {
915 MGET(o, how, m->m_type);
916 if (!o)
917 goto nospace;
918
919 n->m_next = o;
920 n = n->m_next;
921
922 n->m_len = m->m_len;
923 if (m->m_flags & M_EXT) {
924 n->m_data = m->m_data;
925 if (m->m_ext.ext_ref == NULL) {
926 atomic_add_char(
927 &mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);
928 } else {
929 int s = splimp();
930
931 (*m->m_ext.ext_ref)(m->m_ext.ext_buf,
932 m->m_ext.ext_size);
933 splx(s);
934 }
935 n->m_ext = m->m_ext;
936 n->m_flags |= M_EXT;
937 } else {
938 bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
939 }
940
941 m = m->m_next;
942 }
943 return top;
944nospace:
945 m_freem(top);
946 MCFail++;
947 return 0;
948}
949
950/*
951 * Copy data from an mbuf chain starting "off" bytes from the beginning,
952 * continuing for "len" bytes, into the indicated buffer.
953 */
954void
955m_copydata(m, off, len, cp)
956 register struct mbuf *m;
957 register int off;
958 register int len;
959 caddr_t cp;
960{
961 register unsigned count;
962
963 KASSERT(off >= 0, ("m_copydata, negative off %d", off));
964 KASSERT(len >= 0, ("m_copydata, negative len %d", len));
965 while (off > 0) {
966 KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
967 if (off < m->m_len)
968 break;
969 off -= m->m_len;
970 m = m->m_next;
971 }
972 while (len > 0) {
973 KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
974 count = min(m->m_len - off, len);
975 bcopy(mtod(m, caddr_t) + off, cp, count);
976 len -= count;
977 cp += count;
978 off = 0;
979 m = m->m_next;
980 }
981}
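/*
 * Illustrative sketch (not part of the original file): flattening the first
 * "len" bytes of a chain into a local buffer for inspection.  The caller
 * must know the chain holds at least len bytes; m_copydata() asserts this.
 */
static void
example_peek_header(struct mbuf *m, void *buf, int len)
{
	m_copydata(m, 0, len, (caddr_t)buf);
}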
982
983/*
984 * Copy a packet header mbuf chain into a completely new chain, including
985 * copying any mbuf clusters. Use this instead of m_copypacket() when
986 * you need a writable copy of an mbuf chain.
987 */
988struct mbuf *
989m_dup(m, how)
990 struct mbuf *m;
991 int how;
992{
993 struct mbuf **p, *top = NULL;
994 int remain, moff, nsize;
995
996 /* Sanity check */
997 if (m == NULL)
998 return (0);
999 KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __FUNCTION__));
1000
1001 /* While there's more data, get a new mbuf, tack it on, and fill it */
1002 remain = m->m_pkthdr.len;
1003 moff = 0;
1004 p = &top;
1005 while (remain > 0 || top == NULL) { /* allow m->m_pkthdr.len == 0 */
1006 struct mbuf *n;
1007
1008 /* Get the next new mbuf */
1009 MGET(n, how, m->m_type);
1010 if (n == NULL)
1011 goto nospace;
1012 if (top == NULL) { /* first one, must be PKTHDR */
1013 if (!m_dup_pkthdr(n, m, how))
1014 goto nospace;
1015 nsize = MHLEN;
1016 } else /* not the first one */
1017 nsize = MLEN;
1018 if (remain >= MINCLSIZE) {
1019 MCLGET(n, how);
1020 if ((n->m_flags & M_EXT) == 0) {
1021 (void)m_free(n);
1022 goto nospace;
1023 }
1024 nsize = MCLBYTES;
1025 }
1026 n->m_len = 0;
1027
1028 /* Link it into the new chain */
1029 *p = n;
1030 p = &n->m_next;
1031
1032 /* Copy data from original mbuf(s) into new mbuf */
1033 while (n->m_len < nsize && m != NULL) {
1034 int chunk = min(nsize - n->m_len, m->m_len - moff);
1035
1036 bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
1037 moff += chunk;
1038 n->m_len += chunk;
1039 remain -= chunk;
1040 if (moff == m->m_len) {
1041 m = m->m_next;
1042 moff = 0;
1043 }
1044 }
1045
1046 /* Check correct total mbuf length */
1047 KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
1048 ("%s: bogus m_pkthdr.len", __FUNCTION__));
1049 }
1050 return (top);
1051
1052nospace:
1053 m_freem(top);
1054 MCFail++;
1055 return (0);
1056}
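/*
 * Illustrative sketch (not part of the original file): taking a writable
 * deep copy of a packet (cluster data included), as opposed to the shared,
 * read-only copy made by m_copym()/m_copypacket().  The packet header must
 * be present on the source chain.
 */
static struct mbuf *
example_writable_copy(struct mbuf *m)
{
	return (m_dup(m, M_DONTWAIT));
}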
1057
1058/*
1059 * Concatenate mbuf chain n to m.
1060 * Both chains must be of the same type (e.g. MT_DATA).
1061 * Any m_pkthdr is not updated.
1062 */
1063void
1064m_cat(m, n)
1065 register struct mbuf *m, *n;
1066{
1067 while (m->m_next)
1068 m = m->m_next;
1069 while (n) {
1070 if (m->m_flags & M_EXT ||
1071 m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
1072 /* just join the two chains */
1073 m->m_next = n;
1074 return;
1075 }
1076 /* splat the data from one into the other */
1077 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
1078 (u_int)n->m_len);
1079 m->m_len += n->m_len;
1080 n = m_free(n);
1081 }
1082}
1083
1084void
1085m_adj(mp, req_len)
1086 struct mbuf *mp;
1087 int req_len;
1088{
1089 register int len = req_len;
1090 register struct mbuf *m;
1091 register int count;
1092
1093 if ((m = mp) == NULL)
1094 return;
1095 if (len >= 0) {
1096 /*
1097 * Trim from head.
1098 */
1099 while (m != NULL && len > 0) {
1100 if (m->m_len <= len) {
1101 len -= m->m_len;
1102 m->m_len = 0;
1103 m = m->m_next;
1104 } else {
1105 m->m_len -= len;
1106 m->m_data += len;
1107 len = 0;
1108 }
1109 }
1110 m = mp;
1111 if (mp->m_flags & M_PKTHDR)
1112 m->m_pkthdr.len -= (req_len - len);
1113 } else {
1114 /*
1115 * Trim from tail. Scan the mbuf chain,
1116 * calculating its length and finding the last mbuf.
1117 * If the adjustment only affects this mbuf, then just
1118 * adjust and return. Otherwise, rescan and truncate
1119 * after the remaining size.
1120 */
1121 len = -len;
1122 count = 0;
1123 for (;;) {
1124 count += m->m_len;
1125 if (m->m_next == (struct mbuf *)0)
1126 break;
1127 m = m->m_next;
1128 }
1129 if (m->m_len >= len) {
1130 m->m_len -= len;
1131 if (mp->m_flags & M_PKTHDR)
1132 mp->m_pkthdr.len -= len;
1133 return;
1134 }
1135 count -= len;
1136 if (count < 0)
1137 count = 0;
1138 /*
1139 * Correct length for chain is "count".
1140 * Find the mbuf with last data, adjust its length,
1141 * and toss data from remaining mbufs on chain.
1142 */
1143 m = mp;
1144 if (m->m_flags & M_PKTHDR)
1145 m->m_pkthdr.len = count;
1146 for (; m; m = m->m_next) {
1147 if (m->m_len >= count) {
1148 m->m_len = count;
1149 break;
1150 }
1151 count -= m->m_len;
1152 }
1153 while (m->m_next)
1154 (m = m->m_next) ->m_len = 0;
1155 }
1156}
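/*
 * Illustrative sketch (not part of the original file): stripping a
 * hypothetical link-level header from the front of a packet and a trailer
 * from the end.  Positive lengths trim from the head, negative from the
 * tail, and m_pkthdr.len is kept in sync.
 */
static void
example_strip(struct mbuf *m, int hdrlen, int trailerlen)
{
	m_adj(m, hdrlen);
	m_adj(m, -trailerlen);
}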
1157
1158/*
1159 * Rearrange an mbuf chain so that len bytes are contiguous
1160 * and in the data area of an mbuf (so that mtod and dtom
1161 * will work for a structure of size len). Returns the resulting
1162 * mbuf chain on success, frees it and returns null on failure.
1163 * If there is room, it will add up to max_protohdr-len extra bytes to the
1164 * contiguous region in an attempt to avoid being called next time.
1165 */
1166#define MPFail (mbstat.m_mpfail)
1167
1168struct mbuf *
1169m_pullup(n, len)
1170 register struct mbuf *n;
1171 int len;
1172{
1173 register struct mbuf *m;
1174 register int count;
1175 int space;
1176
1177 /*
1178 * If first mbuf has no cluster, and has room for len bytes
1179 * without shifting current data, pullup into it,
1180 * otherwise allocate a new mbuf to prepend to the chain.
1181 */
1182 if ((n->m_flags & M_EXT) == 0 &&
1183 n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
1184 if (n->m_len >= len)
1185 return (n);
1186 m = n;
1187 n = n->m_next;
1188 len -= m->m_len;
1189 } else {
1190 if (len > MHLEN)
1191 goto bad;
1192 MGET(m, M_DONTWAIT, n->m_type);
1193 if (m == 0)
1194 goto bad;
1195 m->m_len = 0;
1196 if (n->m_flags & M_PKTHDR)
1197 M_MOVE_PKTHDR(m, n);
1198 }
1199 space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
1200 do {
1201 count = min(min(max(len, max_protohdr), space), n->m_len);
1202 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
1203 (unsigned)count);
1204 len -= count;
1205 m->m_len += count;
1206 n->m_len -= count;
1207 space -= count;
1208 if (n->m_len)
1209 n->m_data += count;
1210 else
1211 n = m_free(n);
1212 } while (len > 0 && n);
1213 if (len > 0) {
1214 (void) m_free(m);
1215 goto bad;
1216 }
1217 m->m_next = n;
1218 return (m);
1219bad:
1220 m_freem(n);
1221 MPFail++;
1222 return (0);
1223}
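/*
 * Illustrative sketch (not part of the original file): the classic use of
 * m_pullup() before dereferencing a protocol header with mtod().  "hdrlen"
 * is hypothetical and must not exceed MHLEN.
 */
static struct mbuf *
example_pullup_header(struct mbuf *m, int hdrlen)
{
	if (m->m_len < hdrlen && (m = m_pullup(m, hdrlen)) == NULL)
		return (NULL);	/* chain already freed by m_pullup() */
	/* The first hdrlen bytes are now contiguous at mtod(m, caddr_t). */
	return (m);
}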
1224
1225/*
1226 * Partition an mbuf chain in two pieces, returning the tail --
1227 * all but the first len0 bytes. In case of failure, it returns NULL and
1228 * attempts to restore the chain to its original state.
1229 *
1230 * Note that the resulting mbufs might be read-only, because the new
1231 * mbuf can end up sharing an mbuf cluster with the original mbuf if
1232 * the "breaking point" happens to lie within a cluster mbuf. Use the
1233 * M_WRITABLE() macro to check for this case.
1234 */
1235struct mbuf *
1236m_split(m0, len0, wait)
1237 register struct mbuf *m0;
1238 int len0, wait;
1239{
1240 register struct mbuf *m, *n;
1241 unsigned len = len0, remain;
1242
1243 for (m = m0; m && len > m->m_len; m = m->m_next)
1244 len -= m->m_len;
1245 if (m == 0)
1246 return (0);
1247 remain = m->m_len - len;
1248 if (m0->m_flags & M_PKTHDR) {
1249 MGETHDR(n, wait, m0->m_type);
1250 if (n == 0)
1251 return (0);
1252 n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
1253 n->m_pkthdr.len = m0->m_pkthdr.len - len0;
1254 m0->m_pkthdr.len = len0;
1255 if (m->m_flags & M_EXT)
1256 goto extpacket;
1257 if (remain > MHLEN) {
1258 /* m can't be the lead packet */
1259 MH_ALIGN(n, 0);
1260 n->m_next = m_split(m, len, wait);
1261 if (n->m_next == 0) {
1262 (void) m_free(n);
1263 return (0);
1264 } else {
1265 n->m_len = 0;
1266 return (n);
1267 }
1268 } else
1269 MH_ALIGN(n, remain);
1270 } else if (remain == 0) {
1271 n = m->m_next;
1272 m->m_next = 0;
1273 return (n);
1274 } else {
1275 MGET(n, wait, m->m_type);
1276 if (n == 0)
1277 return (0);
1278 M_ALIGN(n, remain);
1279 }
1280extpacket:
1281 if (m->m_flags & M_EXT) {
1282 n->m_flags |= M_EXT;
1283 n->m_ext = m->m_ext;
1284 if (m->m_ext.ext_ref == NULL)
1285 atomic_add_char(&mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);
1286 else {
1287 int s = splimp();
1288
1289 (*m->m_ext.ext_ref)(m->m_ext.ext_buf,
1290 m->m_ext.ext_size);
1291 splx(s);
1292 }
1293 n->m_data = m->m_data + len;
1294 } else {
1295 bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
1296 }
1297 n->m_len = remain;
1298 m->m_len = len;
1299 n->m_next = m->m_next;
1300 m->m_next = 0;
1301 return (n);
1302}
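/*
 * Illustrative sketch (not part of the original file): splitting a packet
 * after the first "hdrlen" bytes, e.g. to separate a header from its
 * payload.  The returned tail may share a cluster with the head, so check
 * M_WRITABLE() before modifying either piece.
 */
static struct mbuf *
example_split_at(struct mbuf *m, int hdrlen)
{
	struct mbuf *tail;

	tail = m_split(m, hdrlen, M_DONTWAIT);
	if (tail == NULL)
		return (NULL);	/* m is restored as closely as possible */
	/* m now ends at hdrlen bytes; tail holds the remainder. */
	return (tail);
}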
1303/*
1304 * Routine to copy from device local memory into mbufs.
1305 */
1306struct mbuf *
1307m_devget(buf, totlen, off0, ifp, copy)
1308 char *buf;
1309 int totlen, off0;
1310 struct ifnet *ifp;
1311 void (*copy) __P((char *from, caddr_t to, u_int len));
1312{
1313 register struct mbuf *m;
1314 struct mbuf *top = 0, **mp = &top;
1315 register int off = off0, len;
1316 register char *cp;
1317 char *epkt;
1318
1319 cp = buf;
1320 epkt = cp + totlen;
1321 if (off) {
1322 cp += off + 2 * sizeof(u_short);
1323 totlen -= 2 * sizeof(u_short);
1324 }
1325 MGETHDR(m, M_DONTWAIT, MT_DATA);
1326 if (m == 0)
1327 return (0);
1328 m->m_pkthdr.rcvif = ifp;
1329 m->m_pkthdr.len = totlen;
1330 m->m_len = MHLEN;
1331
1332 while (totlen > 0) {
1333 if (top) {
1334 MGET(m, M_DONTWAIT, MT_DATA);
1335 if (m == 0) {
1336 m_freem(top);
1337 return (0);
1338 }
1339 m->m_len = MLEN;
1340 }
1341 len = min(totlen, epkt - cp);
1342 if (len >= MINCLSIZE) {
1343 MCLGET(m, M_DONTWAIT);
1344 if (m->m_flags & M_EXT)
1345 m->m_len = len = min(len, MCLBYTES);
1346 else
1347 len = m->m_len;
1348 } else {
1349 /*
1350 * Place initial small packet/header at end of mbuf.
1351 */
1352 if (len < m->m_len) {
1353 if (top == 0 && len + max_linkhdr <= m->m_len)
1354 m->m_data += max_linkhdr;
1355 m->m_len = len;
1356 } else
1357 len = m->m_len;
1358 }
1359 if (copy)
1360 copy(cp, mtod(m, caddr_t), (unsigned)len);
1361 else
1362 bcopy(cp, mtod(m, caddr_t), (unsigned)len);
1363 cp += len;
1364 *mp = m;
1365 mp = &m->m_next;
1366 totlen -= len;
1367 if (cp == epkt)
1368 cp = buf;
1369 }
1370 return (top);
1371}
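/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * driver copying a received frame out of board memory into an mbuf chain.
 * Passing a NULL copy routine makes m_devget() fall back to bcopy().
 */
static struct mbuf *
example_devget(char *board_buf, int totlen, struct ifnet *ifp)
{
	return (m_devget(board_buf, totlen, 0, ifp, NULL));
}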
1372
1373/*
1374 * Copy data from a buffer back into the indicated mbuf chain,
1375 * starting "off" bytes from the beginning, extending the mbuf
1376 * chain if necessary.
1377 */
1378void
1379m_copyback(m0, off, len, cp)
1380 struct mbuf *m0;
1381 register int off;
1382 register int len;
1383 caddr_t cp;
1384{
1385 register int mlen;
1386 register struct mbuf *m = m0, *n;
1387 int totlen = 0;
1388
1389 if (m0 == 0)
1390 return;
1391 while (off > (mlen = m->m_len)) {
1392 off -= mlen;
1393 totlen += mlen;
1394 if (m->m_next == 0) {
1395 n = m_getclr(M_DONTWAIT, m->m_type);
1396 if (n == 0)
1397 goto out;
1398 n->m_len = min(MLEN, len + off);
1399 m->m_next = n;
1400 }
1401 m = m->m_next;
1402 }
1403 while (len > 0) {
1404 mlen = min (m->m_len - off, len);
1405 bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
1406 cp += mlen;
1407 len -= mlen;
1408 mlen += off;
1409 off = 0;
1410 totlen += mlen;
1411 if (len == 0)
1412 break;
1413 if (m->m_next == 0) {
1414 n = m_get(M_DONTWAIT, m->m_type);
1415 if (n == 0)
1416 break;
1417 n->m_len = min(MLEN, len);
1418 m->m_next = n;
1419 }
1420 m = m->m_next;
1421 }
1422out: if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
1423 m->m_pkthdr.len = totlen;
1424}
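/*
 * Illustrative sketch (not part of the original file): patching a 16-bit
 * field at a given offset in a chain, letting m_copyback() extend the
 * chain if the offset lies beyond the current data.
 */
static void
example_patch16(struct mbuf *m, int off, u_int16_t val)
{
	m_copyback(m, off, sizeof(val), (caddr_t)&val);
}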
1425
1426void
1427m_print(const struct mbuf *m)
1428{
1429 int len;
1430 const struct mbuf *m2;
1431
1432 len = m->m_pkthdr.len;
1433 m2 = m;
1434 while (len) {
1435 printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
1436 len -= m2->m_len;
1437 m2 = m2->m_next;
1438 }
1439 return;
1440}
1441
1442/*
1443 * "Move" mbuf pkthdr from "from" to "to".
1444 * "from" must have M_PKTHDR set, and "to" must be empty.
1445 */
1446void
1447m_move_pkthdr(struct mbuf *to, struct mbuf *from)
1448{
1449 KASSERT((to->m_flags & M_EXT) == 0, ("m_move_pkthdr: to has cluster"));
1450
1451 to->m_flags = from->m_flags & M_COPYFLAGS;
1452 to->m_data = to->m_pktdat;
1453 to->m_pkthdr = from->m_pkthdr; /* especially tags */
1454 SLIST_INIT(&from->m_pkthdr.tags); /* purge tags from src */
1455 from->m_flags &= ~M_PKTHDR;
1456}
1457
1458/*
1459 * Duplicate "from"'s mbuf pkthdr in "to".
1460 * "from" must have M_PKTHDR set, and "to" must be empty.
1461 * In particular, this does a deep copy of the packet tags.
1462 */
1463int
1464m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
1465{
1466 to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
1467 if ((to->m_flags & M_EXT) == 0)
1468 to->m_data = to->m_pktdat;
1469 to->m_pkthdr = from->m_pkthdr;
1470 SLIST_INIT(&to->m_pkthdr.tags);
1471 return (m_tag_copy_chain(to, from, how));
1472}
1473
1474/*
1475 * Defragment a mbuf chain, returning the shortest possible
1476 * chain of mbufs and clusters. If allocation fails and
1477 * this cannot be completed, NULL will be returned, but
1478 * the passed in chain will be unchanged. Upon success,
1479 * the original chain will be freed, and the new chain
1480 * will be returned.
1481 *
1482 * If an mbuf without a packet header is passed in, the original
1483 * mbuf chain will be returned unharmed.
1484 */
1485struct mbuf *
1486m_defrag(struct mbuf *m0, int how)
1487{
1488 struct mbuf *m_new = NULL, *m_final = NULL;
1489 int progress = 0, length;
1490
1491 if (!(m0->m_flags & M_PKTHDR))
1492 return (m0);
1493
1494#ifdef MBUF_STRESS_TEST
1495 if (m_defragrandomfailures) {
1496 int temp = arc4random() & 0xff;
1497 if (temp == 0xba)
1498 goto nospace;
1499 }
1500#endif
1501
1502 if (m0->m_pkthdr.len > MHLEN)
1503 m_final = m_getcl(how, MT_DATA, M_PKTHDR);
1504 else
1505 m_final = m_gethdr(how, MT_DATA);
1506
1507 if (m_final == NULL)
1508 goto nospace;
1509
1510 if (m_dup_pkthdr(m_final, m0, how) == NULL)
1511 goto nospace;
1512
1513 m_new = m_final;
1514
1515 while (progress < m0->m_pkthdr.len) {
1516 length = m0->m_pkthdr.len - progress;
1517 if (length > MCLBYTES)
1518 length = MCLBYTES;
1519
1520 if (m_new == NULL) {
1521 if (length > MLEN)
1522 m_new = m_getcl(how, MT_DATA, 0);
1523 else
1524 m_new = m_get(how, MT_DATA);
1525 if (m_new == NULL)
1526 goto nospace;
1527 }
1528
1529 m_copydata(m0, progress, length, mtod(m_new, caddr_t));
1530 progress += length;
1531 m_new->m_len = length;
1532 if (m_new != m_final)
1533 m_cat(m_final, m_new);
1534 m_new = NULL;
1535 }
1536 if (m0->m_next == NULL)
1537 m_defraguseless++;
1538 m_freem(m0);
1539 m0 = m_final;
1540 m_defragpackets++;
1541 m_defragbytes += m0->m_pkthdr.len;
1542 return (m0);
1543nospace:
1544 m_defragfailure++;
1545 if (m_new)
1546 m_free(m_new);
1547 if (m_final)
1548 m_freem(m_final);
1549 return (NULL);
1550}
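/*
 * Illustrative sketch (not part of the original file): compacting a long
 * transmit chain before handing it to a hypothetical driver that can only
 * handle a limited number of DMA segments.
 */
static struct mbuf *
example_compact(struct mbuf *m)
{
	struct mbuf *n;

	n = m_defrag(m, M_DONTWAIT);
	if (n == NULL)
		return (m);	/* allocation failed; original chain untouched */
	return (n);		/* m_defrag() already freed the original chain */
}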