Initial import from FreeBSD RELENG_4:
[dragonfly.git] / sys / kern / uipc_mbuf.c
1/*
2 * Copyright (c) 1982, 1986, 1988, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94
34 * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
35 */
36
37#include "opt_param.h"
38#include "opt_mbuf_stress_test.h"
39#include <sys/param.h>
40#include <sys/systm.h>
41#include <sys/malloc.h>
42#include <sys/mbuf.h>
43#include <sys/kernel.h>
44#include <sys/sysctl.h>
45#include <sys/domain.h>
46#include <sys/protosw.h>
47
48#include <vm/vm.h>
49#include <vm/vm_kern.h>
50#include <vm/vm_extern.h>
51
52#ifdef INVARIANTS
53#include <machine/cpu.h>
54#endif
55
56static void mbinit __P((void *));
57SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)
58
59struct mbuf *mbutl;
60char *mclrefcnt;
61struct mbstat mbstat;
62u_long mbtypes[MT_NTYPES];
63struct mbuf *mmbfree;
64union mcluster *mclfree;
65int max_linkhdr;
66int max_protohdr;
67int max_hdr;
68int max_datalen;
69int m_defragpackets;
70int m_defragbytes;
71int m_defraguseless;
72int m_defragfailure;
73#ifdef MBUF_STRESS_TEST
74int m_defragrandomfailures;
75#endif
76
77int nmbclusters;
78int nmbufs;
79u_int m_mballoc_wid = 0;
80u_int m_clalloc_wid = 0;
81
82SYSCTL_DECL(_kern_ipc);
83SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
84 &max_linkhdr, 0, "");
85SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
86 &max_protohdr, 0, "");
87SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
88SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
89 &max_datalen, 0, "");
90SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
91 &mbuf_wait, 0, "");
92SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RW, &mbstat, mbstat, "");
93SYSCTL_OPAQUE(_kern_ipc, OID_AUTO, mbtypes, CTLFLAG_RD, mbtypes,
94 sizeof(mbtypes), "LU", "");
95SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
96 &nmbclusters, 0, "Maximum number of mbuf clusters available");
97SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
98 "Maximum number of mbufs available");
99SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
100 &m_defragpackets, 0, "");
101SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
102 &m_defragbytes, 0, "");
103SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
104 &m_defraguseless, 0, "");
105SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
106 &m_defragfailure, 0, "");
107#ifdef MBUF_STRESS_TEST
108SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
109 &m_defragrandomfailures, 0, "");
110#endif
111
112static void m_reclaim __P((void));
113
114#ifndef NMBCLUSTERS
115#define NMBCLUSTERS (512 + maxusers * 16)
116#endif
117#ifndef NMBUFS
118#define NMBUFS (nmbclusters * 4)
119#endif
120
121/*
122 * Perform sanity checks of tunables declared above.
123 */
124static void
125tunable_mbinit(void *dummy)
126{
127
128 /*
129 * This has to be done before VM init.
130 */
131 nmbclusters = NMBCLUSTERS;
132 TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
133 nmbufs = NMBUFS;
134 TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
135 /* Sanity checks */
136 if (nmbufs < nmbclusters * 2)
137 nmbufs = nmbclusters * 2;
138
139 return;
140}
141SYSINIT(tunable_mbinit, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_mbinit, NULL);
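/*
 * Boot-time tuning sketch: tunable_mbinit() fetches the two limits from
 * the kernel environment, so with a FreeBSD 4.x style loader they can be
 * raised from /boot/loader.conf, for example (values are only
 * illustrative):
 *
 *	kern.ipc.nmbclusters="8192"
 *	kern.ipc.nmbufs="32768"
 *
 * The sanity check above still forces nmbufs to at least twice
 * nmbclusters.
 */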
142
143/* "number of clusters of pages" */
144#define NCL_INIT 1
145
146#define NMB_INIT 16
147
148/* ARGSUSED */
149static void
150mbinit(dummy)
151 void *dummy;
152{
153 int s;
154
155 mmbfree = NULL; mclfree = NULL;
156 mbstat.m_msize = MSIZE;
157 mbstat.m_mclbytes = MCLBYTES;
158 mbstat.m_minclsize = MINCLSIZE;
159 mbstat.m_mlen = MLEN;
160 mbstat.m_mhlen = MHLEN;
161
162 s = splimp();
163 if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
164 goto bad;
165#if MCLBYTES <= PAGE_SIZE
166 if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
167 goto bad;
168#else
169 /* It's OK to call contigmalloc in this context. */
170 if (m_clalloc(16, M_WAIT) == 0)
171 goto bad;
172#endif
173 splx(s);
174 return;
175bad:
176 panic("mbinit");
177}
178
179/*
180 * Allocate at least nmb mbufs and place on mbuf free list.
181 * Must be called at splimp.
182 */
183/* ARGSUSED */
184int
185m_mballoc(nmb, how)
186 register int nmb;
187 int how;
188{
189 register caddr_t p;
190 register int i;
191 int nbytes;
192
193 /*
194 * If we've hit the mbuf limit, stop allocating from mb_map,
195 * (or trying to) in order to avoid dipping into the section of
196 * mb_map which we've "reserved" for clusters.
197 */
198 if ((nmb + mbstat.m_mbufs) > nmbufs)
199 return (0);
200
201 /*
202 * Once we run out of map space, it will be impossible to get
203 * any more (nothing is ever freed back to the map)
204 * -- however you are not dead as m_reclaim might
205 * still be able to free a substantial amount of space.
206 *
207 * XXX Furthermore, we can also work with "recycled" mbufs: when
208 * called with M_WAIT, the sleeping thread will be woken up when
209 * an mbuf is freed (see m_mballoc_wait()).
210 */
211 if (mb_map_full)
212 return (0);
213
214 nbytes = round_page(nmb * MSIZE);
215 p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT);
216 if (p == 0 && how == M_WAIT) {
217 mbstat.m_wait++;
218 p = (caddr_t)kmem_malloc(mb_map, nbytes, M_WAITOK);
219 }
220
221 /*
222 * Either the map is now full, or `how' is M_NOWAIT and there
223 * are no pages left.
224 */
225 if (p == NULL)
226 return (0);
227
228 nmb = nbytes / MSIZE;
229 for (i = 0; i < nmb; i++) {
230 ((struct mbuf *)p)->m_next = mmbfree;
231 mmbfree = (struct mbuf *)p;
232 p += MSIZE;
233 }
234 mbstat.m_mbufs += nmb;
235 mbtypes[MT_FREE] += nmb;
236 return (1);
237}
238
239/*
240 * Once mb_map has been exhausted, an allocation made with M_WAIT (whether
241 * through the allocation macros or, in some cases, functions) must rely
242 * solely on reclaimed mbufs. Here we wait for an mbuf to be freed for a
243 * designated amount of time (mbuf_wait ticks).
244 */
245struct mbuf *
246m_mballoc_wait(int caller, int type)
247{
248 struct mbuf *p;
249 int s;
250
251 s = splimp();
252 m_mballoc_wid++;
253 if ((tsleep(&m_mballoc_wid, PVM, "mballc", mbuf_wait)) == EWOULDBLOCK)
254 m_mballoc_wid--;
255 splx(s);
256
257 /*
258 * Now that we think we've got something, we redo the MGET, but
259 * avoid getting into another instance of m_mballoc_wait().
260 * XXX: We retry the fetch _even_ if the sleep timed out. This is
261 * done purposely, in the [unlikely] case that an mbuf was freed
262 * but the sleep was not awakened in time.
263 */
264 p = NULL;
265 switch (caller) {
266 case MGET_C:
267 MGET(p, M_DONTWAIT, type);
268 break;
269 case MGETHDR_C:
270 MGETHDR(p, M_DONTWAIT, type);
271 break;
272 default:
273 panic("m_mballoc_wait: invalid caller (%d)", caller);
274 }
275
276 s = splimp();
277 if (p != NULL) { /* We waited and got something... */
278 mbstat.m_wait++;
279 /* Wake up another if we have more free. */
280 if (mmbfree != NULL)
281 MMBWAKEUP();
282 }
283 splx(s);
284 return (p);
285}
286
287#if MCLBYTES > PAGE_SIZE
288static int i_want_my_mcl;
289
290static void
291kproc_mclalloc(void)
292{
293 int status;
294
295 while (1) {
296 tsleep(&i_want_my_mcl, PVM, "mclalloc", 0);
297
298 for (; i_want_my_mcl; i_want_my_mcl--) {
299 if (m_clalloc(1, M_WAIT) == 0)
300 printf("m_clalloc failed even in process context!\n");
301 }
302 }
303}
304
305static struct proc *mclallocproc;
306static struct kproc_desc mclalloc_kp = {
307 "mclalloc",
308 kproc_mclalloc,
309 &mclallocproc
310};
311SYSINIT(mclallocproc, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start,
312 &mclalloc_kp);
313#endif
314
315/*
316 * Allocate some number of mbuf clusters
317 * and place on cluster free list.
318 * Must be called at splimp.
319 */
320/* ARGSUSED */
321int
322m_clalloc(ncl, how)
323 register int ncl;
324 int how;
325{
326 register caddr_t p;
327 register int i;
328 int npg;
329
330 /*
331 * If we've hit the mcluster number limit, stop allocating from
332 * mb_map, (or trying to) in order to avoid dipping into the section
333 * of mb_map which we've "reserved" for mbufs.
334 */
335 if ((ncl + mbstat.m_clusters) > nmbclusters)
336 goto m_clalloc_fail;
337
338 /*
339 * Once we run out of map space, it will be impossible
340 * to get any more (nothing is ever freed back to the
341 * map). From this point on, we solely rely on freed
342 * mclusters.
343 */
344 if (mb_map_full)
345 goto m_clalloc_fail;
346
347#if MCLBYTES > PAGE_SIZE
348 if (how != M_WAIT) {
349 i_want_my_mcl += ncl;
350 wakeup(&i_want_my_mcl);
351 mbstat.m_wait++;
352 p = 0;
353 } else {
354 p = contigmalloc1(MCLBYTES * ncl, M_DEVBUF, M_WAITOK, 0ul,
355 ~0ul, PAGE_SIZE, 0, mb_map);
356 }
357#else
358 npg = ncl;
359 p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
360 how != M_WAIT ? M_NOWAIT : M_WAITOK);
361 ncl = ncl * PAGE_SIZE / MCLBYTES;
362#endif
363 /*
364 * Either the map is now full, or `how' is M_NOWAIT and there
365 * are no pages left.
366 */
367 if (p == NULL) {
368 static int last_report; /* time of the last report (in ticks) */
369m_clalloc_fail:
370 mbstat.m_drops++;
371 if (ticks < last_report || (ticks - last_report) >= hz) {
372 last_report = ticks;
373 printf("All mbuf clusters exhausted, please see tuning(7).\n");
374 }
375 return (0);
376 }
377
378 for (i = 0; i < ncl; i++) {
379 ((union mcluster *)p)->mcl_next = mclfree;
380 mclfree = (union mcluster *)p;
381 p += MCLBYTES;
382 mbstat.m_clfree++;
383 }
384 mbstat.m_clusters += ncl;
385 return (1);
386}
387
388/*
389 * Once the mb_map submap has been exhausted and the allocation is called with
390 * M_WAIT, we rely on the mclfree union pointers. If nothing is free, we will
391 * sleep for a designated amount of time (mbuf_wait) or until we're woken up
392 * due to sudden mcluster availability.
393 */
394caddr_t
395m_clalloc_wait(void)
396{
397 caddr_t p;
398 int s;
399
400#ifdef __i386__
401 /* If in interrupt context, and INVARIANTS, maintain sanity and die. */
402 KASSERT(intr_nesting_level == 0, ("CLALLOC: CANNOT WAIT IN INTERRUPT"));
403#endif
404
405 /* Sleep until something's available or until we expire. */
406 m_clalloc_wid++;
407 if ((tsleep(&m_clalloc_wid, PVM, "mclalc", mbuf_wait)) == EWOULDBLOCK)
408 m_clalloc_wid--;
409
410 /*
411 * Now that we think we've got something, we redo the MCLALLOC, but
412 * avoid getting into another instance of m_clalloc_wait().
413 */
414 p = NULL;
415 MCLALLOC(p, M_DONTWAIT);
416
417 s = splimp();
418 if (p != NULL) { /* We waited and got something... */
419 mbstat.m_wait++;
420 /* Wake up another if we have more free. */
421 if (mclfree != NULL)
422 MCLWAKEUP();
423 }
424
425 splx(s);
426 return (p);
427}
428
429/*
430 * When MGET fails, ask protocols to free space when short of memory,
431 * then re-attempt to allocate an mbuf.
432 */
433struct mbuf *
434m_retry(i, t)
435 int i, t;
436{
437 register struct mbuf *m;
438
439 /*
440 * Must only do the reclaim if not in an interrupt context.
441 */
442 if (i == M_WAIT) {
443#ifdef __i386__
444 KASSERT(intr_nesting_level == 0,
445 ("MBALLOC: CANNOT WAIT IN INTERRUPT"));
446#endif
447 m_reclaim();
448 }
449
450 /*
451 * Both m_mballoc_wait and m_retry must be nulled out because
452 * when the MGET macro is run from here, we definitely do _not_
453 * want to enter an instance of m_mballoc_wait() or m_retry() (again!).
454 */
455#define m_mballoc_wait(caller,type) (struct mbuf *)0
456#define m_retry(i, t) (struct mbuf *)0
457 MGET(m, i, t);
458#undef m_retry
459#undef m_mballoc_wait
460
461 if (m != NULL)
462 mbstat.m_wait++;
463 else {
464 static int last_report; /* time of the last report (in ticks) */
465 mbstat.m_drops++;
466 if (ticks < last_report || (ticks - last_report) >= hz) {
467 last_report = ticks;
468 printf("All mbufs exhausted, please see tuning(7).\n");
469 }
470 }
471
472 return (m);
473}
474
475/*
476 * As above; retry an MGETHDR.
477 */
478struct mbuf *
479m_retryhdr(i, t)
480 int i, t;
481{
482 register struct mbuf *m;
483
484 /*
485 * Must only do the reclaim if not in an interrupt context.
486 */
487 if (i == M_WAIT) {
488#ifdef __i386__
489 KASSERT(intr_nesting_level == 0,
490 ("MBALLOC: CANNOT WAIT IN INTERRUPT"));
491#endif
492 m_reclaim();
493 }
494
495#define m_mballoc_wait(caller,type) (struct mbuf *)0
496#define m_retryhdr(i, t) (struct mbuf *)0
497 MGETHDR(m, i, t);
498#undef m_retryhdr
499#undef m_mballoc_wait
500
501 if (m != NULL)
502 mbstat.m_wait++;
503 else {
504 static int last_report; /* time of the last report (in ticks) */
505 mbstat.m_drops++;
506 if (ticks < last_report || (ticks - last_report) >= hz) {
507 last_report = ticks;
508 printf("All mbufs exhausted, please see tuning(7).\n");
509 }
510 }
511
512 return (m);
513}
514
515static void
516m_reclaim()
517{
518 register struct domain *dp;
519 register struct protosw *pr;
520 int s = splimp();
521
522 for (dp = domains; dp; dp = dp->dom_next)
523 for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
524 if (pr->pr_drain)
525 (*pr->pr_drain)();
526 splx(s);
527 mbstat.m_drain++;
528}
529
530/*
531 * Space allocation routines.
532 * These are also available as macros
533 * for critical paths.
534 */
535struct mbuf *
536m_get(how, type)
537 int how, type;
538{
539 register struct mbuf *m;
540
541 MGET(m, how, type);
542 return (m);
543}
544
545struct mbuf *
546m_gethdr(how, type)
547 int how, type;
548{
549 register struct mbuf *m;
550
551 MGETHDR(m, how, type);
552 return (m);
553}
554
555struct mbuf *
556m_getclr(how, type)
557 int how, type;
558{
559 register struct mbuf *m;
560
561 MGET(m, how, type);
562 if (m == 0)
563 return (0);
564 bzero(mtod(m, caddr_t), MLEN);
565 return (m);
566}
567
568/*
569 * m_getcl() returns an mbuf with an attached cluster.
570 * Because many network drivers use this kind of buffer heavily, it is
571 * convenient to keep a small pool of free buffers of this kind.
572 * Even a small size such as 10 gives about 10% improvement in the
573 * forwarding rate in a bridge or router.
574 * The size of this free list is controlled by the sysctl variable
575 * mcl_pool_max. The list is populated on m_freem(), and used in
576 * m_getcl() if elements are available.
577 */
578static struct mbuf *mcl_pool;
579static int mcl_pool_now;
580static int mcl_pool_max = 0;
581
582SYSCTL_INT(_kern_ipc, OID_AUTO, mcl_pool_max, CTLFLAG_RW, &mcl_pool_max, 0,
583 "Maximum number of mbufs+cluster in free list");
584SYSCTL_INT(_kern_ipc, OID_AUTO, mcl_pool_now, CTLFLAG_RD, &mcl_pool_now, 0,
585 "Current number of mbufs+cluster in free list");
586
587struct mbuf *
588m_getcl(int how, short type, int flags)
589{
590 int s = splimp();
591 struct mbuf *mp;
592
593 if (flags & M_PKTHDR) {
594 if (type == MT_DATA && mcl_pool) {
595 mp = mcl_pool;
596 mcl_pool = mp->m_nextpkt;
597 mcl_pool_now--;
598 splx(s);
599 mp->m_nextpkt = NULL;
600 mp->m_data = mp->m_ext.ext_buf;
601 mp->m_flags = M_PKTHDR|M_EXT;
602 mp->m_pkthdr.rcvif = NULL;
603 mp->m_pkthdr.csum_flags = 0;
604 return mp;
605 } else
606 MGETHDR(mp, how, type);
607 } else
608 MGET(mp, how, type);
609 if (mp) {
610 MCLGET(mp, how);
611 if ( (mp->m_flags & M_EXT) == 0) {
612 m_free(mp);
613 mp = NULL;
614 }
615 }
616 splx(s);
617 return mp;
618}
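/*
 * Usage sketch for m_getcl(), assuming a hypothetical driver receive
 * path in which 'ifp' and 'frame_len' are supplied by the caller:
 *
 *	struct mbuf *m;
 *
 *	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
 *	if (m == NULL)
 *		return;			(drop the frame: out of mbufs)
 *	m->m_pkthdr.rcvif = ifp;
 *	m->m_pkthdr.len = m->m_len = frame_len;
 *
 * When the buffer comes from the mcl_pool free list above, m_data,
 * m_flags and the basic pkthdr fields have already been reinitialized.
 */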
619
620/*
621 * struct mbuf *
622 * m_getm(m, len, how, type)
623 *
624 * This will allocate len bytes' worth of mbufs and/or mbuf clusters (whatever fits
625 * best) and return a pointer to the top of the allocated chain. If m is
626 * non-null, then we assume that it is a single mbuf or an mbuf chain to
627 * which we want len bytes worth of mbufs and/or clusters attached, and so
628 * if we succeed in allocating it, we will just return a pointer to m.
629 *
630 * If we happen to fail at any point during the allocation, we will free
631 * up everything we have already allocated and return NULL.
632 *
633 */
634struct mbuf *
635m_getm(struct mbuf *m, int len, int how, int type)
636{
637 struct mbuf *top, *tail, *mp, *mtail = NULL;
638
639 KASSERT(len >= 0, ("len is < 0 in m_getm"));
640
641 MGET(mp, how, type);
642 if (mp == NULL)
643 return (NULL);
644 else if (len > MINCLSIZE) {
645 MCLGET(mp, how);
646 if ((mp->m_flags & M_EXT) == 0) {
647 m_free(mp);
648 return (NULL);
649 }
650 }
651 mp->m_len = 0;
652 len -= M_TRAILINGSPACE(mp);
653
654 if (m != NULL)
655 for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next);
656 else
657 m = mp;
658
659 top = tail = mp;
660 while (len > 0) {
661 MGET(mp, how, type);
662 if (mp == NULL)
663 goto failed;
664
665 tail->m_next = mp;
666 tail = mp;
667 if (len > MINCLSIZE) {
668 MCLGET(mp, how);
669 if ((mp->m_flags & M_EXT) == 0)
670 goto failed;
671 }
672
673 mp->m_len = 0;
674 len -= M_TRAILINGSPACE(mp);
675 }
676
677 if (mtail != NULL)
678 mtail->m_next = top;
679 return (m);
680
681failed:
682 m_freem(top);
683 return (NULL);
684}
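/*
 * Usage sketch for m_getm(), assuming a hypothetical caller that wants
 * room for 'resid' more bytes appended to an existing chain 'm0':
 *
 *	struct mbuf *m0;	(existing chain, possibly a single mbuf)
 *	int resid;		(additional bytes of space wanted)
 *
 *	if (m_getm(m0, resid, M_WAIT, MT_DATA) == NULL)
 *		goto drop;	(nothing was attached to m0 on failure)
 *
 * The new mbufs and clusters are appended with m_len set to 0; the
 * caller is expected to fill them and adjust the lengths itself.
 */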
685
686/*
687 * MFREE(struct mbuf *m, struct mbuf *n)
688 * Free a single mbuf and associated external storage.
689 * Place the successor, if any, in n.
690 *
691 * We do need to check non-first mbufs for m_aux, since some existing
692 * code does not call M_PREPEND properly
693 * (example: a call to bpf_mtap from a driver).
694 */
695#define MFREE(m, n) MBUFLOCK( \
696 struct mbuf *_mm = (m); \
697 \
698 KASSERT(_mm->m_type != MT_FREE, ("freeing free mbuf")); \
699 mbtypes[_mm->m_type]--; \
700 if ((_mm->m_flags & M_PKTHDR) != 0) \
701 m_tag_delete_chain(_mm, NULL); \
702 if (_mm->m_flags & M_EXT) \
703 MEXTFREE1(m); \
704 (n) = _mm->m_next; \
705 _mm->m_type = MT_FREE; \
706 mbtypes[MT_FREE]++; \
707 _mm->m_next = mmbfree; \
708 mmbfree = _mm; \
709 MMBWAKEUP(); \
710)
711
712struct mbuf *
713m_free(m)
714 struct mbuf *m;
715{
716 register struct mbuf *n;
717
718 MFREE(m, n);
719 return (n);
720}
721
722void
723m_freem(m)
724 struct mbuf *m;
725{
726 int s = splimp();
727
728 /*
729 * Try to keep a small pool of mbuf+cluster for quick use in
730 * device drivers. A good candidate is a M_PKTHDR buffer with
731 * only one cluster attached. Other mbufs, or those exceeding
732 * the pool size, are just m_free'd in the usual way.
733 * The following code makes sure that m_next, m_type,
734 * m_pkthdr.aux and m_ext.* are properly initialized.
735 * Other fields in the mbuf are initialized in m_getcl()
736 * upon allocation.
737 */
738 if (mcl_pool_now < mcl_pool_max && m && m->m_next == NULL &&
739 (m->m_flags & (M_PKTHDR|M_EXT)) == (M_PKTHDR|M_EXT) &&
740 m->m_type == MT_DATA && M_EXT_WRITABLE(m) ) {
741 m_tag_delete_chain(m, NULL);
742 m->m_nextpkt = mcl_pool;
743 mcl_pool = m;
744 mcl_pool_now++;
745 } else {
746 while (m)
747 m = m_free(m);
748 }
749 splx(s);
750}
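/*
 * Usage sketch: m_free() releases a single mbuf and returns its
 * successor, while m_freem() releases a whole chain. A hypothetical
 * caller trimming the first mbuf off a chain 'm' could do:
 *
 *	m = m_free(m);		(m now points at the former m->m_next)
 *
 * whereas a transmit-complete path would normally just do:
 *
 *	m_freem(m);
 *
 * m_freem() may park an eligible mbuf+cluster on the mcl_pool list
 * instead of freeing it, as described in the comment above.
 */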
751
752/*
753 * Mbuffer utility routines.
754 */
755
756/*
757 * Lesser-used path for M_PREPEND:
758 * allocate new mbuf to prepend to chain,
759 * copy junk along.
760 */
761struct mbuf *
762m_prepend(m, len, how)
763 register struct mbuf *m;
764 int len, how;
765{
766 struct mbuf *mn;
767
768 MGET(mn, how, m->m_type);
769 if (mn == (struct mbuf *)NULL) {
770 m_freem(m);
771 return ((struct mbuf *)NULL);
772 }
773 if (m->m_flags & M_PKTHDR)
774 M_MOVE_PKTHDR(mn, m);
775 mn->m_next = m;
776 m = mn;
777 if (len < MHLEN)
778 MH_ALIGN(m, len);
779 m->m_len = len;
780 return (m);
781}
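/*
 * Usage sketch: callers normally go through the M_PREPEND() macro,
 * which falls back to m_prepend() only when there is no leading space
 * in the first mbuf. Assuming a hypothetical 'struct foohdr' header:
 *
 *	M_PREPEND(m, sizeof(struct foohdr), M_DONTWAIT);
 *	if (m == NULL)
 *		return;		(the original chain was freed on failure)
 *	fh = mtod(m, struct foohdr *);
 *
 * Note that m_prepend() frees the chain itself when the allocation
 * fails, so the caller must not touch it afterwards.
 */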
782
783/*
784 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
785 * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
786 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
787 * Note that the copy is read-only, because clusters are not copied,
788 * only their reference counts are incremented.
789 */
790#define MCFail (mbstat.m_mcfail)
791
792struct mbuf *
793m_copym(m, off0, len, wait)
794 register struct mbuf *m;
795 int off0, wait;
796 register int len;
797{
798 register struct mbuf *n, **np;
799 register int off = off0;
800 struct mbuf *top;
801 int copyhdr = 0;
802
803 KASSERT(off >= 0, ("m_copym, negative off %d", off));
804 KASSERT(len >= 0, ("m_copym, negative len %d", len));
805 if (off == 0 && m->m_flags & M_PKTHDR)
806 copyhdr = 1;
807 while (off > 0) {
808 KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
809 if (off < m->m_len)
810 break;
811 off -= m->m_len;
812 m = m->m_next;
813 }
814 np = &top;
815 top = 0;
816 while (len > 0) {
817 if (m == 0) {
818 KASSERT(len == M_COPYALL,
819 ("m_copym, length > size of mbuf chain"));
820 break;
821 }
822 MGET(n, wait, m->m_type);
823 *np = n;
824 if (n == 0)
825 goto nospace;
826 if (copyhdr) {
827 if (!m_dup_pkthdr(n, m, wait))
828 goto nospace;
829 if (len == M_COPYALL)
830 n->m_pkthdr.len -= off0;
831 else
832 n->m_pkthdr.len = len;
833 copyhdr = 0;
834 }
835 n->m_len = min(len, m->m_len - off);
836 if (m->m_flags & M_EXT) {
837 n->m_data = m->m_data + off;
838 if (m->m_ext.ext_ref == NULL) {
839 atomic_add_char(
840 &mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);
841 } else {
842 int s = splimp();
843
844 (*m->m_ext.ext_ref)(m->m_ext.ext_buf,
845 m->m_ext.ext_size);
846 splx(s);
847 }
848 n->m_ext = m->m_ext;
849 n->m_flags |= M_EXT;
850 } else
851 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
852 (unsigned)n->m_len);
853 if (len != M_COPYALL)
854 len -= n->m_len;
855 off = 0;
856 m = m->m_next;
857 np = &n->m_next;
858 }
859 if (top == 0)
860 MCFail++;
861 return (top);
862nospace:
863 m_freem(top);
864 MCFail++;
865 return (0);
866}
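/*
 * Usage sketch for m_copym(), e.g. a hypothetical tap that wants a
 * read-only copy of an entire packet chain 'm':
 *
 *	struct mbuf *n;
 *
 *	n = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
 *	if (n == NULL)
 *		return;		(copy failed, original chain untouched)
 *
 * Cluster data is shared by reference, so 'n' must be treated as
 * read-only; use m_dup() below when a writable copy is required.
 */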
867
868/*
869 * Copy an entire packet, including header (which must be present).
870 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
871 * Note that the copy is read-only, because clusters are not copied,
872 * only their reference counts are incremented.
873 * Preserve alignment of the first mbuf so if the creator has left
874 * some room at the beginning (e.g. for inserting protocol headers)
875 * the copies also have the room available.
876 */
877struct mbuf *
878m_copypacket(m, how)
879 struct mbuf *m;
880 int how;
881{
882 struct mbuf *top, *n, *o;
883
884 MGET(n, how, m->m_type);
885 top = n;
886 if (!n)
887 goto nospace;
888
889 if (!m_dup_pkthdr(n, m, how))
890 goto nospace;
891 n->m_len = m->m_len;
892 if (m->m_flags & M_EXT) {
893 n->m_data = m->m_data;
894 if (m->m_ext.ext_ref == NULL)
895 atomic_add_char(&mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);
896 else {
897 int s = splimp();
898
899 (*m->m_ext.ext_ref)(m->m_ext.ext_buf,
900 m->m_ext.ext_size);
901 splx(s);
902 }
903 n->m_ext = m->m_ext;
904 n->m_flags |= M_EXT;
905 } else {
906 n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat );
907 bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
908 }
909
910 m = m->m_next;
911 while (m) {
912 MGET(o, how, m->m_type);
913 if (!o)
914 goto nospace;
915
916 n->m_next = o;
917 n = n->m_next;
918
919 n->m_len = m->m_len;
920 if (m->m_flags & M_EXT) {
921 n->m_data = m->m_data;
922 if (m->m_ext.ext_ref == NULL) {
923 atomic_add_char(
924 &mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);
925 } else {
926 int s = splimp();
927
928 (*m->m_ext.ext_ref)(m->m_ext.ext_buf,
929 m->m_ext.ext_size);
930 splx(s);
931 }
932 n->m_ext = m->m_ext;
933 n->m_flags |= M_EXT;
934 } else {
935 bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
936 }
937
938 m = m->m_next;
939 }
940 return top;
941nospace:
942 m_freem(top);
943 MCFail++;
944 return 0;
945}
946
947/*
948 * Copy data from an mbuf chain starting "off" bytes from the beginning,
949 * continuing for "len" bytes, into the indicated buffer.
950 */
951void
952m_copydata(m, off, len, cp)
953 register struct mbuf *m;
954 register int off;
955 register int len;
956 caddr_t cp;
957{
958 register unsigned count;
959
960 KASSERT(off >= 0, ("m_copydata, negative off %d", off));
961 KASSERT(len >= 0, ("m_copydata, negative len %d", len));
962 while (off > 0) {
963 KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
964 if (off < m->m_len)
965 break;
966 off -= m->m_len;
967 m = m->m_next;
968 }
969 while (len > 0) {
970 KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
971 count = min(m->m_len - off, len);
972 bcopy(mtod(m, caddr_t) + off, cp, count);
973 len -= count;
974 cp += count;
975 off = 0;
976 m = m->m_next;
977 }
978}
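/*
 * Usage sketch for m_copydata(), assuming a hypothetical protocol that
 * needs a private, aligned copy of a fixed-size header at offset 'off':
 *
 *	struct foohdr fh;
 *
 *	if (m->m_pkthdr.len < off + sizeof(fh))
 *		goto drop;	(chain is shorter than expected)
 *	m_copydata(m, off, sizeof(fh), (caddr_t)&fh);
 *
 * Unlike m_pullup(), this copies out of the chain without modifying it,
 * which is convenient when the header only needs to be inspected.
 */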
979
980/*
981 * Copy a packet header mbuf chain into a completely new chain, including
982 * copying any mbuf clusters. Use this instead of m_copypacket() when
983 * you need a writable copy of an mbuf chain.
984 */
985struct mbuf *
986m_dup(m, how)
987 struct mbuf *m;
988 int how;
989{
990 struct mbuf **p, *top = NULL;
991 int remain, moff, nsize;
992
993 /* Sanity check */
994 if (m == NULL)
995 return (0);
996 KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __FUNCTION__));
997
998 /* While there's more data, get a new mbuf, tack it on, and fill it */
999 remain = m->m_pkthdr.len;
1000 moff = 0;
1001 p = &top;
1002 while (remain > 0 || top == NULL) { /* allow m->m_pkthdr.len == 0 */
1003 struct mbuf *n;
1004
1005 /* Get the next new mbuf */
1006 MGET(n, how, m->m_type);
1007 if (n == NULL)
1008 goto nospace;
1009 if (top == NULL) { /* first one, must be PKTHDR */
1010 if (!m_dup_pkthdr(n, m, how))
1011 goto nospace;
1012 nsize = MHLEN;
1013 } else /* not the first one */
1014 nsize = MLEN;
1015 if (remain >= MINCLSIZE) {
1016 MCLGET(n, how);
1017 if ((n->m_flags & M_EXT) == 0) {
1018 (void)m_free(n);
1019 goto nospace;
1020 }
1021 nsize = MCLBYTES;
1022 }
1023 n->m_len = 0;
1024
1025 /* Link it into the new chain */
1026 *p = n;
1027 p = &n->m_next;
1028
1029 /* Copy data from original mbuf(s) into new mbuf */
1030 while (n->m_len < nsize && m != NULL) {
1031 int chunk = min(nsize - n->m_len, m->m_len - moff);
1032
1033 bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
1034 moff += chunk;
1035 n->m_len += chunk;
1036 remain -= chunk;
1037 if (moff == m->m_len) {
1038 m = m->m_next;
1039 moff = 0;
1040 }
1041 }
1042
1043 /* Check correct total mbuf length */
1044 KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
1045 ("%s: bogus m_pkthdr.len", __FUNCTION__));
1046 }
1047 return (top);
1048
1049nospace:
1050 m_freem(top);
1051 MCFail++;
1052 return (0);
1053}
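/*
 * Usage sketch for m_dup(): a hypothetical path that must modify packet
 * data it received through a read-only copy can take a deep, writable
 * copy first:
 *
 *	struct mbuf *w;
 *
 *	w = m_dup(m, M_DONTWAIT);
 *	if (w == NULL)
 *		goto drop;
 *	m_freem(m);		(the original is no longer needed)
 *	m = w;
 *
 * All cluster data is copied, so this is considerably more expensive
 * than m_copym()/m_copypacket().
 */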
1054
1055/*
1056 * Concatenate mbuf chain n to m.
1057 * Both chains must be of the same type (e.g. MT_DATA).
1058 * Any m_pkthdr is not updated.
1059 */
1060void
1061m_cat(m, n)
1062 register struct mbuf *m, *n;
1063{
1064 while (m->m_next)
1065 m = m->m_next;
1066 while (n) {
1067 if (m->m_flags & M_EXT ||
1068 m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
1069 /* just join the two chains */
1070 m->m_next = n;
1071 return;
1072 }
1073 /* splat the data from one into the other */
1074 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
1075 (u_int)n->m_len);
1076 m->m_len += n->m_len;
1077 n = m_free(n);
1078 }
1079}
1080
1081void
1082m_adj(mp, req_len)
1083 struct mbuf *mp;
1084 int req_len;
1085{
1086 register int len = req_len;
1087 register struct mbuf *m;
1088 register int count;
1089
1090 if ((m = mp) == NULL)
1091 return;
1092 if (len >= 0) {
1093 /*
1094 * Trim from head.
1095 */
1096 while (m != NULL && len > 0) {
1097 if (m->m_len <= len) {
1098 len -= m->m_len;
1099 m->m_len = 0;
1100 m = m->m_next;
1101 } else {
1102 m->m_len -= len;
1103 m->m_data += len;
1104 len = 0;
1105 }
1106 }
1107 m = mp;
1108 if (mp->m_flags & M_PKTHDR)
1109 m->m_pkthdr.len -= (req_len - len);
1110 } else {
1111 /*
1112 * Trim from tail. Scan the mbuf chain,
1113 * calculating its length and finding the last mbuf.
1114 * If the adjustment only affects this mbuf, then just
1115 * adjust and return. Otherwise, rescan and truncate
1116 * after the remaining size.
1117 */
1118 len = -len;
1119 count = 0;
1120 for (;;) {
1121 count += m->m_len;
1122 if (m->m_next == (struct mbuf *)0)
1123 break;
1124 m = m->m_next;
1125 }
1126 if (m->m_len >= len) {
1127 m->m_len -= len;
1128 if (mp->m_flags & M_PKTHDR)
1129 mp->m_pkthdr.len -= len;
1130 return;
1131 }
1132 count -= len;
1133 if (count < 0)
1134 count = 0;
1135 /*
1136 * Correct length for chain is "count".
1137 * Find the mbuf with last data, adjust its length,
1138 * and toss data from remaining mbufs on chain.
1139 */
1140 m = mp;
1141 if (m->m_flags & M_PKTHDR)
1142 m->m_pkthdr.len = count;
1143 for (; m; m = m->m_next) {
1144 if (m->m_len >= count) {
1145 m->m_len = count;
1146 break;
1147 }
1148 count -= m->m_len;
1149 }
1150 while (m->m_next)
1151 (m = m->m_next)->m_len = 0;
1152 }
1153}
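/*
 * Usage sketch for m_adj(): a positive length trims from the head of
 * the chain, a negative length trims from the tail. A hypothetical
 * Ethernet-style driver stripping a 14-byte link header and a 4-byte
 * trailing CRC could do:
 *
 *	m_adj(m, 14);		(drop the link-level header)
 *	m_adj(m, -4);		(drop the trailing CRC)
 *
 * m_pkthdr.len is kept in sync when the first mbuf has M_PKTHDR set.
 */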
1154
1155/*
1156 * Rearrange an mbuf chain so that len bytes are contiguous
1157 * and in the data area of an mbuf (so that mtod and dtom
1158 * will work for a structure of size len). Returns the resulting
1159 * mbuf chain on success, frees it and returns null on failure.
1160 * If there is room, it will add up to max_protohdr-len extra bytes to the
1161 * contiguous region in an attempt to avoid being called next time.
1162 */
1163#define MPFail (mbstat.m_mpfail)
1164
1165struct mbuf *
1166m_pullup(n, len)
1167 register struct mbuf *n;
1168 int len;
1169{
1170 register struct mbuf *m;
1171 register int count;
1172 int space;
1173
1174 /*
1175 * If first mbuf has no cluster, and has room for len bytes
1176 * without shifting current data, pullup into it,
1177 * otherwise allocate a new mbuf to prepend to the chain.
1178 */
1179 if ((n->m_flags & M_EXT) == 0 &&
1180 n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
1181 if (n->m_len >= len)
1182 return (n);
1183 m = n;
1184 n = n->m_next;
1185 len -= m->m_len;
1186 } else {
1187 if (len > MHLEN)
1188 goto bad;
1189 MGET(m, M_DONTWAIT, n->m_type);
1190 if (m == 0)
1191 goto bad;
1192 m->m_len = 0;
1193 if (n->m_flags & M_PKTHDR)
1194 M_MOVE_PKTHDR(m, n);
1195 }
1196 space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
1197 do {
1198 count = min(min(max(len, max_protohdr), space), n->m_len);
1199 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
1200 (unsigned)count);
1201 len -= count;
1202 m->m_len += count;
1203 n->m_len -= count;
1204 space -= count;
1205 if (n->m_len)
1206 n->m_data += count;
1207 else
1208 n = m_free(n);
1209 } while (len > 0 && n);
1210 if (len > 0) {
1211 (void) m_free(m);
1212 goto bad;
1213 }
1214 m->m_next = n;
1215 return (m);
1216bad:
1217 m_freem(n);
1218 MPFail++;
1219 return (0);
1220}
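/*
 * Usage sketch for m_pullup(), the usual idiom before casting the data
 * pointer to a header structure (here a hypothetical 'struct foohdr'):
 *
 *	if (m->m_len < sizeof(struct foohdr)) {
 *		m = m_pullup(m, sizeof(struct foohdr));
 *		if (m == NULL)
 *			return;	(chain was freed by m_pullup)
 *	}
 *	fh = mtod(m, struct foohdr *);
 *
 * The requested length must fit within an ordinary mbuf (MHLEN here),
 * otherwise the call fails.
 */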
1221
1222/*
1223 * Partition an mbuf chain into two pieces, returning the tail --
1224 * all but the first len0 bytes. In case of failure, it returns NULL and
1225 * attempts to restore the chain to its original state.
1226 *
1227 * Note that the resulting mbufs might be read-only, because the new
1228 * mbuf can end up sharing an mbuf cluster with the original mbuf if
1229 * the "breaking point" happens to lie within a cluster mbuf. Use the
1230 * M_WRITABLE() macro to check for this case.
1231 */
1232struct mbuf *
1233m_split(m0, len0, wait)
1234 register struct mbuf *m0;
1235 int len0, wait;
1236{
1237 register struct mbuf *m, *n;
1238 unsigned len = len0, remain;
1239
1240 for (m = m0; m && len > m->m_len; m = m->m_next)
1241 len -= m->m_len;
1242 if (m == 0)
1243 return (0);
1244 remain = m->m_len - len;
1245 if (m0->m_flags & M_PKTHDR) {
1246 MGETHDR(n, wait, m0->m_type);
1247 if (n == 0)
1248 return (0);
1249 n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
1250 n->m_pkthdr.len = m0->m_pkthdr.len - len0;
1251 m0->m_pkthdr.len = len0;
1252 if (m->m_flags & M_EXT)
1253 goto extpacket;
1254 if (remain > MHLEN) {
1255 /* m can't be the lead packet */
1256 MH_ALIGN(n, 0);
1257 n->m_next = m_split(m, len, wait);
1258 if (n->m_next == 0) {
1259 (void) m_free(n);
1260 return (0);
1261 } else {
1262 n->m_len = 0;
1263 return (n);
1264 }
1265 } else
1266 MH_ALIGN(n, remain);
1267 } else if (remain == 0) {
1268 n = m->m_next;
1269 m->m_next = 0;
1270 return (n);
1271 } else {
1272 MGET(n, wait, m->m_type);
1273 if (n == 0)
1274 return (0);
1275 M_ALIGN(n, remain);
1276 }
1277extpacket:
1278 if (m->m_flags & M_EXT) {
1279 n->m_flags |= M_EXT;
1280 n->m_ext = m->m_ext;
1281 if (m->m_ext.ext_ref == NULL)
1282 atomic_add_char(&mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);
1283 else {
1284 int s = splimp();
1285
1286 (*m->m_ext.ext_ref)(m->m_ext.ext_buf,
1287 m->m_ext.ext_size);
1288 splx(s);
1289 }
1290 n->m_data = m->m_data + len;
1291 } else {
1292 bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
1293 }
1294 n->m_len = remain;
1295 m->m_len = len;
1296 n->m_next = m->m_next;
1297 m->m_next = 0;
1298 return (n);
1299}
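/*
 * Usage sketch for m_split(), e.g. a hypothetical record-oriented
 * protocol cutting the first 'reclen' bytes (reclen smaller than the
 * chain's total length) off a chain 'm':
 *
 *	struct mbuf *rest;
 *
 *	rest = m_split(m, reclen, M_DONTWAIT);
 *	if (rest == NULL)
 *		goto drop;	(allocation failed, 'm' left as it was)
 *
 * Afterwards 'm' holds the first reclen bytes and 'rest' the remainder;
 * clusters may be shared between the two, so check M_WRITABLE() before
 * modifying either piece.
 */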
1300/*
1301 * Routine to copy from device local memory into mbufs.
1302 */
1303struct mbuf *
1304m_devget(buf, totlen, off0, ifp, copy)
1305 char *buf;
1306 int totlen, off0;
1307 struct ifnet *ifp;
1308 void (*copy) __P((char *from, caddr_t to, u_int len));
1309{
1310 register struct mbuf *m;
1311 struct mbuf *top = 0, **mp = &top;
1312 register int off = off0, len;
1313 register char *cp;
1314 char *epkt;
1315
1316 cp = buf;
1317 epkt = cp + totlen;
1318 if (off) {
1319 cp += off + 2 * sizeof(u_short);
1320 totlen -= 2 * sizeof(u_short);
1321 }
1322 MGETHDR(m, M_DONTWAIT, MT_DATA);
1323 if (m == 0)
1324 return (0);
1325 m->m_pkthdr.rcvif = ifp;
1326 m->m_pkthdr.len = totlen;
1327 m->m_len = MHLEN;
1328
1329 while (totlen > 0) {
1330 if (top) {
1331 MGET(m, M_DONTWAIT, MT_DATA);
1332 if (m == 0) {
1333 m_freem(top);
1334 return (0);
1335 }
1336 m->m_len = MLEN;
1337 }
1338 len = min(totlen, epkt - cp);
1339 if (len >= MINCLSIZE) {
1340 MCLGET(m, M_DONTWAIT);
1341 if (m->m_flags & M_EXT)
1342 m->m_len = len = min(len, MCLBYTES);
1343 else
1344 len = m->m_len;
1345 } else {
1346 /*
1347 * Place initial small packet/header at end of mbuf.
1348 */
1349 if (len < m->m_len) {
1350 if (top == 0 && len + max_linkhdr <= m->m_len)
1351 m->m_data += max_linkhdr;
1352 m->m_len = len;
1353 } else
1354 len = m->m_len;
1355 }
1356 if (copy)
1357 copy(cp, mtod(m, caddr_t), (unsigned)len);
1358 else
1359 bcopy(cp, mtod(m, caddr_t), (unsigned)len);
1360 cp += len;
1361 *mp = m;
1362 mp = &m->m_next;
1363 totlen -= len;
1364 if (cp == epkt)
1365 cp = buf;
1366 }
1367 return (top);
1368}
1369
1370/*
1371 * Copy data from a buffer back into the indicated mbuf chain,
1372 * starting "off" bytes from the beginning, extending the mbuf
1373 * chain if necessary.
1374 */
1375void
1376m_copyback(m0, off, len, cp)
1377 struct mbuf *m0;
1378 register int off;
1379 register int len;
1380 caddr_t cp;
1381{
1382 register int mlen;
1383 register struct mbuf *m = m0, *n;
1384 int totlen = 0;
1385
1386 if (m0 == 0)
1387 return;
1388 while (off > (mlen = m->m_len)) {
1389 off -= mlen;
1390 totlen += mlen;
1391 if (m->m_next == 0) {
1392 n = m_getclr(M_DONTWAIT, m->m_type);
1393 if (n == 0)
1394 goto out;
1395 n->m_len = min(MLEN, len + off);
1396 m->m_next = n;
1397 }
1398 m = m->m_next;
1399 }
1400 while (len > 0) {
1401 mlen = min (m->m_len - off, len);
1402 bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
1403 cp += mlen;
1404 len -= mlen;
1405 mlen += off;
1406 off = 0;
1407 totlen += mlen;
1408 if (len == 0)
1409 break;
1410 if (m->m_next == 0) {
1411 n = m_get(M_DONTWAIT, m->m_type);
1412 if (n == 0)
1413 break;
1414 n->m_len = min(MLEN, len);
1415 m->m_next = n;
1416 }
1417 m = m->m_next;
1418 }
1419out: if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
1420 m->m_pkthdr.len = totlen;
1421}
1422
1423void
1424m_print(const struct mbuf *m)
1425{
1426 int len;
1427 const struct mbuf *m2;
1428
1429 len = m->m_pkthdr.len;
1430 m2 = m;
1431 while (len) {
1432 printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
1433 len -= m2->m_len;
1434 m2 = m2->m_next;
1435 }
1436 return;
1437}
1438
1439/*
1440 * "Move" mbuf pkthdr from "from" to "to".
1441 * "from" must have M_PKTHDR set, and "to" must be empty.
1442 */
1443void
1444m_move_pkthdr(struct mbuf *to, struct mbuf *from)
1445{
1446 KASSERT((to->m_flags & M_EXT) == 0, ("m_move_pkthdr: to has cluster"));
1447
1448 to->m_flags = from->m_flags & M_COPYFLAGS;
1449 to->m_data = to->m_pktdat;
1450 to->m_pkthdr = from->m_pkthdr; /* especially tags */
1451 SLIST_INIT(&from->m_pkthdr.tags); /* purge tags from src */
1452 from->m_flags &= ~M_PKTHDR;
1453}
1454
1455/*
1456 * Duplicate "from"'s mbuf pkthdr in "to".
1457 * "from" must have M_PKTHDR set, and "to" must be empty.
1458 * In particular, this does a deep copy of the packet tags.
1459 */
1460int
1461m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
1462{
1463 to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
1464 if ((to->m_flags & M_EXT) == 0)
1465 to->m_data = to->m_pktdat;
1466 to->m_pkthdr = from->m_pkthdr;
1467 SLIST_INIT(&to->m_pkthdr.tags);
1468 return (m_tag_copy_chain(to, from, how));
1469}
1470
1471/*
1472 * Defragment an mbuf chain, returning the shortest possible
1473 * chain of mbufs and clusters. If allocation fails and
1474 * this cannot be completed, NULL will be returned, but
1475 * the passed in chain will be unchanged. Upon success,
1476 * the original chain will be freed, and the new chain
1477 * will be returned.
1478 *
1479 * If a non-packet header is passed in, the original
1480 * mbuf (chain?) will be returned unharmed.
1481 */
1482struct mbuf *
1483m_defrag(struct mbuf *m0, int how)
1484{
1485 struct mbuf *m_new = NULL, *m_final = NULL;
1486 int progress = 0, length;
1487
1488 if (!(m0->m_flags & M_PKTHDR))
1489 return (m0);
1490
1491#ifdef MBUF_STRESS_TEST
1492 if (m_defragrandomfailures) {
1493 int temp = arc4random() & 0xff;
1494 if (temp == 0xba)
1495 goto nospace;
1496 }
1497#endif
1498
1499 if (m0->m_pkthdr.len > MHLEN)
1500 m_final = m_getcl(how, MT_DATA, M_PKTHDR);
1501 else
1502 m_final = m_gethdr(how, MT_DATA);
1503
1504 if (m_final == NULL)
1505 goto nospace;
1506
1507 if (m_dup_pkthdr(m_final, m0, how) == 0)
1508 goto nospace;
1509
1510 m_new = m_final;
1511
1512 while (progress < m0->m_pkthdr.len) {
1513 length = m0->m_pkthdr.len - progress;
1514 if (length > MCLBYTES)
1515 length = MCLBYTES;
1516
1517 if (m_new == NULL) {
1518 if (length > MLEN)
1519 m_new = m_getcl(how, MT_DATA, 0);
1520 else
1521 m_new = m_get(how, MT_DATA);
1522 if (m_new == NULL)
1523 goto nospace;
1524 }
1525
1526 m_copydata(m0, progress, length, mtod(m_new, caddr_t));
1527 progress += length;
1528 m_new->m_len = length;
1529 if (m_new != m_final)
1530 m_cat(m_final, m_new);
1531 m_new = NULL;
1532 }
1533 if (m0->m_next == NULL)
1534 m_defraguseless++;
1535 m_freem(m0);
1536 m0 = m_final;
1537 m_defragpackets++;
1538 m_defragbytes += m0->m_pkthdr.len;
1539 return (m0);
1540nospace:
1541 m_defragfailure++;
1542 if (m_new)
1543 m_free(m_new);
1544 if (m_final)
1545 m_freem(m_final);
1546 return (NULL);
1547}
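/*
 * Usage sketch for m_defrag(), e.g. a hypothetical transmit path whose
 * hardware only supports a limited number of DMA segments:
 *
 *	if (m_chain_is_too_long) {		(driver-specific test)
 *		struct mbuf *d;
 *
 *		d = m_defrag(m, M_DONTWAIT);
 *		if (d == NULL) {
 *			m_freem(m);		(give up on this packet)
 *			return;
 *		}
 *		m = d;				(original chain already freed)
 *	}
 *
 * On success m_defrag() frees the old chain itself; on failure the old
 * chain is left untouched, so the caller decides whether to drop it.
 */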