1/*
2 * Copyright (c) 1982, 1986, 1988, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94
34 * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
35 * $DragonFly: src/sys/kern/uipc_mbuf.c,v 1.12 2003/08/26 21:09:02 rob Exp $
36 */
37
38#include "opt_param.h"
39#include "opt_mbuf_stress_test.h"
40#include <sys/param.h>
41#include <sys/systm.h>
42#include <sys/malloc.h>
43#include <sys/mbuf.h>
44#include <sys/kernel.h>
45#include <sys/sysctl.h>
46#include <sys/domain.h>
47#include <sys/protosw.h>
48#include <sys/thread.h>
49#include <sys/globaldata.h>
50
51#include <vm/vm.h>
52#include <vm/vm_kern.h>
53#include <vm/vm_extern.h>
54
55#ifdef INVARIANTS
56#include <machine/cpu.h>
57#endif
58
59static void mbinit (void *);
60SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)
61
62struct mbuf *mbutl;
63char *mclrefcnt;
64struct mbstat mbstat;
65u_long mbtypes[MT_NTYPES];
66struct mbuf *mmbfree;
67union mcluster *mclfree;
68int max_linkhdr;
69int max_protohdr;
70int max_hdr;
71int max_datalen;
72int m_defragpackets;
73int m_defragbytes;
74int m_defraguseless;
75int m_defragfailure;
76#ifdef MBUF_STRESS_TEST
77int m_defragrandomfailures;
78#endif
79
80int nmbclusters;
81int nmbufs;
82u_int m_mballoc_wid = 0;
83u_int m_clalloc_wid = 0;
84
85SYSCTL_DECL(_kern_ipc);
86SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
87 &max_linkhdr, 0, "");
88SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
89 &max_protohdr, 0, "");
90SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
91SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
92 &max_datalen, 0, "");
93SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
94 &mbuf_wait, 0, "");
95SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RW, &mbstat, mbstat, "");
96SYSCTL_OPAQUE(_kern_ipc, OID_AUTO, mbtypes, CTLFLAG_RD, mbtypes,
97 sizeof(mbtypes), "LU", "");
98SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
99 &nmbclusters, 0, "Maximum number of mbuf clusters available");
100SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
101 "Maximum number of mbufs available");
102SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
103 &m_defragpackets, 0, "");
104SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
105 &m_defragbytes, 0, "");
106SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
107 &m_defraguseless, 0, "");
108SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
109 &m_defragfailure, 0, "");
110#ifdef MBUF_STRESS_TEST
111SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
112 &m_defragrandomfailures, 0, "");
113#endif
114
115static void m_reclaim (void);
116
117#ifndef NMBCLUSTERS
118#define NMBCLUSTERS (512 + maxusers * 16)
119#endif
120#ifndef NMBUFS
121#define NMBUFS (nmbclusters * 4)
122#endif
123
124/*
125 * Perform sanity checks of tunables declared above.
126 */
127static void
128tunable_mbinit(void *dummy)
129{
130
131 /*
132 * This has to be done before VM init.
133 */
134 nmbclusters = NMBCLUSTERS;
135 TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
136 nmbufs = NMBUFS;
137 TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
138 /* Sanity checks */
139 if (nmbufs < nmbclusters * 2)
140 nmbufs = nmbclusters * 2;
141
142 return;
143}
144SYSINIT(tunable_mbinit, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_mbinit, NULL);
145
146/* "number of clusters of pages" */
147#define NCL_INIT 1
148
149#define NMB_INIT 16
150
151/* ARGSUSED*/
152static void
153mbinit(dummy)
154 void *dummy;
155{
156 int s;
157
158 mmbfree = NULL; mclfree = NULL;
159 mbstat.m_msize = MSIZE;
160 mbstat.m_mclbytes = MCLBYTES;
161 mbstat.m_minclsize = MINCLSIZE;
162 mbstat.m_mlen = MLEN;
163 mbstat.m_mhlen = MHLEN;
164
165 s = splimp();
166 if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
167 goto bad;
168#if MCLBYTES <= PAGE_SIZE
169 if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
170 goto bad;
171#else
172 /* It's OK to call contigmalloc in this context. */
173 if (m_clalloc(16, M_WAIT) == 0)
174 goto bad;
175#endif
176 splx(s);
177 return;
178bad:
179 panic("mbinit");
180}
181
182/*
183 * Allocate at least nmb mbufs and place on mbuf free list.
184 * Must be called at splimp.
185 */
186/* ARGSUSED */
187int
188m_mballoc(nmb, how)
189 int nmb;
190 int how;
191{
192 caddr_t p;
193 int i;
194 int nbytes;
195
196 /*
197 * If we've hit the mbuf limit, stop allocating from mb_map,
198 * (or trying to) in order to avoid dipping into the section of
199 * mb_map which we've "reserved" for clusters.
200 */
201 if ((nmb + mbstat.m_mbufs) > nmbufs)
202 return (0);
203
204 /*
205 * Once we run out of map space, it will be impossible to get
206 * any more (nothing is ever freed back to the map)
207 * -- however you are not dead as m_reclaim might
208 * still be able to free a substantial amount of space.
209 *
210 * XXX Furthermore, we can also work with "recycled" mbufs (when
211 * we're calling with M_WAIT the sleep procedure will be woken
212 * up when an mbuf is freed. See m_mballoc_wait()).
213 */
214 if (mb_map_full)
215 return (0);
216
217 nbytes = round_page(nmb * MSIZE);
218 p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT);
219 if (p == 0 && how == M_WAIT) {
220 mbstat.m_wait++;
221 p = (caddr_t)kmem_malloc(mb_map, nbytes, M_WAITOK);
222 }
223
224 /*
225 * Either the map is now full, or `how' is M_NOWAIT and there
226 * are no pages left.
227 */
228 if (p == NULL)
229 return (0);
230
231 nmb = nbytes / MSIZE;
232 for (i = 0; i < nmb; i++) {
233 ((struct mbuf *)p)->m_next = mmbfree;
234 mmbfree = (struct mbuf *)p;
235 p += MSIZE;
236 }
237 mbstat.m_mbufs += nmb;
238 mbtypes[MT_FREE] += nmb;
239 return (1);
240}
241
242/*
243 * Once the mb_map has been exhausted and if the call to the allocation macros
244 * (or, in some cases, functions) is with M_WAIT, then it is necessary to rely
245 * solely on reclaimed mbufs. Here we wait for an mbuf to be freed for a
246 * designated (mbuf_wait) time.
247 */
248struct mbuf *
249m_mballoc_wait(int caller, int type)
250{
251 struct mbuf *p;
252 int s;
253
254 s = splimp();
255 m_mballoc_wid++;
256 if ((tsleep(&m_mballoc_wid, 0, "mballc", mbuf_wait)) == EWOULDBLOCK)
257 m_mballoc_wid--;
258 splx(s);
259
260 /*
261 * Now that we (think) we've got something, we will redo an
262 * MGET, but avoid getting into another instance of m_mballoc_wait()
263 * XXX: We retry to fetch _even_ if the sleep timed out. This is left
264 * this way, purposely, in the [unlikely] case that an mbuf was
265 * freed but the sleep was not awakened in time.
266 */
267 p = NULL;
268 switch (caller) {
269 case MGET_C:
270 MGET(p, M_DONTWAIT, type);
271 break;
272 case MGETHDR_C:
273 MGETHDR(p, M_DONTWAIT, type);
274 break;
275 default:
276 panic("m_mballoc_wait: invalid caller (%d)", caller);
277 }
278
279 s = splimp();
280 if (p != NULL) { /* We waited and got something... */
281 mbstat.m_wait++;
282 /* Wake up another if we have more free. */
283 if (mmbfree != NULL)
284 MMBWAKEUP();
285 }
286 splx(s);
287 return (p);
288}
289
290#if MCLBYTES > PAGE_SIZE
291static int i_want_my_mcl;
292
293static void
294kproc_mclalloc(void)
295{
296 int status;
297
298 while (1) {
299 tsleep(&i_want_my_mcl, 0, "mclalloc", 0);
300
301 for (; i_want_my_mcl; i_want_my_mcl--) {
302 if (m_clalloc(1, M_WAIT) == 0)
303 printf("m_clalloc failed even in process context!\n");
304 }
305 }
306}
307
308static struct thread *mclallocthread;
309static struct kproc_desc mclalloc_kp = {
310 "mclalloc",
311 kproc_mclalloc,
312 &mclallocthread
313};
314SYSINIT(mclallocthread, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start,
315 &mclalloc_kp);
316#endif
317
318/*
319 * Allocate some number of mbuf clusters
320 * and place on cluster free list.
321 * Must be called at splimp.
322 */
323/* ARGSUSED */
324int
325m_clalloc(ncl, how)
326 int ncl;
327 int how;
328{
329 caddr_t p;
330 int i;
331 int npg;
332
333 /*
334 * If we've hit the mcluster number limit, stop allocating from
335 * mb_map, (or trying to) in order to avoid dipping into the section
336 * of mb_map which we've "reserved" for mbufs.
337 */
338 if ((ncl + mbstat.m_clusters) > nmbclusters)
339 goto m_clalloc_fail;
340
341 /*
342 * Once we run out of map space, it will be impossible
343 * to get any more (nothing is ever freed back to the
344 * map). From this point on, we solely rely on freed
345 * mclusters.
346 */
347 if (mb_map_full)
348 goto m_clalloc_fail;
349
350#if MCLBYTES > PAGE_SIZE
351 if (how != M_WAIT) {
352 i_want_my_mcl += ncl;
353 wakeup(&i_want_my_mcl);
354 mbstat.m_wait++;
355 p = 0;
356 } else {
357 p = contigmalloc1(MCLBYTES * ncl, M_DEVBUF, M_WAITOK, 0ul,
358 ~0ul, PAGE_SIZE, 0, mb_map);
359 }
360#else
361 npg = ncl;
362 p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
363 how != M_WAIT ? M_NOWAIT : M_WAITOK);
364 ncl = ncl * PAGE_SIZE / MCLBYTES;
365#endif
366 /*
367 * Either the map is now full, or `how' is M_NOWAIT and there
368 * are no pages left.
369 */
370 if (p == NULL) {
371 static int last_report ; /* when we did that (in ticks) */
372m_clalloc_fail:
373 mbstat.m_drops++;
374 if (ticks < last_report || (ticks - last_report) >= hz) {
375 last_report = ticks;
376 printf("All mbuf clusters exhausted, please see tuning(7).\n");
377 }
378 return (0);
379 }
380
381 for (i = 0; i < ncl; i++) {
382 ((union mcluster *)p)->mcl_next = mclfree;
383 mclfree = (union mcluster *)p;
384 p += MCLBYTES;
385 mbstat.m_clfree++;
386 }
387 mbstat.m_clusters += ncl;
388 return (1);
389}
390
391/*
392 * Once the mb_map submap has been exhausted and the allocation is called with
393 * M_WAIT, we rely on the mclfree union pointers. If nothing is free, we will
394 * sleep for a designated amount of time (mbuf_wait) or until we're woken up
395 * due to sudden mcluster availability.
396 */
397caddr_t
398m_clalloc_wait(void)
399{
400 caddr_t p;
401 int s;
402
403 /* If in interrupt context, and INVARIANTS, maintain sanity and die. */
404 KASSERT(mycpu->gd_intr_nesting_level == 0, ("CLALLOC: CANNOT WAIT IN INTERRUPT"));
405
406 /* Sleep until something's available or until we expire. */
407 m_clalloc_wid++;
408 if ((tsleep(&m_clalloc_wid, 0, "mclalc", mbuf_wait)) == EWOULDBLOCK)
409 m_clalloc_wid--;
410
411 /*
412 * Now that we (think) we've got something, we will retry the
413 * allocation, but avoid getting into another instance of m_clalloc_wait()
414 */
415 p = m_mclalloc(M_DONTWAIT);
416
417 s = splimp();
418 if (p != NULL) { /* We waited and got something... */
419 mbstat.m_wait++;
420 /* Wake up another if we have more free. */
421 if (mclfree != NULL)
422 MCLWAKEUP();
423 }
424
425 splx(s);
426 return (p);
427}
428
429/*
430 * When MGET fails, ask protocols to free space when short of memory,
431 * then re-attempt to allocate an mbuf.
432 */
433struct mbuf *
434m_retry(i, t)
435 int i, t;
436{
437 struct mbuf *m;
438 int ms;
439
440 /*
441 * Must only do the reclaim if not in an interrupt context.
442 */
443 if (i == M_WAIT) {
444 KASSERT(mycpu->gd_intr_nesting_level == 0,
445 ("MBALLOC: CANNOT WAIT IN INTERRUPT"));
446 m_reclaim();
447 }
448
449 ms = splimp();
450 if (mmbfree == NULL)
451 (void)m_mballoc(1, i);
452 m = mmbfree;
453 if (m != NULL) {
454 mmbfree = m->m_next;
455 mbtypes[MT_FREE]--;
456 m->m_type = t;
457 mbtypes[t]++;
458 m->m_next = NULL;
459 m->m_nextpkt = NULL;
460 m->m_data = m->m_dat;
461 m->m_flags = 0;
462 splx(ms);
463 mbstat.m_wait++;
464 } else {
465 static int last_report ; /* when we did that (in ticks) */
466
467 splx(ms);
468 mbstat.m_drops++;
469 if (ticks < last_report || (ticks - last_report) >= hz) {
470 last_report = ticks;
471 printf("All mbufs exhausted, please see tuning(7).\n");
472 }
473 }
474
475 return (m);
476}
477
478/*
479 * As above; retry an MGETHDR.
480 */
481struct mbuf *
482m_retryhdr(i, t)
483 int i, t;
484{
485 struct mbuf *m;
486 int ms;
487
488 /*
489 * Must only do the reclaim if not in an interrupt context.
490 */
491 if (i == M_WAIT) {
492 KASSERT(mycpu->gd_intr_nesting_level == 0,
493 ("MBALLOC: CANNOT WAIT IN INTERRUPT"));
494 m_reclaim();
495 }
496
497 ms = splimp();
498 if (mmbfree == NULL)
499 (void)m_mballoc(1, i);
500 m = mmbfree;
501 if (m != NULL) {
502 mmbfree = m->m_next;
503 mbtypes[MT_FREE]--;
504 m->m_type = t;
505 mbtypes[t]++;
506 m->m_next = NULL;
507 m->m_nextpkt = NULL;
508 m->m_data = m->m_pktdat;
509 m->m_flags = M_PKTHDR;
510 m->m_pkthdr.rcvif = NULL;
511 SLIST_INIT(&m->m_pkthdr.tags);
512 m->m_pkthdr.csum_flags = 0;
513 splx(ms);
514 mbstat.m_wait++;
515 } else {
516 static int last_report ; /* when we did that (in ticks) */
517
518 splx(ms);
519 mbstat.m_drops++;
520 if (ticks < last_report || (ticks - last_report) >= hz) {
521 last_report = ticks;
522 printf("All mbufs exhausted, please see tuning(7).\n");
523 }
524 }
525
526 return (m);
527}
528
529static void
530m_reclaim()
531{
532 struct domain *dp;
533 struct protosw *pr;
534 int s = splimp();
535
536 for (dp = domains; dp; dp = dp->dom_next)
537 for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
538 if (pr->pr_drain)
539 (*pr->pr_drain)();
540 splx(s);
541 mbstat.m_drain++;
542}
543
544/*
545 * Space allocation routines.
546 * These are also available as macros
547 * for critical paths.
548 */
549struct mbuf *
550m_get(how, type)
551 int how, type;
552{
553 struct mbuf *m;
554 int ms;
555
556 ms = splimp();
557 if (mmbfree == NULL)
558 (void)m_mballoc(1, how);
559 m = mmbfree;
560 if (m != NULL) {
561 mmbfree = m->m_next;
562 mbtypes[MT_FREE]--;
563 m->m_type = type;
564 mbtypes[type]++;
565 m->m_next = NULL;
566 m->m_nextpkt = NULL;
567 m->m_data = m->m_dat;
568 m->m_flags = 0;
569 splx(ms);
570 } else {
571 splx(ms);
572 m = m_retry(how, type);
573 if (m == NULL && how == M_WAIT)
574 m = m_mballoc_wait(MGET_C, type);
575 }
576 return (m);
577}
578
579struct mbuf *
580m_gethdr(how, type)
581 int how, type;
582{
583 struct mbuf *m;
584 int ms;
585
586 ms = splimp();
587 if (mmbfree == NULL)
588 (void)m_mballoc(1, how);
589 m = mmbfree;
590 if (m != NULL) {
591 mmbfree = m->m_next;
592 mbtypes[MT_FREE]--;
593 m->m_type = type;
594 mbtypes[type]++;
595 m->m_next = NULL;
596 m->m_nextpkt = NULL;
597 m->m_data = m->m_pktdat;
598 m->m_flags = M_PKTHDR;
599 m->m_pkthdr.rcvif = NULL;
600 SLIST_INIT(&m->m_pkthdr.tags);
601 m->m_pkthdr.csum_flags = 0;
602 splx(ms);
603 } else {
604 splx(ms);
605 m = m_retryhdr(how, type);
606 if (m == NULL && how == M_WAIT)
607 m = m_mballoc_wait(MGETHDR_C, type);
608 }
609 return (m);
610}
611
612struct mbuf *
613m_getclr(how, type)
614 int how, type;
615{
616 struct mbuf *m;
617
618 MGET(m, how, type);
619 if (m == 0)
620 return (0);
621 bzero(mtod(m, caddr_t), MLEN);
622 return (m);
623}
624
625/*
626 * m_getcl() returns an mbuf with an attached cluster.
627 * Because many network drivers use this kind of buffer a lot, it is
628 * convenient to keep a small pool of free buffers of this kind.
629 * Even a small size such as 10 gives about 10% improvement in the
630 * forwarding rate in a bridge or router.
631 * The size of this free list is controlled by the sysctl variable
632 * mcl_pool_max. The list is populated on m_freem(), and used in
633 * m_getcl() if elements are available.
634 */
635static struct mbuf *mcl_pool;
636static int mcl_pool_now;
637static int mcl_pool_max = 0;
638
639SYSCTL_INT(_kern_ipc, OID_AUTO, mcl_pool_max, CTLFLAG_RW, &mcl_pool_max, 0,
640 "Maximum number of mbufs+cluster in free list");
641SYSCTL_INT(_kern_ipc, OID_AUTO, mcl_pool_now, CTLFLAG_RD, &mcl_pool_now, 0,
642 "Current number of mbufs+cluster in free list");
643
644struct mbuf *
645m_getcl(int how, short type, int flags)
646{
647 int s = splimp();
648 struct mbuf *mp;
649
650 if (flags & M_PKTHDR) {
651 if (type == MT_DATA && mcl_pool) {
652 mp = mcl_pool;
653 mcl_pool = mp->m_nextpkt;
654 mcl_pool_now--;
655 splx(s);
656 mp->m_nextpkt = NULL;
657 mp->m_data = mp->m_ext.ext_buf;
658 mp->m_flags = M_PKTHDR|M_EXT;
659 mp->m_pkthdr.rcvif = NULL;
660 mp->m_pkthdr.csum_flags = 0;
661 return mp;
662 } else
663 MGETHDR(mp, how, type);
664 } else
665 MGET(mp, how, type);
666 if (mp) {
667 MCLGET(mp, how);
668 if ( (mp->m_flags & M_EXT) == 0) {
669 m_free(mp);
670 mp = NULL;
671 }
672 }
673 splx(s);
674 return mp;
675}
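/*
 * Illustrative usage sketch (not from this file): a typical m_getcl()
 * consumer is a driver refilling its receive ring with mbuf+cluster
 * buffers.  The descriptor layout and names below are hypothetical.
 */
#if 0
static int
example_newbuf(struct example_rxdesc *rxd)
{
	struct mbuf *m;

	/* One call yields an MT_DATA packet-header mbuf with a cluster. */
	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	rxd->rx_mbuf = m;
	rxd->rx_paddr = vtophys(mtod(m, caddr_t));	/* hypothetical field */
	return (0);
}
#endif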
676
677/*
678 * struct mbuf *
679 * m_getm(m, len, how, type)
680 *
681 * This will allocate len-worth of mbufs and/or mbuf clusters (whatever fits
682 * best) and return a pointer to the top of the allocated chain. If m is
683 * non-null, then we assume that it is a single mbuf or an mbuf chain to
684 * which we want len bytes worth of mbufs and/or clusters attached, and so
685 * if we succeed in allocating it, we will just return a pointer to m.
686 *
687 * If we happen to fail at any point during the allocation, we will free
688 * up everything we have already allocated and return NULL.
689 *
690 */
691struct mbuf *
692m_getm(struct mbuf *m, int len, int how, int type)
693{
694 struct mbuf *top, *tail, *mp, *mtail = NULL;
695
696 KASSERT(len >= 0, ("len is < 0 in m_getm"));
697
698 MGET(mp, how, type);
699 if (mp == NULL)
700 return (NULL);
701 else if (len > MINCLSIZE) {
702 MCLGET(mp, how);
703 if ((mp->m_flags & M_EXT) == 0) {
704 m_free(mp);
705 return (NULL);
706 }
707 }
708 mp->m_len = 0;
709 len -= M_TRAILINGSPACE(mp);
710
711 if (m != NULL)
712 for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next);
713 else
714 m = mp;
715
716 top = tail = mp;
717 while (len > 0) {
718 MGET(mp, how, type);
719 if (mp == NULL)
720 goto failed;
721
722 tail->m_next = mp;
723 tail = mp;
724 if (len > MINCLSIZE) {
725 MCLGET(mp, how);
726 if ((mp->m_flags & M_EXT) == 0)
727 goto failed;
728 }
729
730 mp->m_len = 0;
731 len -= M_TRAILINGSPACE(mp);
732 }
733
734 if (mtail != NULL)
735 mtail->m_next = top;
736 return (m);
737
738failed:
739 m_freem(top);
740 return (NULL);
741}
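/*
 * Illustrative usage sketch (not from this file): appending "want" more
 * bytes worth of mbufs/clusters to an existing chain with m_getm().
 * The wrapper name is hypothetical.
 */
#if 0
static struct mbuf *
example_extend_chain(struct mbuf *m, int want)
{
	/*
	 * On success the original head comes back with enough buffers
	 * appended; on failure m_getm() frees what it allocated and
	 * returns NULL, leaving the original chain intact.
	 */
	return (m_getm(m, want, M_DONTWAIT, MT_DATA));
}
#endif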
742
743/*
744 * m_mclalloc() - Allocates an mbuf cluster.
745 */
746caddr_t
747m_mclalloc(int how)
748{
749 caddr_t mp;
750 int s;
751
752 s = splimp();
753
754 if (mclfree == NULL)
755 m_clalloc(1, how);
756 mp = (caddr_t)mclfree;
757 if (mp != NULL) {
758 mclrefcnt[mtocl(mp)]++;
759 mbstat.m_clfree--;
760 mclfree = ((union mcluster *)mp)->mcl_next;
761 splx(s);
762 return(mp);
763 }
764 splx(s);
765 if (how == M_WAIT)
766 return(m_clalloc_wait());
767 return(NULL);
768}
769
770/*
771 * m_mclget() - Adds a cluster to a normal mbuf, M_EXT is set on success.
772 */
773void
774m_mclget(struct mbuf *m, int how)
775{
776 m->m_ext.ext_buf = m_mclalloc(how);
777 if (m->m_ext.ext_buf != NULL) {
778 m->m_data = m->m_ext.ext_buf;
779 m->m_flags |= M_EXT;
780 m->m_ext.ext_free = NULL;
781 m->m_ext.ext_ref = NULL;
782 m->m_ext.ext_size = MCLBYTES;
783 }
784}
785
786static __inline void
787_m_mclfree(caddr_t data)
788{
789 union mcluster *mp = (union mcluster *)data;
790
791 KASSERT(mclrefcnt[mtocl(mp)] > 0, ("freeing free cluster"));
792 if (--mclrefcnt[mtocl(mp)] == 0) {
793 mp->mcl_next = mclfree;
794 mclfree = mp;
795 mbstat.m_clfree++;
796 MCLWAKEUP();
797 }
798}
799
800void
801m_mclfree(caddr_t mp)
802{
803 int s = splimp();
804 _m_mclfree(mp);
805 splx(s);
806}
807
808/*
809 * m_free()
810 *
811 * Free a single mbuf and any associated external storage. The successor,
812 * if any, is returned.
813 *
814 * We do need to check non-first mbufs for m_aux, since some existing
815 * code does not call M_PREPEND properly.
816 * (example: call to bpf_mtap from drivers)
817 */
818struct mbuf *
819m_free(struct mbuf *m)
820{
821 int s;
822 struct mbuf *n;
823
824 s = splimp();
825 KASSERT(m->m_type != MT_FREE, ("freeing free mbuf"));
826 mbtypes[m->m_type]--;
827 if ((m->m_flags & M_PKTHDR) != 0)
828 m_tag_delete_chain(m, NULL);
829 if (m->m_flags & M_EXT) {
830 if (m->m_ext.ext_free != NULL) {
831 m->m_ext.ext_free(m->m_ext.ext_buf, m->m_ext.ext_size);
832 } else {
833 _m_mclfree(m->m_ext.ext_buf); /* inlined */
834 }
835 }
836 n = m->m_next;
837 m->m_type = MT_FREE;
838 mbtypes[MT_FREE]++;
839 m->m_next = mmbfree;
840 mmbfree = m;
841 MMBWAKEUP();
842 splx(s);
843
844 return (n);
845}
846
847void
848m_freem(struct mbuf *m)
849{
850 int s = splimp();
851
852 /*
853 * Try to keep a small pool of mbuf+cluster for quick use in
854 * device drivers. A good candidate is a M_PKTHDR buffer with
855 * only one cluster attached. Other mbufs, or those exceeding
856 * the pool size, are just m_free'd in the usual way.
857 * The following code makes sure that m_next, m_type,
858 * m_pkthdr.aux and m_ext.* are properly initialized.
859 * Other fields in the mbuf are initialized in m_getcl()
860 * upon allocation.
861 */
862 if (mcl_pool_now < mcl_pool_max && m && m->m_next == NULL &&
863 (m->m_flags & (M_PKTHDR|M_EXT)) == (M_PKTHDR|M_EXT) &&
864 m->m_type == MT_DATA && M_EXT_WRITABLE(m) ) {
865 m_tag_delete_chain(m, NULL);
866 m->m_nextpkt = mcl_pool;
867 mcl_pool = m;
868 mcl_pool_now++;
869 } else {
870 while (m)
871 m = m_free(m);
872 }
873 splx(s);
874}
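/*
 * Illustrative usage sketch (not from this file): m_freem() releases an
 * entire packet, e.g. from a hypothetical transmit-completion handler;
 * single-cluster M_PKTHDR packets may be recycled via mcl_pool above.
 */
#if 0
static void
example_txeof(struct example_txdesc *txd)
{
	if (txd->tx_mbuf != NULL) {
		m_freem(txd->tx_mbuf);		/* frees the whole chain */
		txd->tx_mbuf = NULL;
	}
}
#endif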
875
876/*
877 * Mbuffer utility routines.
878 */
879
880/*
881 * Lesser-used path for M_PREPEND:
882 * allocate new mbuf to prepend to chain,
883 * copy junk along.
884 */
885struct mbuf *
886m_prepend(m, len, how)
887 struct mbuf *m;
888 int len, how;
889{
890 struct mbuf *mn;
891
892 MGET(mn, how, m->m_type);
893 if (mn == (struct mbuf *)NULL) {
894 m_freem(m);
895 return ((struct mbuf *)NULL);
896 }
897 if (m->m_flags & M_PKTHDR)
898 M_MOVE_PKTHDR(mn, m);
899 mn->m_next = m;
900 m = mn;
901 if (len < MHLEN)
902 MH_ALIGN(m, len);
903 m->m_len = len;
904 return (m);
905}
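/*
 * Illustrative usage sketch (not from this file): callers normally reach
 * m_prepend() through the M_PREPEND() macro when pushing a protocol
 * header in front of existing data.  struct example_hdr is hypothetical.
 */
#if 0
static struct mbuf *
example_push_header(struct mbuf *m)
{
	struct example_hdr *eh;

	M_PREPEND(m, sizeof(*eh), M_DONTWAIT);
	if (m == NULL)			/* chain was freed on failure */
		return (NULL);
	eh = mtod(m, struct example_hdr *);
	bzero(eh, sizeof(*eh));
	return (m);
}
#endif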
906
907/*
908 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
909 * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
910 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
911 * Note that the copy is read-only, because clusters are not copied,
912 * only their reference counts are incremented.
913 */
914#define MCFail (mbstat.m_mcfail)
915
916struct mbuf *
917m_copym(m, off0, len, wait)
918 const struct mbuf *m;
919 int off0, wait;
920 int len;
921{
922 struct mbuf *n, **np;
923 int off = off0;
924 struct mbuf *top;
925 int copyhdr = 0;
926
927 KASSERT(off >= 0, ("m_copym, negative off %d", off));
928 KASSERT(len >= 0, ("m_copym, negative len %d", len));
929 if (off == 0 && m->m_flags & M_PKTHDR)
930 copyhdr = 1;
931 while (off > 0) {
932 KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
933 if (off < m->m_len)
934 break;
935 off -= m->m_len;
936 m = m->m_next;
937 }
938 np = &top;
939 top = 0;
940 while (len > 0) {
941 if (m == 0) {
942 KASSERT(len == M_COPYALL,
943 ("m_copym, length > size of mbuf chain"));
944 break;
945 }
946 MGET(n, wait, m->m_type);
947 *np = n;
948 if (n == 0)
949 goto nospace;
950 if (copyhdr) {
951 if (!m_dup_pkthdr(n, m, wait))
952 goto nospace;
953 if (len == M_COPYALL)
954 n->m_pkthdr.len -= off0;
955 else
956 n->m_pkthdr.len = len;
957 copyhdr = 0;
958 }
959 n->m_len = min(len, m->m_len - off);
960 if (m->m_flags & M_EXT) {
961 n->m_data = m->m_data + off;
962 if (m->m_ext.ext_ref == NULL) {
963 atomic_add_char(
964 &mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);
965 } else {
966 int s = splimp();
967
968 (*m->m_ext.ext_ref)(m->m_ext.ext_buf,
969 m->m_ext.ext_size);
970 splx(s);
971 }
972 n->m_ext = m->m_ext;
973 n->m_flags |= M_EXT;
974 } else
975 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
976 (unsigned)n->m_len);
977 if (len != M_COPYALL)
978 len -= n->m_len;
979 off = 0;
980 m = m->m_next;
981 np = &n->m_next;
982 }
983 if (top == 0)
984 MCFail++;
985 return (top);
986nospace:
987 m_freem(top);
988 MCFail++;
989 return (0);
990}
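/*
 * Illustrative usage sketch (not from this file): m_copym() hands out a
 * cheap read-only copy (clusters are shared by reference count), e.g. to
 * keep the original queued while passing a copy to another consumer.
 */
#if 0
static void
example_tap(struct mbuf *m, void (*deliver)(struct mbuf *))
{
	struct mbuf *n;

	n = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
	if (n != NULL)
		deliver(n);		/* receiver must not write into it */
}
#endif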
991
992/*
993 * Copy an entire packet, including header (which must be present).
994 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
995 * Note that the copy is read-only, because clusters are not copied,
996 * only their reference counts are incremented.
997 * Preserve alignment of the first mbuf so if the creator has left
998 * some room at the beginning (e.g. for inserting protocol headers)
999 * the copies also have the room available.
1000 */
1001struct mbuf *
1002m_copypacket(m, how)
1003 struct mbuf *m;
1004 int how;
1005{
1006 struct mbuf *top, *n, *o;
1007
1008 MGET(n, how, m->m_type);
1009 top = n;
1010 if (!n)
1011 goto nospace;
1012
1013 if (!m_dup_pkthdr(n, m, how))
1014 goto nospace;
1015 n->m_len = m->m_len;
1016 if (m->m_flags & M_EXT) {
1017 n->m_data = m->m_data;
1018 if (m->m_ext.ext_ref == NULL)
1019 atomic_add_char(&mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);
1020 else {
1021 int s = splimp();
1022
1023 (*m->m_ext.ext_ref)(m->m_ext.ext_buf,
1024 m->m_ext.ext_size);
1025 splx(s);
1026 }
1027 n->m_ext = m->m_ext;
1028 n->m_flags |= M_EXT;
1029 } else {
1030 n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat );
1031 bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
1032 }
1033
1034 m = m->m_next;
1035 while (m) {
1036 MGET(o, how, m->m_type);
1037 if (!o)
1038 goto nospace;
1039
1040 n->m_next = o;
1041 n = n->m_next;
1042
1043 n->m_len = m->m_len;
1044 if (m->m_flags & M_EXT) {
1045 n->m_data = m->m_data;
1046 if (m->m_ext.ext_ref == NULL) {
1047 atomic_add_char(
1048 &mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);
1049 } else {
1050 int s = splimp();
1051
1052 (*m->m_ext.ext_ref)(m->m_ext.ext_buf,
1053 m->m_ext.ext_size);
1054 splx(s);
1055 }
1056 n->m_ext = m->m_ext;
1057 n->m_flags |= M_EXT;
1058 } else {
1059 bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
1060 }
1061
1062 m = m->m_next;
1063 }
1064 return top;
1065nospace:
1066 m_freem(top);
1067 MCFail++;
1068 return 0;
1069}
1070
1071/*
1072 * Copy data from an mbuf chain starting "off" bytes from the beginning,
1073 * continuing for "len" bytes, into the indicated buffer.
1074 */
1075void
1076m_copydata(m, off, len, cp)
1077 const struct mbuf *m;
1078 int off;
1079 int len;
1080 caddr_t cp;
1081{
1082 unsigned count;
1083
1084 KASSERT(off >= 0, ("m_copydata, negative off %d", off));
1085 KASSERT(len >= 0, ("m_copydata, negative len %d", len));
1086 while (off > 0) {
1087 KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
1088 if (off < m->m_len)
1089 break;
1090 off -= m->m_len;
1091 m = m->m_next;
1092 }
1093 while (len > 0) {
1094 KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
1095 count = min(m->m_len - off, len);
1096 bcopy(mtod(m, caddr_t) + off, cp, count);
1097 len -= count;
1098 cp += count;
1099 off = 0;
1100 m = m->m_next;
1101 }
1102}
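/*
 * Illustrative usage sketch (not from this file): m_copydata() flattens a
 * region of the chain into a caller buffer, handy for inspecting a header
 * that may span several mbufs without touching the chain itself.
 */
#if 0
static int
example_peek(const struct mbuf *m, int off, void *buf, int len)
{
	if (off + len > m->m_pkthdr.len)
		return (EINVAL);
	m_copydata(m, off, len, (caddr_t)buf);
	return (0);
}
#endif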
1103
1104/*
1105 * Copy a packet header mbuf chain into a completely new chain, including
1106 * copying any mbuf clusters. Use this instead of m_copypacket() when
1107 * you need a writable copy of an mbuf chain.
1108 */
1109struct mbuf *
1110m_dup(m, how)
1111 struct mbuf *m;
1112 int how;
1113{
1114 struct mbuf **p, *top = NULL;
1115 int remain, moff, nsize;
1116
1117 /* Sanity check */
1118 if (m == NULL)
1119 return (0);
1120 KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __FUNCTION__));
1121
1122 /* While there's more data, get a new mbuf, tack it on, and fill it */
1123 remain = m->m_pkthdr.len;
1124 moff = 0;
1125 p = &top;
1126 while (remain > 0 || top == NULL) { /* allow m->m_pkthdr.len == 0 */
1127 struct mbuf *n;
1128
1129 /* Get the next new mbuf */
1130 MGET(n, how, m->m_type);
1131 if (n == NULL)
1132 goto nospace;
1133 if (top == NULL) { /* first one, must be PKTHDR */
1134 if (!m_dup_pkthdr(n, m, how))
1135 goto nospace;
1136 nsize = MHLEN;
1137 } else /* not the first one */
1138 nsize = MLEN;
1139 if (remain >= MINCLSIZE) {
1140 MCLGET(n, how);
1141 if ((n->m_flags & M_EXT) == 0) {
1142 (void)m_free(n);
1143 goto nospace;
1144 }
1145 nsize = MCLBYTES;
1146 }
1147 n->m_len = 0;
1148
1149 /* Link it into the new chain */
1150 *p = n;
1151 p = &n->m_next;
1152
1153 /* Copy data from original mbuf(s) into new mbuf */
1154 while (n->m_len < nsize && m != NULL) {
1155 int chunk = min(nsize - n->m_len, m->m_len - moff);
1156
1157 bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
1158 moff += chunk;
1159 n->m_len += chunk;
1160 remain -= chunk;
1161 if (moff == m->m_len) {
1162 m = m->m_next;
1163 moff = 0;
1164 }
1165 }
1166
1167 /* Check correct total mbuf length */
1168 KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
1169 ("%s: bogus m_pkthdr.len", __FUNCTION__));
1170 }
1171 return (top);
1172
1173nospace:
1174 m_freem(top);
1175 MCFail++;
1176 return (0);
1177}
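/*
 * Illustrative usage sketch (not from this file): use m_dup() when the
 * copy must be writable; m_copym()/m_copypacket() only bump cluster
 * reference counts and so hand back read-only data.
 */
#if 0
static struct mbuf *
example_writable_copy(struct mbuf *m)
{
	return (m_dup(m, M_DONTWAIT));	/* deep copy, clusters included */
}
#endif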
1178
1179/*
1180 * Concatenate mbuf chain n to m.
1181 * Both chains must be of the same type (e.g. MT_DATA).
1182 * Any m_pkthdr is not updated.
1183 */
1184void
1185m_cat(m, n)
1186 struct mbuf *m, *n;
1187{
1188 while (m->m_next)
1189 m = m->m_next;
1190 while (n) {
1191 if (m->m_flags & M_EXT ||
1192 m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
1193 /* just join the two chains */
1194 m->m_next = n;
1195 return;
1196 }
1197 /* splat the data from one into the other */
1198 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
1199 (u_int)n->m_len);
1200 m->m_len += n->m_len;
1201 n = m_free(n);
1202 }
1203}
1204
1205void
1206m_adj(mp, req_len)
1207 struct mbuf *mp;
1208 int req_len;
1209{
1210 int len = req_len;
1211 struct mbuf *m;
1212 int count;
1213
1214 if ((m = mp) == NULL)
1215 return;
1216 if (len >= 0) {
1217 /*
1218 * Trim from head.
1219 */
1220 while (m != NULL && len > 0) {
1221 if (m->m_len <= len) {
1222 len -= m->m_len;
1223 m->m_len = 0;
1224 m = m->m_next;
1225 } else {
1226 m->m_len -= len;
1227 m->m_data += len;
1228 len = 0;
1229 }
1230 }
1231 m = mp;
1232 if (mp->m_flags & M_PKTHDR)
1233 m->m_pkthdr.len -= (req_len - len);
1234 } else {
1235 /*
1236 * Trim from tail. Scan the mbuf chain,
1237 * calculating its length and finding the last mbuf.
1238 * If the adjustment only affects this mbuf, then just
1239 * adjust and return. Otherwise, rescan and truncate
1240 * after the remaining size.
1241 */
1242 len = -len;
1243 count = 0;
1244 for (;;) {
1245 count += m->m_len;
1246 if (m->m_next == (struct mbuf *)0)
1247 break;
1248 m = m->m_next;
1249 }
1250 if (m->m_len >= len) {
1251 m->m_len -= len;
1252 if (mp->m_flags & M_PKTHDR)
1253 mp->m_pkthdr.len -= len;
1254 return;
1255 }
1256 count -= len;
1257 if (count < 0)
1258 count = 0;
1259 /*
1260 * Correct length for chain is "count".
1261 * Find the mbuf with last data, adjust its length,
1262 * and toss data from remaining mbufs on chain.
1263 */
1264 m = mp;
1265 if (m->m_flags & M_PKTHDR)
1266 m->m_pkthdr.len = count;
1267 for (; m; m = m->m_next) {
1268 if (m->m_len >= count) {
1269 m->m_len = count;
1270 break;
1271 }
1272 count -= m->m_len;
1273 }
1274 while (m->m_next)
1275 (m = m->m_next) ->m_len = 0;
1276 }
1277}
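/*
 * Illustrative usage sketch (not from this file): m_adj() trims from the
 * head with a positive count and from the tail with a negative one; the
 * lengths below are examples only.
 */
#if 0
static void
example_trim(struct mbuf *m, int hdrlen, int crclen)
{
	m_adj(m, hdrlen);	/* strip hdrlen bytes from the front */
	m_adj(m, -crclen);	/* strip crclen bytes from the end */
}
#endif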
1278
1279/*
1280 * Rearrange an mbuf chain so that len bytes are contiguous
1281 * and in the data area of an mbuf (so that mtod and dtom
1282 * will work for a structure of size len). Returns the resulting
1283 * mbuf chain on success, frees it and returns null on failure.
1284 * If there is room, it will add up to max_protohdr-len extra bytes to the
1285 * contiguous region in an attempt to avoid being called next time.
1286 */
1287#define MPFail (mbstat.m_mpfail)
1288
1289struct mbuf *
1290m_pullup(n, len)
1291 struct mbuf *n;
1292 int len;
1293{
1294 struct mbuf *m;
1295 int count;
1296 int space;
1297
1298 /*
1299 * If first mbuf has no cluster, and has room for len bytes
1300 * without shifting current data, pullup into it,
1301 * otherwise allocate a new mbuf to prepend to the chain.
1302 */
1303 if ((n->m_flags & M_EXT) == 0 &&
1304 n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
1305 if (n->m_len >= len)
1306 return (n);
1307 m = n;
1308 n = n->m_next;
1309 len -= m->m_len;
1310 } else {
1311 if (len > MHLEN)
1312 goto bad;
1313 MGET(m, M_DONTWAIT, n->m_type);
1314 if (m == 0)
1315 goto bad;
1316 m->m_len = 0;
1317 if (n->m_flags & M_PKTHDR)
1318 M_MOVE_PKTHDR(m, n);
1319 }
1320 space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
1321 do {
1322 count = min(min(max(len, max_protohdr), space), n->m_len);
1323 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
1324 (unsigned)count);
1325 len -= count;
1326 m->m_len += count;
1327 n->m_len -= count;
1328 space -= count;
1329 if (n->m_len)
1330 n->m_data += count;
1331 else
1332 n = m_free(n);
1333 } while (len > 0 && n);
1334 if (len > 0) {
1335 (void) m_free(m);
1336 goto bad;
1337 }
1338 m->m_next = n;
1339 return (m);
1340bad:
1341 m_freem(n);
1342 MPFail++;
1343 return (0);
1344}
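/*
 * Illustrative usage sketch (not from this file): the classic m_pullup()
 * pattern makes a protocol header contiguous before mtod() is applied.
 * struct ip comes from <netinet/ip.h>; the wrapper name is hypothetical.
 */
#if 0
static struct ip *
example_iphdr(struct mbuf **mp)
{
	struct mbuf *m = *mp;

	if (m->m_len < sizeof(struct ip)) {
		m = m_pullup(m, sizeof(struct ip));
		if (m == NULL) {	/* chain was freed on failure */
			*mp = NULL;
			return (NULL);
		}
		*mp = m;
	}
	return (mtod(m, struct ip *));
}
#endif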
1345
1346/*
1347 * Partition an mbuf chain in two pieces, returning the tail --
1348 * all but the first len0 bytes. In case of failure, it returns NULL and
1349 * attempts to restore the chain to its original state.
1350 *
1351 * Note that the resulting mbufs might be read-only, because the new
1352 * mbuf can end up sharing an mbuf cluster with the original mbuf if
1353 * the "breaking point" happens to lie within a cluster mbuf. Use the
1354 * M_WRITABLE() macro to check for this case.
1355 */
1356struct mbuf *
1357m_split(m0, len0, wait)
1358 struct mbuf *m0;
1359 int len0, wait;
1360{
1fd87d54 1361 struct mbuf *m, *n;
984263bc
MD
1362 unsigned len = len0, remain;
1363
1364 for (m = m0; m && len > m->m_len; m = m->m_next)
1365 len -= m->m_len;
1366 if (m == 0)
1367 return (0);
1368 remain = m->m_len - len;
1369 if (m0->m_flags & M_PKTHDR) {
1370 MGETHDR(n, wait, m0->m_type);
1371 if (n == 0)
1372 return (0);
1373 n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
1374 n->m_pkthdr.len = m0->m_pkthdr.len - len0;
1375 m0->m_pkthdr.len = len0;
1376 if (m->m_flags & M_EXT)
1377 goto extpacket;
1378 if (remain > MHLEN) {
1379 /* m can't be the lead packet */
1380 MH_ALIGN(n, 0);
1381 n->m_next = m_split(m, len, wait);
1382 if (n->m_next == 0) {
1383 (void) m_free(n);
1384 return (0);
1385 } else {
1386 n->m_len = 0;
1387 return (n);
1388 }
1389 } else
1390 MH_ALIGN(n, remain);
1391 } else if (remain == 0) {
1392 n = m->m_next;
1393 m->m_next = 0;
1394 return (n);
1395 } else {
1396 MGET(n, wait, m->m_type);
1397 if (n == 0)
1398 return (0);
1399 M_ALIGN(n, remain);
1400 }
1401extpacket:
1402 if (m->m_flags & M_EXT) {
1403 n->m_flags |= M_EXT;
1404 n->m_ext = m->m_ext;
1405 if (m->m_ext.ext_ref == NULL)
1406 atomic_add_char(&mclrefcnt[mtocl(m->m_ext.ext_buf)], 1);
1407 else {
1408 int s = splimp();
1409
1410 (*m->m_ext.ext_ref)(m->m_ext.ext_buf,
1411 m->m_ext.ext_size);
1412 splx(s);
1413 }
1414 n->m_data = m->m_data + len;
1415 } else {
1416 bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
1417 }
1418 n->m_len = remain;
1419 m->m_len = len;
1420 n->m_next = m->m_next;
1421 m->m_next = 0;
1422 return (n);
1423}
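/*
 * Illustrative usage sketch (not from this file): m_split() cuts a packet
 * at a byte offset, e.g. keeping the first "mss" bytes in "m" and getting
 * the remainder back as a separate chain.
 */
#if 0
static struct mbuf *
example_segment(struct mbuf *m, int mss)
{
	struct mbuf *rest;

	rest = m_split(m, mss, M_DONTWAIT);
	/* On failure rest is NULL and m is restored/unchanged. */
	return (rest);
}
#endif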
1424/*
1425 * Routine to copy from device local memory into mbufs.
1426 */
1427struct mbuf *
1428m_devget(buf, totlen, off0, ifp, copy)
1429 char *buf;
1430 int totlen, off0;
1431 struct ifnet *ifp;
1432 void (*copy) (char *from, caddr_t to, u_int len);
1433{
1434 struct mbuf *m;
1435 struct mbuf *top = 0, **mp = &top;
1436 int off = off0, len;
1437 char *cp;
1438 char *epkt;
1439
1440 cp = buf;
1441 epkt = cp + totlen;
1442 if (off) {
1443 cp += off + 2 * sizeof(u_short);
1444 totlen -= 2 * sizeof(u_short);
1445 }
1446 MGETHDR(m, M_DONTWAIT, MT_DATA);
1447 if (m == 0)
1448 return (0);
1449 m->m_pkthdr.rcvif = ifp;
1450 m->m_pkthdr.len = totlen;
1451 m->m_len = MHLEN;
1452
1453 while (totlen > 0) {
1454 if (top) {
1455 MGET(m, M_DONTWAIT, MT_DATA);
1456 if (m == 0) {
1457 m_freem(top);
1458 return (0);
1459 }
1460 m->m_len = MLEN;
1461 }
1462 len = min(totlen, epkt - cp);
1463 if (len >= MINCLSIZE) {
1464 MCLGET(m, M_DONTWAIT);
1465 if (m->m_flags & M_EXT)
1466 m->m_len = len = min(len, MCLBYTES);
1467 else
1468 len = m->m_len;
1469 } else {
1470 /*
1471 * Place initial small packet/header at end of mbuf.
1472 */
1473 if (len < m->m_len) {
1474 if (top == 0 && len + max_linkhdr <= m->m_len)
1475 m->m_data += max_linkhdr;
1476 m->m_len = len;
1477 } else
1478 len = m->m_len;
1479 }
1480 if (copy)
1481 copy(cp, mtod(m, caddr_t), (unsigned)len);
1482 else
1483 bcopy(cp, mtod(m, caddr_t), (unsigned)len);
1484 cp += len;
1485 *mp = m;
1486 mp = &m->m_next;
1487 totlen -= len;
1488 if (cp == epkt)
1489 cp = buf;
1490 }
1491 return (top);
1492}
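/*
 * Illustrative usage sketch (not from this file): a driver whose receive
 * buffers live in device-local memory can use m_devget() to copy a frame
 * into a fresh chain.  The softc, ifnet member and input hand-off are
 * hypothetical.
 */
#if 0
static void
example_rxintr(struct example_softc *sc, char *devbuf, int framelen)
{
	struct mbuf *m;

	m = m_devget(devbuf, framelen, 0, &sc->sc_if, NULL);
	if (m == NULL)
		return;				/* out of mbufs; drop it */
	example_input(&sc->sc_if, m);		/* hypothetical stack entry */
}
#endif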
1493
1494/*
1495 * Copy data from a buffer back into the indicated mbuf chain,
1496 * starting "off" bytes from the beginning, extending the mbuf
1497 * chain if necessary.
1498 */
1499void
1500m_copyback(m0, off, len, cp)
1501 struct mbuf *m0;
1502 int off;
1503 int len;
984263bc
MD
1504 caddr_t cp;
1505{
1fd87d54
RG
1506 int mlen;
1507 struct mbuf *m = m0, *n;
984263bc
MD
1508 int totlen = 0;
1509
1510 if (m0 == 0)
1511 return;
1512 while (off > (mlen = m->m_len)) {
1513 off -= mlen;
1514 totlen += mlen;
1515 if (m->m_next == 0) {
1516 n = m_getclr(M_DONTWAIT, m->m_type);
1517 if (n == 0)
1518 goto out;
1519 n->m_len = min(MLEN, len + off);
1520 m->m_next = n;
1521 }
1522 m = m->m_next;
1523 }
1524 while (len > 0) {
1525 mlen = min (m->m_len - off, len);
1526 bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
1527 cp += mlen;
1528 len -= mlen;
1529 mlen += off;
1530 off = 0;
1531 totlen += mlen;
1532 if (len == 0)
1533 break;
1534 if (m->m_next == 0) {
1535 n = m_get(M_DONTWAIT, m->m_type);
1536 if (n == 0)
1537 break;
1538 n->m_len = min(MLEN, len);
1539 m->m_next = n;
1540 }
1541 m = m->m_next;
1542 }
1543out: if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
1544 m->m_pkthdr.len = totlen;
1545}
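/*
 * Illustrative usage sketch (not from this file): m_copyback() overwrites
 * (and extends, if needed) a chain in place, e.g. to patch a field at a
 * known offset after the packet has been built.
 */
#if 0
static void
example_patch(struct mbuf *m, int off, u_int16_t val)
{
	m_copyback(m, off, sizeof(val), (caddr_t)&val);
}
#endif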
1546
1547void
1548m_print(const struct mbuf *m)
1549{
1550 int len;
1551 const struct mbuf *m2;
1552
1553 len = m->m_pkthdr.len;
1554 m2 = m;
1555 while (len) {
1556 printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
1557 len -= m2->m_len;
1558 m2 = m2->m_next;
1559 }
1560 return;
1561}
1562
1563/*
1564 * "Move" mbuf pkthdr from "from" to "to".
1565 * "from" must have M_PKTHDR set, and "to" must be empty.
1566 */
1567void
1568m_move_pkthdr(struct mbuf *to, struct mbuf *from)
1569{
1570 KASSERT((to->m_flags & M_EXT) == 0, ("m_move_pkthdr: to has cluster"));
1571
1572 to->m_flags = from->m_flags & M_COPYFLAGS;
1573 to->m_data = to->m_pktdat;
1574 to->m_pkthdr = from->m_pkthdr; /* especially tags */
1575 SLIST_INIT(&from->m_pkthdr.tags); /* purge tags from src */
1576 from->m_flags &= ~M_PKTHDR;
1577}
1578
1579/*
1580 * Duplicate "from"'s mbuf pkthdr in "to".
1581 * "from" must have M_PKTHDR set, and "to" must be empty.
1582 * In particular, this does a deep copy of the packet tags.
1583 */
1584int
f15db79e 1585m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
984263bc
MD
1586{
1587 to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
1588 if ((to->m_flags & M_EXT) == 0)
1589 to->m_data = to->m_pktdat;
1590 to->m_pkthdr = from->m_pkthdr;
1591 SLIST_INIT(&to->m_pkthdr.tags);
1592 return (m_tag_copy_chain(to, from, how));
1593}
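/*
 * Illustrative usage sketch (not from this file): when a new first mbuf is
 * needed for a packet, m_dup_pkthdr() carries the header and a deep copy
 * of its tag chain over to it.  The wrapper name is hypothetical.
 */
#if 0
static struct mbuf *
example_new_head(const struct mbuf *m, int how)
{
	struct mbuf *n;

	MGETHDR(n, how, MT_DATA);
	if (n == NULL)
		return (NULL);
	if (!m_dup_pkthdr(n, m, how)) {		/* copies pkthdr + tags */
		m_free(n);
		return (NULL);
	}
	n->m_len = 0;
	return (n);
}
#endif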
1594
1595/*
1596 * Defragment a mbuf chain, returning the shortest possible
1597 * chain of mbufs and clusters. If allocation fails and
1598 * this cannot be completed, NULL will be returned, but
1599 * the passed in chain will be unchanged. Upon success,
1600 * the original chain will be freed, and the new chain
1601 * will be returned.
1602 *
1603 * If a non-packet header is passed in, the original
1604 * mbuf (chain?) will be returned unharmed.
1605 */
1606struct mbuf *
1607m_defrag(struct mbuf *m0, int how)
1608{
1609 struct mbuf *m_new = NULL, *m_final = NULL;
1610 int progress = 0, length;
1611
1612 if (!(m0->m_flags & M_PKTHDR))
1613 return (m0);
1614
1615#ifdef MBUF_STRESS_TEST
1616 if (m_defragrandomfailures) {
1617 int temp = arc4random() & 0xff;
1618 if (temp == 0xba)
1619 goto nospace;
1620 }
1621#endif
1622
1623 if (m0->m_pkthdr.len > MHLEN)
1624 m_final = m_getcl(how, MT_DATA, M_PKTHDR);
1625 else
1626 m_final = m_gethdr(how, MT_DATA);
1627
1628 if (m_final == NULL)
1629 goto nospace;
1630
1631 if (m_dup_pkthdr(m_final, m0, how) == 0)
1632 goto nospace;
1633
1634 m_new = m_final;
1635
1636 while (progress < m0->m_pkthdr.len) {
1637 length = m0->m_pkthdr.len - progress;
1638 if (length > MCLBYTES)
1639 length = MCLBYTES;
1640
1641 if (m_new == NULL) {
1642 if (length > MLEN)
1643 m_new = m_getcl(how, MT_DATA, 0);
1644 else
1645 m_new = m_get(how, MT_DATA);
1646 if (m_new == NULL)
1647 goto nospace;
1648 }
1649
1650 m_copydata(m0, progress, length, mtod(m_new, caddr_t));
1651 progress += length;
1652 m_new->m_len = length;
1653 if (m_new != m_final)
1654 m_cat(m_final, m_new);
1655 m_new = NULL;
1656 }
1657 if (m0->m_next == NULL)
1658 m_defraguseless++;
1659 m_freem(m0);
1660 m0 = m_final;
1661 m_defragpackets++;
1662 m_defragbytes += m0->m_pkthdr.len;
1663 return (m0);
1664nospace:
1665 m_defragfailure++;
1666 if (m_new)
1667 m_free(m_new);
1668 if (m_final)
1669 m_freem(m_final);
1670 return (NULL);
1671}
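/*
 * Illustrative usage sketch (not from this file): the usual m_defrag()
 * caller is a transmit path whose DMA engine handles only a limited number
 * of segments.  The segment counter and limit below are hypothetical.
 */
#if 0
static struct mbuf *
example_tx_fixup(struct mbuf *m)
{
	struct mbuf *n;

	if (example_count_segs(m) <= EXAMPLE_MAX_TX_SEGS)
		return (m);
	n = m_defrag(m, M_DONTWAIT);	/* frees m and returns a new chain */
	if (n == NULL)
		return (m);		/* failure leaves m untouched */
	return (n);
}
#endif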