Remove unused junk from the slab allocator.
sys/kern/uipc_mbuf.c
/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 *
 * License terms: all terms for the DragonFly license above plus the following:
 *
 * 4. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *
 *	This product includes software developed by Jeffrey M. Hsu
 *	for the DragonFly Project.
 *
 *    This requirement may be waived with permission from Jeffrey Hsu.
 *    This requirement will sunset and may be removed on July 8 2005,
 *    after which the standard DragonFly license (as shown above) will
 *    apply.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
 * $DragonFly: src/sys/kern/uipc_mbuf.c,v 1.28 2004/09/19 22:32:47 joerg Exp $
 */

#include "opt_param.h"
#include "opt_mbuf_stress_test.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/uio.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/thread2.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#ifdef INVARIANTS
#include <machine/cpu.h>
#endif

/*
 * mbuf cluster meta-data
 */
typedef struct mbcluster {
	struct mbcluster *mcl_next;
	int32_t	mcl_magic;
	int32_t	mcl_refs;
	void	*mcl_data;
} *mbcluster_t;

typedef struct mbuf *mbuf_t;

#define MCL_MAGIC	0x6d62636c

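/*
 * Reference counting sketch: mcl_refs is 0 while a cluster sits on the
 * mclfree list, becomes 1 when m_mclget() attaches the cluster to an
 * mbuf, and is bumped by m_mclref() for every additional read-only
 * sharer (see m_copym(), m_copypacket() and m_split()).  mcl_magic must
 * read MCL_MAGIC while the cluster is live; m_mclfree() sets it to -1
 * only when the cluster is finally returned to the kernel allocator.
 */
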
static void mbinit(void *);
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)

static u_long	mbtypes[MT_NTYPES];

struct mbstat mbstat;
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;
int	m_defragpackets;
int	m_defragbytes;
int	m_defraguseless;
int	m_defragfailure;
#ifdef MBUF_STRESS_TEST
int	m_defragrandomfailures;
#endif

int	nmbclusters;
int	nmbufs;
u_int	m_mballoc_wid = 0;
u_int	m_clalloc_wid = 0;

SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
	&max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
	&max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
	&max_datalen, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
	&mbuf_wait, 0, "");
SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RW, &mbstat, mbstat, "");
SYSCTL_OPAQUE(_kern_ipc, OID_AUTO, mbtypes, CTLFLAG_RD, mbtypes,
	sizeof(mbtypes), "LU", "");
SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RW,
	&nmbclusters, 0, "Maximum number of mbuf clusters available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RW, &nmbufs, 0,
	"Maximum number of mbufs available");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
	&m_defragpackets, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
	&m_defragbytes, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
	&m_defraguseless, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
	&m_defragfailure, 0, "");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
	&m_defragrandomfailures, 0, "");
#endif

static int mcl_pool_count;
static int mcl_pool_max = 20;
static int mcl_free_max = 1000;
static int mbuf_free_max = 5000;

SYSCTL_INT(_kern_ipc, OID_AUTO, mcl_pool_max, CTLFLAG_RW, &mcl_pool_max, 0,
	"Maximum number of mbuf+cluster pairs on the free list");
SYSCTL_INT(_kern_ipc, OID_AUTO, mcl_pool_count, CTLFLAG_RD, &mcl_pool_count, 0,
	"Current number of mbuf+cluster pairs on the free list");
SYSCTL_INT(_kern_ipc, OID_AUTO, mcl_free_max, CTLFLAG_RW, &mcl_free_max, 0,
	"Maximum number of clusters on the free list");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_free_max, CTLFLAG_RW, &mbuf_free_max, 0,
	"Maximum number of mbufs on the free list");

static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");
static MALLOC_DEFINE(M_MBUFCL, "mbufcl", "mbufcl");

static mbuf_t mmbfree;
static mbcluster_t mclfree;
static struct mbuf *mcl_pool;

static void m_reclaim(void);
static int m_mballoc(int nmb, int how);
static int m_clalloc(int ncl, int how);
static struct mbuf *m_mballoc_wait(int caller, int type);
static void m_mclref(void *arg);
static void m_mclfree(void *arg);

#ifndef NMBCLUSTERS
#define NMBCLUSTERS	(512 + maxusers * 16)
#endif
#ifndef NMBUFS
#define NMBUFS		(nmbclusters * 4)
#endif

/*
 * Perform sanity checks of tunables declared above.
 */
static void
tunable_mbinit(void *dummy)
{
	/*
	 * This has to be done before VM init.
	 */
	nmbclusters = NMBCLUSTERS;
	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	nmbufs = NMBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
	/* Sanity checks */
	if (nmbufs < nmbclusters * 2)
		nmbufs = nmbclusters * 2;
}
SYSINIT(tunable_mbinit, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_mbinit, NULL);
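
/*
 * Tuning sketch (illustrative only; the values shown are hypothetical):
 * the tunables fetched above can be overridden at boot time, e.g. in
 * /boot/loader.conf:
 *
 *	kern.ipc.nmbclusters="65536"
 *	kern.ipc.nmbufs="131072"
 *
 * tunable_mbinit() then clamps nmbufs to at least nmbclusters * 2.
 */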

/* "number of clusters of pages" */
#define NCL_INIT	1

#define NMB_INIT	16

/* ARGSUSED*/
static void
mbinit(void *dummy)
{
	int s;

	mmbfree = NULL;
	mclfree = NULL;
	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;

	s = splimp();
	if (m_mballoc(NMB_INIT, MB_DONTWAIT) == 0)
		goto bad;
#if MCLBYTES <= PAGE_SIZE
	if (m_clalloc(NCL_INIT, MB_DONTWAIT) == 0)
		goto bad;
#else
	/* It's OK to call contigmalloc in this context. */
	if (m_clalloc(16, MB_WAIT) == 0)
		goto bad;
#endif
	splx(s);
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate at least nmb mbufs and place on mbuf free list.
 * Returns the number of mbufs successfully allocated, 0 if none.
 *
 * Must be called at splimp.
 */
static int
m_mballoc(int nmb, int how)
{
	int i;
	struct mbuf *m;

	/*
	 * If we've hit the mbuf limit, stop allocating (or trying to)
	 * in order to avoid exhausting kernel memory entirely.
	 */
	if ((nmb + mbstat.m_mbufs) > nmbufs)
		return (0);

	/*
	 * Attempt to allocate the requested number of mbufs.  Terminate
	 * when an allocation fails, but if blocking is allowed allocate
	 * at least one.
	 */
	for (i = 0; i < nmb; ++i) {
		m = malloc(MSIZE, M_MBUF, M_NOWAIT|M_NULLOK|M_ZERO);
		if (m == NULL) {
			if (how == MB_WAIT) {
				mbstat.m_wait++;
				m = malloc(MSIZE, M_MBUF,
					    M_WAITOK|M_NULLOK|M_ZERO);
			}
			if (m == NULL)
				break;
		}
		m->m_next = mmbfree;
		mmbfree = m;
		++mbstat.m_mbufs;
		++mbtypes[MT_FREE];
		how = MB_DONTWAIT;
	}
	return (i);
}

/*
 * Once mbuf memory has been exhausted, and if the call to the allocation
 * macros (or, in some cases, functions) was made with MB_WAIT, then it is
 * necessary to rely solely on reclaimed mbufs.  Here we wait for an mbuf
 * to be freed for a designated (mbuf_wait) time.
 */
static struct mbuf *
m_mballoc_wait(int caller, int type)
{
	struct mbuf *m;
	int s;

	s = splimp();
	m_mballoc_wid++;
	if ((tsleep(&m_mballoc_wid, 0, "mballc", mbuf_wait)) == EWOULDBLOCK)
		m_mballoc_wid--;
	splx(s);

	/*
	 * Now that we (think) we've got something, we will redo an
	 * MGET, but avoid getting into another instance of m_mballoc_wait().
	 * XXX: We retry to fetch _even_ if the sleep timed out.  This is left
	 * this way, purposely, in the [unlikely] case that an mbuf was
	 * freed but the sleep was not awakened in time.
	 */
	m = NULL;
	switch (caller) {
	case MGET_C:
		MGET(m, MB_DONTWAIT, type);
		break;
	case MGETHDR_C:
		MGETHDR(m, MB_DONTWAIT, type);
		break;
	default:
		panic("m_mballoc_wait: invalid caller (%d)", caller);
	}

	s = splimp();
	if (m != NULL) {		/* We waited and got something... */
		mbstat.m_wait++;
		/* Wake up another if we have more free. */
		if (mmbfree != NULL)
			MMBWAKEUP();
	}
	splx(s);
	return (m);
}

#if MCLBYTES > PAGE_SIZE
static int i_want_my_mcl;

static void
kproc_mclalloc(void)
{
	int status;
	int s;

	s = splimp();
	for (;;) {
		tsleep(&i_want_my_mcl, 0, "mclalloc", 0);

		while (i_want_my_mcl > 0) {
			if (m_clalloc(1, MB_WAIT) == 0)
				printf("m_clalloc failed even in thread context!\n");
			--i_want_my_mcl;
		}
	}
	/* not reached */
	splx(s);
}

static struct thread *mclallocthread;
static struct kproc_desc mclalloc_kp = {
	"mclalloc",
	kproc_mclalloc,
	&mclallocthread
};
SYSINIT(mclallocthread, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start,
	   &mclalloc_kp);
#endif

/*
 * Allocate at least ncl mbuf clusters and place on mbuf free list.
 * Returns the number of mbuf clusters successfully allocated, 0 if none.
 *
 * Must be called at splimp.
 */
static int
m_clalloc(int ncl, int how)
{
	static int last_report;
	mbcluster_t mcl;
	void *data;
	int i;

	/*
	 * If we've hit the mbuf cluster limit, stop allocating (or trying to).
	 */
	if ((ncl + mbstat.m_clusters) > nmbclusters)
		ncl = 0;

	/*
	 * Attempt to allocate the requested number of mbuf clusters.
	 * Terminate when an allocation fails, but if blocking is allowed
	 * allocate at least one.
	 *
	 * We need to allocate two structures for each cluster... a
	 * ref counting / governing structure and the actual data.  MCLBYTES
	 * should be a power of 2 which means that the slab allocator will
	 * return a buffer that does not cross a page boundary.
	 */
	for (i = 0; i < ncl; ++i) {
		/*
		 * Meta structure
		 */
		mcl = malloc(sizeof(*mcl), M_MBUFCL, M_NOWAIT|M_NULLOK|M_ZERO);
		if (mcl == NULL && how == MB_WAIT) {
			mbstat.m_wait++;
			mcl = malloc(sizeof(*mcl),
				    M_MBUFCL, M_WAITOK|M_NULLOK|M_ZERO);
		}

		/*
		 * Physically contiguous data buffer.
		 */
#if MCLBYTES > PAGE_SIZE
		if (how != MB_WAIT) {
			i_want_my_mcl += ncl - i;
			wakeup(&i_want_my_mcl);
			mbstat.m_wait++;
			data = NULL;
		} else {
			data = contigmalloc_map(MCLBYTES, M_MBUFCL,
				M_WAITOK, 0ul, ~0ul, PAGE_SIZE, 0, kernel_map);
		}
#else
		data = malloc(MCLBYTES, M_MBUFCL, M_NOWAIT|M_NULLOK);
		if (data == NULL) {
			if (how == MB_WAIT) {
				mbstat.m_wait++;
				data = malloc(MCLBYTES, M_MBUFCL,
						M_WAITOK|M_NULLOK);
			}
		}
#endif
		if (data == NULL) {
			free(mcl, M_MBUFCL);
			break;
		}
		mcl->mcl_next = mclfree;
		mcl->mcl_data = data;
		mcl->mcl_magic = MCL_MAGIC;
		mcl->mcl_refs = 0;
		mclfree = mcl;
		++mbstat.m_clfree;
		++mbstat.m_clusters;
		how = MB_DONTWAIT;
	}

	/*
	 * If we could not allocate any, report failure no more often
	 * than once a second.
	 */
	if (i == 0) {
		mbstat.m_drops++;
		if (ticks < last_report || (ticks - last_report) >= hz) {
			last_report = ticks;
			printf("All mbuf clusters exhausted, please see tuning(7).\n");
		}
	}
	return (i);
}

/*
 * Once cluster memory has been exhausted and the allocation is called with
 * MB_WAIT, we rely on the mclfree pointers.  If nothing is free, we will
 * sleep for a designated amount of time (mbuf_wait) or until we're woken up
 * due to sudden mbuf cluster availability.
 */
static void
m_clalloc_wait(void)
{
	int s;

	/* If in interrupt context, and INVARIANTS, maintain sanity and die. */
	KASSERT(mycpu->gd_intr_nesting_level == 0,
	    ("CLALLOC: CANNOT WAIT IN INTERRUPT"));

	/*
	 * Sleep until something's available or until we expire.
	 */
	m_clalloc_wid++;
	if ((tsleep(&m_clalloc_wid, 0, "mclalc", mbuf_wait)) == EWOULDBLOCK)
		m_clalloc_wid--;

	/*
	 * Try the allocation once more, and if we see more than two
	 * free entries, wake up others as well.
	 */
	m_clalloc(1, MB_WAIT);
	s = splimp();
	if (mclfree && mclfree->mcl_next) {
		MCLWAKEUP();
	}
	splx(s);
}

/*
 * Return the number of references to this mbuf's data.  0 is returned
 * if the mbuf is not M_EXT, a reference count is returned if it is
 * M_EXT|M_EXT_CLUSTER, and 99 is returned if it is a special M_EXT.
 */
int
m_sharecount(struct mbuf *m)
{
	int count;

	switch (m->m_flags & (M_EXT|M_EXT_CLUSTER)) {
	case 0:
		count = 0;
		break;
	case M_EXT:
		count = 99;
		break;
	case M_EXT|M_EXT_CLUSTER:
		count = ((mbcluster_t)m->m_ext.ext_arg)->mcl_refs;
		break;
	default:
		panic("bad mbuf flags: %p", m);
		count = 0;
	}
	return (count);
}
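
/*
 * Usage sketch (illustrative only, not called from this file): code that
 * wants to modify cluster data in place must hold the sole reference;
 * otherwise it should work on a writable deep copy:
 *
 *	if ((m->m_flags & M_EXT) && m_sharecount(m) > 1) {
 *		struct mbuf *n = m_dup(m, MB_DONTWAIT);
 *
 *		if (n == NULL)
 *			goto drop;
 *		m_freem(m);
 *		m = n;
 *	}
 *
 * The M_EXT_WRITABLE() macro used by m_free() below encapsulates
 * essentially this test.
 */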

/*
 * change mbuf to new type
 */
void
m_chtype(struct mbuf *m, int type)
{
	int s;

	s = splimp();
	--mbtypes[m->m_type];
	++mbtypes[type];
	m->m_type = type;
	splx(s);
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(int how, int t)
{
	struct mbuf *m;
	int ms;

	/*
	 * Must only do the reclaim if not in an interrupt context.
	 */
	if (how == MB_WAIT) {
		KASSERT(mycpu->gd_intr_nesting_level == 0,
		    ("MBALLOC: CANNOT WAIT IN INTERRUPT"));
		m_reclaim();
	}

	ms = splimp();
	if (mmbfree == NULL)
		m_mballoc(1, how);
	m = mmbfree;
	if (m != NULL) {
		mmbfree = m->m_next;
		mbtypes[MT_FREE]--;
		m->m_type = t;
		mbtypes[t]++;
		m->m_next = NULL;
		m->m_nextpkt = NULL;
		m->m_data = m->m_dat;
		m->m_flags = 0;
		splx(ms);
		mbstat.m_wait++;
	} else {
		static int last_report;	/* when we did that (in ticks) */

		splx(ms);
		mbstat.m_drops++;
		if (ticks < last_report || (ticks - last_report) >= hz) {
			last_report = ticks;
			printf("All mbufs exhausted, please see tuning(7).\n");
		}
	}
	return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(int how, int t)
{
	struct mbuf *m;
	int ms;

	/*
	 * Must only do the reclaim if not in an interrupt context.
	 */
	if (how == MB_WAIT) {
		KASSERT(mycpu->gd_intr_nesting_level == 0,
		    ("MBALLOC: CANNOT WAIT IN INTERRUPT"));
		m_reclaim();
	}

	ms = splimp();
	if (mmbfree == NULL)
		m_mballoc(1, how);
	m = mmbfree;
	if (m != NULL) {
		mmbfree = m->m_next;
		mbtypes[MT_FREE]--;
		m->m_type = t;
		mbtypes[t]++;
		m->m_next = NULL;
		m->m_nextpkt = NULL;
		m->m_data = m->m_pktdat;
		m->m_flags = M_PKTHDR;
		m->m_pkthdr.rcvif = NULL;
		SLIST_INIT(&m->m_pkthdr.tags);
		m->m_pkthdr.csum_flags = 0;
		splx(ms);
		mbstat.m_wait++;
	} else {
		static int last_report;	/* when we did that (in ticks) */

		splx(ms);
		mbstat.m_drops++;
		if (ticks < last_report || (ticks - last_report) >= hz) {
			last_report = ticks;
			printf("All mbufs exhausted, please see tuning(7).\n");
		}
	}
	return (m);
}

static void
m_reclaim(void)
{
	struct domain *dp;
	struct protosw *pr;
	int s;

	s = splimp();
	for (dp = domains; dp; dp = dp->dom_next) {
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
			if (pr->pr_drain)
				(*pr->pr_drain)();
		}
	}
	splx(s);
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(int how, int type)
{
	struct mbuf *m;
	int ms;

	ms = splimp();
	if (mmbfree == NULL)
		m_mballoc(1, how);
	m = mmbfree;
	if (m != NULL) {
		mmbfree = m->m_next;
		mbtypes[MT_FREE]--;
		m->m_type = type;
		mbtypes[type]++;
		m->m_next = NULL;
		m->m_nextpkt = NULL;
		m->m_data = m->m_dat;
		m->m_flags = 0;
		splx(ms);
	} else {
		splx(ms);
		m = m_retry(how, type);
		if (m == NULL && how == MB_WAIT)
			m = m_mballoc_wait(MGET_C, type);
	}
	return (m);
}

struct mbuf *
m_gethdr(int how, int type)
{
	struct mbuf *m;
	int ms;

	ms = splimp();
	if (mmbfree == NULL)
		m_mballoc(1, how);
	m = mmbfree;
	if (m != NULL) {
		mmbfree = m->m_next;
		mbtypes[MT_FREE]--;
		m->m_type = type;
		mbtypes[type]++;
		m->m_next = NULL;
		m->m_nextpkt = NULL;
		m->m_data = m->m_pktdat;
		m->m_flags = M_PKTHDR;
		m->m_pkthdr.rcvif = NULL;
		SLIST_INIT(&m->m_pkthdr.tags);
		m->m_pkthdr.csum_flags = 0;
		m->m_pkthdr.pf_flags = 0;
		splx(ms);
	} else {
		splx(ms);
		m = m_retryhdr(how, type);
		if (m == NULL && how == MB_WAIT)
			m = m_mballoc_wait(MGETHDR_C, type);
	}
	return (m);
}

struct mbuf *
m_getclr(int how, int type)
{
	struct mbuf *m;

	if ((m = m_get(how, type)) != NULL) {
		bzero(mtod(m, caddr_t), MLEN);
	}
	return (m);
}

/*
 * m_getcl() returns an mbuf with an attached cluster.
 * Because many network drivers use this kind of buffer a lot, it is
 * convenient to keep a small pool of free buffers of this kind.
 * Even a small size such as 10 gives about 10% improvement in the
 * forwarding rate in a bridge or router.
 * The size of this free list is controlled by the sysctl variable
 * mcl_pool_max.  The list is populated on m_freem(), and used in
 * m_getcl() if elements are available.
 */
struct mbuf *
m_getcl(int how, short type, int flags)
{
	int s;
	struct mbuf *mp;

	s = splimp();
	if (flags & M_PKTHDR) {
		if (type == MT_DATA && mcl_pool) {
			mp = mcl_pool;
			mcl_pool = mp->m_nextpkt;
			--mcl_pool_count;
			splx(s);
			mp->m_nextpkt = NULL;
			mp->m_data = mp->m_ext.ext_buf;
			mp->m_flags = M_PKTHDR|M_EXT|M_EXT_CLUSTER;
			mp->m_pkthdr.rcvif = NULL;
			mp->m_pkthdr.csum_flags = 0;
			return mp;
		} else {
			MGETHDR(mp, how, type);
		}
	} else {
		MGET(mp, how, type);
	}
	if (mp) {
		m_mclget(mp, how);
		if ((mp->m_flags & M_EXT) == 0) {
			m_free(mp);
			mp = NULL;
		}
	}
	splx(s);
	return mp;
}
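
/*
 * Usage sketch (illustrative only; the receive-ring refill is a
 * hypothetical driver fragment): m_getcl() hands back a packet header
 * mbuf with a cluster already attached in a single call:
 *
 *	struct mbuf *m;
 *
 *	m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_len = m->m_pkthdr.len = MCLBYTES;
 *	(load the cluster's data address into the rx descriptor)
 */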

/*
 * struct mbuf *
 * m_getm(m, len, how, type)
 *
 * This will allocate len-worth of mbufs and/or mbuf clusters (whatever fits
 * best) and return a pointer to the top of the allocated chain.  If m is
 * non-null, then we assume that it is a single mbuf or an mbuf chain to
 * which we want len bytes worth of mbufs and/or clusters attached, and so
 * if we succeed in allocating it, we will just return a pointer to m.
 *
 * If we happen to fail at any point during the allocation, we will free
 * up everything we have already allocated and return NULL.
 */
struct mbuf *
m_getm(struct mbuf *m, int len, int how, int type)
{
	struct mbuf *top, *tail, *mp, *mtail = NULL;

	KASSERT(len >= 0, ("len is < 0 in m_getm"));

	mp = m_get(how, type);
	if (mp == NULL) {
		return (NULL);
	} else if (len > MINCLSIZE) {
		m_mclget(mp, how);
		if ((mp->m_flags & M_EXT) == 0) {
			m_free(mp);
			return (NULL);
		}
	}
	mp->m_len = 0;
	len -= M_TRAILINGSPACE(mp);

	if (m != NULL) {
		for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next)
			;
	} else {
		m = mp;
	}

	top = tail = mp;
	while (len > 0) {
		mp = m_get(how, type);
		if (mp == NULL)
			goto failed;

		tail->m_next = mp;
		tail = mp;
		if (len > MINCLSIZE) {
			m_mclget(mp, how);
			if ((mp->m_flags & M_EXT) == 0)
				goto failed;
		}

		mp->m_len = 0;
		len -= M_TRAILINGSPACE(mp);
	}

	if (mtail != NULL)
		mtail->m_next = top;
	return (m);
failed:
	m_freem(top);
	return (NULL);
}
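
/*
 * Usage sketch (illustrative only; "need" is a hypothetical byte count):
 * extend an existing chain "m" with room for more data before an
 * m_copyback()-style fill; the original chain is untouched on failure:
 *
 *	if (m_getm(m, need, MB_WAIT, MT_DATA) == NULL)
 *		return (ENOBUFS);
 */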

/*
 * m_mclget() - Adds a cluster to a normal mbuf, M_EXT is set on success.
 */
void
m_mclget(struct mbuf *m, int how)
{
	mbcluster_t mcl;
	int s;

	KKASSERT((m->m_flags & M_EXT_OLD) == 0);

	s = splimp();
	if ((mcl = mclfree) == NULL) {
		m_clalloc(1, how);
		if ((mcl = mclfree) == NULL) {
			if (how == MB_WAIT) {
				m_clalloc_wait();
				mcl = mclfree;
			}
		}
	}

	/*
	 * Possibly found a cluster, unlink it from the free list and
	 * set the ref count.
	 */
	if (mcl == NULL) {
		splx(s);
		return;
	}
	KKASSERT(mcl->mcl_refs == 0);
	mclfree = mcl->mcl_next;
	mcl->mcl_refs = 1;
	--mbstat.m_clfree;
	splx(s);

	/*
	 * Add the cluster to the mbuf.
	 */
	m->m_ext.ext_arg = mcl;
	m->m_ext.ext_buf = mcl->mcl_data;
	m->m_ext.ext_nref.new = m_mclref;
	m->m_ext.ext_nfree.new = m_mclfree;
	m->m_ext.ext_size = MCLBYTES;

	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT | M_EXT_CLUSTER;
}

static void
m_mclfree(void *arg)
{
	mbcluster_t mcl = arg;

	KKASSERT(mcl->mcl_magic == MCL_MAGIC);
	KKASSERT(mcl->mcl_refs > 0);
	crit_enter();
	if (--mcl->mcl_refs == 0) {
		if (mbstat.m_clfree < mcl_free_max) {
			mcl->mcl_next = mclfree;
			mclfree = mcl;
			++mbstat.m_clfree;
			MCLWAKEUP();
		} else {
			mcl->mcl_magic = -1;
			free(mcl->mcl_data, M_MBUFCL);
			free(mcl, M_MBUFCL);
			--mbstat.m_clusters;
		}
	}
	crit_exit();
}

static void
m_mclref(void *arg)
{
	mbcluster_t mcl = arg;

	KKASSERT(mcl->mcl_magic == MCL_MAGIC);
	crit_enter();
	++mcl->mcl_refs;
	crit_exit();
}

/*
 * Helper routines for M_EXT reference/free
 */
static __inline void
m_extref(const struct mbuf *m)
{
	int s;

	KKASSERT(m->m_ext.ext_nfree.any != NULL);
	s = splimp();
	if (m->m_flags & M_EXT_OLD)
		m->m_ext.ext_nref.old(m->m_ext.ext_buf, m->m_ext.ext_size);
	else
		m->m_ext.ext_nref.new(m->m_ext.ext_arg);
	splx(s);
}

/*
 * m_free()
 *
 * Free a single mbuf and any associated external storage.  The successor,
 * if any, is returned.
 *
 * We do need to check non-first mbuf for m_aux, since some existing
 * code does not call M_PREPEND properly.
 * (example: call to bpf_mtap from drivers)
 */
struct mbuf *
m_free(struct mbuf *m)
{
	int s;
	struct mbuf *n;

	s = splimp();
	KASSERT(m->m_type != MT_FREE, ("freeing free mbuf %p", m));

	/*
	 * Adjust our type count and delete any attached chains if the
	 * mbuf is a packet header.
	 */
	if ((m->m_flags & M_PKTHDR) != 0)
		m_tag_delete_chain(m, NULL);

	/*
	 * Place the mbuf on the appropriate free list.  Try to maintain a
	 * small cache of mbuf+cluster pairs.
	 */
	n = m->m_next;
	m->m_next = NULL;
	if (m->m_flags & M_EXT) {
		KKASSERT(m->m_ext.ext_nfree.any != NULL);
		if (mcl_pool_count < mcl_pool_max && m && m->m_next == NULL &&
		    (m->m_flags & (M_PKTHDR|M_EXT_CLUSTER)) == (M_PKTHDR|M_EXT_CLUSTER) &&
		    m->m_type == MT_DATA && M_EXT_WRITABLE(m)) {
			KKASSERT(((mbcluster_t)m->m_ext.ext_arg)->mcl_magic == MCL_MAGIC);
			m->m_nextpkt = mcl_pool;
			mcl_pool = m;
			++mcl_pool_count;
			m = NULL;
		} else {
			if (m->m_flags & M_EXT_OLD)
				m->m_ext.ext_nfree.old(m->m_ext.ext_buf, m->m_ext.ext_size);
			else
				m->m_ext.ext_nfree.new(m->m_ext.ext_arg);
			m->m_flags = 0;
			m->m_ext.ext_arg = NULL;
			m->m_ext.ext_nref.new = NULL;
			m->m_ext.ext_nfree.new = NULL;
		}
	}
	if (m) {
		--mbtypes[m->m_type];
		if (mbtypes[MT_FREE] < mbuf_free_max) {
			m->m_type = MT_FREE;
			mbtypes[MT_FREE]++;
			m->m_next = mmbfree;
			mmbfree = m;
			MMBWAKEUP();
		} else {
			free(m, M_MBUF);
			--mbstat.m_mbufs;
		}
	}
	splx(s);
	return (n);
}

void
m_freem(struct mbuf *m)
{
	int s;

	s = splimp();
	while (m)
		m = m_free(m);
	splx(s);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR)
		M_MOVE_PKTHDR(mn, m);
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of MB_WAIT/MB_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
#define MCFail (mbstat.m_mcfail)

struct mbuf *
m_copym(const struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = NULL;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, wait))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			m_extref(m);
			n->m_ext = m->m_ext;
			n->m_flags |= m->m_flags &
				      (M_EXT | M_EXT_OLD | M_EXT_CLUSTER);
		} else {
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		}
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == NULL)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (NULL);
}
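
/*
 * Usage sketch (illustrative only): a TCP-style retransmit keeps the
 * queued chain intact and hands a read-only reference copy to the output
 * path, so cluster data is shared rather than duplicated:
 *
 *	struct mbuf *n;
 *
 *	n = m_copym(m, 0, M_COPYALL, MB_DONTWAIT);
 *	if (n == NULL)
 *		return (ENOBUFS);
 *	(transmit n; neither chain may be written to while shared)
 */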

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies also have the room available.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	if (!m_dup_pkthdr(n, m, how))
		goto nospace;
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		m_extref(m);
		n->m_ext = m->m_ext;
		n->m_flags |= m->m_flags & (M_EXT | M_EXT_OLD | M_EXT_CLUSTER);
	} else {
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (!o)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			m_extref(m);
			n->m_ext = m->m_ext;
			n->m_flags |= m->m_flags &
				      (M_EXT | M_EXT_OLD | M_EXT_CLUSTER);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (NULL);
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
	unsigned count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
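
/*
 * Usage sketch (illustrative only; "hdroff" is a hypothetical offset
 * into the packet): copy a header that may straddle mbuf boundaries
 * into aligned local storage instead of casting with mtod():
 *
 *	struct udphdr uh;
 *
 *	m_copydata(m, hdroff, sizeof(uh), (caddr_t)&uh);
 */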

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	/* Sanity check */
	if (m == NULL)
		return (NULL);
	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __FUNCTION__));

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		MGET(n, how, m->m_type);
		if (n == NULL)
			goto nospace;
		if (top == NULL) {		/* first one, must be PKTHDR */
			if (!m_dup_pkthdr(n, m, how))
				goto nospace;
			nsize = MHLEN;
		} else				/* not the first one */
			nsize = MLEN;
		if (remain >= MINCLSIZE) {
			MCLGET(n, how);
			if ((n->m_flags & M_EXT) == 0) {
				(void)m_free(n);
				goto nospace;
			}
			nsize = MCLBYTES;
		}
		n->m_len = 0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __FUNCTION__));
	}
	return (top);

nospace:
	m_freem(top);
	MCFail++;
	return (NULL);
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == NULL)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}
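
/*
 * Usage sketch (illustrative only): trim a link-level header from the
 * front of a received packet and a hardware-appended CRC from the back;
 * a negative length trims from the tail:
 *
 *	m_adj(m, sizeof(struct ether_header));
 *	m_adj(m, -ETHER_CRC_LEN);
 */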

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod will work for a structure
 * of size len).  Returns the resulting mbuf chain on success, frees it and
 * returns null on failure.  If there is room, it will add up to
 * max_protohdr-len extra bytes to the contiguous region in an attempt to
 * avoid being called next time.
 */
#define MPFail (mbstat.m_mpfail)

struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, MB_DONTWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR)
			M_MOVE_PKTHDR(m, n);
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (NULL);
}
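
/*
 * Usage sketch (illustrative only): the canonical input-path idiom makes
 * the protocol header contiguous before dereferencing it via mtod():
 *
 *	struct ip *ip;
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;
 *	ip = mtod(m, struct ip *);
 *
 * Note that m_pullup() has already freed the chain when it returns NULL.
 */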

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf.  Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				(void) m_free(n);
				return (NULL);
			} else {
				n->m_len = 0;
				return (n);
			}
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data + len;
		m_extref(m);
		n->m_ext = m->m_ext;
		n->m_flags |= m->m_flags & (M_EXT | M_EXT_OLD | M_EXT_CLUSTER);
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}

/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(char *buf, int totlen, int off0, struct ifnet *ifp,
    void (*copy) (char *from, caddr_t to, u_int len))
{
	struct mbuf *m;
	struct mbuf *top = NULL, **mp = &top;
	int off = off0, len;
	char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		cp += off + 2 * sizeof(u_short);
		totlen -= 2 * sizeof(u_short);
	}
	MGETHDR(m, MB_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, MB_DONTWAIT, MT_DATA);
			if (m == NULL) {
				m_freem(top);
				return (NULL);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, MB_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == NULL && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == NULL)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			n = m_getclr(MB_DONTWAIT, m->m_type);
			if (n == NULL)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			n = m_get(MB_DONTWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:
	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}

void
m_print(const struct mbuf *m)
{
	int len;
	const struct mbuf *m2;

	len = m->m_pkthdr.len;
	m2 = m;
	while (len) {
		printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
		len -= m2->m_len;
		m2 = m2->m_next;
	}
}

/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{
	KASSERT((to->m_flags & M_EXT) == 0, ("m_move_pkthdr: to has cluster"));

	to->m_flags = from->m_flags & M_COPYFLAGS;
	to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
	from->m_flags &= ~M_PKTHDR;
}

/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
{
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, how));
}

/*
 * Defragment a mbuf chain, returning the shortest possible
 * chain of mbufs and clusters.  If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed in chain will be unchanged.  Upon success,
 * the original chain will be freed, and the new chain
 * will be returned.
 *
 * If a non-packet header is passed in, the original
 * mbuf (chain?) will be returned unharmed.
 */
struct mbuf *
m_defrag(struct mbuf *m0, int how)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, length;

	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

#ifdef MBUF_STRESS_TEST
	if (m_defragrandomfailures) {
		int temp = arc4random() & 0xff;
		if (temp == 0xba)
			goto nospace;
	}
#endif

	if (m0->m_pkthdr.len > MHLEN)
		m_final = m_getcl(how, MT_DATA, M_PKTHDR);
	else
		m_final = m_gethdr(how, MT_DATA);

	if (m_final == NULL)
		goto nospace;

	if (m_dup_pkthdr(m_final, m0, how) == 0)
		goto nospace;

	m_new = m_final;

	while (progress < m0->m_pkthdr.len) {
		length = m0->m_pkthdr.len - progress;
		if (length > MCLBYTES)
			length = MCLBYTES;

		if (m_new == NULL) {
			if (length > MLEN)
				m_new = m_getcl(how, MT_DATA, 0);
			else
				m_new = m_get(how, MT_DATA);
			if (m_new == NULL)
				goto nospace;
		}

		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
		progress += length;
		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
	if (m0->m_next == NULL)
		m_defraguseless++;
	m_freem(m0);
	m0 = m_final;
	m_defragpackets++;
	m_defragbytes += m0->m_pkthdr.len;
	return (m0);
nospace:
	m_defragfailure++;
	if (m_new)
		m_free(m_new);
	if (m_final)
		m_freem(m_final);
	return (NULL);
}
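
/*
 * Usage sketch (illustrative only; too_many_segs() is a hypothetical
 * driver predicate): a transmit path whose DMA engine handles only a
 * few scatter/gather segments can compact a long chain first:
 *
 *	if (too_many_segs(m)) {
 *		struct mbuf *n = m_defrag(m, MB_DONTWAIT);
 *
 *		if (n != NULL)
 *			m = n;
 *	}
 *
 * The original chain has been freed when m_defrag() succeeds and is
 * left untouched when it fails, so "m" remains valid either way.
 */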

/*
 * Move data from uio into mbufs.
 * A length of zero means copy the whole uio.
 */
struct mbuf *
m_uiomove(struct uio *uio, int wait, int len0)
{
	struct mbuf *head;		/* result mbuf chain */
	struct mbuf *m;			/* current working mbuf */
	struct mbuf **mp;
	int resid, datalen, error;

	resid = (len0 == 0) ? uio->uio_resid : min(len0, uio->uio_resid);

	head = NULL;
	mp = &head;
	do {
		if (resid > MHLEN) {
			m = m_getcl(wait, MT_DATA, head == NULL ? M_PKTHDR : 0);
			if (m == NULL)
				goto failed;
			if (m->m_flags & M_PKTHDR)
				m->m_pkthdr.len = 0;
		} else {
			if (head == NULL) {
				MGETHDR(m, wait, MT_DATA);
				if (m == NULL)
					goto failed;
				m->m_pkthdr.len = 0;
				/* Leave room for protocol headers. */
				if (resid < MHLEN)
					MH_ALIGN(m, resid);
			} else {
				MGET(m, wait, MT_DATA);
				if (m == NULL)
					goto failed;
			}
		}
		datalen = min(MCLBYTES, resid);
		error = uiomove(mtod(m, caddr_t), datalen, uio);
		if (error) {
			m_free(m);
			goto failed;
		}
		m->m_len = datalen;
		*mp = m;
		mp = &m->m_next;
		head->m_pkthdr.len += datalen;
		resid -= datalen;
	} while (resid > 0);

	return (head);

failed:
	if (head)
		m_freem(head);
	return (NULL);
}
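
/*
 * Usage sketch (illustrative only): a sosend()-style writer can convert
 * the caller's iovecs into an mbuf chain in a single call; a len0 of 0
 * consumes the entire uio:
 *
 *	struct mbuf *top;
 *
 *	top = m_uiomove(uio, MB_WAIT, 0);
 *	if (top == NULL)
 *		return (ENOBUFS);
 */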