Add an alignment feature to vm_map_findspace(). This feature will be used
[dragonfly.git] / sys / vm / vm_page.c
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
37 * $FreeBSD: src/sys/vm/vm_page.c,v 1.147.2.18 2002/03/10 05:03:19 alc Exp $
38 * $DragonFly: src/sys/vm/vm_page.c,v 1.8 2003/08/25 17:01:13 dillon Exp $
39 */
40
41/*
42 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
43 * All rights reserved.
44 *
45 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
46 *
47 * Permission to use, copy, modify and distribute this software and
48 * its documentation is hereby granted, provided that both the copyright
49 * notice and this permission notice appear in all copies of the
50 * software, derivative works or modified versions, and any portions
51 * thereof, and that both notices appear in supporting documentation.
52 *
53 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
54 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
55 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
56 *
57 * Carnegie Mellon requests users of this software to return to
58 *
59 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
60 * School of Computer Science
61 * Carnegie Mellon University
62 * Pittsburgh PA 15213-3890
63 *
64 * any improvements or extensions that they make and grant Carnegie the
65 * rights to redistribute these changes.
66 */
67
68/*
69 * Resident memory management module.
70 */
71
72#include <sys/param.h>
73#include <sys/systm.h>
74#include <sys/malloc.h>
75#include <sys/proc.h>
76#include <sys/vmmeter.h>
77#include <sys/vnode.h>
78
79#include <vm/vm.h>
80#include <vm/vm_param.h>
81#include <sys/lock.h>
82#include <vm/vm_kern.h>
83#include <vm/pmap.h>
84#include <vm/vm_map.h>
85#include <vm/vm_object.h>
86#include <vm/vm_page.h>
87#include <vm/vm_pageout.h>
88#include <vm/vm_pager.h>
89#include <vm/vm_extern.h>
90#include <vm/vm_page2.h>
91
92static void vm_page_queue_init (void);
93static vm_page_t vm_page_select_cache (vm_object_t, vm_pindex_t);
94
95/*
96 * Associated with page of user-allocatable memory is a
97 * page structure.
98 */
99
100static struct vm_page **vm_page_buckets; /* Array of buckets */
101static int vm_page_bucket_count; /* How big is array? */
102static int vm_page_hash_mask; /* Mask for hash function */
103static volatile int vm_page_bucket_generation;
104
105struct vpgqueues vm_page_queues[PQ_COUNT];
106
107static void
108vm_page_queue_init(void) {
109 int i;
110
111 for(i=0;i<PQ_L2_SIZE;i++) {
112 vm_page_queues[PQ_FREE+i].cnt = &vmstats.v_free_count;
113 }
114 vm_page_queues[PQ_INACTIVE].cnt = &vmstats.v_inactive_count;
115
116 vm_page_queues[PQ_ACTIVE].cnt = &vmstats.v_active_count;
117 vm_page_queues[PQ_HOLD].cnt = &vmstats.v_active_count;
118 for(i=0;i<PQ_L2_SIZE;i++) {
119 vm_page_queues[PQ_CACHE+i].cnt = &vmstats.v_cache_count;
120 }
121 for(i=0;i<PQ_COUNT;i++) {
122 TAILQ_INIT(&vm_page_queues[i].pl);
123 }
124}
125
126vm_page_t vm_page_array = 0;
127int vm_page_array_size = 0;
128long first_page = 0;
129int vm_page_zero_count = 0;
130
131static __inline int vm_page_hash (vm_object_t object, vm_pindex_t pindex);
132static void vm_page_free_wakeup (void);
133
134/*
135 * vm_set_page_size:
136 *
137 * Sets the page size, perhaps based upon the memory
138 * size. Must be called before any use of page-size
139 * dependent functions.
140 */
141void
142vm_set_page_size(void)
143{
144 if (vmstats.v_page_size == 0)
145 vmstats.v_page_size = PAGE_SIZE;
146 if (((vmstats.v_page_size - 1) & vmstats.v_page_size) != 0)
147 panic("vm_set_page_size: page size not a power of two");
148}
149
150/*
151 * vm_add_new_page:
152 *
153 * Add a new page to the freelist for use by the system.
154 * Must be called at splhigh().
155 */
156vm_page_t
157vm_add_new_page(vm_offset_t pa)
158{
159 vm_page_t m;
160
161 ++vmstats.v_page_count;
162 ++vmstats.v_free_count;
163 m = PHYS_TO_VM_PAGE(pa);
164 m->phys_addr = pa;
165 m->flags = 0;
166 m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
167 m->queue = m->pc + PQ_FREE;
168 TAILQ_INSERT_HEAD(&vm_page_queues[m->queue].pl, m, pageq);
169 vm_page_queues[m->queue].lcnt++;
170 return (m);
171}
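/*
 * Illustrative sketch (not part of the original file): how a physical
 * address picks a page color and a free sub-queue in vm_add_new_page()
 * above.  The constants below (page shift, number of colors, base index
 * of the free queues) are assumed example values, not necessarily what
 * this kernel was built with.
 */
#if 0	/* example only */
#include <stdio.h>

#define EX_PAGE_SHIFT	12			/* assumed 4K pages */
#define EX_PQ_L2_SIZE	64			/* assumed number of colors */
#define EX_PQ_L2_MASK	(EX_PQ_L2_SIZE - 1)
#define EX_PQ_FREE	1			/* assumed base index of the free queues */

int
main(void)
{
	unsigned long pa = 0x12345000UL;	/* arbitrary physical address */
	int pc = (pa >> EX_PAGE_SHIFT) & EX_PQ_L2_MASK;
	int queue = EX_PQ_FREE + pc;

	/* pages whose frame numbers differ by a multiple of PQ_L2_SIZE share a color */
	printf("pa 0x%lx -> color %d, free queue index %d\n", pa, pc, queue);
	return (0);
}
#endif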
172
173/*
174 * vm_page_startup:
175 *
176 * Initializes the resident memory module.
177 *
178 * Allocates memory for the page cells, and
179 * for the object/offset-to-page hash table headers.
180 * Each page cell is initialized and placed on the free list.
181 */
182
183vm_offset_t
184vm_page_startup(vm_offset_t starta, vm_offset_t enda, vm_offset_t vaddr)
185{
186 vm_offset_t mapped;
187 struct vm_page **bucket;
188 vm_size_t npages, page_range;
189 vm_offset_t new_end;
190 int i;
191 vm_offset_t pa;
192 int nblocks;
193 vm_offset_t last_pa;
194
195 /* the biggest memory array is the second group of pages */
196 vm_offset_t end;
197 vm_offset_t biggestone, biggestsize;
198
199 vm_offset_t total;
200
201 total = 0;
202 biggestsize = 0;
203 biggestone = 0;
204 nblocks = 0;
205 vaddr = round_page(vaddr);
206
207 for (i = 0; phys_avail[i + 1]; i += 2) {
208 phys_avail[i] = round_page(phys_avail[i]);
209 phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
210 }
211
212 for (i = 0; phys_avail[i + 1]; i += 2) {
213 int size = phys_avail[i + 1] - phys_avail[i];
214
215 if (size > biggestsize) {
216 biggestone = i;
217 biggestsize = size;
218 }
219 ++nblocks;
220 total += size;
221 }
222
223 end = phys_avail[biggestone+1];
224
225 /*
226 * Initialize the queue headers for the free queue, the active queue
227 * and the inactive queue.
228 */
229
230 vm_page_queue_init();
231
232 /*
233 * Allocate (and initialize) the hash table buckets.
234 *
235 * The number of buckets MUST BE a power of 2, and the actual value is
236 * the next power of 2 greater than the number of physical pages in
237 * the system.
238 *
239 * We make the hash table approximately 2x the number of pages to
240 * reduce the chain length. This is about the same size using the
241 * singly-linked list as the 1x hash table we were using before
242 * using TAILQ but the chain length will be smaller.
243 *
244 * Note: This computation can be tweaked if desired.
245 */
246 vm_page_buckets = (struct vm_page **)vaddr;
247 bucket = vm_page_buckets;
248 if (vm_page_bucket_count == 0) {
249 vm_page_bucket_count = 1;
250 while (vm_page_bucket_count < atop(total))
251 vm_page_bucket_count <<= 1;
252 }
253 vm_page_bucket_count <<= 1;
254 vm_page_hash_mask = vm_page_bucket_count - 1;
255
256 /*
257 * Validate these addresses.
258 */
259 new_end = end - vm_page_bucket_count * sizeof(struct vm_page *);
260 new_end = trunc_page(new_end);
261 mapped = round_page(vaddr);
262 vaddr = pmap_map(mapped, new_end, end,
263 VM_PROT_READ | VM_PROT_WRITE);
264 vaddr = round_page(vaddr);
265 bzero((caddr_t) mapped, vaddr - mapped);
266
267 for (i = 0; i < vm_page_bucket_count; i++) {
268 *bucket = NULL;
269 bucket++;
270 }
271
272 /*
273 * Compute the number of pages of memory that will be available for
274 * use (taking into account the overhead of a page structure per
275 * page).
276 */
277
278 first_page = phys_avail[0] / PAGE_SIZE;
279
280 page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE - first_page;
281 npages = (total - (page_range * sizeof(struct vm_page)) -
282 (end - new_end)) / PAGE_SIZE;
283
284 end = new_end;
285 /*
286 * Initialize the mem entry structures now, and put them in the free
287 * queue.
288 */
289 vm_page_array = (vm_page_t) vaddr;
290 mapped = vaddr;
291
292 /*
293 * Validate these addresses.
294 */
295
296 new_end = trunc_page(end - page_range * sizeof(struct vm_page));
297 mapped = pmap_map(mapped, new_end, end,
298 VM_PROT_READ | VM_PROT_WRITE);
299
300 /*
301 * Clear all of the page structures
302 */
303 bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
304 vm_page_array_size = page_range;
305
306 /*
307 * Construct the free queue(s) in descending order (by physical
308 * address) so that the first 16MB of physical memory is allocated
309 * last rather than first. On large-memory machines, this avoids
310 * the exhaustion of low physical memory before isa_dmainit has run.
311 */
312 vmstats.v_page_count = 0;
313 vmstats.v_free_count = 0;
314 for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
315 pa = phys_avail[i];
316 if (i == biggestone)
317 last_pa = new_end;
318 else
319 last_pa = phys_avail[i + 1];
320 while (pa < last_pa && npages-- > 0) {
321 vm_add_new_page(pa);
322 pa += PAGE_SIZE;
323 }
324 }
325 return (mapped);
326}
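/*
 * Illustrative sketch (not part of the original file): the bucket sizing
 * rule used in vm_page_startup() above.  vm_page_bucket_count is grown to
 * the next power of two greater than the page count and then doubled, so
 * the table ends up roughly 2x the number of pages and (count - 1) can be
 * used directly as the hash mask.  The page count below is arbitrary.
 */
#if 0	/* example only */
#include <stdio.h>

int
main(void)
{
	unsigned long npages = 49152;		/* e.g. 192MB of 4K pages */
	int bucket_count = 1;

	while (bucket_count < npages)		/* next power of 2 */
		bucket_count <<= 1;
	bucket_count <<= 1;			/* roughly 2x the page count */
	printf("buckets = %d, hash mask = 0x%x\n",
	    bucket_count, bucket_count - 1);
	return (0);
}
#endif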
327
328/*
329 * vm_page_hash:
330 *
331 * Distributes the object/offset key pair among hash buckets.
332 *
333 * NOTE: This macro depends on vm_page_bucket_count being a power of 2.
334 * This routine may not block.
335 *
336 * We try to randomize the hash based on the object to spread the pages
337 * out in the hash table without it costing us too much.
338 */
339static __inline int
340vm_page_hash(vm_object_t object, vm_pindex_t pindex)
341{
342 int i = ((uintptr_t)object + pindex) ^ object->hash_rand;
343
344 return(i & vm_page_hash_mask);
345}
346
347void
348vm_page_unhold(vm_page_t mem)
349{
350 --mem->hold_count;
351 KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
352 if (mem->hold_count == 0 && mem->queue == PQ_HOLD)
353 vm_page_free_toq(mem);
354}
355
356/*
357 * vm_page_insert: [ internal use only ]
358 *
359 * Inserts the given mem entry into the object and object list.
360 *
361 * The pagetables are not updated but will presumably fault the page
362 * in if necessary, or if a kernel page the caller will at some point
363 * enter the page into the kernel's pmap. We are not allowed to block
364 * here so we *can't* do this anyway.
365 *
366 * The object and page must be locked, and must be splhigh.
367 * This routine may not block.
368 */
369
370void
371vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
372{
373 struct vm_page **bucket;
374
375 if (m->object != NULL)
376 panic("vm_page_insert: already inserted");
377
378 /*
379 * Record the object/offset pair in this page
380 */
381
382 m->object = object;
383 m->pindex = pindex;
384
385 /*
386 * Insert it into the object_object/offset hash table
387 */
388
389 bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
390 m->hnext = *bucket;
391 *bucket = m;
392 vm_page_bucket_generation++;
393
394 /*
395 * Now link into the object's list of backed pages.
396 */
397
398 TAILQ_INSERT_TAIL(&object->memq, m, listq);
399 object->generation++;
400
401 /*
402 * show that the object has one more resident page.
403 */
404
405 object->resident_page_count++;
406
407 /*
408 * Since we are inserting a new and possibly dirty page,
409 * update the object's OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY flags.
410 */
411 if (m->flags & PG_WRITEABLE)
412 vm_object_set_writeable_dirty(object);
413}
414
415/*
416 * vm_page_remove:
417 * NOTE: used by device pager as well -wfj
418 *
419 * Removes the given mem entry from the object/offset-page
420 * table and the object page list, but do not invalidate/terminate
421 * the backing store.
422 *
423 * The object and page must be locked, and at splhigh.
424 * The underlying pmap entry (if any) is NOT removed here.
425 * This routine may not block.
426 */
427
428void
429vm_page_remove(vm_page_t m)
430{
431 vm_object_t object;
432
433 if (m->object == NULL)
434 return;
435
436 if ((m->flags & PG_BUSY) == 0) {
437 panic("vm_page_remove: page not busy");
438 }
439
440 /*
441 * Basically destroy the page.
442 */
443
444 vm_page_wakeup(m);
445
446 object = m->object;
447
448 /*
449 * Remove from the object_object/offset hash table. The object
450 * must be on the hash queue, we will panic if it isn't
451 *
452 * Note: we must NULL-out m->hnext to prevent loops in detached
453 * buffers with vm_page_lookup().
454 */
455
456 {
457 struct vm_page **bucket;
458
459 bucket = &vm_page_buckets[vm_page_hash(m->object, m->pindex)];
460 while (*bucket != m) {
461 if (*bucket == NULL)
462 panic("vm_page_remove(): page not found in hash");
463 bucket = &(*bucket)->hnext;
464 }
465 *bucket = m->hnext;
466 m->hnext = NULL;
467 vm_page_bucket_generation++;
468 }
469
470 /*
471 * Now remove from the object's list of backed pages.
472 */
473
474 TAILQ_REMOVE(&object->memq, m, listq);
475
476 /*
477 * And show that the object has one fewer resident page.
478 */
479
480 object->resident_page_count--;
481 object->generation++;
482
483 m->object = NULL;
484}
485
486/*
487 * vm_page_lookup:
488 *
489 * Returns the page associated with the object/offset
490 * pair specified; if none is found, NULL is returned.
491 *
492 * NOTE: the code below does not lock. It will operate properly if
493 * an interrupt makes a change, but the generation algorithm will not
494 * operate properly in an SMP environment where both cpu's are able to run
495 * kernel code simultaneously.
496 *
497 * The object must be locked. No side effects.
498 * This routine may not block.
499 * This is a critical path routine
500 */
501
502vm_page_t
503vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
504{
505 vm_page_t m;
506 struct vm_page **bucket;
507 int generation;
508
509 /*
510 * Search the hash table for this object/offset pair
511 */
512
513retry:
514 generation = vm_page_bucket_generation;
515 bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
516 for (m = *bucket; m != NULL; m = m->hnext) {
517 if ((m->object == object) && (m->pindex == pindex)) {
518 if (vm_page_bucket_generation != generation)
519 goto retry;
520 return (m);
521 }
522 }
523 if (vm_page_bucket_generation != generation)
524 goto retry;
525 return (NULL);
526}
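/*
 * Illustrative sketch (not part of the original file): the generation
 * counter retry pattern used by vm_page_lookup() above.  A reader
 * snapshots the generation, walks the unlocked hash chain, and restarts
 * if a writer bumped the generation in the meantime.  The types and the
 * ex_ names are invented for the example.
 */
#if 0	/* example only */
struct ex_node {
	int		key;
	struct ex_node	*next;
};

static struct ex_node	*ex_bucket;		/* a single hash chain */
static volatile int	 ex_generation;		/* bumped on every insert/remove */

static struct ex_node *
ex_lookup(int key)
{
	struct ex_node *n;
	int gen;

retry:
	gen = ex_generation;
	for (n = ex_bucket; n != NULL; n = n->next) {
		if (n->key == key) {
			if (ex_generation != gen)	/* chain changed under us */
				goto retry;
			return (n);
		}
	}
	if (ex_generation != gen)
		goto retry;
	return (NULL);
}
#endif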
527
528/*
529 * vm_page_rename:
530 *
531 * Move the given memory entry from its
532 * current object to the specified target object/offset.
533 *
534 * The object must be locked.
535 * This routine may not block.
536 *
537 * Note: this routine will raise itself to splvm(), the caller need not.
538 *
539 * Note: swap associated with the page must be invalidated by the move. We
540 * have to do this for several reasons: (1) we aren't freeing the
541 * page, (2) we are dirtying the page, (3) the VM system is probably
542 * moving the page from object A to B, and will then later move
543 * the backing store from A to B and we can't have a conflict.
544 *
545 * Note: we *always* dirty the page. It is necessary both for the
546 * fact that we moved it, and because we may be invalidating
547 * swap. If the page is on the cache, we have to deactivate it
548 * or vm_page_dirty() will panic. Dirty pages are not allowed
549 * on the cache.
550 */
551
552void
553vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
554{
555 int s;
556
557 s = splvm();
558 vm_page_remove(m);
559 vm_page_insert(m, new_object, new_pindex);
560 if (m->queue - m->pc == PQ_CACHE)
561 vm_page_deactivate(m);
562 vm_page_dirty(m);
563 splx(s);
564}
565
566/*
567 * vm_page_unqueue_nowakeup:
568 *
569 * vm_page_unqueue() without any wakeup
570 *
571 * This routine must be called at splhigh().
572 * This routine may not block.
573 */
574
575void
576vm_page_unqueue_nowakeup(vm_page_t m)
577{
578 int queue = m->queue;
579 struct vpgqueues *pq;
580 if (queue != PQ_NONE) {
581 pq = &vm_page_queues[queue];
582 m->queue = PQ_NONE;
583 TAILQ_REMOVE(&pq->pl, m, pageq);
584 (*pq->cnt)--;
585 pq->lcnt--;
586 }
587}
588
589/*
590 * vm_page_unqueue:
591 *
592 * Remove a page from its queue.
593 *
594 * This routine must be called at splhigh().
595 * This routine may not block.
596 */
597
598void
599vm_page_unqueue(vm_page_t m)
600{
601 int queue = m->queue;
602 struct vpgqueues *pq;
603 if (queue != PQ_NONE) {
604 m->queue = PQ_NONE;
605 pq = &vm_page_queues[queue];
606 TAILQ_REMOVE(&pq->pl, m, pageq);
607 (*pq->cnt)--;
608 pq->lcnt--;
609 if ((queue - m->pc) == PQ_CACHE) {
610 if (vm_paging_needed())
611 pagedaemon_wakeup();
612 }
613 }
614}
615
616#if PQ_L2_SIZE > 1
617
618/*
619 * vm_page_list_find:
620 *
621 * Find a page on the specified queue with color optimization.
622 *
623 * The page coloring optimization attempts to locate a page
624 * that does not overload other nearby pages in the object in
625 * the cpu's L1 or L2 caches. We need this optimization because
626 * cpu caches tend to be physical caches, while object spaces tend
627 * to be virtual.
628 *
629 * This routine must be called at splvm().
630 * This routine may not block.
631 *
632 * This routine may only be called from the vm_page_list_find() macro
633 * in vm_page.h
634 */
635vm_page_t
636_vm_page_list_find(int basequeue, int index)
637{
638 int i;
639 vm_page_t m = NULL;
640 struct vpgqueues *pq;
641
642 pq = &vm_page_queues[basequeue];
643
644 /*
645 * Note that for the first loop, index+i and index-i wind up at the
646 * same place. Even though this is not totally optimal, we've already
647 * blown it by missing the cache case so we do not care.
648 */
649
650 for(i = PQ_L2_SIZE / 2; i > 0; --i) {
651 if ((m = TAILQ_FIRST(&pq[(index + i) & PQ_L2_MASK].pl)) != NULL)
652 break;
653
654 if ((m = TAILQ_FIRST(&pq[(index - i) & PQ_L2_MASK].pl)) != NULL)
655 break;
656 }
657 return(m);
658}
659
660#endif
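/*
 * Illustrative sketch (not part of the original file): the order in which
 * _vm_page_list_find() above probes the other per-color sub-queues when
 * the preferred one is empty, wrapping the index with PQ_L2_MASK.  The
 * PQ_L2_SIZE value and the requested color are assumed example values.
 */
#if 0	/* example only */
#include <stdio.h>

#define EX_PQ_L2_SIZE	8
#define EX_PQ_L2_MASK	(EX_PQ_L2_SIZE - 1)

int
main(void)
{
	int index = 3;				/* requested color */
	int i;

	for (i = EX_PQ_L2_SIZE / 2; i > 0; --i) {
		printf("probe color %d then %d\n",
		    (index + i) & EX_PQ_L2_MASK,
		    (index - i) & EX_PQ_L2_MASK);
	}
	return (0);
}
#endif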
661
662/*
663 * vm_page_select_cache:
664 *
665 * Find a page on the cache queue with color optimization. As pages
666 * might be found, but not applicable, they are deactivated. This
667 * keeps us from using potentially busy cached pages.
668 *
669 * This routine must be called at splvm().
670 * This routine may not block.
671 */
672vm_page_t
673vm_page_select_cache(vm_object_t object, vm_pindex_t pindex)
674{
675 vm_page_t m;
676
677 while (TRUE) {
678 m = vm_page_list_find(
679 PQ_CACHE,
680 (pindex + object->pg_color) & PQ_L2_MASK,
681 FALSE
682 );
683 if (m && ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy ||
684 m->hold_count || m->wire_count)) {
685 vm_page_deactivate(m);
686 continue;
687 }
688 return m;
689 }
690}
691
692/*
693 * vm_page_select_free:
694 *
695 * Find a free or zero page, with specified preference. We attempt to
696 * inline the nominal case and fall back to _vm_page_select_free()
697 * otherwise.
698 *
699 * This routine must be called at splvm().
700 * This routine may not block.
701 */
702
703static __inline vm_page_t
704vm_page_select_free(vm_object_t object, vm_pindex_t pindex, boolean_t prefer_zero)
705{
706 vm_page_t m;
707
708 m = vm_page_list_find(
709 PQ_FREE,
710 (pindex + object->pg_color) & PQ_L2_MASK,
711 prefer_zero
712 );
713 return(m);
714}
715
716/*
717 * vm_page_alloc:
718 *
719 * Allocate and return a memory cell associated
720 * with this VM object/offset pair.
721 *
722 * page_req classes:
723 * VM_ALLOC_NORMAL normal process request
724 * VM_ALLOC_SYSTEM system *really* needs a page
725 * VM_ALLOC_INTERRUPT interrupt time request
726 * VM_ALLOC_ZERO zero page
727 *
728 * Object must be locked.
729 * This routine may not block.
730 *
731 * Additional special handling is required when called from an
732 * interrupt (VM_ALLOC_INTERRUPT). We are not allowed to mess with
733 * the page cache in this case.
734 */
735
736vm_page_t
737vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int page_req)
738{
739 vm_page_t m = NULL;
740 int s;
741
742 KASSERT(!vm_page_lookup(object, pindex),
743 ("vm_page_alloc: page already allocated"));
744
745 /*
746 * The pager is allowed to eat deeper into the free page list.
747 */
748
749 if ((curthread == pagethread) && (page_req != VM_ALLOC_INTERRUPT)) {
750 page_req = VM_ALLOC_SYSTEM;
751 };
752
753 s = splvm();
754
755loop:
756 if (vmstats.v_free_count > vmstats.v_free_reserved) {
757 /*
758 * Allocate from the free queue if there are plenty of pages
759 * in it.
760 */
761 if (page_req == VM_ALLOC_ZERO)
762 m = vm_page_select_free(object, pindex, TRUE);
763 else
764 m = vm_page_select_free(object, pindex, FALSE);
765 } else if (
766 (page_req == VM_ALLOC_SYSTEM &&
767 vmstats.v_cache_count == 0 &&
768 vmstats.v_free_count > vmstats.v_interrupt_free_min) ||
769 (page_req == VM_ALLOC_INTERRUPT && vmstats.v_free_count > 0)
770 ) {
771 /*
772 * Interrupt or system, dig deeper into the free list.
773 */
774 m = vm_page_select_free(object, pindex, FALSE);
775 } else if (page_req != VM_ALLOC_INTERRUPT) {
776 /*
777 * Allocatable from cache (non-interrupt only). On success,
778 * we must free the page and try again, thus ensuring that
779 * vmstats.v_*_free_min counters are replenished.
780 */
781 m = vm_page_select_cache(object, pindex);
782 if (m == NULL) {
783 splx(s);
784#if defined(DIAGNOSTIC)
785 if (vmstats.v_cache_count > 0)
786 printf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", vmstats.v_cache_count);
787#endif
788 vm_pageout_deficit++;
789 pagedaemon_wakeup();
790 return (NULL);
791 }
792 KASSERT(m->dirty == 0, ("Found dirty cache page %p", m));
793 vm_page_busy(m);
794 vm_page_protect(m, VM_PROT_NONE);
795 vm_page_free(m);
796 goto loop;
797 } else {
798 /*
799 * Not allocatable from cache from interrupt, give up.
800 */
801 splx(s);
802 vm_pageout_deficit++;
803 pagedaemon_wakeup();
804 return (NULL);
805 }
806
807 /*
808 * At this point we had better have found a good page.
809 */
810
811 KASSERT(
812 m != NULL,
813 ("vm_page_alloc(): missing page on free queue\n")
814 );
815
816 /*
817 * Remove from free queue
818 */
819
820 vm_page_unqueue_nowakeup(m);
821
822 /*
823 * Initialize structure. Only the PG_ZERO flag is inherited.
824 */
825
826 if (m->flags & PG_ZERO) {
827 vm_page_zero_count--;
828 m->flags = PG_ZERO | PG_BUSY;
829 } else {
830 m->flags = PG_BUSY;
831 }
832 m->wire_count = 0;
833 m->hold_count = 0;
834 m->act_count = 0;
835 m->busy = 0;
836 m->valid = 0;
837 KASSERT(m->dirty == 0, ("vm_page_alloc: free/cache page %p was dirty", m));
838
839 /*
840 * vm_page_insert() is safe prior to the splx(). Note also that
841 * inserting a page here does not insert it into the pmap (which
842 * could cause us to block allocating memory). We cannot block
843 * anywhere.
844 */
845
846 vm_page_insert(m, object, pindex);
847
848 /*
849 * Don't wakeup too often - wakeup the pageout daemon when
850 * we would be nearly out of memory.
851 */
852 if (vm_paging_needed())
853 pagedaemon_wakeup();
854
855 splx(s);
856
857 return (m);
858}
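/*
 * Illustrative sketch (not part of the original file): a typical caller
 * pattern for vm_page_alloc().  On failure the caller sleeps via VM_WAIT
 * and retries, which is essentially what vm_page_grab() further below
 * does.  The ex_ function and its arguments are placeholders.
 */
#if 0	/* example only */
static vm_page_t
ex_alloc_page(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	while ((m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL)) == NULL) {
		/* no free or cacheable page right now, wait for the pageout daemon */
		VM_WAIT;
	}
	return (m);
}
#endif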
859
860/*
861 * vm_wait: (also see VM_WAIT macro)
862 *
863 * Block until free pages are available for allocation
864 * - Called in various places before memory allocations.
865 */
866
867void
868vm_wait(void)
869{
870 int s;
871
872 s = splvm();
873 if (curthread == pagethread) {
874 vm_pageout_pages_needed = 1;
875 tsleep(&vm_pageout_pages_needed, 0, "VMWait", 0);
876 } else {
877 if (!vm_pages_needed) {
878 vm_pages_needed = 1;
879 wakeup(&vm_pages_needed);
880 }
881 tsleep(&vmstats.v_free_count, 0, "vmwait", 0);
882 }
883 splx(s);
884}
885
886/*
887 * vm_waitpfault: (also see VM_WAITPFAULT macro)
888 *
889 * Block until free pages are available for allocation
890 * - Called only in vm_fault so that processes page faulting
891 * can be easily tracked.
892 * - Sleeps at a lower priority than vm_wait() so that vm_wait()ing
893 * processes will be able to grab memory first. Do not change
894 * this balance without careful testing first.
895 */
896
897void
898vm_waitpfault(void)
899{
900 int s;
901
902 s = splvm();
903 if (!vm_pages_needed) {
904 vm_pages_needed = 1;
905 wakeup(&vm_pages_needed);
906 }
907 tsleep(&vmstats.v_free_count, 0, "pfault", 0);
908 splx(s);
909}
910
911/*
912 * vm_page_activate:
913 *
914 * Put the specified page on the active list (if appropriate).
915 * Ensure that act_count is at least ACT_INIT but do not otherwise
916 * mess with it.
917 *
918 * The page queues must be locked.
919 * This routine may not block.
920 */
921void
922vm_page_activate(vm_page_t m)
923{
924 int s;
925
926 s = splvm();
927 if (m->queue != PQ_ACTIVE) {
928 if ((m->queue - m->pc) == PQ_CACHE)
929 mycpu->gd_cnt.v_reactivated++;
930
931 vm_page_unqueue(m);
932
933 if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
934 m->queue = PQ_ACTIVE;
935 vm_page_queues[PQ_ACTIVE].lcnt++;
936 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
937 if (m->act_count < ACT_INIT)
938 m->act_count = ACT_INIT;
939 vmstats.v_active_count++;
940 }
941 } else {
942 if (m->act_count < ACT_INIT)
943 m->act_count = ACT_INIT;
944 }
945
946 splx(s);
947}
948
949/*
950 * vm_page_free_wakeup:
951 *
952 * Helper routine for vm_page_free_toq() and vm_page_cache(). This
953 * routine is called when a page has been added to the cache or free
954 * queues.
955 *
956 * This routine may not block.
957 * This routine must be called at splvm()
958 */
959static __inline void
960vm_page_free_wakeup(void)
961{
962 /*
963 * if pageout daemon needs pages, then tell it that there are
964 * some free.
965 */
966 if (vm_pageout_pages_needed &&
967 vmstats.v_cache_count + vmstats.v_free_count >= vmstats.v_pageout_free_min) {
968 wakeup(&vm_pageout_pages_needed);
969 vm_pageout_pages_needed = 0;
970 }
971 /*
972 * wakeup processes that are waiting on memory if we hit a
973 * high water mark. And wakeup scheduler process if we have
974 * lots of memory. this process will swapin processes.
975 */
976 if (vm_pages_needed && !vm_page_count_min()) {
977 vm_pages_needed = 0;
978 wakeup(&vmstats.v_free_count);
979 }
980}
981
982/*
983 * vm_page_free_toq:
984 *
985 * Returns the given page to the PQ_FREE list,
986 * disassociating it with any VM object.
987 *
988 * Object and page must be locked prior to entry.
989 * This routine may not block.
990 */
991
992void
993vm_page_free_toq(vm_page_t m)
994{
995 int s;
996 struct vpgqueues *pq;
997 vm_object_t object = m->object;
998
999 s = splvm();
1000
1001 mycpu->gd_cnt.v_tfree++;
1002
1003 if (m->busy || ((m->queue - m->pc) == PQ_FREE)) {
1004 printf(
1005 "vm_page_free: pindex(%lu), busy(%d), PG_BUSY(%d), hold(%d)\n",
1006 (u_long)m->pindex, m->busy, (m->flags & PG_BUSY) ? 1 : 0,
1007 m->hold_count);
1008 if ((m->queue - m->pc) == PQ_FREE)
1009 panic("vm_page_free: freeing free page");
1010 else
1011 panic("vm_page_free: freeing busy page");
1012 }
1013
1014 /*
1015 * unqueue, then remove page. Note that we cannot destroy
1016 * the page here because we do not want to call the pager's
1017 * callback routine until after we've put the page on the
1018 * appropriate free queue.
1019 */
1020
1021 vm_page_unqueue_nowakeup(m);
1022 vm_page_remove(m);
1023
1024 /*
1025 * If fictitious remove object association and
1026 * return, otherwise delay object association removal.
1027 */
1028
1029 if ((m->flags & PG_FICTITIOUS) != 0) {
1030 splx(s);
1031 return;
1032 }
1033
1034 m->valid = 0;
1035 vm_page_undirty(m);
1036
1037 if (m->wire_count != 0) {
1038 if (m->wire_count > 1) {
1039 panic("vm_page_free: invalid wire count (%d), pindex: 0x%lx",
1040 m->wire_count, (long)m->pindex);
1041 }
1042 panic("vm_page_free: freeing wired page\n");
1043 }
1044
1045 /*
1046 * If we've exhausted the object's resident pages we want to free
1047 * it up.
1048 */
1049
1050 if (object &&
1051 (object->type == OBJT_VNODE) &&
1052 ((object->flags & OBJ_DEAD) == 0)
1053 ) {
1054 struct vnode *vp = (struct vnode *)object->handle;
1055
1056 if (vp && VSHOULDFREE(vp))
1057 vfree(vp);
1058 }
1059
1060 /*
1061 * Clear the UNMANAGED flag when freeing an unmanaged page.
1062 */
1063
1064 if (m->flags & PG_UNMANAGED) {
1065 m->flags &= ~PG_UNMANAGED;
1066 } else {
1067#ifdef __alpha__
1068 pmap_page_is_free(m);
1069#endif
1070 }
1071
1072 if (m->hold_count != 0) {
1073 m->flags &= ~PG_ZERO;
1074 m->queue = PQ_HOLD;
1075 } else
1076 m->queue = PQ_FREE + m->pc;
1077 pq = &vm_page_queues[m->queue];
1078 pq->lcnt++;
1079 ++(*pq->cnt);
1080
1081 /*
1082 * Put zero'd pages on the end ( where we look for zero'd pages
1084 * first ) and non-zero'd pages at the head.
1084 */
1085
1086 if (m->flags & PG_ZERO) {
1087 TAILQ_INSERT_TAIL(&pq->pl, m, pageq);
1088 ++vm_page_zero_count;
1089 } else {
1090 TAILQ_INSERT_HEAD(&pq->pl, m, pageq);
1091 }
1092
1093 vm_page_free_wakeup();
1094
1095 splx(s);
1096}
1097
1098/*
1099 * vm_page_unmanage:
1100 *
1101 * Prevent PV management from being done on the page. The page is
1102 * removed from the paging queues as if it were wired, and as a
1103 * consequence of no longer being managed the pageout daemon will not
1104 * touch it (since there is no way to locate the pte mappings for the
1105 * page). madvise() calls that mess with the pmap will also no longer
1106 * operate on the page.
1107 *
1108 * Beyond that the page is still reasonably 'normal'. Freeing the page
1109 * will clear the flag.
1110 *
1111 * This routine is used by OBJT_PHYS objects - objects using unswappable
1112 * physical memory as backing store rather than swap-backed memory and
1113 * will eventually be extended to support 4MB unmanaged physical
1114 * mappings.
1115 */
1116
1117void
1118vm_page_unmanage(vm_page_t m)
1119{
1120 int s;
1121
1122 s = splvm();
1123 if ((m->flags & PG_UNMANAGED) == 0) {
1124 if (m->wire_count == 0)
1125 vm_page_unqueue(m);
1126 }
1127 vm_page_flag_set(m, PG_UNMANAGED);
1128 splx(s);
1129}
1130
1131/*
1132 * vm_page_wire:
1133 *
1134 * Mark this page as wired down by yet
1135 * another map, removing it from paging queues
1136 * as necessary.
1137 *
1138 * The page queues must be locked.
1139 * This routine may not block.
1140 */
1141void
1142vm_page_wire(vm_page_t m)
1143{
1144 int s;
1145
1146 /*
1147 * Only bump the wire statistics if the page is not already wired,
1148 * and only unqueue the page if it is on some queue (if it is unmanaged
1149 * it is already off the queues).
1150 */
1151 s = splvm();
1152 if (m->wire_count == 0) {
1153 if ((m->flags & PG_UNMANAGED) == 0)
1154 vm_page_unqueue(m);
1155 vmstats.v_wire_count++;
1156 }
1157 m->wire_count++;
1158 KASSERT(m->wire_count != 0,
1159 ("vm_page_wire: wire_count overflow m=%p", m));
1160
1161 splx(s);
1162 vm_page_flag_set(m, PG_MAPPED);
1163}
1164
1165/*
1166 * vm_page_unwire:
1167 *
1168 * Release one wiring of this page, potentially
1169 * enabling it to be paged again.
1170 *
1171 * Many pages placed on the inactive queue should actually go
1172 * into the cache, but it is difficult to figure out which. What
1173 * we do instead, if the inactive target is well met, is to put
1174 * clean pages at the head of the inactive queue instead of the tail.
1175 * This will cause them to be moved to the cache more quickly and
1176 * if not actively re-referenced, freed more quickly. If we just
1177 * stick these pages at the end of the inactive queue, heavy filesystem
1178 * meta-data accesses can cause an unnecessary paging load on memory bound
1179 * processes. This optimization causes one-time-use metadata to be
1180 * reused more quickly.
1181 *
1182 * BUT, if we are in a low-memory situation we have no choice but to
1183 * put clean pages on the cache queue.
1184 *
1185 * A number of routines use vm_page_unwire() to guarantee that the page
1186 * will go into either the inactive or active queues, and will NEVER
1187 * be placed in the cache - for example, just after dirtying a page.
1188 * dirty pages in the cache are not allowed.
1189 *
1190 * The page queues must be locked.
1191 * This routine may not block.
1192 */
1193void
1194vm_page_unwire(vm_page_t m, int activate)
1195{
1196 int s;
1197
1198 s = splvm();
1199
1200 if (m->wire_count > 0) {
1201 m->wire_count--;
1202 if (m->wire_count == 0) {
1203 vmstats.v_wire_count--;
1204 if (m->flags & PG_UNMANAGED) {
1205 ;
1206 } else if (activate) {
1207 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
1208 m->queue = PQ_ACTIVE;
1209 vm_page_queues[PQ_ACTIVE].lcnt++;
1210 vmstats.v_active_count++;
1211 } else {
1212 vm_page_flag_clear(m, PG_WINATCFLS);
1213 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
1214 m->queue = PQ_INACTIVE;
1215 vm_page_queues[PQ_INACTIVE].lcnt++;
1216 vmstats.v_inactive_count++;
1217 }
1218 }
1219 } else {
1220 panic("vm_page_unwire: invalid wire count: %d\n", m->wire_count);
1221 }
1222 splx(s);
1223}
1224
1225
1226/*
1227 * Move the specified page to the inactive queue. If the page has
1228 * any associated swap, the swap is deallocated.
1229 *
1230 * Normally athead is 0 resulting in LRU operation. athead is set
1231 * to 1 if we want this page to be 'as if it were placed in the cache',
1232 * except without unmapping it from the process address space.
1233 *
1234 * This routine may not block.
1235 */
1236static __inline void
1237_vm_page_deactivate(vm_page_t m, int athead)
1238{
1239 int s;
1240
1241 /*
1242 * Ignore if already inactive.
1243 */
1244 if (m->queue == PQ_INACTIVE)
1245 return;
1246
1247 s = splvm();
1248 if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
1249 if ((m->queue - m->pc) == PQ_CACHE)
1250 mycpu->gd_cnt.v_reactivated++;
1251 vm_page_flag_clear(m, PG_WINATCFLS);
1252 vm_page_unqueue(m);
1253 if (athead)
1254 TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
1255 else
1256 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
1257 m->queue = PQ_INACTIVE;
1258 vm_page_queues[PQ_INACTIVE].lcnt++;
1259 vmstats.v_inactive_count++;
1260 }
1261 splx(s);
1262}
1263
1264void
1265vm_page_deactivate(vm_page_t m)
1266{
1267 _vm_page_deactivate(m, 0);
1268}
1269
1270/*
1271 * vm_page_try_to_cache:
1272 *
1273 * Returns 0 on failure, 1 on success
1274 */
1275int
1276vm_page_try_to_cache(vm_page_t m)
1277{
1278 if (m->dirty || m->hold_count || m->busy || m->wire_count ||
1279 (m->flags & (PG_BUSY|PG_UNMANAGED))) {
1280 return(0);
1281 }
1282 vm_page_test_dirty(m);
1283 if (m->dirty)
1284 return(0);
1285 vm_page_cache(m);
1286 return(1);
1287}
1288
1289/*
1290 * vm_page_try_to_free()
1291 *
1292 * Attempt to free the page. If we cannot free it, we do nothing.
1293 * 1 is returned on success, 0 on failure.
1294 */
1295
1296int
1297vm_page_try_to_free(vm_page_t m)
1298{
1299 if (m->dirty || m->hold_count || m->busy || m->wire_count ||
1300 (m->flags & (PG_BUSY|PG_UNMANAGED))) {
1301 return(0);
1302 }
1303 vm_page_test_dirty(m);
1304 if (m->dirty)
1305 return(0);
1306 vm_page_busy(m);
1307 vm_page_protect(m, VM_PROT_NONE);
1308 vm_page_free(m);
1309 return(1);
1310}
1311
1312
1313/*
1314 * vm_page_cache
1315 *
1316 * Put the specified page onto the page cache queue (if appropriate).
1317 *
1318 * This routine may not block.
1319 */
1320void
1321vm_page_cache(vm_page_t m)
1322{
1323 int s;
1324
1325 if ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy || m->wire_count) {
1326 printf("vm_page_cache: attempting to cache busy page\n");
1327 return;
1328 }
1329 if ((m->queue - m->pc) == PQ_CACHE)
1330 return;
1331
1332 /*
1333 * Remove all pmaps and indicate that the page is not
1334 * writeable or mapped.
1335 */
1336
1337 vm_page_protect(m, VM_PROT_NONE);
1338 if (m->dirty != 0) {
1339 panic("vm_page_cache: caching a dirty page, pindex: %ld",
1340 (long)m->pindex);
1341 }
1342 s = splvm();
1343 vm_page_unqueue_nowakeup(m);
1344 m->queue = PQ_CACHE + m->pc;
1345 vm_page_queues[m->queue].lcnt++;
1346 TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m, pageq);
1347 vmstats.v_cache_count++;
1348 vm_page_free_wakeup();
1349 splx(s);
1350}
1351
1352/*
1353 * vm_page_dontneed
1354 *
1355 * Cache, deactivate, or do nothing as appropriate. This routine
1356 * is typically used by madvise() MADV_DONTNEED.
1357 *
1358 * Generally speaking we want to move the page into the cache so
1359 * it gets reused quickly. However, this can result in a silly syndrome
1360 * due to the page recycling too quickly. Small objects will not be
1361 * fully cached. On the other hand, if we move the page to the inactive
1362 * queue we wind up with a problem whereby very large objects
1363 * unnecessarily blow away our inactive and cache queues.
1364 *
1365 * The solution is to move the pages based on a fixed weighting. We
1366 * either leave them alone, deactivate them, or move them to the cache,
1367 * where moving them to the cache has the highest weighting.
1368 * By forcing some pages into other queues we eventually force the
1369 * system to balance the queues, potentially recovering other unrelated
1370 * space from active. The idea is to not force this to happen too
1371 * often.
1372 */
1373
1374void
1375vm_page_dontneed(vm_page_t m)
1376{
1377 static int dnweight;
1378 int dnw;
1379 int head;
1380
1381 dnw = ++dnweight;
1382
1383 /*
1384 * occasionally leave the page alone
1385 */
1386
1387 if ((dnw & 0x01F0) == 0 ||
1388 m->queue == PQ_INACTIVE ||
1389 m->queue - m->pc == PQ_CACHE
1390 ) {
1391 if (m->act_count >= ACT_INIT)
1392 --m->act_count;
1393 return;
1394 }
1395
1396 if (m->dirty == 0)
1397 vm_page_test_dirty(m);
1398
1399 if (m->dirty || (dnw & 0x0070) == 0) {
1400 /*
1401 * Deactivate the page 3 times out of 32.
1402 */
1403 head = 0;
1404 } else {
1405 /*
1406 * Cache the page 28 times out of every 32. Note that
1407 * the page is deactivated instead of cached, but placed
1408 * at the head of the queue instead of the tail.
1409 */
1410 head = 1;
1411 }
1412 _vm_page_deactivate(m, head);
1413}
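/*
 * Illustrative sketch (not part of the original file): the fixed weighting
 * in vm_page_dontneed() above, tallied for a clean page over 512
 * consecutive values of the dnweight counter.  The two masks reproduce the
 * "leave alone / deactivate / move toward the cache" split of roughly
 * 1 : 3 : 28 out of every 32 calls described in the comments above.
 */
#if 0	/* example only */
#include <stdio.h>

int
main(void)
{
	int dnw, leave = 0, deact = 0, tocache = 0;

	for (dnw = 1; dnw <= 512; ++dnw) {
		if ((dnw & 0x01F0) == 0)
			++leave;		/* leave the page alone */
		else if ((dnw & 0x0070) == 0)
			++deact;		/* deactivate, tail of the inactive queue */
		else
			++tocache;		/* deactivate at the head (cache-like) */
	}
	printf("leave %d, deactivate %d, cache-like %d (of 512)\n",
	    leave, deact, tocache);
	return (0);
}
#endif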
1414
1415/*
1416 * Grab a page, waiting until we are woken up due to the page
1417 * changing state. We keep on waiting, if the page continues
1418 * to be in the object. If the page doesn't exist, allocate it.
1419 *
1420 * This routine may block.
1421 */
1422vm_page_t
1423vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
1424{
1425
1426 vm_page_t m;
1427 int s, generation;
1428
1429retrylookup:
1430 if ((m = vm_page_lookup(object, pindex)) != NULL) {
1431 if (m->busy || (m->flags & PG_BUSY)) {
1432 generation = object->generation;
1433
1434 s = splvm();
1435 while ((object->generation == generation) &&
1436 (m->busy || (m->flags & PG_BUSY))) {
1437 vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
1438 tsleep(m, 0, "pgrbwt", 0);
1439 if ((allocflags & VM_ALLOC_RETRY) == 0) {
1440 splx(s);
1441 return NULL;
1442 }
1443 }
1444 splx(s);
1445 goto retrylookup;
1446 } else {
1447 vm_page_busy(m);
1448 return m;
1449 }
1450 }
1451
1452 m = vm_page_alloc(object, pindex, allocflags & ~VM_ALLOC_RETRY);
1453 if (m == NULL) {
1454 VM_WAIT;
1455 if ((allocflags & VM_ALLOC_RETRY) == 0)
1456 return NULL;
1457 goto retrylookup;
1458 }
1459
1460 return m;
1461}
1462
1463/*
1464 * Mapping function for valid bits or for dirty bits in
1465 * a page. May not block.
1466 *
1467 * Inputs are required to range within a page.
1468 */
1469
1470__inline int
1471vm_page_bits(int base, int size)
1472{
1473 int first_bit;
1474 int last_bit;
1475
1476 KASSERT(
1477 base + size <= PAGE_SIZE,
1478 ("vm_page_bits: illegal base/size %d/%d", base, size)
1479 );
1480
1481 if (size == 0) /* handle degenerate case */
1482 return(0);
1483
1484 first_bit = base >> DEV_BSHIFT;
1485 last_bit = (base + size - 1) >> DEV_BSHIFT;
1486
1487 return ((2 << last_bit) - (1 << first_bit));
1488}
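/*
 * Illustrative sketch (not part of the original file): what vm_page_bits()
 * computes for a couple of ranges, assuming DEV_BSIZE = 512 (DEV_BSHIFT = 9)
 * and 4K pages.  Each bit stands for one DEV_BSIZE chunk of the page.
 */
#if 0	/* example only */
#include <stdio.h>

#define EX_DEV_BSHIFT	9			/* assumed 512-byte device blocks */

static int
ex_page_bits(int base, int size)
{
	int first_bit = base >> EX_DEV_BSHIFT;
	int last_bit = (base + size - 1) >> EX_DEV_BSHIFT;

	return ((2 << last_bit) - (1 << first_bit));
}

int
main(void)
{
	/* chunks 1 and 2 only: expect 0x06 */
	printf("bits(512, 1024) = 0x%x\n", ex_page_bits(512, 1024));
	/* the whole 4K page, chunks 0..7: expect 0xff */
	printf("bits(0, 4096)   = 0x%x\n", ex_page_bits(0, 4096));
	return (0);
}
#endif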
1489
1490/*
1491 * vm_page_set_validclean:
1492 *
1493 * Sets portions of a page valid and clean. The arguments are expected
1494 * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
1495 * of any partial chunks touched by the range. The invalid portion of
1496 * such chunks will be zero'd.
1497 *
1498 * This routine may not block.
1499 *
1500 * (base + size) must be less than or equal to PAGE_SIZE.
1501 */
1502void
1503vm_page_set_validclean(vm_page_t m, int base, int size)
1504{
1505 int pagebits;
1506 int frag;
1507 int endoff;
1508
1509 if (size == 0) /* handle degenerate case */
1510 return;
1511
1512 /*
1513 * If the base is not DEV_BSIZE aligned and the valid
1514 * bit is clear, we have to zero out a portion of the
1515 * first block.
1516 */
1517
1518 if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
1519 (m->valid & (1 << (base >> DEV_BSHIFT))) == 0
1520 ) {
1521 pmap_zero_page_area(
1522 VM_PAGE_TO_PHYS(m),
1523 frag,
1524 base - frag
1525 );
1526 }
1527
1528 /*
1529 * If the ending offset is not DEV_BSIZE aligned and the
1530 * valid bit is clear, we have to zero out a portion of
1531 * the last block.
1532 */
1533
1534 endoff = base + size;
1535
1536 if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
1537 (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0
1538 ) {
1539 pmap_zero_page_area(
1540 VM_PAGE_TO_PHYS(m),
1541 endoff,
1542 DEV_BSIZE - (endoff & (DEV_BSIZE - 1))
1543 );
1544 }
1545
1546 /*
1547 * Set valid, clear dirty bits. If validating the entire
1548 * page we can safely clear the pmap modify bit. We also
1549 * use this opportunity to clear the PG_NOSYNC flag. If a process
1550 * takes a write fault on a MAP_NOSYNC memory area the flag will
1551 * be set again.
1552 *
1553 * We set valid bits inclusive of any overlap, but we can only
1554 * clear dirty bits for DEV_BSIZE chunks that are fully within
1555 * the range.
1556 */
1557
1558 pagebits = vm_page_bits(base, size);
1559 m->valid |= pagebits;
1560#if 0 /* NOT YET */
1561 if ((frag = base & (DEV_BSIZE - 1)) != 0) {
1562 frag = DEV_BSIZE - frag;
1563 base += frag;
1564 size -= frag;
1565 if (size < 0)
1566 size = 0;
1567 }
1568 pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
1569#endif
1570 m->dirty &= ~pagebits;
1571 if (base == 0 && size == PAGE_SIZE) {
1572 pmap_clear_modify(m);
1573 vm_page_flag_clear(m, PG_NOSYNC);
1574 }
1575}
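/*
 * Illustrative sketch (not part of the original file): the partial-chunk
 * arithmetic at the top of vm_page_set_validclean(), with DEV_BSIZE assumed
 * to be 512.  For an unaligned range, the bytes between the start of the
 * first chunk and `base', and between `base + size' and the end of the last
 * chunk, are the pieces that may have to be zeroed.
 */
#if 0	/* example only */
#include <stdio.h>

#define EX_DEV_BSIZE	512

int
main(void)
{
	int base = 700, size = 1000;		/* arbitrary unaligned range */
	int endoff = base + size;
	int head_frag = base & ~(EX_DEV_BSIZE - 1);
	int tail_frag = endoff & ~(EX_DEV_BSIZE - 1);

	if (head_frag != base)
		printf("zero head [%d, %d): %d bytes\n",
		    head_frag, base, base - head_frag);
	if (tail_frag != endoff)
		printf("zero tail [%d, %d): %d bytes\n",
		    endoff,
		    endoff + EX_DEV_BSIZE - (endoff & (EX_DEV_BSIZE - 1)),
		    EX_DEV_BSIZE - (endoff & (EX_DEV_BSIZE - 1)));
	return (0);
}
#endif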
1576
1577#if 0
1578
1579void
1580vm_page_set_dirty(vm_page_t m, int base, int size)
1581{
1582 m->dirty |= vm_page_bits(base, size);
1583}
1584
1585#endif
1586
1587void
1588vm_page_clear_dirty(vm_page_t m, int base, int size)
1589{
1590 m->dirty &= ~vm_page_bits(base, size);
1591}
1592
1593/*
1594 * vm_page_set_invalid:
1595 *
1596 * Invalidates DEV_BSIZE'd chunks within a page. Both the
1597 * valid and dirty bits for the affected areas are cleared.
1598 *
1599 * May not block.
1600 */
1601void
1602vm_page_set_invalid(vm_page_t m, int base, int size)
1603{
1604 int bits;
1605
1606 bits = vm_page_bits(base, size);
1607 m->valid &= ~bits;
1608 m->dirty &= ~bits;
1609 m->object->generation++;
1610}
1611
1612/*
1613 * vm_page_zero_invalid()
1614 *
1615 * The kernel assumes that the invalid portions of a page contain
1616 * garbage, but such pages can be mapped into memory by user code.
1617 * When this occurs, we must zero out the non-valid portions of the
1618 * page so user code sees what it expects.
1619 *
1620 * Pages are most often semi-valid when the end of a file is mapped
1621 * into memory and the file's size is not page aligned.
1622 */
1623
1624void
1625vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
1626{
1627 int b;
1628 int i;
1629
1630 /*
1631 * Scan the valid bits looking for invalid sections that
1632 * must be zero'd. Invalid sub-DEV_BSIZE'd areas ( where the
1633 * valid bit may be set ) have already been zero'd by
1634 * vm_page_set_validclean().
1635 */
1636
1637 for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
1638 if (i == (PAGE_SIZE / DEV_BSIZE) ||
1639 (m->valid & (1 << i))
1640 ) {
1641 if (i > b) {
1642 pmap_zero_page_area(
1643 VM_PAGE_TO_PHYS(m),
1644 b << DEV_BSHIFT,
1645 (i - b) << DEV_BSHIFT
1646 );
1647 }
1648 b = i + 1;
1649 }
1650 }
1651
1652 /*
1653 * setvalid is TRUE when we can safely set the zero'd areas
1654 * as being valid. We can do this if there are no cache consistency
1655 * issues. e.g. it is ok to do with UFS, but not ok to do with NFS.
1656 */
1657
1658 if (setvalid)
1659 m->valid = VM_PAGE_BITS_ALL;
1660}
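/*
 * Illustrative sketch (not part of the original file): the invalid-run scan
 * in vm_page_zero_invalid() above, applied to an example valid mask.  With
 * 4K pages and 512-byte chunks there are 8 valid bits; every maximal run of
 * clear bits becomes one pmap_zero_page_area() call.
 */
#if 0	/* example only */
#include <stdio.h>

#define EX_CHUNKS	8			/* PAGE_SIZE / DEV_BSIZE, assumed */
#define EX_DEV_BSHIFT	9

int
main(void)
{
	int valid = 0x63;			/* chunks 0, 1, 5 and 6 valid */
	int b, i;

	for (b = i = 0; i <= EX_CHUNKS; ++i) {
		if (i == EX_CHUNKS || (valid & (1 << i))) {
			if (i > b) {
				printf("zero bytes [%d, %d)\n",
				    b << EX_DEV_BSHIFT, i << EX_DEV_BSHIFT);
			}
			b = i + 1;
		}
	}
	return (0);
}
#endif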
1661
1662/*
1663 * vm_page_is_valid:
1664 *
1665 * Is (partial) page valid? Note that the case where size == 0
1666 * will return FALSE in the degenerate case where the page is
1667 * entirely invalid, and TRUE otherwise.
1668 *
1669 * May not block.
1670 */
1671
1672int
1673vm_page_is_valid(vm_page_t m, int base, int size)
1674{
1675 int bits = vm_page_bits(base, size);
1676
1677 if (m->valid && ((m->valid & bits) == bits))
1678 return 1;
1679 else
1680 return 0;
1681}
1682
1683/*
1684 * update dirty bits from pmap/mmu. May not block.
1685 */
1686
1687void
1688vm_page_test_dirty(vm_page_t m)
1689{
1690 if ((m->dirty != VM_PAGE_BITS_ALL) && pmap_is_modified(m)) {
1691 vm_page_dirty(m);
1692 }
1693}
1694
1695/*
1696 * This interface is for merging with malloc() someday.
1697 * Even if we never implement compaction so that contiguous allocation
1698 * works after initialization time, malloc()'s data structures are good
1699 * for statistics and for allocations of less than a page.
1700 */
1701void *
1702contigmalloc1(
1703 unsigned long size, /* should be size_t here and for malloc() */
1704 struct malloc_type *type,
1705 int flags,
1706 unsigned long low,
1707 unsigned long high,
1708 unsigned long alignment,
1709 unsigned long boundary,
1710 vm_map_t map)
1711{
1712 int i, s, start;
1713 vm_offset_t addr, phys, tmp_addr;
1714 int pass;
1715 vm_page_t pga = vm_page_array;
1716
1717 size = round_page(size);
1718 if (size == 0)
1719 panic("contigmalloc1: size must not be 0");
1720 if ((alignment & (alignment - 1)) != 0)
1721 panic("contigmalloc1: alignment must be a power of 2");
1722 if ((boundary & (boundary - 1)) != 0)
1723 panic("contigmalloc1: boundary must be a power of 2");
1724
1725 start = 0;
1726 for (pass = 0; pass <= 1; pass++) {
1727 s = splvm();
1728again:
1729 /*
1730 * Find first page in array that is free, within range, aligned, and
1731 * such that the boundary won't be crossed.
1732 */
1733 for (i = start; i < vmstats.v_page_count; i++) {
1734 int pqtype;
1735 phys = VM_PAGE_TO_PHYS(&pga[i]);
1736 pqtype = pga[i].queue - pga[i].pc;
1737 if (((pqtype == PQ_FREE) || (pqtype == PQ_CACHE)) &&
1738 (phys >= low) && (phys < high) &&
1739 ((phys & (alignment - 1)) == 0) &&
1740 (((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0))
1741 break;
1742 }
1743
1744 /*
1745 * If the above failed or we will exceed the upper bound, fail.
1746 */
1747 if ((i == vmstats.v_page_count) ||
1748 ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
1749 vm_page_t m, next;
1750
1751again1:
1752 for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
1753 m != NULL;
1754 m = next) {
1755
1756 KASSERT(m->queue == PQ_INACTIVE,
1757 ("contigmalloc1: page %p is not PQ_INACTIVE", m));
1758
1759 next = TAILQ_NEXT(m, pageq);
1760 if (vm_page_sleep_busy(m, TRUE, "vpctw0"))
1761 goto again1;
1762 vm_page_test_dirty(m);
1763 if (m->dirty) {
1764 if (m->object->type == OBJT_VNODE) {
1765 vn_lock(m->object->handle, LK_EXCLUSIVE | LK_RETRY, curthread);
1766 vm_object_page_clean(m->object, 0, 0, OBJPC_SYNC);
1767 VOP_UNLOCK(m->object->handle, 0, curthread);
1768 goto again1;
1769 } else if (m->object->type == OBJT_SWAP ||
1770 m->object->type == OBJT_DEFAULT) {
1771 vm_pageout_flush(&m, 1, 0);
1772 goto again1;
1773 }
1774 }
1775 if ((m->dirty == 0) && (m->busy == 0) && (m->hold_count == 0))
1776 vm_page_cache(m);
1777 }
1778
1779 for (m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
1780 m != NULL;
1781 m = next) {
1782
1783 KASSERT(m->queue == PQ_ACTIVE,
1784 ("contigmalloc1: page %p is not PQ_ACTIVE", m));
1785
1786 next = TAILQ_NEXT(m, pageq);
1787 if (vm_page_sleep_busy(m, TRUE, "vpctw1"))
1788 goto again1;
1789 vm_page_test_dirty(m);
1790 if (m->dirty) {
1791 if (m->object->type == OBJT_VNODE) {
1792 vn_lock(m->object->handle, LK_EXCLUSIVE | LK_RETRY, curthread);
1793 vm_object_page_clean(m->object, 0, 0, OBJPC_SYNC);
1794 VOP_UNLOCK(m->object->handle, 0, curthread);
1795 goto again1;
1796 } else if (m->object->type == OBJT_SWAP ||
1797 m->object->type == OBJT_DEFAULT) {
1798 vm_pageout_flush(&m, 1, 0);
1799 goto again1;
1800 }
1801 }
1802 if ((m->dirty == 0) && (m->busy == 0) && (m->hold_count == 0))
1803 vm_page_cache(m);
1804 }
1805
1806 splx(s);
1807 continue;
1808 }
1809 start = i;
1810
1811 /*
1812 * Check successive pages for contiguous and free.
1813 */
1814 for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
1815 int pqtype;
1816 pqtype = pga[i].queue - pga[i].pc;
1817 if ((VM_PAGE_TO_PHYS(&pga[i]) !=
1818 (VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
1819 ((pqtype != PQ_FREE) && (pqtype != PQ_CACHE))) {
1820 start++;
1821 goto again;
1822 }
1823 }
1824
1825 for (i = start; i < (start + size / PAGE_SIZE); i++) {
1826 int pqtype;
1827 vm_page_t m = &pga[i];
1828
1829 pqtype = m->queue - m->pc;
1830 if (pqtype == PQ_CACHE) {
1831 vm_page_busy(m);
1832 vm_page_free(m);
1833 }
1834 vm_page_unqueue_nowakeup(m);
1835 m->valid = VM_PAGE_BITS_ALL;
1836 if (m->flags & PG_ZERO)
1837 vm_page_zero_count--;
1838 m->flags = 0;
1839 KASSERT(m->dirty == 0, ("contigmalloc1: page %p was dirty", m));
1840 m->wire_count = 0;
1841 m->busy = 0;
1842 m->object = NULL;
1843 }
1844
1845 /*
1846 * We've found a contiguous chunk that meets our requirements.
1847 * Allocate kernel VM, unfree and assign the physical pages to it and
1848 * return kernel VM pointer.
1849 */
1850 vm_map_lock(map);
1851 if (vm_map_findspace(map, vm_map_min(map), size, 1, &addr) !=
1852 KERN_SUCCESS) {
1853 /*
1854 * XXX We almost never run out of kernel virtual
1855 * space, so we don't make the allocated memory
1856 * above available.
1857 */
1858 vm_map_unlock(map);
1859 splx(s);
1860 return (NULL);
1861 }
1862 vm_object_reference(kernel_object);
1863 vm_map_insert(map, kernel_object, addr - VM_MIN_KERNEL_ADDRESS,
1864 addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
1865 vm_map_unlock(map);
1866
1867 tmp_addr = addr;
1868 for (i = start; i < (start + size / PAGE_SIZE); i++) {
1869 vm_page_t m = &pga[i];
1870 vm_page_insert(m, kernel_object,
1871 OFF_TO_IDX(tmp_addr - VM_MIN_KERNEL_ADDRESS));
1872 tmp_addr += PAGE_SIZE;
1873 }
1874 vm_map_pageable(map, addr, addr + size, FALSE);
1875
1876 splx(s);
1877 return ((void *)addr);
1878 }
1879 return NULL;
1880}
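/*
 * Illustrative sketch (not part of the original file): the two bit tests
 * contigmalloc1() above applies while scanning vm_page_array.  Both the
 * alignment and the boundary must be powers of two; the values used in
 * main() are arbitrary examples.
 */
#if 0	/* example only */
#include <stdio.h>

/* the start is aligned when its low log2(alignment) bits are clear */
static int
ex_aligned(unsigned long phys, unsigned long alignment)
{
	return ((phys & (alignment - 1)) == 0);
}

/*
 * the range does not cross a boundary when its first and last byte fall in
 * the same boundary-sized block, i.e. their high bits agree
 */
static int
ex_no_boundary_cross(unsigned long phys, unsigned long size,
    unsigned long boundary)
{
	return (((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0);
}

int
main(void)
{
	unsigned long boundary = 0x100000;	/* 1MB */

	/* 0x7c0000..0x7effff stays inside one 1MB block */
	printf("no crossing: %d\n",
	    ex_no_boundary_cross(0x7c0000, 0x30000, boundary));
	/* 0x7e0000..0x80ffff spans the 0x800000 boundary */
	printf("no crossing: %d\n",
	    ex_no_boundary_cross(0x7e0000, 0x30000, boundary));
	printf("64K aligned: %d\n", ex_aligned(0x7c0000, 0x10000));
	return (0);
}
#endif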
1881
1882void *
1883contigmalloc(
1884 unsigned long size, /* should be size_t here and for malloc() */
1885 struct malloc_type *type,
1886 int flags,
1887 unsigned long low,
1888 unsigned long high,
1889 unsigned long alignment,
1890 unsigned long boundary)
1891{
1892 return contigmalloc1(size, type, flags, low, high, alignment, boundary,
1893 kernel_map);
1894}
1895
1896void
1897contigfree(void *addr, unsigned long size, struct malloc_type *type)
1898{
1899 kmem_free(kernel_map, (vm_offset_t)addr, size);
1900}
1901
1902vm_offset_t
1903vm_page_alloc_contig(
1904 vm_offset_t size,
1905 vm_offset_t low,
1906 vm_offset_t high,
1907 vm_offset_t alignment)
1908{
1909 return ((vm_offset_t)contigmalloc1(size, M_DEVBUF, M_NOWAIT, low, high,
1910 alignment, 0ul, kernel_map));
1911}
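/*
 * Illustrative sketch (not part of the original file): how a driver might
 * call contigmalloc() above to get a small DMA buffer below 16MB, 64K
 * aligned and not crossing a 64K boundary.  The size and constraints are
 * placeholders, not requirements of any particular device.
 */
#if 0	/* example only */
static void *
ex_alloc_isa_dma_buffer(void)
{
	void *buf;

	buf = contigmalloc(4096, M_DEVBUF, M_NOWAIT,
	    0ul,			/* low: start of physical memory */
	    16ul * 1024 * 1024,		/* high: below 16MB */
	    64ul * 1024,		/* alignment: 64K */
	    64ul * 1024);		/* boundary: do not cross 64K */
	return (buf);			/* NULL if no contiguous run was found */
}
#endif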
1912
1913#include "opt_ddb.h"
1914#ifdef DDB
1915#include <sys/kernel.h>
1916
1917#include <ddb/ddb.h>
1918
1919DB_SHOW_COMMAND(page, vm_page_print_page_info)
1920{
1921 db_printf("vmstats.v_free_count: %d\n", vmstats.v_free_count);
1922 db_printf("vmstats.v_cache_count: %d\n", vmstats.v_cache_count);
1923 db_printf("vmstats.v_inactive_count: %d\n", vmstats.v_inactive_count);
1924 db_printf("vmstats.v_active_count: %d\n", vmstats.v_active_count);
1925 db_printf("vmstats.v_wire_count: %d\n", vmstats.v_wire_count);
1926 db_printf("vmstats.v_free_reserved: %d\n", vmstats.v_free_reserved);
1927 db_printf("vmstats.v_free_min: %d\n", vmstats.v_free_min);
1928 db_printf("vmstats.v_free_target: %d\n", vmstats.v_free_target);
1929 db_printf("vmstats.v_cache_min: %d\n", vmstats.v_cache_min);
1930 db_printf("vmstats.v_inactive_target: %d\n", vmstats.v_inactive_target);
1931}
1932
1933DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
1934{
1935 int i;
1936 db_printf("PQ_FREE:");
1937 for(i=0;i<PQ_L2_SIZE;i++) {
1938 db_printf(" %d", vm_page_queues[PQ_FREE + i].lcnt);
1939 }
1940 db_printf("\n");
1941
1942 db_printf("PQ_CACHE:");
1943 for(i=0;i<PQ_L2_SIZE;i++) {
1944 db_printf(" %d", vm_page_queues[PQ_CACHE + i].lcnt);
1945 }
1946 db_printf("\n");
1947
1948 db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
1949 vm_page_queues[PQ_ACTIVE].lcnt,
1950 vm_page_queues[PQ_INACTIVE].lcnt);
1951}
1952#endif /* DDB */