/*
 * Copyright (c) 2006 The DragonFly Project. All rights reserved.
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2004-2006 Matthew Dillon
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)pmap.c	7.7 (Berkeley)	5/12/91
 * $FreeBSD: src/sys/i386/i386/pmap.c,v 1.250.2.18 2002/03/06 22:48:53 silby Exp $
 * $DragonFly: src/sys/platform/pc64/amd64/pmap.c,v 1.1 2007/09/23 04:29:31 yanyh Exp $
 */
/*
 * NOTE: PMAP_INVAL_ADD: In pc32 this function is called prior to adjusting
 * the PTE in the page table, because a cpu synchronization might be required.
 * The actual invalidation is delayed until the following call or flush. In
 * the VKERNEL build this function is called prior to adjusting the PTE and
 * invalidates the table synchronously (not delayed), and is not SMP safe
 * as a consequence.
 */

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/thread.h>
#include <sys/user.h>
#include <sys/vmspace.h>

#include <vm/pmap.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_zone.h>
#include <vm/vm_pageout.h>

#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/pmap_inval.h>
#include <machine/globaldata.h>

struct pmap kernel_pmap;

void
pmap_init(void)
{
}

void
pmap_init2(void)
{
}

/*
 * Bootstrap the kernel_pmap so it can be used with pmap_enter().
 *
 * NOTE! pm_pdir for the kernel pmap is offset so VA's translate
 * directly into PTD indexes (PTA is also offset for the same reason).
 * This is necessary because, for now, KVA is not mapped at address 0.
 *
 * Page table pages are not managed like they are in normal pmaps, so
 * no pteobj is needed.
 */
void
pmap_bootstrap(vm_paddr_t firstaddr, vm_paddr_t loadaddr)
{
}

/*
 * Initialize pmap0/vmspace0. Since process 0 never enters user mode we
 * just dummy it up so it works well enough for fork().
 *
 * In DragonFly, process pmaps may only be used to manipulate user address
 * space, never kernel address space.
 */
void
pmap_pinit0(struct pmap *pmap)
{
}

/************************************************************************
 *		Procedures to manage whole physical maps		*
 ************************************************************************
 *
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
void
pmap_pinit(struct pmap *pmap)
{
}

/*
 * Clean up a pmap structure so it can be physically freed
 */
void
pmap_puninit(pmap_t pmap)
{
}


/*
 * Wire in kernel global address entries. To avoid a race condition
 * between pmap initialization and pmap_growkernel, this procedure
 * adds the pmap to the master list (which growkernel scans to update),
 * then copies the template.
 *
 * In a virtual kernel there are no kernel global address entries.
 */
void
pmap_pinit2(struct pmap *pmap)
{
}

/*
 * Release all resources held by the given physical map.
 *
 * Should only be called if the map contains no valid mappings.
 */
static int pmap_release_callback(struct vm_page *p, void *data);

void
pmap_release(struct pmap *pmap)
{
}

static int
pmap_release_callback(struct vm_page *p, void *data)
{
	return(0);
}

/*
 * Retire the given physical map from service. Should only be called if
 * the map contains no valid mappings.
 */
void
pmap_destroy(pmap_t pmap)
{
}

/*
 * Add a reference to the specified pmap.
 */
void
pmap_reference(pmap_t pmap)
{
}

/************************************************************************
 *			VMSPACE MANAGEMENT				*
 ************************************************************************
 *
 * The VMSPACE management we do in our virtual kernel must be reflected
 * in the real kernel. This is accomplished by making vmspace system
 * calls to the real kernel.
 */
void
cpu_vmspace_alloc(struct vmspace *vm)
{
}

void
cpu_vmspace_free(struct vmspace *vm)
{
}

/************************************************************************
 *	    Procedures which operate directly on the kernel PMAP	*
 ************************************************************************/

/*
 * This maps the requested page table and gives us access to it.
 */
static vpte_t *
get_ptbase(struct pmap *pmap, vm_offset_t va)
{
	return NULL;
}

static vpte_t *
get_ptbase1(struct pmap *pmap, vm_offset_t va)
{
	return NULL;
}

static vpte_t *
get_ptbase2(struct pmap *pmap, vm_offset_t va)
{
	return NULL;
}

/*
 * When removing a page directory the related VA range in the self-mapping
 * of the page table must be invalidated.
 */
static void
inval_ptbase_pagedir(pmap_t pmap, vm_pindex_t pindex)
{
}

/*
 * Enter a mapping into kernel_pmap. Mappings created in this fashion
 * are not managed. Mappings must be immediately accessible on all cpus.
 *
 * Call pmap_inval_pte() to invalidate the virtual pte and clean out the
 * real pmap and handle related races before storing the new vpte.
 */
void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{
}

/*
 * Synchronize a kvm mapping originally made for the private use of
 * some other cpu so it can be used on all cpus.
 *
 * XXX add MADV_RESYNC to improve performance.
 */
void
pmap_kenter_sync(vm_offset_t va)
{
}

/*
 * Synchronize a kvm mapping originally made for the private use of
 * some other cpu so it can be used on our cpu. Turns out to be the
 * same madvise() call, because we have to sync the real pmaps anyway.
 *
 * XXX add MADV_RESYNC to improve performance.
 */
void
pmap_kenter_sync_quick(vm_offset_t va)
{
}

#if 0
/*
 * Make a previously read-only kernel mapping R+W (not implemented by
 * virtual kernels).
 */
void
pmap_kmodify_rw(vm_offset_t va)
{
	*pmap_kpte(va) |= VPTE_R | VPTE_W;
	madvise((void *)va, PAGE_SIZE, MADV_INVAL);
}

/*
 * Make a kernel mapping non-cacheable (not applicable to virtual kernels)
 */
void
pmap_kmodify_nc(vm_offset_t va)
{
	*pmap_kpte(va) |= VPTE_N;
	madvise((void *)va, PAGE_SIZE, MADV_INVAL);
}

#endif

/*
 * Map a contiguous range of physical memory to a KVM
 */
vm_offset_t
pmap_map(vm_offset_t virt, vm_paddr_t start, vm_paddr_t end, int prot)
{
	return (0);
}

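#if 0
/*
 * Illustrative sketch only, not part of this port: a conventional
 * pmap_map() walks the physical range one page at a time, enters each
 * page with pmap_kenter(), and returns the advanced KVA cursor.  The
 * name pmap_map_sketch is hypothetical.
 */
vm_offset_t
pmap_map_sketch(vm_offset_t virt, vm_paddr_t start, vm_paddr_t end, int prot)
{
	while (start < end) {
		pmap_kenter(virt, start);	/* unmanaged kernel mapping */
		virt += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	return (virt);
}
#endif
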
/*
 * Enter an unmanaged KVA mapping for the private use of the current
 * cpu only. pmap_kenter_sync() may be called to make the mapping usable
 * by other cpus.
 *
 * It is illegal for the mapping to be accessed by other cpus unless
 * pmap_kenter_sync*() is called.
 */
void
pmap_kenter_quick(vm_offset_t va, vm_paddr_t pa)
{
}

/*
 * Make a temporary mapping for a physical address. This is only intended
 * to be used for panic dumps.
 */
void *
pmap_kenter_temporary(vm_paddr_t pa, int i)
{
	return (NULL);
}

/*
 * Remove an unmanaged mapping created with pmap_kenter*().
 */
void
pmap_kremove(vm_offset_t va)
{
}

/*
 * Remove an unmanaged mapping created with pmap_kenter*() but synchronize
 * only with this cpu.
 *
 * Unfortunately because we optimize new entries by testing VPTE_V later
 * on, we actually still have to synchronize with all the cpus. XXX maybe
 * store a junk value and test against 0 in the other places instead?
 */
void
pmap_kremove_quick(vm_offset_t va)
{
}

/*
 * Map a set of unmanaged VM pages into KVM.
 */
void
pmap_qenter(vm_offset_t va, struct vm_page **m, int count)
{
}

/*
 * Map a set of VM pages to kernel virtual memory. If a mapping changes,
 * clear the supplied mask. The caller handles any SMP interactions.
 * The mask is used to provide the caller with hints on what SMP interactions
 * might be needed.
 */
void
pmap_qenter2(vm_offset_t va, struct vm_page **m, int count, cpumask_t *mask)
{
}

/*
 * Undo the effects of pmap_qenter*().
 */
void
pmap_qremove(vm_offset_t va, int count)
{
}

/************************************************************************
 *	    Misc support glue called by machine independent code	*
 ************************************************************************
 *
 * These routines are called by machine independent code to operate on
 * certain machine-dependent aspects of processes, threads, and pmaps.
 */

/*
 * Initialize MD portions of the thread structure.
 */
void
pmap_init_thread(thread_t td)
{
}

/*
 * This routine directly affects the fork perf for a process.
 */
void
pmap_init_proc(struct proc *p)
{
}

/*
 * Destroy the UPAGES for a process that has exited and disassociate
 * the process from its thread.
 */
void
pmap_dispose_proc(struct proc *p)
{
}

/*
 * We pre-allocate all page table pages for kernel virtual memory so
 * this routine will only be called if KVM has been exhausted.
 */
void
pmap_growkernel(vm_offset_t addr)
{
}

/*
 * The modification bit is not tracked for any pages in this range. XXX
 * such pages in this map should always use pmap_k*() functions and not
 * be managed anyhow.
 *
 * XXX User and kernel address spaces are independent for virtual kernels,
 * this function only applies to the kernel pmap.
 */
static int
pmap_track_modified(pmap_t pmap, vm_offset_t va)
{
	return 0;
}

/************************************************************************
 *		Procedures supporting managed page table pages		*
 ************************************************************************
 *
 * These procedures are used to track managed page table pages. These pages
 * use the page table page's vm_page_t to track PTEs in the page. The
 * page table pages themselves are arranged in a VM object, pmap->pm_pteobj.
 *
 * This allows the system to throw away page table pages for user processes
 * at will and reinstantiate them on demand.
 */

/*
 * This routine works like vm_page_lookup() but also blocks as long as the
 * page is busy. This routine does not busy the page it returns.
 *
 * Unless the caller is managing objects whose pages are in a known state,
 * the call should be made with a critical section held so the page's object
 * association remains valid on return.
 */
static vm_page_t
pmap_page_lookup(vm_object_t object, vm_pindex_t pindex)
{
	return(NULL);
}

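#if 0
/*
 * Illustrative sketch only, not part of this port: a typical
 * pmap_page_lookup() loops on vm_page_lookup() and sleeps while the
 * page is busy, so the page it returns is not busy at that instant.
 * The name pmap_page_lookup_sketch is hypothetical.
 */
static vm_page_t
pmap_page_lookup_sketch(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	do {
		m = vm_page_lookup(object, pindex);
	} while (m && vm_page_sleep_busy(m, FALSE, "pplookp"));
	return(m);
}
#endif
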
/*
 * This routine unholds page table pages, and if the hold count
 * drops to zero, then it decrements the wire count.
 */
static int
_pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m)
{
	return 0;
}

static __inline int
pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m)
{
	return 0;
}

/*
 * After removing a page table entry, this routine is used to
 * conditionally free the page, and manage the hold/wire counts.
 */
static int
pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t mpte)
{
	return 0;
}

/*
 * Attempt to release and free a vm_page in a pmap. Returns 1 on success,
 * 0 on failure (if the procedure had to sleep).
 */
static int
pmap_release_free_page(struct pmap *pmap, vm_page_t p)
{
	return 1;
}

/*
 * This routine is called if the page table page is not mapped in the page
 * table directory.
 *
 * The routine is broken up into two parts for readability.
 */
static vm_page_t
_pmap_allocpte(pmap_t pmap, unsigned ptepindex)
{
	return (NULL);
}

/*
 * Determine the page table page required to access the VA in the pmap
 * and allocate it if necessary. Return a held vm_page_t for the page.
 *
 * Only used with user pmaps.
 */
static vm_page_t
pmap_allocpte(pmap_t pmap, vm_offset_t va)
{
	return NULL;
}

/************************************************************************
 *			Managed pages in pmaps				*
 ************************************************************************
 *
 * All pages entered into user pmaps and some pages entered into the kernel
 * pmap are managed, meaning that pmap_protect() and other related management
 * functions work on these pages.
 */

/*
 * free the pv_entry back to the free list. This function may be
 * called from an interrupt.
 */
static __inline void
free_pv_entry(pv_entry_t pv)
{
}

/*
 * get a new pv_entry, allocating a block from the system
 * when needed. This function may be called from an interrupt.
 */
static pv_entry_t
get_pv_entry(void)
{
	return NULL;
}

/*
 * This routine is very drastic, but can save the system
 * in a pinch.
 */
void
pmap_collect(void)
{
}

/*
 * If it is the first entry on the list, it is actually
 * in the header and we must copy the following entry up
 * to the header. Otherwise we must search the list for
 * the entry. In either case we free the now unused entry.
 */
static int
pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va)
{
	return 0;
}

/*
 * Create a pv entry for page at pa for (pmap, va). If the page table page
 * holding the VA is managed, mpte will be non-NULL.
 */
static void
pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_page_t m)
{
}

/*
 * pmap_remove_pte: do the things to unmap a page in a process
 */
static int
pmap_remove_pte(struct pmap *pmap, vpte_t *ptq, vm_offset_t va)
{
	return 0;
}

/*
 * pmap_remove_page:
 *
 * Remove a single page from a process address space.
 *
 * This function may not be called from an interrupt if the pmap is
 * not kernel_pmap.
 */
static void
pmap_remove_page(struct pmap *pmap, vm_offset_t va)
{
}

/*
 * pmap_remove:
 *
 * Remove the given range of addresses from the specified map.
 *
 * It is assumed that the start and end are properly
 * rounded to the page size.
 *
 * This function may not be called from an interrupt if the pmap is
 * not kernel_pmap.
 */
void
pmap_remove(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
{
}

/*
 * pmap_remove_all:
 *
 * Removes this physical page from all physical maps in which it resides.
 * Reflects back modify bits to the pager.
 *
 * This routine may not be called from an interrupt.
 */
static void
pmap_remove_all(vm_page_t m)
{
}

/*
 * pmap_protect:
 *
 * Set the physical protection on the specified range of this map
 * as requested.
 *
 * This function may not be called from an interrupt if the map is
 * not the kernel_pmap.
 */
void
pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
}

/*
 * Enter a managed page into a pmap. If the page is not wired, related pmap
 * data can be destroyed at any time for later demand-operation.
 *
 * Insert the vm_page (m) at virtual address (v) in (pmap), with the
 * specified protection, and wire the mapping if requested.
 *
 * NOTE: This routine may not lazy-evaluate or lose information. The
 * page must actually be inserted into the given map NOW.
 *
 * NOTE: When entering a page at a KVA address, the pmap must be the
 * kernel_pmap.
 */
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
	   boolean_t wired)
{
}

/*
 * This is a quick version of pmap_enter(). It is used only under the
 * following conditions:
 *
 * (1) The pmap is not the kernel_pmap
 * (2) The page is not to be wired into the map
 * (3) The page is to be mapped read-only in the pmap (initially that is)
 * (4) The calling procedure is responsible for flushing the TLB
 * (5) The page is always managed
 * (6) There is no prior mapping at the VA
 */

static vm_page_t
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
{
	return NULL;
}

/*
 * Extract the physical address for the translation at the specified
 * virtual address in the pmap.
 */
vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{
	return(0);
}

/*
 * This routine preloads the ptes for a given object into the specified pmap.
 * This eliminates the blast of soft faults on process startup and
 * immediately after an mmap.
 */
static int pmap_object_init_pt_callback(vm_page_t p, void *data);

void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_prot_t prot,
		    vm_object_t object, vm_pindex_t pindex,
		    vm_size_t size, int limit)
{
}

static
int
pmap_object_init_pt_callback(vm_page_t p, void *data)
{
	return(0);
}

/*
 * pmap_prefault provides a quick way of clustering pagefaults into a
 * process's address space. It is a "cousin" of pmap_object_init_pt,
 * except it runs at page fault time instead of mmap time.
 */
#define PFBAK 4
#define PFFOR 4
#define PAGEORDER_SIZE (PFBAK+PFFOR)

static int pmap_prefault_pageorder[] = {
	-PAGE_SIZE, PAGE_SIZE,
	-2 * PAGE_SIZE, 2 * PAGE_SIZE,
	-3 * PAGE_SIZE, 3 * PAGE_SIZE,
	-4 * PAGE_SIZE, 4 * PAGE_SIZE
};

void
pmap_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
{
}
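
#if 0
/*
 * Illustrative sketch only, not part of this port: pmap_prefault() is
 * typically implemented by visiting the PAGEORDER_SIZE offsets around
 * the faulting address in the order given by pmap_prefault_pageorder[],
 * skipping addresses that fall outside the map entry, and entering any
 * resident, non-busy backing pages with pmap_enter_quick().  The name
 * pmap_prefault_sketch is hypothetical.
 */
static void
pmap_prefault_sketch(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
{
	vm_offset_t starta, addr;
	int i;

	starta = addra - PFBAK * PAGE_SIZE;
	if (starta < entry->start)
		starta = entry->start;
	else if (starta > addra)	/* guard against unsigned underflow */
		starta = 0;

	for (i = 0; i < PAGEORDER_SIZE; i++) {
		addr = addra + pmap_prefault_pageorder[i];
		if (addr > addra + (PFFOR * PAGE_SIZE) ||
		    addr < starta || addr >= entry->end)
			continue;
		/*
		 * Here the backing vm_object would be searched for a
		 * resident page at this address and, if found and not
		 * busy, entered read-only via pmap_enter_quick().
		 */
	}
}
#endif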

/*
 * Routine:	pmap_change_wiring
 * Function:	Change the wiring attribute for a map/virtual-address
 *		pair.
 * In/out conditions:
 *		The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
{
}

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
	  vm_size_t len, vm_offset_t src_addr)
{
}

/*
 * pmap_zero_page:
 *
 * Zero the specified PA by mapping the page into KVM and clearing its
 * contents.
 *
 * This function may be called from an interrupt and no locking is
 * required.
 */
void
pmap_zero_page(vm_paddr_t phys)
{
}

/*
 * pmap_page_assertzero:
 *
 * Assert that a page is empty, panic if it isn't.
 */
void
pmap_page_assertzero(vm_paddr_t phys)
{
}

/*
 * pmap_zero_page_area:
 *
 * Zero part of a physical page by mapping it into memory and clearing
 * its contents with bzero.
 *
 * off and size may not cover an area beyond a single hardware page.
 */
void
pmap_zero_page_area(vm_paddr_t phys, int off, int size)
{
}

/*
 * pmap_copy_page:
 *
 * Copy the physical page from the source PA to the target PA.
 * This function may be called from an interrupt. No locking
 * is required.
 */
void
pmap_copy_page(vm_paddr_t src, vm_paddr_t dst)
{
}

/*
 * pmap_copy_page_frag:
 *
 * Copy the physical page from the source PA to the target PA.
 * This function may be called from an interrupt. No locking
 * is required.
 */
void
pmap_copy_page_frag(vm_paddr_t src, vm_paddr_t dst, size_t bytes)
{
}

/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page. This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
	return (FALSE);
}

/*
 * Remove all pages from the specified address space. This aids process
 * exit speeds. Also, this code is special cased for current process only,
 * but can have the more generic (and slightly slower) mode enabled. This
 * is much faster than pmap_remove in the case of running down an entire
 * address space.
 */
void
pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
}

/*
 * pmap_testbit tests bits in active mappings of a VM page.
 */
static boolean_t
pmap_testbit(vm_page_t m, int bit)
{
	return (FALSE);
}

/*
 * This routine is used to clear bits in ptes. Certain bits require special
 * handling, in particular (on virtual kernels) the VPTE_M (modify) bit.
 *
 * This routine is only called with certain VPTE_* bit combinations.
 */
static __inline void
pmap_clearbit(vm_page_t m, int bit)
{
}

/*
 * pmap_page_protect:
 *
 * Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{
}

vm_paddr_t
pmap_phys_address(int ppn)
{
	return 0;
}

/*
 * pmap_ts_referenced:
 *
 * Return a count of reference bits for a page, clearing those bits.
 * It is not necessary for every reference bit to be cleared, but it
 * is necessary that 0 only be returned when there are truly no
 * reference bits set.
 *
 * XXX: The exact number of bits to check and clear is a matter that
 * should be tested and standardized at some point in the future for
 * optimal aging of shared pages.
 */
int
pmap_ts_referenced(vm_page_t m)
{
	return (0);
}

/*
 * pmap_is_modified:
 *
 * Return whether or not the specified physical page was modified
 * in any physical maps.
 */
boolean_t
pmap_is_modified(vm_page_t m)
{
	return FALSE;
}

/*
 * Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(vm_page_t m)
{
}

/*
 * pmap_clear_reference:
 *
 * Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(vm_page_t m)
{
}

#if 0
/*
 * Miscellaneous support routines follow
 */

static void
i386_protection_init(void)
{
	int *kp, prot;

	kp = protection_codes;
	for (prot = 0; prot < 8; prot++) {
		if (prot & VM_PROT_READ)
			*kp |= VPTE_R;
		if (prot & VM_PROT_WRITE)
			*kp |= VPTE_W;
		if (prot & VM_PROT_EXECUTE)
			*kp |= VPTE_X;
		++kp;
	}
}

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space. Return a pointer to where it is mapped. This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 *
 * NOTE: we can't use pgeflag unless we invalidate the pages one at
 * a time.
 */
void *
pmap_mapdev(vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t va, tmpva, offset;
	vpte_t *pte;

	offset = pa & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);

	va = kmem_alloc_nofault(&kernel_map, size);
	if (!va)
		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");

	pa = pa & VPTE_FRAME;
	for (tmpva = va; size > 0;) {
		pte = KernelPTA + (tmpva >> PAGE_SHIFT);
		*pte = pa | VPTE_R | VPTE_W | VPTE_V; /* | pgeflag; */
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		pa += PAGE_SIZE;
	}
	cpu_invltlb();
	smp_invltlb();

	return ((void *)(va + offset));
}

void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	base = va & VPTE_FRAME;
	offset = va & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);
	pmap_qremove(va, size >> PAGE_SHIFT);
	kmem_free(&kernel_map, base, size);
}

#endif

/*
 * perform the pmap work for mincore
 */
int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
{
	return 0;
}

void
pmap_replacevm(struct proc *p, struct vmspace *newvm, int adjrefs)
{
}

void
pmap_setlwpvm(struct lwp *lp, struct vmspace *newvm)
{
}


vm_offset_t
pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
{
	return addr;
}
