/*
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/amd64/include/Attic/pmap.h,v 1.2 2004/02/14 20:34:26 dillon Exp $
 */

#ifndef _MACHINE_PMAP_H_
#define _MACHINE_PMAP_H_

/*
 * A four level page table is implemented by the amd64 hardware.  Each
 * page table represents 9 address bits and eats 4KB of space.  There are
 * 512 8-byte entries in each table.  The last page table contains PTE's
 * representing 4K pages (12 bits of address space).
 *
 * The page tables are named:
 *	PML4	Represents 512GB per entry (256TB total)	LEVEL4
 *	PDP	Represents 1GB per entry			LEVEL3
 *	PDE	Represents 2MB per entry			LEVEL2
 *	PTE	Represents 4KB per entry			LEVEL1
 *
 * PG_PAE	PAE 2MB extension.  In the PDE.  If 0 there is another
 *		level of page table and PG_D and PG_G are ignored.  If 1
 *		this is the terminating page table and PG_D and PG_G apply.
 *
 * PG_PWT	Page write through.  If 1 caching is disabled for data
 *		represented by the page.
 * PG_PCD	Page Cache Disable.  If 1 the page table entry will not
 *		be cached in the data cache.
 *
 * Each entry in the PML4 table represents a 512GB VA space.  We use a
 * fixed PML4 and adjust entries within it to switch user spaces.
 */

#define PG_V		0x0001LL		/* P	Present		   */
#define PG_RW		0x0002LL		/* R/W	Writable	   */
#define PG_U		0x0004LL		/* U/S	User		   */
#define PG_PWT		0x0008LL		/* PWT	Page Write Through */
#define PG_PCD		0x0010LL		/* PCD	Page Cache Disable */
#define PG_A		0x0020LL		/* A	Accessed	   */
#define PG_D		0x0040LL		/* D	Dirty (pte only)   */
#define PG_PAE		0x0080LL		/* PAT	(pte only)	   */
#define PG_G		0x0100LL		/* G	Global (pte only)  */
#define PG_USR0		0x0200LL		/* available to os	   */
#define PG_USR1		0x0400LL		/* available to os	   */
#define PG_USR2		0x0800LL		/* available to os	   */
#define PG_PTE_PAT	PG_PAE			/* PAT bit for 4K pages	   */
#define PG_PDE_PAT	0x1000LL		/* PAT bit for 2M pages	   */
#define PG_FRAME	0x000000FFFFFFF000LL	/* 40 bit phys address	   */
#define PG_PHYSRESERVED	0x000FFF0000000000LL	/* reserved for future PA  */
#define PG_USR3		0x0010000000000000LL	/* available to os	   */

#define PG_W		PG_USR0			/* Wired		*/
#define PG_MANAGED	PG_USR1			/* Managed		*/
#define PG_PROT		(PG_RW|PG_U)		/* all protection bits	*/
#define PG_N		(PG_PWT|PG_PCD)		/* Non-cacheable	*/

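/*
 * Illustrative sketch, not part of the original header: how a virtual
 * address breaks into the four 9-bit table indices and the 12-bit page
 * offset described above.  The shift counts follow from the level sizes
 * (4KB -> 12, 2MB -> 21, 1GB -> 30, 512GB -> 39); the helper names are
 * hypothetical.
 */
#if 0	/* example only */
static __inline int pml4_index(vm_offset_t va) { return ((va >> 39) & 511); }
static __inline int pdp_index(vm_offset_t va)  { return ((va >> 30) & 511); }
static __inline int pde_index(vm_offset_t va)  { return ((va >> 21) & 511); }
static __inline int pte_index(vm_offset_t va)  { return ((va >> 12) & 511); }
#endif
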
/*
 * Page Protection Exception bits
 */
#define PGEX_P		0x01	/* Protection violation vs. not present */
#define PGEX_W		0x02	/* during a Write cycle */
#define PGEX_U		0x04	/* access from User mode (UPL) */

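/*
 * Illustrative sketch, not part of the original header: decoding a page
 * fault error code with the PGEX_* bits above.  The function name and
 * parameter are hypothetical.
 */
#if 0	/* example only */
static __inline void
pgex_decode(int err)
{
	if (err & PGEX_P) {
		/* protection violation on a present page, not a miss */
	}
	if (err & PGEX_W) {
		/* the faulting access was a write */
	}
	if (err & PGEX_U) {
		/* the access originated in user mode */
	}
}
#endif
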
/*
 * User space is limited to one PML4 entry (512GB).  Kernel space is also
 * limited to one PML4 entry.  Other PML4 entries are used to map foreign
 * user spaces into KVM.  Typically each cpu in the system reserves two
 * PML4 entries for private use.
 */
#define UVA_MAXMEM	(512LL*1024*1024*1024)
#define KVA_MAXMEM	(512LL*1024*1024*1024)

#define VADDR(pdi, pti)	((vm_offset_t)(((pdi)<<PDRSHIFT)|((pti)<<PAGE_SHIFT)))

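/*
 * Worked example for VADDR(), assuming PDRSHIFT == 21 and
 * PAGE_SHIFT == 12 (2MB pde's, 4KB pages):
 *
 *	VADDR(1, 1) == (1 << 21) | (1 << 12) == 0x201000
 */
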
#define NKPT		30	/* actual number of kernel page tables */

#define NKPDE	(KVA_PAGES - 2)	/* addressable number of page tables/pde's */

#if NKPDE > KVA_PAGES - 2
#error "Maximum NKPDE is KVA_PAGES - 2"
#endif

/*
 * The *PTDI values control the layout of virtual memory
 *
 * XXX This works for now, but I am not real happy with it, I'll fix it
 * right after I fix locore.s and the magic 28K hole
 *
 * SMP_PRIVPAGES: The per-cpu address space is 0xff800000 -> 0xffbfffff
 */
#define	APTDPTDI	(NPDEPG-1)	/* alt ptd entry that points to APTD */
#define	MPPTDI		(APTDPTDI-1)	/* per cpu ptd entry */
#define	KPTDI		(MPPTDI-NKPDE)	/* start of kernel virtual pde's */
#define	PTDPTDI		(KPTDI-1)	/* ptd entry that points to ptd! */
#define	UMAXPTDI	(PTDPTDI-1)	/* ptd entry for user space end */
#define	UMAXPTEOFF	(NPTEPG)	/* pte entry for user space end */

/*
 * XXX doesn't really belong here I guess...
 */
#define ISA_HOLE_START		0xa0000
#define ISA_HOLE_LENGTH		(0x100000-ISA_HOLE_START)

#include <sys/queue.h>

/*
 * Address of current and alternate address space page table maps
 * and directories.
 */
extern pt_entry_t PTmap[], APTmap[], Upte;
extern pd_entry_t PTD[], APTD[], PTDpde, APTDpde, Upde;

extern pd_entry_t IdlePTD;	/* physical address of "Idle" state directory */

/*
 * virtual address to page table entry and
 * to physical address.  Likewise for alternate address space.
 * Note: these work recursively, thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */
#define	vtopte(va)	(PTmap + i386_btop(va))

#define	avtopte(va)	(APTmap + i386_btop(va))

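/*
 * Sketch of the recursion noted above (pde_of is hypothetical): because
 * PTmap[] is itself mapped by a ptd entry that points back at the page
 * directory, vtopte() of a pte's address lands inside the directory and
 * yields the pde that maps that pte.
 */
#if 0	/* example only */
static __inline pt_entry_t *
pde_of(vm_offset_t va)
{
	pt_entry_t *pte = vtopte(va);		/* pte that maps va */
	return (vtopte((vm_offset_t)pte));	/* pde that maps that pte */
}
#endif
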
/*
 *	Routine:	pmap_kextract
 *	Function:
 *		Extract the physical page address associated with the
 *		given kernel virtual address.
 */
static __inline vm_paddr_t
pmap_kextract(vm_offset_t va)
{
	vm_paddr_t pa;

	if ((pa = (vm_offset_t)PTD[va >> PDRSHIFT]) & PG_PAE) {
		/* 2MB page (PG_PAE is this file's name for the PS bit) */
		pa = (pa & ~(NBPDR - 1)) | (va & (NBPDR - 1));
	} else {
		/* 4KB page: fetch the pte through the recursive map */
		pa = *(vm_offset_t *)vtopte(va);
		pa = (pa & PG_FRAME) | (va & PAGE_MASK);
	}
	return (pa);
}

#define	vtophys(va)	pmap_kextract(((vm_offset_t)(va)))
#define	vtophys_pte(va)	((pt_entry_t)pmap_kextract(((vm_offset_t)(va))))

#define	avtophys(va)	(((vm_offset_t)(*avtopte(va)) & PG_FRAME) | ((vm_offset_t)(va) & PAGE_MASK))

/*
 * Pmap stuff
 */
struct pv_entry;

struct md_page {
	TAILQ_HEAD(,pv_entry)	pv_list;
};

struct pmap {
	pd_entry_t		*pm_pdir;	/* KVA of page directory */
	vm_object_t		pm_pteobj;	/* Container for pte's */
	TAILQ_HEAD(,pv_entry)	pm_pvlist;	/* list of mappings in pmap */
	int			pm_count;	/* reference count */
	cpumask_t		pm_active;	/* active on cpus */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	struct vm_page		*pm_ptphint;	/* pmap ptp hint */
};

#define pmap_resident_count(pmap) (pmap)->pm_stats.resident_count

typedef struct pmap	*pmap_t;

extern pmap_t	kernel_pmap;

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 */
typedef struct pv_entry {
	pmap_t		pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_list;
	TAILQ_ENTRY(pv_entry)	pv_plist;
	vm_page_t	pv_ptem;	/* VM page for pte */
} *pv_entry_t;

#define	PV_ENTRY_NULL	((pv_entry_t) 0)

#define	PV_CI		0x01	/* all entries must be cache inhibited */
#define	PV_PTPAGE	0x02	/* entry maps a page table page */

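/*
 * Illustrative sketch, not part of the original header: walking every
 * mapping of a page through its pv list.  Assumes the md_page above is
 * embedded in vm_page as 'md', as in the i386 pmap this file derives
 * from; pv_count_mappings is hypothetical.
 */
#if 0	/* example only */
static __inline int
pv_count_mappings(vm_page_t m)
{
	pv_entry_t pv;
	int n = 0;

	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
		++n;		/* one entry per (pmap, va) mapping m */
	return (n);
}
#endif
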
#define NPPROVMTRR		8
#define PPRO_VMTRRphysBase0	0x200
#define PPRO_VMTRRphysMask0	0x201

struct ppro_vmtrr {
	u_int64_t base, mask;
};

extern struct ppro_vmtrr PPro_vmtrr[NPPROVMTRR];

extern caddr_t	CADDR1;
extern pt_entry_t *CMAP1;
extern vm_paddr_t avail_end;
extern vm_paddr_t avail_start;
extern vm_offset_t clean_eva;
extern vm_offset_t clean_sva;
extern vm_paddr_t phys_avail[];
extern char	*ptvmmap;		/* poor name! */
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

void	pmap_bootstrap (vm_paddr_t, vm_paddr_t);
pmap_t	pmap_kernel (void);
void	*pmap_mapdev (vm_paddr_t, vm_size_t);
void	pmap_unmapdev (vm_offset_t, vm_size_t);
unsigned *pmap_pte (pmap_t, vm_offset_t) __pure2;
vm_page_t pmap_use_pt (pmap_t, vm_offset_t);
void	pmap_set_opt (void);

#endif /* !_MACHINE_PMAP_H_ */