kernel: Make SMP support default (and non-optional).
[dragonfly.git] / sys / platform / pc32 / include / pmap.h
CommitLineData
a9295349
MD
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * the Systems Programming Group of the University of Utah Computer
7 * Science Department and William Jolitz of UUNET Technologies Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the University of
20 * California, Berkeley and its contributors.
21 * 4. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 * Derived from hp300 version by Mike Hibler, this version by William
38 * Jolitz uses a recursive map [a pde points to the page directory] to
39 * map the page tables using the pagetables themselves. This is done to
40 * reduce the impact on kernel virtual memory for lots of sparse address
41 * space, and to reduce the cost of memory to each process.
42 *
43 * from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
44 * from: @(#)pmap.h 7.4 (Berkeley) 5/12/91
45 * $FreeBSD: src/sys/i386/include/pmap.h,v 1.65.2.3 2001/10/03 07:15:37 peter Exp $
a9295349
MD
46 */
47
#ifndef _MACHINE_PMAP_H_
#define _MACHINE_PMAP_H_

#include <cpu/pmap.h>

/*
 * Size of Kernel address space.  This is the number of page table pages
 * (4MB each) to use for the kernel.  256 pages == 1 Gigabyte.
 * This **MUST** be a multiple of 4 (eg: 252, 256, 260, etc).
 */
#ifndef KVA_PAGES
#define KVA_PAGES	256
#endif

/*
 * PTE related macros
 */
/* Build a virtual address from a page-directory index and a page-table index */
#define VADDR(pdi, pti) ((vm_offset_t)(((pdi)<<PDRSHIFT)|((pti)<<PAGE_SHIFT)))

#ifndef NKPT
#define NKPT		30	/* starting general kptds */
#endif

#ifndef NKPDE
#define NKPDE	(KVA_PAGES - 2)	/* max general kptds */
#endif

/* Two PTDs are reserved (APTDPTDI and MPPTDI), hence the "- 2" bound */
#if NKPDE > KVA_PAGES - 2
#error "Maximum NKPDE is KVA_PAGES - 2"
#endif

/*
 * The *PTDI values control the layout of virtual memory
 *
 * NPDEPG  - number of pde's in the page directory (1024)
 * NKPDE   - max general kernel page table pages not including
 *	     special PTDs.  Typically KVA_PAGES minus the number
 *	     of special PTDs.
 *
 *	+---------------+ End of kernel memory
 *	|   APTDPTDI	| alt page table map for cpu 0
 *	+---------------+
 *	|    MPPTDI	| globaldata array
 *	+---------------+
 *	|		|
 *	|		|
 *	|		|
 *	|		| general kernel page table pages
 *	|		|
 *	|  KPTDI[NKPDE] |
 *	+---------------+ Start of kernel memory
 *	|    PTDPTDI	| self-mapping of current pmap
 *	+---------------+
 *
 * This typically places PTDPTDI at the index corresponding to VM address
 * (0xc0000000 - 4M) = bfc00000, and that is where PTmap[] is based for
 * the self-mapped page table.  PTD points to the self-mapped page
 * directory itself and any indexes >= KPTDI will correspond to the
 * common kernel page directory pages since all pmaps map the same ones.
 *
 * APTmap / APTDpde are now used by cpu 0 as its alternative page table
 * mapping via gd_GDMAP1 and GD_GDADDR1.  The remaining cpus allocate
 * their own dynamically.
 *
 * Even though the maps are per-cpu the PTD entries are stored in the
 * individual pmaps and obviously not replicated so each process pmap
 * essentially gets its own per-cpu cache (PxN) making for fairly efficient
 * access.
 *
 * UMAXPTDI - highest inclusive ptd index for user space
 */
#define APTDPTDI	(NPDEPG-1)	/* alt ptd entry that points to APTD */
#define MPPTDI		(APTDPTDI-1)	/* globaldata array ptd entry */
#define KPTDI		(MPPTDI-NKPDE)	/* start of kernel virtual pde's */
#define PTDPTDI		(KPTDI-1)	/* ptd entry that points to ptd! */
#define UMAXPTDI	(PTDPTDI-1)	/* ptd entry for user space end */

/*
 * XXX doesn't really belong here I guess...
 * Legacy ISA hole (VGA/BIOS area) in low physical memory: 0xa0000-0xfffff.
 */
#define ISA_HOLE_START    0xa0000
#define ISA_HOLE_LENGTH (0x100000-ISA_HOLE_START)
129
#ifndef LOCORE

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifndef _SYS_QUEUE_H_
#include <sys/queue.h>
#endif
#ifndef _SYS_SPINLOCK_H_
#include <sys/spinlock.h>
#endif
#ifndef _SYS_THREAD_H_
#include <sys/thread.h>
#endif
#ifndef _MACHINE_TYPES_H_
#include <machine/types.h>
#endif
#ifndef _MACHINE_PARAM_H_
#include <machine/param.h>
#endif

/*
 * Address of current and alternate address space page table maps
 * and directories.
 */
#ifdef _KERNEL
extern pt_entry_t PTmap[], APTmap[], Upte;
extern pd_entry_t PTD[], APTD[], PTDpde, APTDpde, Upde;

extern pd_entry_t IdlePTD;	/* physical address of "Idle" state directory */
#endif

#ifdef _KERNEL
/*
 * virtual address to page table entry and
 * to physical address. Likewise for alternate address space.
 * Note: these work recursively, thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */
/* pte address for va via the self-mapped page table (current space) */
#define vtopte(va)	(PTmap + i386_btop(va))

/* pte address for va via the alternate map (see APTmap notes above) */
#define avtopte(va)	(APTmap + i386_btop(va))
173/*
174 * Routine: pmap_kextract
175 * Function:
176 * Extract the physical page address associated
177 * kernel virtual address.
178 */
179static __inline vm_paddr_t
180pmap_kextract(vm_offset_t va)
181{
182 vm_paddr_t pa;
183
184 if ((pa = (vm_offset_t) PTD[va >> PDRSHIFT]) & PG_PS) {
185 pa = (pa & ~(NBPDR - 1)) | (va & (NBPDR - 1));
186 } else {
187 pa = *(vm_offset_t *)vtopte(va);
188 pa = (pa & PG_FRAME) | (va & PAGE_MASK);
189 }
190 return pa;
191}
/*
 * XXX
 * Convenience wrappers around pmap_kextract() for kernel virtual
 * addresses; vtophys_pte() casts the result to a pte-sized integer.
 */
#define vtophys(va)	pmap_kextract(((vm_offset_t)(va)))
#define vtophys_pte(va)	((pt_entry_t)pmap_kextract(((vm_offset_t)(va))))

#endif

/*
 * Pmap stuff
 */
struct pv_entry;
struct vm_page;
struct vm_object;
struct vmspace;

/*
 * Machine-dependent per-vm_page data: the list of pv entries (i.e.
 * all virtual mappings) for the page, plus its length.
 */
struct md_page {
	int pv_list_count;
	TAILQ_HEAD(,pv_entry) pv_list;
};

/* Machine-dependent per-vm_object data (currently empty on pc32) */
struct md_object {
};
216
a9295349
MD
217/*
218 * Each machine dependent implementation is expected to
219 * keep certain statistics. They may do this anyway they
220 * so choose, but are expected to return the statistics
221 * in the following structure.
3d33541c
MD
222 *
223 * NOTE: We try to match the size of the pc32 pmap with the vkernel pmap
224 * so the same utilities (like 'ps') can be used on both.
a9295349
MD
225 */
226struct pmap_statistics {
227 long resident_count; /* # of pages mapped (total) */
228 long wired_count; /* # of pages wired */
229};
230typedef struct pmap_statistics *pmap_statistics_t;
231
/*
 * The pmap proper: one per address space.  Field order/size is kept in
 * sync with the vkernel pmap (see pm_filler02) so userland utilities
 * that grovel both see the same layout.
 */
struct pmap {
	pd_entry_t *pm_pdir;		/* KVA of page directory */
	struct vm_page *pm_pdirm;	/* VM page for pg directory */
	struct vm_object *pm_pteobj;	/* Container for pte's */
	TAILQ_ENTRY(pmap) pm_pmnode;	/* list of pmaps */
	TAILQ_HEAD(,pv_entry) pm_pvlist; /* list of mappings in pmap */
	TAILQ_HEAD(,pv_entry) pm_pvlist_free; /* free mappings */
	int pm_count;			/* reference count */
	cpumask_t pm_active;		/* active on cpus */
	cpumask_t pm_cached;		/* cached on cpus */
	int pm_filler02;		/* (filler sync w/vkernel) */
	struct pmap_statistics pm_stats; /* pmap statistics */
	struct vm_page *pm_ptphint;	/* pmap ptp hint */
	int pm_generation;		/* detect pvlist deletions */
	struct spinlock pm_spin;	/* protects pmap internals */
	struct lwkt_token pm_token;	/* serializes pmap operations */
};

#define pmap_resident_count(pmap) (pmap)->pm_stats.resident_count

/*
 * Bit above the highest real cpu bit in a cpumask, used as an
 * interlock when manipulating pm_active/pm_cached.
 */
#define CPUMASK_LOCK		CPUMASK(SMP_MAXCPU)
#define CPUMASK_BIT		SMP_MAXCPU	/* 1 << SMP_MAXCPU */

typedef struct pmap	*pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap;
#endif
260
/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 */
typedef struct pv_entry {
	pmap_t		pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_list;	/* link on the vm_page's md.pv_list */
	TAILQ_ENTRY(pv_entry)	pv_plist;	/* link on the pmap's pm_pvlist */
	struct vm_page	*pv_ptem;	/* VM page for pte */
#ifdef PMAP_DEBUG
	struct vm_page	*pv_m;		/* debug: vm_page this entry maps */
#else
	void		*pv_dummy;	/* align structure to 32 bytes */
#endif
} *pv_entry_t;
277
#ifdef _KERNEL

/* Pentium Pro variable-range MTRR MSRs (base/mask pairs) */
#define NPPROVMTRR		8
#define PPRO_VMTRRphysBase0	0x200
#define PPRO_VMTRRphysMask0	0x201
struct ppro_vmtrr {
	u_int64_t base, mask;
};
extern struct ppro_vmtrr PPro_vmtrr[NPPROVMTRR];

extern caddr_t CADDR1;
extern pt_entry_t *CMAP1;
extern vm_paddr_t dump_avail[];
extern vm_paddr_t avail_end;
extern vm_paddr_t avail_start;
extern vm_offset_t clean_eva;
extern vm_offset_t clean_sva;
extern char *ptvmmap;		/* poor name! */

/* pmap MD API implemented in the platform pmap.c */
void	pmap_release(struct pmap *pmap);
void	pmap_interlock_wait (struct vmspace *);
void	pmap_bootstrap (vm_paddr_t, vm_paddr_t);
void	*pmap_mapdev (vm_paddr_t, vm_size_t);
void	*pmap_mapdev_uncacheable (vm_paddr_t, vm_size_t);
void	pmap_unmapdev (vm_offset_t, vm_size_t);
unsigned *pmap_kernel_pte (vm_offset_t) __pure2;
struct vm_page *pmap_use_pt (pmap_t, vm_offset_t);
int	pmap_get_pgeflag(void);
void	pmap_set_opt (void);

#endif /* _KERNEL */

#endif /* !LOCORE */

#endif /* !_MACHINE_PMAP_H_ */