/*-
 * Copyright (c) 2006 Peter Wemm
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * AMD64 machine dependent routines for kvm and minidumps.
 */
#include <sys/user.h>	/* MUST BE FIRST */
#include <sys/param.h>
#include <sys/fnv_hash.h>

#include <vm/vm_param.h>

#include <machine/elf.h>
#include <machine/cpufunc.h>
#include <machine/minidump.h>

#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>

#include "kvm_private.h"
63 /* minidump must be the first item! */
65 int minidump; /* 1 = minidump mode */
66 int pgtable; /* pagetable mode */
67 void *hpt_head[HPT_SIZE];
77 hpt_insert(kvm_t *kd, vm_paddr_t pa, int64_t off)
80 uint32_t fnv = FNV1_32_INIT;
82 fnv = fnv_32_buf(&pa, sizeof(pa), fnv);
83 fnv &= (HPT_SIZE - 1);
84 hpte = malloc(sizeof(*hpte));
87 hpte->next = kd->vmst->hpt_head[fnv];
88 kd->vmst->hpt_head[fnv] = hpte;
92 hpt_find(kvm_t *kd, vm_paddr_t pa)
95 uint32_t fnv = FNV1_32_INIT;
97 fnv = fnv_32_buf(&pa, sizeof(pa), fnv);
98 fnv &= (HPT_SIZE - 1);
99 for (hpte = kd->vmst->hpt_head[fnv]; hpte != NULL; hpte = hpte->next) {
107 inithash(kvm_t *kd, uint64_t *base, int len, off_t off)
113 for (idx = 0; idx < len / sizeof(*base); idx++) {
117 bits &= ~(1ul << bit);
118 pa = (idx * sizeof(*base) * NBBY + bit) * PAGE_SIZE;
119 hpt_insert(kd, pa, off);
127 _kvm_minidump_freevtop(kvm_t *kd)
129 struct vmstate *vm = kd->vmst;
139 static int _kvm_minidump_init_hdr1(kvm_t *kd, struct vmstate *vmst,
140 struct minidumphdr1 *hdr);
141 static int _kvm_minidump_init_hdr2(kvm_t *kd, struct vmstate *vmst,
142 struct minidumphdr2 *hdr);
145 _kvm_minidump_initvtop(kvm_t *kd)
147 struct vmstate *vmst;
150 struct minidumphdr1 hdr1;
151 struct minidumphdr2 hdr2;
154 vmst = _kvm_malloc(kd, sizeof(*vmst));
156 _kvm_err(kd, kd->program, "cannot allocate vm");
160 bzero(vmst, sizeof(*vmst));
163 if (pread(kd->pmfd, &u, sizeof(u), 0) != sizeof(u)) {
164 _kvm_err(kd, kd->program, "cannot read dump header");
167 if (strncmp(MINIDUMP1_MAGIC, u.hdr1.magic, sizeof(u.hdr1.magic)) == 0 &&
168 u.hdr1.version == MINIDUMP1_VERSION) {
169 error = _kvm_minidump_init_hdr1(kd, vmst, &u.hdr1);
171 if (strncmp(MINIDUMP2_MAGIC, u.hdr1.magic, sizeof(u.hdr1.magic)) == 0 &&
172 u.hdr2.version == MINIDUMP2_VERSION) {
173 error = _kvm_minidump_init_hdr2(kd, vmst, &u.hdr2);
175 _kvm_err(kd, kd->program, "not a minidump for this platform");
183 _kvm_minidump_init_hdr1(kvm_t *kd, struct vmstate *vmst,
184 struct minidumphdr1 *hdr)
188 /* Skip header and msgbuf */
189 off = PAGE_SIZE + round_page(hdr->msgbufsize);
191 vmst->bitmap = _kvm_malloc(kd, hdr->bitmapsize);
192 if (vmst->bitmap == NULL) {
193 _kvm_err(kd, kd->program,
194 "cannot allocate %jd bytes for bitmap",
195 (intmax_t)hdr->bitmapsize);
198 if (pread(kd->pmfd, vmst->bitmap, hdr->bitmapsize, off) !=
200 _kvm_err(kd, kd->program,
201 "cannot read %jd bytes for page bitmap",
202 (intmax_t)hdr->bitmapsize);
205 off += round_page(vmst->bitmapsize);
207 vmst->ptemap = _kvm_malloc(kd, hdr->ptesize);
208 if (vmst->ptemap == NULL) {
209 _kvm_err(kd, kd->program,
210 "cannot allocate %jd bytes for ptemap",
211 (intmax_t)hdr->ptesize);
214 if (pread(kd->pmfd, vmst->ptemap, hdr->ptesize, off) !=
216 _kvm_err(kd, kd->program,
217 "cannot read %jd bytes for ptemap",
218 (intmax_t)hdr->ptesize);
223 vmst->kernbase = hdr->kernbase;
224 vmst->dmapbase = hdr->dmapbase;
225 vmst->dmapend = hdr->dmapend;
226 vmst->bitmapsize = hdr->bitmapsize;
229 /* build physical address hash table for sparse pages */
230 inithash(kd, vmst->bitmap, hdr->bitmapsize, off);
237 _kvm_minidump_init_hdr2(kvm_t *kd, struct vmstate *vmst,
238 struct minidumphdr2 *hdr)
242 /* Skip header and msgbuf */
243 off = PAGE_SIZE + round_page(hdr->msgbufsize);
245 vmst->bitmap = _kvm_malloc(kd, hdr->bitmapsize);
246 if (vmst->bitmap == NULL) {
247 _kvm_err(kd, kd->program,
248 "cannot allocate %jd bytes for bitmap",
249 (intmax_t)hdr->bitmapsize);
252 if (pread(kd->pmfd, vmst->bitmap, hdr->bitmapsize, off) !=
253 (intmax_t)hdr->bitmapsize) {
254 _kvm_err(kd, kd->program,
255 "cannot read %jd bytes for page bitmap",
256 (intmax_t)hdr->bitmapsize);
259 off += round_page(hdr->bitmapsize);
261 vmst->ptemap = _kvm_malloc(kd, hdr->ptesize);
262 if (vmst->ptemap == NULL) {
263 _kvm_err(kd, kd->program,
264 "cannot allocate %jd bytes for ptemap",
265 (intmax_t)hdr->ptesize);
268 if (pread(kd->pmfd, vmst->ptemap, hdr->ptesize, off) !=
269 (intmax_t)hdr->ptesize) {
270 _kvm_err(kd, kd->program,
271 "cannot read %jd bytes for ptemap",
272 (intmax_t)hdr->ptesize);
277 vmst->kernbase = hdr->kernbase;
278 vmst->dmapbase = hdr->dmapbase;
279 vmst->bitmapsize = hdr->bitmapsize;
282 /* build physical address hash table for sparse pages */
283 inithash(kd, vmst->bitmap, hdr->bitmapsize, off);
289 _kvm_minidump_vatop(kvm_t *kd, u_long va, off_t *pa)
299 offset = va & (PAGE_SIZE - 1);
300 va -= offset; /* put va on page boundary */
302 if (va >= vm->kernbase) {
303 switch (vm->pgtable) {
306 * Page tables are specifically dumped (old style)
308 pteindex = (va - vm->kernbase) >> PAGE_SHIFT;
309 pte = vm->ptemap[pteindex];
310 if (((u_long)pte & X86_PG_V) == 0) {
311 _kvm_err(kd, kd->program,
312 "_kvm_vatop: pte not valid");
319 * Kernel page table pages are included in the
320 * sparse map. We only dump the contents of
321 * the PDs (zero-filling any empty entries).
323 * Index of PD entry in PDP & PDP in PML4E together.
325 * First shift by 30 (1GB) - gives us an index
326 * into PD entries. We do not PDP entries in the
327 * PML4E, so there are 512 * 512 PD entries possible.
329 pteindex = (va >> PDPSHIFT) & (512 * 512 - 1);
330 pte = vm->ptemap[pteindex];
331 if ((pte & X86_PG_V) == 0) {
332 _kvm_err(kd, kd->program,
333 "_kvm_vatop: pd not valid");
336 if (pte & X86_PG_PS) { /* 1GB pages */
337 a = (pte & PG_PS_FRAME) +
338 (va & (1024 * 1024 * 1024 - 1));
342 ofs = hpt_find(kd, pte & PG_FRAME);
344 _kvm_err(kd, kd->program,
345 "_kvm_vatop: no phys page for pd");
350 * Index of PT entry in PD
352 pteindex = (va >> PDRSHIFT) & 511;
353 if (pread(kd->pmfd, &pte, sizeof(pte),
354 ofs + pteindex * sizeof(pte)) != sizeof(pte)) {
355 _kvm_err(kd, kd->program,
356 "_kvm_vatop: pd lookup not valid");
359 if ((pte & X86_PG_V) == 0) {
360 _kvm_err(kd, kd->program,
361 "_kvm_vatop: pt not valid");
364 if (pte & X86_PG_PS) { /* 2MB pages */
365 a = (pte & PG_PS_FRAME) +
366 (va & (2048 * 1024 - 1));
369 ofs = hpt_find(kd, pte & PG_FRAME);
371 _kvm_err(kd, kd->program,
372 "_kvm_vatop: no phys page for pt");
377 * Index of pte entry in PT
379 pteindex = (va >> PAGE_SHIFT) & 511;
380 if (pread(kd->pmfd, &pte, sizeof(pte),
381 ofs + pteindex * sizeof(pte)) != sizeof(pte)) {
382 _kvm_err(kd, kd->program,
383 "_kvm_vatop: pte lookup not valid");
393 _kvm_err(kd, kd->program,
394 "_kvm_vatop: bad pgtable mode ");
397 ofs = hpt_find(kd, a);
399 _kvm_err(kd, kd->program, "_kvm_vatop: physical address 0x%lx not in minidump", a);
403 return (PAGE_SIZE - offset);
404 } else if (va >= vm->dmapbase && va < vm->dmapend) {
405 a = (va - vm->dmapbase) & ~PAGE_MASK;
406 ofs = hpt_find(kd, a);
408 _kvm_err(kd, kd->program, "_kvm_vatop: direct map address 0x%lx not in minidump", va);
412 return (PAGE_SIZE - offset);
414 _kvm_err(kd, kd->program, "_kvm_vatop: virtual address 0x%lx not minidumped", va);
419 _kvm_err(kd, 0, "invalid address (0x%lx)", va);
424 _kvm_minidump_kvatop(kvm_t *kd, u_long va, off_t *pa)
426 if (kvm_ishost(kd)) {
427 _kvm_err(kd, 0, "kvm_vatop called in live kernel!");
431 return (_kvm_minidump_vatop(kd, va, pa));