procfs: Add vnode path to /proc/<pid>/map for vn-backed vm objects.
[dragonfly.git] / sys / vfs / procfs / procfs_map.c
/*
 * Copyright (c) 1993 Jan-Simon Pendry
 * Copyright (c) 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)procfs_status.c     8.3 (Berkeley) 2/17/94
 *
 * $FreeBSD: src/sys/miscfs/procfs/procfs_map.c,v 1.24.2.1 2001/08/04 13:12:24 rwatson Exp $
 * $DragonFly: src/sys/vfs/procfs/procfs_map.c,v 1.7 2007/02/19 01:14:24 corecode Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <vfs/procfs/procfs.h>

#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>

#define MEBUFFERSIZE 256

/*
 * The map entries can *almost* be read with programs like cat.  However,
 * large maps need a special program to read them: it is not easy for a
 * program to sense the required buffer size and then do a read of exactly
 * that size, because the two steps cannot be made atomic.  The best we can
 * do is let the program read with an arbitrarily large buffer and return
 * as much as we can.  If the buffer is too small we return an error
 * (EFBIG) and the program can retry with a bigger buffer.
 */
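/*
 * Illustration (editorial addition, not part of the original file): a
 * minimal userland sketch of the retry protocol described above.  The
 * target path "/proc/1/map", the initial 4096-byte buffer, and the
 * doubling strategy are assumptions made for the example; only the EFBIG
 * convention and the fact that output is produced only for reads at
 * offset 0 come from procfs_domap() below.
 */
#include <sys/types.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
        size_t size = 4096;
        char *buf = NULL;
        ssize_t n;
        int fd;

        if ((fd = open("/proc/1/map", O_RDONLY)) < 0)
                err(1, "open");
        for (;;) {
                if ((buf = realloc(buf, size)) == NULL)
                        err(1, "realloc");
                n = read(fd, buf, size);
                if (n >= 0)
                        break;          /* got the whole map in one read */
                if (errno != EFBIG)
                        err(1, "read");
                size *= 2;              /* buffer too small, try a bigger one */
                close(fd);              /* reopen so the retry reads from offset 0 */
                if ((fd = open("/proc/1/map", O_RDONLY)) < 0)
                        err(1, "open");
        }
        fwrite(buf, 1, (size_t)n, stdout);
        free(buf);
        close(fd);
        return (0);
}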
int
procfs_domap(struct proc *curp, struct lwp *lp, struct pfsnode *pfs,
             struct uio *uio)
{
        struct proc *p = lp->lwp_proc;
        int len;
        struct vnode *vp;
        char *fullpath, *freepath;
        int error;
        vm_map_t map = &p->p_vmspace->vm_map;
        pmap_t pmap = vmspace_pmap(p->p_vmspace);
        vm_map_entry_t entry;
        char mebuffer[MEBUFFERSIZE];

        if (uio->uio_rw != UIO_READ)
                return (EOPNOTSUPP);

        if (uio->uio_offset != 0)
                return (0);

        error = 0;
        vm_map_lock_read(map);
        for (entry = map->header.next;
             (uio->uio_resid > 0) && (entry != &map->header);
             entry = entry->next) {
                vm_object_t obj, tobj, lobj;
                int ref_count, shadow_count, flags;
                vm_offset_t addr;
                vm_offset_t ostart;
                int resident, privateresident;
                char *type;

                if (entry->maptype != VM_MAPTYPE_NORMAL &&
                    entry->maptype != VM_MAPTYPE_VPAGETABLE) {
                        continue;
                }

                obj = entry->object.vm_object;
                if (obj && (obj->shadow_count == 1))
                        privateresident = obj->resident_page_count;
                else
                        privateresident = 0;

                /*
                 * Use map->hint as a poor man's ripout detector.
                 */
                map->hint = entry;
                ostart = entry->start;

                /*
                 * Count resident pages (XXX can be horrible on 64-bit)
                 */
                resident = 0;
                addr = entry->start;
                while (addr < entry->end) {
                        if (pmap_extract(pmap, addr))
                                resident++;
                        addr += PAGE_SIZE;
                }

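                /*
                 * Walk down the backing-object chain; the bottom-most
                 * object is the one that may reference a vnode.
                 */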
                for (lobj = tobj = obj; tobj; tobj = tobj->backing_object)
                        lobj = tobj;

                freepath = NULL;
                fullpath = "-";
                if (lobj) {
                        switch (lobj->type) {
                        default:
                        case OBJT_DEFAULT:
                                type = "default";
                                vp = NULL;
                                break;
                        case OBJT_VNODE:
                                type = "vnode";
                                vp = lobj->handle;
                                vref(vp);
                                break;
                        case OBJT_SWAP:
                                type = "swap";
                                vp = NULL;
                                break;
                        case OBJT_DEVICE:
                                type = "device";
                                vp = NULL;
                                break;
                        }

                        flags = obj->flags;
                        ref_count = obj->ref_count;
                        shadow_count = obj->shadow_count;
                        if (vp != NULL) {
                                vn_fullpath(p, vp, &fullpath, &freepath, 0);
                                vrele(vp);
                        }
                } else {
                        type = "none";
                        flags = 0;
                        ref_count = 0;
                        shadow_count = 0;
                }

                /*
                 * Output format, one line per map entry:
                 *  start, end, resident, private resident, obj, access (rwx),
                 *  ref_count, shadow_count, flags, cow, needs_copy, type,
                 *  fullpath
                 */
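                /*
                 * A hypothetical example line (illustrative values only,
                 * matching the ksnprintf() format below):
                 *
                 *  0x400000 0x4c8000 50 0 0xc2f6d3a0 r-x 2 1 0x0 COW NC vnode /bin/sh
                 */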
                ksnprintf(mebuffer, sizeof(mebuffer),
                    "0x%lx 0x%lx %d %d %p %s%s%s %d %d 0x%x %s %s %s %s\n",
                    (u_long)entry->start, (u_long)entry->end,
                    resident, privateresident, obj,
                    (entry->protection & VM_PROT_READ) ? "r" : "-",
                    (entry->protection & VM_PROT_WRITE) ? "w" : "-",
                    (entry->protection & VM_PROT_EXECUTE) ? "x" : "-",
                    ref_count, shadow_count, flags,
                    (entry->eflags & MAP_ENTRY_COW) ? "COW" : "NCOW",
                    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "NC" : "NNC",
                    type, fullpath);

                if (freepath != NULL) {
                        kfree(freepath, M_TEMP);
                        freepath = NULL;
                }

                len = strlen(mebuffer);
                if (len > uio->uio_resid) {
                        error = EFBIG;
                        break;
                }

                /*
                 * We cannot safely hold the map locked while accessing
                 * userspace as a VM fault might recurse the locked map.
                 */
                vm_map_unlock_read(map);
                error = uiomove(mebuffer, len, uio);
                vm_map_lock_read(map);
                if (error)
                        break;

                /*
                 * We use map->hint as a poor man's ripout detector.  If it
                 * no longer matches the entry we set it to prior to
                 * unlocking the map, the entry MIGHT now be stale.  In that
                 * case we do an expensive lookup to find our place in the
                 * iteration again.
                 */
                if (map->hint != entry) {
                        vm_map_entry_t reentry;

                        vm_map_lookup_entry(map, ostart, &reentry);
                        entry = reentry;
                }
        }
        vm_map_unlock_read(map);

        return error;
}

int
procfs_validmap(struct lwp *lp)
{
        return ((lp->lwp_proc->p_flag & P_SYSTEM) == 0);
}