| 1 | /* |
| 2 | * Copyright (c) 1994,1997 John S. Dyson |
| 3 | * All rights reserved. |
| 4 | * |
| 5 | * Redistribution and use in source and binary forms, with or without |
| 6 | * modification, are permitted provided that the following conditions |
| 7 | * are met: |
| 8 | * 1. Redistributions of source code must retain the above copyright |
| 9 | * notice immediately at the beginning of the file, without modification, |
| 10 | * this list of conditions, and the following disclaimer. |
| 11 | * 2. Absolutely no warranty of function or purpose is made by the author |
| 12 | * John S. Dyson. |
| 13 | * |
| 14 | * $FreeBSD: src/sys/kern/vfs_bio.c,v 1.242.2.20 2003/05/28 18:38:10 alc Exp $ |
| 15 | */ |
| 16 | |
| 17 | /* |
 * This file contains a new buffer I/O scheme implementing a coherent
| 19 | * VM object and buffer cache scheme. Pains have been taken to make |
| 20 | * sure that the performance degradation associated with schemes such |
| 21 | * as this is not realized. |
| 22 | * |
| 23 | * Author: John S. Dyson |
| 24 | * Significant help during the development and debugging phases |
 * was provided by David Greenman, also of the FreeBSD core team.
| 26 | * |
| 27 | * see man buf(9) for more info. |
| 28 | */ |
| 29 | |
| 30 | #include <sys/param.h> |
| 31 | #include <sys/systm.h> |
| 32 | #include <sys/buf.h> |
| 33 | #include <sys/conf.h> |
| 34 | #include <sys/devicestat.h> |
| 35 | #include <sys/eventhandler.h> |
| 36 | #include <sys/lock.h> |
| 37 | #include <sys/malloc.h> |
| 38 | #include <sys/mount.h> |
| 39 | #include <sys/kernel.h> |
| 40 | #include <sys/kthread.h> |
| 41 | #include <sys/proc.h> |
| 42 | #include <sys/reboot.h> |
| 43 | #include <sys/resourcevar.h> |
| 44 | #include <sys/sysctl.h> |
| 45 | #include <sys/vmmeter.h> |
| 46 | #include <sys/vnode.h> |
| 47 | #include <sys/dsched.h> |
| 48 | #include <vm/vm.h> |
| 49 | #include <vm/vm_param.h> |
| 50 | #include <vm/vm_kern.h> |
| 51 | #include <vm/vm_pageout.h> |
| 52 | #include <vm/vm_page.h> |
| 53 | #include <vm/vm_object.h> |
| 54 | #include <vm/vm_extern.h> |
| 55 | #include <vm/vm_map.h> |
| 56 | #include <vm/vm_pager.h> |
| 57 | #include <vm/swap_pager.h> |
| 58 | |
| 59 | #include <sys/buf2.h> |
| 60 | #include <sys/thread2.h> |
| 61 | #include <sys/spinlock2.h> |
| 62 | #include <sys/mplock2.h> |
| 63 | #include <vm/vm_page2.h> |
| 64 | |
| 65 | #include "opt_ddb.h" |
| 66 | #ifdef DDB |
| 67 | #include <ddb/ddb.h> |
| 68 | #endif |
| 69 | |
| 70 | /* |
| 71 | * Buffer queues. |
| 72 | */ |
| 73 | enum bufq_type { |
| 74 | BQUEUE_NONE, /* not on any queue */ |
| 75 | BQUEUE_LOCKED, /* locked buffers */ |
| 76 | BQUEUE_CLEAN, /* non-B_DELWRI buffers */ |
| 77 | BQUEUE_DIRTY, /* B_DELWRI buffers */ |
| 78 | BQUEUE_DIRTY_HW, /* B_DELWRI buffers - heavy weight */ |
| 79 | BQUEUE_EMPTYKVA, /* empty buffer headers with KVA assignment */ |
| 80 | BQUEUE_EMPTY, /* empty buffer headers */ |
| 81 | |
| 82 | BUFFER_QUEUES /* number of buffer queues */ |
| 83 | }; |
| 84 | |
| 85 | typedef enum bufq_type bufq_type_t; |
| 86 | |
| 87 | #define BD_WAKE_SIZE 16384 |
| 88 | #define BD_WAKE_MASK (BD_WAKE_SIZE - 1) |
| 89 | |
| 90 | TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES]; |
| 91 | static struct spinlock bufqspin = SPINLOCK_INITIALIZER(&bufqspin); |
| 92 | static struct spinlock bufcspin = SPINLOCK_INITIALIZER(&bufcspin); |
| 93 | |
| 94 | static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer"); |
| 95 | |
| 96 | struct buf *buf; /* buffer header pool */ |
| 97 | |
| 98 | static void vfs_clean_pages(struct buf *bp); |
| 99 | static void vfs_clean_one_page(struct buf *bp, int pageno, vm_page_t m); |
| 100 | #if 0 |
| 101 | static void vfs_dirty_one_page(struct buf *bp, int pageno, vm_page_t m); |
| 102 | #endif |
| 103 | static void vfs_vmio_release(struct buf *bp); |
| 104 | static int flushbufqueues(struct buf *marker, bufq_type_t q); |
| 105 | static vm_page_t bio_page_alloc(vm_object_t obj, vm_pindex_t pg, int deficit); |
| 106 | |
| 107 | static void bd_signal(int totalspace); |
| 108 | static void buf_daemon(void); |
| 109 | static void buf_daemon_hw(void); |
| 110 | |
| 111 | /* |
| 112 | * bogus page -- for I/O to/from partially complete buffers |
| 113 | * this is a temporary solution to the problem, but it is not |
| 114 | * really that bad. it would be better to split the buffer |
| 115 | * for input in the case of buffers partially already in memory, |
| 116 | * but the code is intricate enough already. |
| 117 | */ |
| 118 | vm_page_t bogus_page; |
| 119 | |
| 120 | /* |
| 121 | * These are all static, but make the ones we export globals so we do |
| 122 | * not need to use compiler magic. |
| 123 | */ |
| 124 | long bufspace; /* locked by buffer_map */ |
| 125 | long maxbufspace; |
| 126 | static long bufmallocspace; /* atomic ops */ |
| 127 | long maxbufmallocspace, lobufspace, hibufspace; |
| 128 | static int bufreusecnt, bufdefragcnt, buffreekvacnt; |
| 129 | static long lorunningspace; |
| 130 | static long hirunningspace; |
| 131 | static int runningbufreq; /* locked by bufcspin */ |
| 132 | static long dirtybufspace; /* locked by bufcspin */ |
| 133 | static int dirtybufcount; /* locked by bufcspin */ |
| 134 | static long dirtybufspacehw; /* locked by bufcspin */ |
| 135 | static int dirtybufcounthw; /* locked by bufcspin */ |
| 136 | static long runningbufspace; /* locked by bufcspin */ |
| 137 | static int runningbufcount; /* locked by bufcspin */ |
| 138 | long lodirtybufspace; |
| 139 | long hidirtybufspace; |
| 140 | static int getnewbufcalls; |
| 141 | static int getnewbufrestarts; |
| 142 | static int recoverbufcalls; |
| 143 | static int needsbuffer; /* locked by bufcspin */ |
| 144 | static int bd_request; /* locked by bufcspin */ |
| 145 | static int bd_request_hw; /* locked by bufcspin */ |
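/*
 * bd_wake_ary[] is a circular array of wakeup counters.  bd_wait()
 * registers at an offset from bd_wake_index proportional to the amount
 * of dirty space (in BKVASIZE chunks) it needs flushed; bd_signal()
 * advances bd_wake_index as space drains and wakes the registered
 * waiters.
 */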
| 146 | static u_int bd_wake_ary[BD_WAKE_SIZE]; |
| 147 | static u_int bd_wake_index; |
| 148 | static u_int vm_cycle_point = 40; /* 23-36 will migrate more act->inact */ |
| 149 | static int debug_commit; |
| 150 | |
| 151 | static struct thread *bufdaemon_td; |
| 152 | static struct thread *bufdaemonhw_td; |
| 153 | static u_int lowmempgallocs; |
| 154 | static u_int lowmempgfails; |
| 155 | |
| 156 | /* |
| 157 | * Sysctls for operational control of the buffer cache. |
| 158 | */ |
| 159 | SYSCTL_LONG(_vfs, OID_AUTO, lodirtybufspace, CTLFLAG_RW, &lodirtybufspace, 0, |
	"Amount of dirty buffer space below which buf_daemon stops flushing");
| 161 | SYSCTL_LONG(_vfs, OID_AUTO, hidirtybufspace, CTLFLAG_RW, &hidirtybufspace, 0, |
| 162 | "High watermark used to trigger explicit flushing of dirty buffers"); |
| 163 | SYSCTL_LONG(_vfs, OID_AUTO, lorunningspace, CTLFLAG_RW, &lorunningspace, 0, |
| 164 | "Minimum amount of buffer space required for active I/O"); |
| 165 | SYSCTL_LONG(_vfs, OID_AUTO, hirunningspace, CTLFLAG_RW, &hirunningspace, 0, |
	"Maximum amount of buffer space usable for active I/O");
| 167 | SYSCTL_UINT(_vfs, OID_AUTO, lowmempgallocs, CTLFLAG_RW, &lowmempgallocs, 0, |
| 168 | "Page allocations done during periods of very low free memory"); |
| 169 | SYSCTL_UINT(_vfs, OID_AUTO, lowmempgfails, CTLFLAG_RW, &lowmempgfails, 0, |
| 170 | "Page allocations which failed during periods of very low free memory"); |
| 171 | SYSCTL_UINT(_vfs, OID_AUTO, vm_cycle_point, CTLFLAG_RW, &vm_cycle_point, 0, |
	"Active/inactive queue transition point (0-64) for recycled pages");
| 173 | /* |
| 174 | * Sysctls determining current state of the buffer cache. |
| 175 | */ |
| 176 | SYSCTL_INT(_vfs, OID_AUTO, nbuf, CTLFLAG_RD, &nbuf, 0, |
| 177 | "Total number of buffers in buffer cache"); |
| 178 | SYSCTL_LONG(_vfs, OID_AUTO, dirtybufspace, CTLFLAG_RD, &dirtybufspace, 0, |
| 179 | "Pending bytes of dirty buffers (all)"); |
| 180 | SYSCTL_LONG(_vfs, OID_AUTO, dirtybufspacehw, CTLFLAG_RD, &dirtybufspacehw, 0, |
| 181 | "Pending bytes of dirty buffers (heavy weight)"); |
| 182 | SYSCTL_INT(_vfs, OID_AUTO, dirtybufcount, CTLFLAG_RD, &dirtybufcount, 0, |
| 183 | "Pending number of dirty buffers"); |
| 184 | SYSCTL_INT(_vfs, OID_AUTO, dirtybufcounthw, CTLFLAG_RD, &dirtybufcounthw, 0, |
| 185 | "Pending number of dirty buffers (heavy weight)"); |
| 186 | SYSCTL_LONG(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0, |
| 187 | "I/O bytes currently in progress due to asynchronous writes"); |
| 188 | SYSCTL_INT(_vfs, OID_AUTO, runningbufcount, CTLFLAG_RD, &runningbufcount, 0, |
| 189 | "I/O buffers currently in progress due to asynchronous writes"); |
| 190 | SYSCTL_LONG(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RD, &maxbufspace, 0, |
| 191 | "Hard limit on maximum amount of memory usable for buffer space"); |
| 192 | SYSCTL_LONG(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD, &hibufspace, 0, |
| 193 | "Soft limit on maximum amount of memory usable for buffer space"); |
| 194 | SYSCTL_LONG(_vfs, OID_AUTO, lobufspace, CTLFLAG_RD, &lobufspace, 0, |
| 195 | "Minimum amount of memory to reserve for system buffer space"); |
| 196 | SYSCTL_LONG(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, &bufspace, 0, |
	"Amount of memory currently used for buffer space");
| 198 | SYSCTL_LONG(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RD, &maxbufmallocspace, |
| 199 | 0, "Maximum amount of memory reserved for buffers using malloc"); |
| 200 | SYSCTL_LONG(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0, |
	"Amount of memory currently allocated to malloc-backed buffers");
| 202 | SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RD, &getnewbufcalls, 0, |
| 203 | "New buffer header acquisition requests"); |
| 204 | SYSCTL_INT(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RD, &getnewbufrestarts, |
| 205 | 0, "New buffer header acquisition restarts"); |
| 206 | SYSCTL_INT(_vfs, OID_AUTO, recoverbufcalls, CTLFLAG_RD, &recoverbufcalls, 0, |
	"Number of calls made to recover buffer space in an emergency");
| 208 | SYSCTL_INT(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RD, &bufdefragcnt, 0, |
| 209 | "Buffer acquisition restarts due to fragmented buffer map"); |
| 210 | SYSCTL_INT(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RD, &buffreekvacnt, 0, |
	"Number of times KVA space was deallocated from an arbitrary buffer");
| 212 | SYSCTL_INT(_vfs, OID_AUTO, bufreusecnt, CTLFLAG_RD, &bufreusecnt, 0, |
	"Number of times buffer re-use operations were successful");
| 214 | SYSCTL_INT(_vfs, OID_AUTO, debug_commit, CTLFLAG_RW, &debug_commit, 0, ""); |
| 215 | SYSCTL_INT(_debug_sizeof, OID_AUTO, buf, CTLFLAG_RD, 0, sizeof(struct buf), |
| 216 | "sizeof(struct buf)"); |
| 217 | |
| 218 | char *buf_wmesg = BUF_WMESG; |
| 219 | |
| 220 | #define VFS_BIO_NEED_ANY 0x01 /* any freeable buffer */ |
| 221 | #define VFS_BIO_NEED_UNUSED02 0x02 |
| 222 | #define VFS_BIO_NEED_UNUSED04 0x04 |
| 223 | #define VFS_BIO_NEED_BUFSPACE 0x08 /* wait for buf space, lo hysteresis */ |
| 224 | |
| 225 | /* |
| 226 | * bufspacewakeup: |
| 227 | * |
| 228 | * Called when buffer space is potentially available for recovery. |
| 229 | * getnewbuf() will block on this flag when it is unable to free |
| 230 | * sufficient buffer space. Buffer space becomes recoverable when |
| 231 | * bp's get placed back in the queues. |
| 232 | */ |
| 233 | static __inline void |
| 234 | bufspacewakeup(void) |
| 235 | { |
| 236 | /* |
| 237 | * If someone is waiting for BUF space, wake them up. Even |
| 238 | * though we haven't freed the kva space yet, the waiting |
| 239 | * process will be able to now. |
| 240 | */ |
| 241 | spin_lock(&bufcspin); |
| 242 | if (needsbuffer & VFS_BIO_NEED_BUFSPACE) { |
| 243 | needsbuffer &= ~VFS_BIO_NEED_BUFSPACE; |
| 244 | spin_unlock(&bufcspin); |
| 245 | wakeup(&needsbuffer); |
| 246 | } else { |
| 247 | spin_unlock(&bufcspin); |
| 248 | } |
| 249 | } |
| 250 | |
| 251 | /* |
| 252 | * runningbufwakeup: |
| 253 | * |
 *	Account for completed I/O: remove the buffer's contribution from
 *	runningbufspace and wake up anyone waiting for the write pipeline
 *	to drain.
| 256 | */ |
| 257 | static __inline void |
| 258 | runningbufwakeup(struct buf *bp) |
| 259 | { |
| 260 | long totalspace; |
| 261 | long limit; |
| 262 | |
| 263 | if ((totalspace = bp->b_runningbufspace) != 0) { |
| 264 | spin_lock(&bufcspin); |
| 265 | runningbufspace -= totalspace; |
| 266 | --runningbufcount; |
| 267 | bp->b_runningbufspace = 0; |
| 268 | |
| 269 | /* |
| 270 | * see waitrunningbufspace() for limit test. |
| 271 | */ |
| 272 | limit = hirunningspace * 3 / 6; |
| 273 | if (runningbufreq && runningbufspace <= limit) { |
| 274 | runningbufreq = 0; |
| 275 | spin_unlock(&bufcspin); |
| 276 | wakeup(&runningbufreq); |
| 277 | } else { |
| 278 | spin_unlock(&bufcspin); |
| 279 | } |
| 280 | bd_signal(totalspace); |
| 281 | } |
| 282 | } |
| 283 | |
| 284 | /* |
| 285 | * bufcountwakeup: |
| 286 | * |
| 287 | * Called when a buffer has been added to one of the free queues to |
| 288 | * account for the buffer and to wakeup anyone waiting for free buffers. |
| 289 | * This typically occurs when large amounts of metadata are being handled |
| 290 | * by the buffer cache ( else buffer space runs out first, usually ). |
| 291 | * |
| 292 | * MPSAFE |
| 293 | */ |
| 294 | static __inline void |
| 295 | bufcountwakeup(void) |
| 296 | { |
| 297 | spin_lock(&bufcspin); |
| 298 | if (needsbuffer) { |
| 299 | needsbuffer &= ~VFS_BIO_NEED_ANY; |
| 300 | spin_unlock(&bufcspin); |
| 301 | wakeup(&needsbuffer); |
| 302 | } else { |
| 303 | spin_unlock(&bufcspin); |
| 304 | } |
| 305 | } |
| 306 | |
| 307 | /* |
| 308 | * waitrunningbufspace() |
| 309 | * |
| 310 | * If runningbufspace exceeds 4/6 hirunningspace we block until |
| 311 | * runningbufspace drops to 3/6 hirunningspace. We also block if another |
| 312 | * thread blocked here in order to be fair, even if runningbufspace |
| 313 | * is now lower than the limit. |
| 314 | * |
| 315 | * The caller may be using this function to block in a tight loop, we |
| 316 | * must block while runningbufspace is greater than at least |
| 317 | * hirunningspace * 3 / 6. |
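 *
 * For example, with hirunningspace at 6MB a writer blocks once more
 * than 4MB of write I/O is in flight and is not woken by
 * runningbufwakeup() until the pipeline drains below 3MB.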
| 318 | */ |
| 319 | void |
| 320 | waitrunningbufspace(void) |
| 321 | { |
| 322 | long limit = hirunningspace * 4 / 6; |
| 323 | |
| 324 | if (runningbufspace > limit || runningbufreq) { |
| 325 | spin_lock(&bufcspin); |
| 326 | while (runningbufspace > limit || runningbufreq) { |
| 327 | runningbufreq = 1; |
| 328 | ssleep(&runningbufreq, &bufcspin, 0, "wdrn1", 0); |
| 329 | } |
| 330 | spin_unlock(&bufcspin); |
| 331 | } |
| 332 | } |
| 333 | |
| 334 | /* |
| 335 | * buf_dirty_count_severe: |
| 336 | * |
| 337 | * Return true if we have too many dirty buffers. |
| 338 | */ |
| 339 | int |
| 340 | buf_dirty_count_severe(void) |
| 341 | { |
| 342 | return (runningbufspace + dirtybufspace >= hidirtybufspace || |
| 343 | dirtybufcount >= nbuf / 2); |
| 344 | } |
| 345 | |
| 346 | /* |
| 347 | * Return true if the amount of running I/O is severe and BIOQ should |
| 348 | * start bursting. |
| 349 | */ |
| 350 | int |
| 351 | buf_runningbufspace_severe(void) |
| 352 | { |
| 353 | return (runningbufspace >= hirunningspace * 4 / 6); |
| 354 | } |
| 355 | |
| 356 | /* |
| 357 | * vfs_buf_test_cache: |
| 358 | * |
| 359 | * Called when a buffer is extended. This function clears the B_CACHE |
| 360 | * bit if the newly extended portion of the buffer does not contain |
| 361 | * valid data. |
| 362 | * |
| 363 | * NOTE! Dirty VM pages are not processed into dirty (B_DELWRI) buffer |
| 364 | * cache buffers. The VM pages remain dirty, as someone had mmap()'d |
| 365 | * them while a clean buffer was present. |
| 366 | */ |
| 367 | static __inline__ |
| 368 | void |
| 369 | vfs_buf_test_cache(struct buf *bp, |
| 370 | vm_ooffset_t foff, vm_offset_t off, vm_offset_t size, |
| 371 | vm_page_t m) |
| 372 | { |
| 373 | if (bp->b_flags & B_CACHE) { |
| 374 | int base = (foff + off) & PAGE_MASK; |
| 375 | if (vm_page_is_valid(m, base, size) == 0) |
| 376 | bp->b_flags &= ~B_CACHE; |
| 377 | } |
| 378 | } |
| 379 | |
| 380 | /* |
| 381 | * bd_speedup() |
| 382 | * |
| 383 | * Spank the buf_daemon[_hw] if the total dirty buffer space exceeds the |
| 384 | * low water mark. |
| 385 | * |
| 386 | * MPSAFE |
| 387 | */ |
| 388 | static __inline__ |
| 389 | void |
| 390 | bd_speedup(void) |
| 391 | { |
| 392 | if (dirtybufspace < lodirtybufspace && dirtybufcount < nbuf / 2) |
| 393 | return; |
| 394 | |
| 395 | if (bd_request == 0 && |
| 396 | (dirtybufspace - dirtybufspacehw > lodirtybufspace / 2 || |
| 397 | dirtybufcount - dirtybufcounthw >= nbuf / 2)) { |
| 398 | spin_lock(&bufcspin); |
| 399 | bd_request = 1; |
| 400 | spin_unlock(&bufcspin); |
| 401 | wakeup(&bd_request); |
| 402 | } |
| 403 | if (bd_request_hw == 0 && |
| 404 | (dirtybufspacehw > lodirtybufspace / 2 || |
| 405 | dirtybufcounthw >= nbuf / 2)) { |
| 406 | spin_lock(&bufcspin); |
| 407 | bd_request_hw = 1; |
| 408 | spin_unlock(&bufcspin); |
| 409 | wakeup(&bd_request_hw); |
| 410 | } |
| 411 | } |
| 412 | |
| 413 | /* |
| 414 | * bd_heatup() |
| 415 | * |
| 416 | * Get the buf_daemon heated up when the number of running and dirty |
| 417 | * buffers exceeds the mid-point. |
| 418 | * |
| 419 | * Return the total number of dirty bytes past the second mid point |
| 420 | * as a measure of how much excess dirty data there is in the system. |
| 421 | * |
| 422 | * MPSAFE |
| 423 | */ |
| 424 | int |
| 425 | bd_heatup(void) |
| 426 | { |
| 427 | long mid1; |
| 428 | long mid2; |
| 429 | long totalspace; |
| 430 | |
| 431 | mid1 = lodirtybufspace + (hidirtybufspace - lodirtybufspace) / 2; |
| 432 | |
| 433 | totalspace = runningbufspace + dirtybufspace; |
| 434 | if (totalspace >= mid1 || dirtybufcount >= nbuf / 2) { |
| 435 | bd_speedup(); |
| 436 | mid2 = mid1 + (hidirtybufspace - mid1) / 2; |
| 437 | if (totalspace >= mid2) |
| 438 | return(totalspace - mid2); |
| 439 | } |
| 440 | return(0); |
| 441 | } |
| 442 | |
| 443 | /* |
| 444 | * bd_wait() |
| 445 | * |
| 446 | * Wait for the buffer cache to flush (totalspace) bytes worth of |
| 447 | * buffers, then return. |
| 448 | * |
| 449 | * Regardless this function blocks while the number of dirty buffers |
 * Regardless, this function blocks while the amount of dirty and
 * running buffer space exceeds hidirtybufspace.
| 452 | * MPSAFE |
| 453 | */ |
| 454 | void |
| 455 | bd_wait(int totalspace) |
| 456 | { |
| 457 | u_int i; |
| 458 | int count; |
| 459 | |
| 460 | if (curthread == bufdaemonhw_td || curthread == bufdaemon_td) |
| 461 | return; |
| 462 | |
| 463 | while (totalspace > 0) { |
| 464 | bd_heatup(); |
| 465 | if (totalspace > runningbufspace + dirtybufspace) |
| 466 | totalspace = runningbufspace + dirtybufspace; |
| 467 | count = totalspace / BKVASIZE; |
| 468 | if (count >= BD_WAKE_SIZE) |
| 469 | count = BD_WAKE_SIZE - 1; |
| 470 | |
| 471 | spin_lock(&bufcspin); |
| 472 | i = (bd_wake_index + count) & BD_WAKE_MASK; |
| 473 | ++bd_wake_ary[i]; |
| 474 | |
| 475 | /* |
| 476 | * This is not a strict interlock, so we play a bit loose |
| 477 | * with locking access to dirtybufspace* |
| 478 | */ |
| 479 | tsleep_interlock(&bd_wake_ary[i], 0); |
| 480 | spin_unlock(&bufcspin); |
| 481 | tsleep(&bd_wake_ary[i], PINTERLOCKED, "flstik", hz); |
| 482 | |
| 483 | totalspace = runningbufspace + dirtybufspace - hidirtybufspace; |
| 484 | } |
| 485 | } |
| 486 | |
| 487 | /* |
| 488 | * bd_signal() |
| 489 | * |
| 490 | * This function is called whenever runningbufspace or dirtybufspace |
 * is reduced.  Wake up any threads waiting for run+dirty buffer I/O
 * to complete.
| 493 | * |
| 494 | * MPSAFE |
| 495 | */ |
| 496 | static void |
| 497 | bd_signal(int totalspace) |
| 498 | { |
| 499 | u_int i; |
| 500 | |
| 501 | if (totalspace > 0) { |
| 502 | if (totalspace > BKVASIZE * BD_WAKE_SIZE) |
| 503 | totalspace = BKVASIZE * BD_WAKE_SIZE; |
| 504 | spin_lock(&bufcspin); |
| 505 | while (totalspace > 0) { |
| 506 | i = bd_wake_index++; |
| 507 | i &= BD_WAKE_MASK; |
| 508 | if (bd_wake_ary[i]) { |
| 509 | bd_wake_ary[i] = 0; |
| 510 | spin_unlock(&bufcspin); |
| 511 | wakeup(&bd_wake_ary[i]); |
| 512 | spin_lock(&bufcspin); |
| 513 | } |
| 514 | totalspace -= BKVASIZE; |
| 515 | } |
| 516 | spin_unlock(&bufcspin); |
| 517 | } |
| 518 | } |
| 519 | |
| 520 | /* |
| 521 | * BIO tracking support routines. |
| 522 | * |
| 523 | * Release a ref on a bio_track. Wakeup requests are atomically released |
| 524 | * along with the last reference so bk_active will never wind up set to |
| 525 | * only 0x80000000. |
| 526 | * |
| 527 | * MPSAFE |
| 528 | */ |
| 529 | static |
| 530 | void |
| 531 | bio_track_rel(struct bio_track *track) |
| 532 | { |
| 533 | int active; |
| 534 | int desired; |
| 535 | |
| 536 | /* |
| 537 | * Shortcut |
| 538 | */ |
| 539 | active = track->bk_active; |
| 540 | if (active == 1 && atomic_cmpset_int(&track->bk_active, 1, 0)) |
| 541 | return; |
| 542 | |
| 543 | /* |
| 544 | * Full-on. Note that the wait flag is only atomically released on |
| 545 | * the 1->0 count transition. |
| 546 | * |
| 547 | * We check for a negative count transition using bit 30 since bit 31 |
| 548 | * has a different meaning. |
| 549 | */ |
| 550 | for (;;) { |
| 551 | desired = (active & 0x7FFFFFFF) - 1; |
| 552 | if (desired) |
| 553 | desired |= active & 0x80000000; |
| 554 | if (atomic_cmpset_int(&track->bk_active, active, desired)) { |
| 555 | if (desired & 0x40000000) |
| 556 | panic("bio_track_rel: bad count: %p\n", track); |
| 557 | if (active & 0x80000000) |
| 558 | wakeup(track); |
| 559 | break; |
| 560 | } |
| 561 | active = track->bk_active; |
| 562 | } |
| 563 | } |
| 564 | |
| 565 | /* |
| 566 | * Wait for the tracking count to reach 0. |
| 567 | * |
| 568 | * Use atomic ops such that the wait flag is only set atomically when |
| 569 | * bk_active is non-zero. |
| 570 | * |
| 571 | * MPSAFE |
| 572 | */ |
| 573 | int |
| 574 | bio_track_wait(struct bio_track *track, int slp_flags, int slp_timo) |
| 575 | { |
| 576 | int active; |
| 577 | int desired; |
| 578 | int error; |
| 579 | |
| 580 | /* |
| 581 | * Shortcut |
| 582 | */ |
| 583 | if (track->bk_active == 0) |
| 584 | return(0); |
| 585 | |
| 586 | /* |
| 587 | * Full-on. Note that the wait flag may only be atomically set if |
| 588 | * the active count is non-zero. |
| 589 | * |
| 590 | * NOTE: We cannot optimize active == desired since a wakeup could |
| 591 | * clear active prior to our tsleep_interlock(). |
| 592 | */ |
| 593 | error = 0; |
| 594 | while ((active = track->bk_active) != 0) { |
| 595 | cpu_ccfence(); |
| 596 | desired = active | 0x80000000; |
| 597 | tsleep_interlock(track, slp_flags); |
| 598 | if (atomic_cmpset_int(&track->bk_active, active, desired)) { |
| 599 | error = tsleep(track, slp_flags | PINTERLOCKED, |
| 600 | "trwait", slp_timo); |
| 601 | if (error) |
| 602 | break; |
| 603 | } |
| 604 | } |
| 605 | return (error); |
| 606 | } |
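
/*
 * A typical use (sketch only; assumes the vnode's v_track_write tracker)
 * is to drain pending write BIOs before tearing a vnode down:
 *
 *	bio_track_wait(&vp->v_track_write, 0, 0);
 */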
| 607 | |
| 608 | /* |
| 609 | * bufinit: |
| 610 | * |
 *	Load-time initialization of the buffer cache, called from
 *	machine-dependent initialization code.
| 613 | */ |
| 614 | void |
| 615 | bufinit(void) |
| 616 | { |
| 617 | struct buf *bp; |
| 618 | vm_offset_t bogus_offset; |
| 619 | int i; |
| 620 | |
	/* first, initialize an empty set of free lists */
| 622 | for (i = 0; i < BUFFER_QUEUES; i++) |
| 623 | TAILQ_INIT(&bufqueues[i]); |
| 624 | |
| 625 | /* finally, initialize each buffer header and stick on empty q */ |
| 626 | for (i = 0; i < nbuf; i++) { |
| 627 | bp = &buf[i]; |
| 628 | bzero(bp, sizeof *bp); |
| 629 | bp->b_flags = B_INVAL; /* we're just an empty header */ |
| 630 | bp->b_cmd = BUF_CMD_DONE; |
| 631 | bp->b_qindex = BQUEUE_EMPTY; |
| 632 | initbufbio(bp); |
| 633 | xio_init(&bp->b_xio); |
| 634 | buf_dep_init(bp); |
| 635 | TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_EMPTY], bp, b_freelist); |
| 636 | } |
| 637 | |
| 638 | /* |
| 639 | * maxbufspace is the absolute maximum amount of buffer space we are |
| 640 | * allowed to reserve in KVM and in real terms. The absolute maximum |
| 641 | * is nominally used by buf_daemon. hibufspace is the nominal maximum |
| 642 | * used by most other processes. The differential is required to |
| 643 | * ensure that buf_daemon is able to run when other processes might |
| 644 | * be blocked waiting for buffer space. |
| 645 | * |
	 * maxbufspace is based on BKVASIZE.  Allocating buffers larger than
| 647 | * this may result in KVM fragmentation which is not handled optimally |
| 648 | * by the system. |
| 649 | */ |
| 650 | maxbufspace = (long)nbuf * BKVASIZE; |
| 651 | hibufspace = imax(3 * maxbufspace / 4, maxbufspace - MAXBSIZE * 10); |
| 652 | lobufspace = hibufspace - MAXBSIZE; |
| 653 | |
| 654 | lorunningspace = 512 * 1024; |
| 655 | /* hirunningspace -- see below */ |
| 656 | |
| 657 | /* |
| 658 | * Limit the amount of malloc memory since it is wired permanently |
| 659 | * into the kernel space. Even though this is accounted for in |
| 660 | * the buffer allocation, we don't want the malloced region to grow |
| 661 | * uncontrolled. The malloc scheme improves memory utilization |
| 662 | * significantly on average (small) directories. |
| 663 | */ |
| 664 | maxbufmallocspace = hibufspace / 20; |
| 665 | |
| 666 | /* |
	 * Reduce the chance of a deadlock occurring by limiting the number
| 668 | * of delayed-write dirty buffers we allow to stack up. |
| 669 | * |
| 670 | * We don't want too much actually queued to the device at once |
| 671 | * (XXX this needs to be per-mount!), because the buffers will |
| 672 | * wind up locked for a very long period of time while the I/O |
| 673 | * drains. |
| 674 | */ |
| 675 | hidirtybufspace = hibufspace / 2; /* dirty + running */ |
| 676 | hirunningspace = hibufspace / 16; /* locked & queued to device */ |
| 677 | if (hirunningspace < 1024 * 1024) |
| 678 | hirunningspace = 1024 * 1024; |
| 679 | |
| 680 | dirtybufspace = 0; |
| 681 | dirtybufspacehw = 0; |
| 682 | |
| 683 | lodirtybufspace = hidirtybufspace / 2; |
| 684 | |
| 685 | /* |
| 686 | * Maximum number of async ops initiated per buf_daemon loop. This is |
| 687 | * somewhat of a hack at the moment, we really need to limit ourselves |
| 688 | * based on the number of bytes of I/O in-transit that were initiated |
| 689 | * from buf_daemon. |
| 690 | */ |
| 691 | |
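	/*
	 * Allocate the single bogus page used to stand in for pages that
	 * must not be overwritten during partial-buffer I/O (see the
	 * bogus_page comment near the top of this file).
	 */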
| 692 | bogus_offset = kmem_alloc_pageable(&kernel_map, PAGE_SIZE); |
| 693 | vm_object_hold(&kernel_object); |
| 694 | bogus_page = vm_page_alloc(&kernel_object, |
| 695 | (bogus_offset >> PAGE_SHIFT), |
| 696 | VM_ALLOC_NORMAL); |
| 697 | vm_object_drop(&kernel_object); |
| 698 | vmstats.v_wire_count++; |
| 699 | |
| 700 | } |
| 701 | |
| 702 | /* |
| 703 | * Initialize the embedded bio structures, typically used by |
| 704 | * deprecated code which tries to allocate its own struct bufs. |
| 705 | */ |
| 706 | void |
| 707 | initbufbio(struct buf *bp) |
| 708 | { |
| 709 | bp->b_bio1.bio_buf = bp; |
| 710 | bp->b_bio1.bio_prev = NULL; |
| 711 | bp->b_bio1.bio_offset = NOOFFSET; |
| 712 | bp->b_bio1.bio_next = &bp->b_bio2; |
| 713 | bp->b_bio1.bio_done = NULL; |
| 714 | bp->b_bio1.bio_flags = 0; |
| 715 | |
| 716 | bp->b_bio2.bio_buf = bp; |
| 717 | bp->b_bio2.bio_prev = &bp->b_bio1; |
| 718 | bp->b_bio2.bio_offset = NOOFFSET; |
| 719 | bp->b_bio2.bio_next = NULL; |
| 720 | bp->b_bio2.bio_done = NULL; |
| 721 | bp->b_bio2.bio_flags = 0; |
| 722 | |
| 723 | BUF_LOCKINIT(bp); |
| 724 | } |
| 725 | |
| 726 | /* |
| 727 | * Reinitialize the embedded bio structures as well as any additional |
| 728 | * translation cache layers. |
| 729 | */ |
| 730 | void |
| 731 | reinitbufbio(struct buf *bp) |
| 732 | { |
| 733 | struct bio *bio; |
| 734 | |
| 735 | for (bio = &bp->b_bio1; bio; bio = bio->bio_next) { |
| 736 | bio->bio_done = NULL; |
| 737 | bio->bio_offset = NOOFFSET; |
| 738 | } |
| 739 | } |
| 740 | |
| 741 | /* |
| 742 | * Undo the effects of an initbufbio(). |
| 743 | */ |
| 744 | void |
| 745 | uninitbufbio(struct buf *bp) |
| 746 | { |
| 747 | dsched_exit_buf(bp); |
| 748 | BUF_LOCKFREE(bp); |
| 749 | } |
| 750 | |
| 751 | /* |
| 752 | * Push another BIO layer onto an existing BIO and return it. The new |
| 753 | * BIO layer may already exist, holding cached translation data. |
| 754 | */ |
| 755 | struct bio * |
| 756 | push_bio(struct bio *bio) |
| 757 | { |
| 758 | struct bio *nbio; |
| 759 | |
| 760 | if ((nbio = bio->bio_next) == NULL) { |
| 761 | int index = bio - &bio->bio_buf->b_bio_array[0]; |
| 762 | if (index >= NBUF_BIO - 1) { |
| 763 | panic("push_bio: too many layers bp %p\n", |
| 764 | bio->bio_buf); |
| 765 | } |
| 766 | nbio = &bio->bio_buf->b_bio_array[index + 1]; |
| 767 | bio->bio_next = nbio; |
| 768 | nbio->bio_prev = bio; |
| 769 | nbio->bio_buf = bio->bio_buf; |
| 770 | nbio->bio_offset = NOOFFSET; |
| 771 | nbio->bio_done = NULL; |
| 772 | nbio->bio_next = NULL; |
| 773 | } |
| 774 | KKASSERT(nbio->bio_done == NULL); |
| 775 | return(nbio); |
| 776 | } |
| 777 | |
| 778 | /* |
 * Pop a BIO translation layer, returning the previous layer.  The BIO
 * layer must have been previously pushed.
| 781 | */ |
| 782 | struct bio * |
| 783 | pop_bio(struct bio *bio) |
| 784 | { |
| 785 | return(bio->bio_prev); |
| 786 | } |
| 787 | |
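/*
 * Clear the cached translation offsets on all BIO layers at and below
 * the given layer, forcing them to be re-resolved on the next I/O.
 */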
| 788 | void |
| 789 | clearbiocache(struct bio *bio) |
| 790 | { |
| 791 | while (bio) { |
| 792 | bio->bio_offset = NOOFFSET; |
| 793 | bio = bio->bio_next; |
| 794 | } |
| 795 | } |
| 796 | |
| 797 | /* |
| 798 | * bfreekva: |
| 799 | * |
| 800 | * Free the KVA allocation for buffer 'bp'. |
| 801 | * |
| 802 | * Must be called from a critical section as this is the only locking for |
| 803 | * buffer_map. |
| 804 | * |
| 805 | * Since this call frees up buffer space, we call bufspacewakeup(). |
| 806 | * |
| 807 | * MPALMOSTSAFE |
| 808 | */ |
| 809 | static void |
| 810 | bfreekva(struct buf *bp) |
| 811 | { |
| 812 | int count; |
| 813 | |
| 814 | if (bp->b_kvasize) { |
| 815 | ++buffreekvacnt; |
| 816 | count = vm_map_entry_reserve(MAP_RESERVE_COUNT); |
| 817 | vm_map_lock(&buffer_map); |
| 818 | bufspace -= bp->b_kvasize; |
| 819 | vm_map_delete(&buffer_map, |
| 820 | (vm_offset_t) bp->b_kvabase, |
| 821 | (vm_offset_t) bp->b_kvabase + bp->b_kvasize, |
| 822 | &count |
| 823 | ); |
| 824 | vm_map_unlock(&buffer_map); |
| 825 | vm_map_entry_release(count); |
| 826 | bp->b_kvasize = 0; |
| 827 | bp->b_kvabase = NULL; |
| 828 | bufspacewakeup(); |
| 829 | } |
| 830 | } |
| 831 | |
| 832 | /* |
| 833 | * bremfree: |
| 834 | * |
| 835 | * Remove the buffer from the appropriate free list. |
| 836 | */ |
| 837 | static __inline void |
| 838 | _bremfree(struct buf *bp) |
| 839 | { |
| 840 | if (bp->b_qindex != BQUEUE_NONE) { |
| 841 | KASSERT(BUF_REFCNTNB(bp) == 1, |
| 842 | ("bremfree: bp %p not locked",bp)); |
| 843 | TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist); |
| 844 | bp->b_qindex = BQUEUE_NONE; |
| 845 | } else { |
| 846 | if (BUF_REFCNTNB(bp) <= 1) |
| 847 | panic("bremfree: removing a buffer not on a queue"); |
| 848 | } |
| 849 | } |
| 850 | |
| 851 | void |
| 852 | bremfree(struct buf *bp) |
| 853 | { |
| 854 | spin_lock(&bufqspin); |
| 855 | _bremfree(bp); |
| 856 | spin_unlock(&bufqspin); |
| 857 | } |
| 858 | |
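/*
 * Version of bremfree() for callers already holding bufqspin.
 */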
| 859 | static void |
| 860 | bremfree_locked(struct buf *bp) |
| 861 | { |
| 862 | _bremfree(bp); |
| 863 | } |
| 864 | |
| 865 | /* |
 * This version of bread issues any required I/O asynchronously and
| 867 | * makes a callback on completion. |
| 868 | * |
| 869 | * The callback must check whether BIO_DONE is set in the bio and issue |
| 870 | * the bpdone(bp, 0) if it isn't. The callback is responsible for clearing |
| 871 | * BIO_DONE and disposing of the I/O (bqrelse()ing it). |
| 872 | */ |
| 873 | void |
| 874 | breadcb(struct vnode *vp, off_t loffset, int size, |
| 875 | void (*func)(struct bio *), void *arg) |
| 876 | { |
| 877 | struct buf *bp; |
| 878 | |
| 879 | bp = getblk(vp, loffset, size, 0, 0); |
| 880 | |
| 881 | /* if not found in cache, do some I/O */ |
| 882 | if ((bp->b_flags & B_CACHE) == 0) { |
| 883 | bp->b_flags &= ~(B_ERROR | B_EINTR | B_INVAL); |
| 884 | bp->b_cmd = BUF_CMD_READ; |
| 885 | bp->b_bio1.bio_done = func; |
| 886 | bp->b_bio1.bio_caller_info1.ptr = arg; |
| 887 | vfs_busy_pages(vp, bp); |
| 888 | BUF_KERNPROC(bp); |
| 889 | vn_strategy(vp, &bp->b_bio1); |
| 890 | } else if (func) { |
| 891 | /* |
| 892 | * Since we are issuing the callback synchronously it cannot |
| 893 | * race the BIO_DONE, so no need for atomic ops here. |
| 894 | */ |
| 895 | /*bp->b_bio1.bio_done = func;*/ |
| 896 | bp->b_bio1.bio_caller_info1.ptr = arg; |
| 897 | bp->b_bio1.bio_flags |= BIO_DONE; |
| 898 | func(&bp->b_bio1); |
| 899 | } else { |
| 900 | bqrelse(bp); |
| 901 | } |
| 902 | } |
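
/*
 * Illustrative sketch only (not part of the kernel): per the contract
 * described above breadcb(), a completion callback might look roughly
 * like the following.  The names example_read_done and example_consume
 * are hypothetical.
 *
 *	static void
 *	example_read_done(struct bio *bio)
 *	{
 *		struct buf *bp = bio->bio_buf;
 *
 *		if ((bio->bio_flags & BIO_DONE) == 0)
 *			bpdone(bp, 0);
 *		bio->bio_flags &= ~BIO_DONE;
 *		if ((bp->b_flags & B_ERROR) == 0)
 *			example_consume(bp->b_data, bp->b_bcount);
 *		bqrelse(bp);
 *	}
 */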
| 903 | |
| 904 | /* |
| 905 | * breadnx() - Terminal function for bread() and breadn(). |
| 906 | * |
| 907 | * This function will start asynchronous I/O on read-ahead blocks as well |
| 908 | * as satisfy the primary request. |
| 909 | * |
| 910 | * We must clear B_ERROR and B_INVAL prior to initiating I/O. If B_CACHE is |
| 911 | * set, the buffer is valid and we do not have to do anything. |
| 912 | */ |
| 913 | int |
| 914 | breadnx(struct vnode *vp, off_t loffset, int size, off_t *raoffset, |
| 915 | int *rabsize, int cnt, struct buf **bpp) |
| 916 | { |
| 917 | struct buf *bp, *rabp; |
| 918 | int i; |
| 919 | int rv = 0, readwait = 0; |
| 920 | |
| 921 | if (*bpp) |
| 922 | bp = *bpp; |
| 923 | else |
| 924 | *bpp = bp = getblk(vp, loffset, size, 0, 0); |
| 925 | |
| 926 | /* if not found in cache, do some I/O */ |
| 927 | if ((bp->b_flags & B_CACHE) == 0) { |
| 928 | bp->b_flags &= ~(B_ERROR | B_EINTR | B_INVAL); |
| 929 | bp->b_cmd = BUF_CMD_READ; |
| 930 | bp->b_bio1.bio_done = biodone_sync; |
| 931 | bp->b_bio1.bio_flags |= BIO_SYNC; |
| 932 | vfs_busy_pages(vp, bp); |
| 933 | vn_strategy(vp, &bp->b_bio1); |
| 934 | ++readwait; |
| 935 | } |
| 936 | |
| 937 | for (i = 0; i < cnt; i++, raoffset++, rabsize++) { |
| 938 | if (inmem(vp, *raoffset)) |
| 939 | continue; |
| 940 | rabp = getblk(vp, *raoffset, *rabsize, 0, 0); |
| 941 | |
| 942 | if ((rabp->b_flags & B_CACHE) == 0) { |
| 943 | rabp->b_flags &= ~(B_ERROR | B_EINTR | B_INVAL); |
| 944 | rabp->b_cmd = BUF_CMD_READ; |
| 945 | vfs_busy_pages(vp, rabp); |
| 946 | BUF_KERNPROC(rabp); |
| 947 | vn_strategy(vp, &rabp->b_bio1); |
| 948 | } else { |
| 949 | brelse(rabp); |
| 950 | } |
| 951 | } |
| 952 | if (readwait) |
| 953 | rv = biowait(&bp->b_bio1, "biord"); |
| 954 | return (rv); |
| 955 | } |
| 956 | |
| 957 | /* |
| 958 | * bwrite: |
| 959 | * |
| 960 | * Synchronous write, waits for completion. |
| 961 | * |
| 962 | * Write, release buffer on completion. (Done by iodone |
| 963 | * if async). Do not bother writing anything if the buffer |
| 964 | * is invalid. |
| 965 | * |
| 966 | * Note that we set B_CACHE here, indicating that buffer is |
| 967 | * fully valid and thus cacheable. This is true even of NFS |
| 968 | * now so we set it generally. This could be set either here |
| 969 | * or in biodone() since the I/O is synchronous. We put it |
| 970 | * here. |
| 971 | */ |
| 972 | int |
| 973 | bwrite(struct buf *bp) |
| 974 | { |
| 975 | int error; |
| 976 | |
| 977 | if (bp->b_flags & B_INVAL) { |
| 978 | brelse(bp); |
| 979 | return (0); |
| 980 | } |
| 981 | if (BUF_REFCNTNB(bp) == 0) |
| 982 | panic("bwrite: buffer is not busy???"); |
| 983 | |
| 984 | /* Mark the buffer clean */ |
| 985 | bundirty(bp); |
| 986 | |
| 987 | bp->b_flags &= ~(B_ERROR | B_EINTR); |
| 988 | bp->b_flags |= B_CACHE; |
| 989 | bp->b_cmd = BUF_CMD_WRITE; |
| 990 | bp->b_bio1.bio_done = biodone_sync; |
| 991 | bp->b_bio1.bio_flags |= BIO_SYNC; |
| 992 | vfs_busy_pages(bp->b_vp, bp); |
| 993 | |
| 994 | /* |
| 995 | * Normal bwrites pipeline writes. NOTE: b_bufsize is only |
| 996 | * valid for vnode-backed buffers. |
| 997 | */ |
| 998 | bsetrunningbufspace(bp, bp->b_bufsize); |
| 999 | vn_strategy(bp->b_vp, &bp->b_bio1); |
| 1000 | error = biowait(&bp->b_bio1, "biows"); |
| 1001 | brelse(bp); |
| 1002 | |
| 1003 | return (error); |
| 1004 | } |
| 1005 | |
| 1006 | /* |
| 1007 | * bawrite: |
| 1008 | * |
| 1009 | * Asynchronous write. Start output on a buffer, but do not wait for |
| 1010 | * it to complete. The buffer is released when the output completes. |
| 1011 | * |
| 1012 | * bwrite() ( or the VOP routine anyway ) is responsible for handling |
| 1013 | * B_INVAL buffers. Not us. |
| 1014 | */ |
| 1015 | void |
| 1016 | bawrite(struct buf *bp) |
| 1017 | { |
| 1018 | if (bp->b_flags & B_INVAL) { |
| 1019 | brelse(bp); |
| 1020 | return; |
| 1021 | } |
| 1022 | if (BUF_REFCNTNB(bp) == 0) |
		panic("bawrite: buffer is not busy???");
| 1024 | |
| 1025 | /* Mark the buffer clean */ |
| 1026 | bundirty(bp); |
| 1027 | |
| 1028 | bp->b_flags &= ~(B_ERROR | B_EINTR); |
| 1029 | bp->b_flags |= B_CACHE; |
| 1030 | bp->b_cmd = BUF_CMD_WRITE; |
| 1031 | KKASSERT(bp->b_bio1.bio_done == NULL); |
| 1032 | vfs_busy_pages(bp->b_vp, bp); |
| 1033 | |
| 1034 | /* |
| 1035 | * Normal bwrites pipeline writes. NOTE: b_bufsize is only |
| 1036 | * valid for vnode-backed buffers. |
| 1037 | */ |
| 1038 | bsetrunningbufspace(bp, bp->b_bufsize); |
| 1039 | BUF_KERNPROC(bp); |
| 1040 | vn_strategy(bp->b_vp, &bp->b_bio1); |
| 1041 | } |
| 1042 | |
| 1043 | /* |
| 1044 | * bowrite: |
| 1045 | * |
| 1046 | * Ordered write. Start output on a buffer, and flag it so that the |
| 1047 | * device will write it in the order it was queued. The buffer is |
| 1048 | * released when the output completes. bwrite() ( or the VOP routine |
| 1049 | * anyway ) is responsible for handling B_INVAL buffers. |
| 1050 | */ |
| 1051 | int |
| 1052 | bowrite(struct buf *bp) |
| 1053 | { |
| 1054 | bp->b_flags |= B_ORDERED; |
| 1055 | bawrite(bp); |
| 1056 | return (0); |
| 1057 | } |
| 1058 | |
| 1059 | /* |
| 1060 | * bdwrite: |
| 1061 | * |
| 1062 | * Delayed write. (Buffer is marked dirty). Do not bother writing |
| 1063 | * anything if the buffer is marked invalid. |
| 1064 | * |
| 1065 | * Note that since the buffer must be completely valid, we can safely |
 * set B_CACHE.  In fact, we have to set B_CACHE here rather than in
| 1067 | * biodone() in order to prevent getblk from writing the buffer |
| 1068 | * out synchronously. |
| 1069 | */ |
| 1070 | void |
| 1071 | bdwrite(struct buf *bp) |
| 1072 | { |
| 1073 | if (BUF_REFCNTNB(bp) == 0) |
| 1074 | panic("bdwrite: buffer is not busy"); |
| 1075 | |
| 1076 | if (bp->b_flags & B_INVAL) { |
| 1077 | brelse(bp); |
| 1078 | return; |
| 1079 | } |
| 1080 | bdirty(bp); |
| 1081 | |
| 1082 | if (dsched_is_clear_buf_priv(bp)) |
| 1083 | dsched_new_buf(bp); |
| 1084 | |
| 1085 | /* |
| 1086 | * Set B_CACHE, indicating that the buffer is fully valid. This is |
| 1087 | * true even of NFS now. |
| 1088 | */ |
| 1089 | bp->b_flags |= B_CACHE; |
| 1090 | |
| 1091 | /* |
| 1092 | * This bmap keeps the system from needing to do the bmap later, |
 * perhaps when the system is attempting to do a sync.  Since the
 * indirect block (or whatever other data structure the filesystem
 * needs) is likely still in memory now, this is a good time to do
 * it.  Note also that if the pageout daemon is requesting a sync,
 * there might not be enough memory to do the bmap then, so doing
 * it now is important.
| 1099 | */ |
| 1100 | if (bp->b_bio2.bio_offset == NOOFFSET) { |
| 1101 | VOP_BMAP(bp->b_vp, bp->b_loffset, &bp->b_bio2.bio_offset, |
| 1102 | NULL, NULL, BUF_CMD_WRITE); |
| 1103 | } |
| 1104 | |
| 1105 | /* |
| 1106 | * Because the underlying pages may still be mapped and |
 * writable, trying to set the dirty buffer (b_dirtyoff/end)
| 1108 | * range here will be inaccurate. |
| 1109 | * |
| 1110 | * However, we must still clean the pages to satisfy the |
 * vnode_pager and pageout daemon, so they think the pages
 * have been "cleaned".  What has really occurred is that
| 1113 | * they've been earmarked for later writing by the buffer |
| 1114 | * cache. |
| 1115 | * |
| 1116 | * So we get the b_dirtyoff/end update but will not actually |
| 1117 | * depend on it (NFS that is) until the pages are busied for |
| 1118 | * writing later on. |
| 1119 | */ |
| 1120 | vfs_clean_pages(bp); |
| 1121 | bqrelse(bp); |
| 1122 | |
| 1123 | /* |
| 1124 | * note: we cannot initiate I/O from a bdwrite even if we wanted to, |
| 1125 | * due to the softdep code. |
| 1126 | */ |
| 1127 | } |
| 1128 | |
| 1129 | /* |
| 1130 | * Fake write - return pages to VM system as dirty, leave the buffer clean. |
| 1131 | * This is used by tmpfs. |
| 1132 | * |
| 1133 | * It is important for any VFS using this routine to NOT use it for |
| 1134 | * IO_SYNC or IO_ASYNC operations which occur when the system really |
| 1135 | * wants to flush VM pages to backing store. |
| 1136 | */ |
| 1137 | void |
| 1138 | buwrite(struct buf *bp) |
| 1139 | { |
| 1140 | vm_page_t m; |
| 1141 | int i; |
| 1142 | |
| 1143 | /* |
| 1144 | * Only works for VMIO buffers. If the buffer is already |
| 1145 | * marked for delayed-write we can't avoid the bdwrite(). |
| 1146 | */ |
| 1147 | if ((bp->b_flags & B_VMIO) == 0 || (bp->b_flags & B_DELWRI)) { |
| 1148 | bdwrite(bp); |
| 1149 | return; |
| 1150 | } |
| 1151 | |
| 1152 | /* |
| 1153 | * Mark as needing a commit. |
| 1154 | */ |
| 1155 | for (i = 0; i < bp->b_xio.xio_npages; i++) { |
| 1156 | m = bp->b_xio.xio_pages[i]; |
| 1157 | vm_page_need_commit(m); |
| 1158 | } |
| 1159 | bqrelse(bp); |
| 1160 | } |
| 1161 | |
| 1162 | /* |
| 1163 | * bdirty: |
| 1164 | * |
| 1165 | * Turn buffer into delayed write request by marking it B_DELWRI. |
| 1166 | * B_RELBUF and B_NOCACHE must be cleared. |
| 1167 | * |
| 1168 | * We reassign the buffer to itself to properly update it in the |
| 1169 | * dirty/clean lists. |
| 1170 | * |
| 1171 | * Must be called from a critical section. |
| 1172 | * The buffer must be on BQUEUE_NONE. |
| 1173 | */ |
| 1174 | void |
| 1175 | bdirty(struct buf *bp) |
| 1176 | { |
| 1177 | KASSERT(bp->b_qindex == BQUEUE_NONE, |
| 1178 | ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex)); |
| 1179 | if (bp->b_flags & B_NOCACHE) { |
| 1180 | kprintf("bdirty: clearing B_NOCACHE on buf %p\n", bp); |
| 1181 | bp->b_flags &= ~B_NOCACHE; |
| 1182 | } |
| 1183 | if (bp->b_flags & B_INVAL) { |
| 1184 | kprintf("bdirty: warning, dirtying invalid buffer %p\n", bp); |
| 1185 | } |
| 1186 | bp->b_flags &= ~B_RELBUF; |
| 1187 | |
| 1188 | if ((bp->b_flags & B_DELWRI) == 0) { |
| 1189 | lwkt_gettoken(&bp->b_vp->v_token); |
| 1190 | bp->b_flags |= B_DELWRI; |
| 1191 | reassignbuf(bp); |
| 1192 | lwkt_reltoken(&bp->b_vp->v_token); |
| 1193 | |
| 1194 | spin_lock(&bufcspin); |
| 1195 | ++dirtybufcount; |
| 1196 | dirtybufspace += bp->b_bufsize; |
| 1197 | if (bp->b_flags & B_HEAVY) { |
| 1198 | ++dirtybufcounthw; |
| 1199 | dirtybufspacehw += bp->b_bufsize; |
| 1200 | } |
| 1201 | spin_unlock(&bufcspin); |
| 1202 | |
| 1203 | bd_heatup(); |
| 1204 | } |
| 1205 | } |
| 1206 | |
| 1207 | /* |
| 1208 | * Set B_HEAVY, indicating that this is a heavy-weight buffer that |
| 1209 | * needs to be flushed with a different buf_daemon thread to avoid |
| 1210 | * deadlocks. B_HEAVY also imposes restrictions in getnewbuf(). |
| 1211 | */ |
| 1212 | void |
| 1213 | bheavy(struct buf *bp) |
| 1214 | { |
| 1215 | if ((bp->b_flags & B_HEAVY) == 0) { |
| 1216 | bp->b_flags |= B_HEAVY; |
| 1217 | if (bp->b_flags & B_DELWRI) { |
| 1218 | spin_lock(&bufcspin); |
| 1219 | ++dirtybufcounthw; |
| 1220 | dirtybufspacehw += bp->b_bufsize; |
| 1221 | spin_unlock(&bufcspin); |
| 1222 | } |
| 1223 | } |
| 1224 | } |
| 1225 | |
| 1226 | /* |
| 1227 | * bundirty: |
| 1228 | * |
| 1229 | * Clear B_DELWRI for buffer. |
| 1230 | * |
| 1231 | * Must be called from a critical section. |
| 1232 | * |
| 1233 | * The buffer is typically on BQUEUE_NONE but there is one case in |
| 1234 | * brelse() that calls this function after placing the buffer on |
| 1235 | * a different queue. |
| 1236 | * |
| 1237 | * MPSAFE |
| 1238 | */ |
| 1239 | void |
| 1240 | bundirty(struct buf *bp) |
| 1241 | { |
| 1242 | if (bp->b_flags & B_DELWRI) { |
| 1243 | lwkt_gettoken(&bp->b_vp->v_token); |
| 1244 | bp->b_flags &= ~B_DELWRI; |
| 1245 | reassignbuf(bp); |
| 1246 | lwkt_reltoken(&bp->b_vp->v_token); |
| 1247 | |
| 1248 | spin_lock(&bufcspin); |
| 1249 | --dirtybufcount; |
| 1250 | dirtybufspace -= bp->b_bufsize; |
| 1251 | if (bp->b_flags & B_HEAVY) { |
| 1252 | --dirtybufcounthw; |
| 1253 | dirtybufspacehw -= bp->b_bufsize; |
| 1254 | } |
| 1255 | spin_unlock(&bufcspin); |
| 1256 | |
| 1257 | bd_signal(bp->b_bufsize); |
| 1258 | } |
| 1259 | /* |
| 1260 | * Since it is now being written, we can clear its deferred write flag. |
| 1261 | */ |
| 1262 | bp->b_flags &= ~B_DEFERRED; |
| 1263 | } |
| 1264 | |
| 1265 | /* |
| 1266 | * Set the b_runningbufspace field, used to track how much I/O is |
| 1267 | * in progress at any given moment. |
| 1268 | */ |
| 1269 | void |
| 1270 | bsetrunningbufspace(struct buf *bp, int bytes) |
| 1271 | { |
| 1272 | bp->b_runningbufspace = bytes; |
| 1273 | if (bytes) { |
| 1274 | spin_lock(&bufcspin); |
| 1275 | runningbufspace += bytes; |
| 1276 | ++runningbufcount; |
| 1277 | spin_unlock(&bufcspin); |
| 1278 | } |
| 1279 | } |
| 1280 | |
| 1281 | /* |
| 1282 | * brelse: |
| 1283 | * |
| 1284 | * Release a busy buffer and, if requested, free its resources. The |
| 1285 | * buffer will be stashed in the appropriate bufqueue[] allowing it |
| 1286 | * to be accessed later as a cache entity or reused for other purposes. |
| 1287 | * |
| 1288 | * MPALMOSTSAFE |
| 1289 | */ |
| 1290 | void |
| 1291 | brelse(struct buf *bp) |
| 1292 | { |
| 1293 | #ifdef INVARIANTS |
| 1294 | int saved_flags = bp->b_flags; |
| 1295 | #endif |
| 1296 | |
| 1297 | KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp)); |
| 1298 | |
| 1299 | /* |
| 1300 | * If B_NOCACHE is set we are being asked to destroy the buffer and |
| 1301 | * its backing store. Clear B_DELWRI. |
| 1302 | * |
| 1303 | * B_NOCACHE is set in two cases: (1) when the caller really wants |
| 1304 | * to destroy the buffer and backing store and (2) when the caller |
| 1305 | * wants to destroy the buffer and backing store after a write |
| 1306 | * completes. |
| 1307 | */ |
| 1308 | if ((bp->b_flags & (B_NOCACHE|B_DELWRI)) == (B_NOCACHE|B_DELWRI)) { |
| 1309 | bundirty(bp); |
| 1310 | } |
| 1311 | |
| 1312 | if ((bp->b_flags & (B_INVAL | B_DELWRI)) == B_DELWRI) { |
| 1313 | /* |
| 1314 | * A re-dirtied buffer is only subject to destruction |
| 1315 | * by B_INVAL. B_ERROR and B_NOCACHE are ignored. |
| 1316 | */ |
| 1317 | /* leave buffer intact */ |
| 1318 | } else if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) || |
| 1319 | (bp->b_bufsize <= 0)) { |
| 1320 | /* |
| 1321 | * Either a failed read or we were asked to free or not |
| 1322 | * cache the buffer. This path is reached with B_DELWRI |
| 1323 | * set only if B_INVAL is already set. B_NOCACHE governs |
| 1324 | * backing store destruction. |
| 1325 | * |
| 1326 | * NOTE: HAMMER will set B_LOCKED in buf_deallocate if the |
| 1327 | * buffer cannot be immediately freed. |
| 1328 | */ |
| 1329 | bp->b_flags |= B_INVAL; |
| 1330 | if (LIST_FIRST(&bp->b_dep) != NULL) |
| 1331 | buf_deallocate(bp); |
| 1332 | if (bp->b_flags & B_DELWRI) { |
| 1333 | spin_lock(&bufcspin); |
| 1334 | --dirtybufcount; |
| 1335 | dirtybufspace -= bp->b_bufsize; |
| 1336 | if (bp->b_flags & B_HEAVY) { |
| 1337 | --dirtybufcounthw; |
| 1338 | dirtybufspacehw -= bp->b_bufsize; |
| 1339 | } |
| 1340 | spin_unlock(&bufcspin); |
| 1341 | |
| 1342 | bd_signal(bp->b_bufsize); |
| 1343 | } |
| 1344 | bp->b_flags &= ~(B_DELWRI | B_CACHE); |
| 1345 | } |
| 1346 | |
| 1347 | /* |
| 1348 | * We must clear B_RELBUF if B_DELWRI or B_LOCKED is set, |
| 1349 | * or if b_refs is non-zero. |
| 1350 | * |
| 1351 | * If vfs_vmio_release() is called with either bit set, the |
| 1352 | * underlying pages may wind up getting freed causing a previous |
| 1353 | * write (bdwrite()) to get 'lost' because pages associated with |
| 1354 | * a B_DELWRI bp are marked clean. Pages associated with a |
| 1355 | * B_LOCKED buffer may be mapped by the filesystem. |
| 1356 | * |
| 1357 | * If we want to release the buffer ourselves (rather then the |
| 1358 | * originator asking us to release it), give the originator a |
| 1359 | * chance to countermand the release by setting B_LOCKED. |
| 1360 | * |
| 1361 | * We still allow the B_INVAL case to call vfs_vmio_release(), even |
| 1362 | * if B_DELWRI is set. |
| 1363 | * |
| 1364 | * If B_DELWRI is not set we may have to set B_RELBUF if we are low |
| 1365 | * on pages to return pages to the VM page queues. |
| 1366 | */ |
| 1367 | if ((bp->b_flags & (B_DELWRI | B_LOCKED)) || bp->b_refs) { |
| 1368 | bp->b_flags &= ~B_RELBUF; |
| 1369 | } else if (vm_page_count_min(0)) { |
| 1370 | if (LIST_FIRST(&bp->b_dep) != NULL) |
| 1371 | buf_deallocate(bp); /* can set B_LOCKED */ |
| 1372 | if (bp->b_flags & (B_DELWRI | B_LOCKED)) |
| 1373 | bp->b_flags &= ~B_RELBUF; |
| 1374 | else |
| 1375 | bp->b_flags |= B_RELBUF; |
| 1376 | } |
| 1377 | |
| 1378 | /* |
| 1379 | * Make sure b_cmd is clear. It may have already been cleared by |
| 1380 | * biodone(). |
| 1381 | * |
| 1382 | * At this point destroying the buffer is governed by the B_INVAL |
| 1383 | * or B_RELBUF flags. |
| 1384 | */ |
| 1385 | bp->b_cmd = BUF_CMD_DONE; |
| 1386 | dsched_exit_buf(bp); |
| 1387 | |
| 1388 | /* |
| 1389 | * VMIO buffer rundown. Make sure the VM page array is restored |
	 * after an I/O may have replaced some of the pages with bogus pages
| 1391 | * in order to not destroy dirty pages in a fill-in read. |
| 1392 | * |
| 1393 | * Note that due to the code above, if a buffer is marked B_DELWRI |
| 1394 | * then the B_RELBUF and B_NOCACHE bits will always be clear. |
| 1395 | * B_INVAL may still be set, however. |
| 1396 | * |
| 1397 | * For clean buffers, B_INVAL or B_RELBUF will destroy the buffer |
| 1398 | * but not the backing store. B_NOCACHE will destroy the backing |
| 1399 | * store. |
| 1400 | * |
| 1401 | * Note that dirty NFS buffers contain byte-granular write ranges |
| 1402 | * and should not be destroyed w/ B_INVAL even if the backing store |
| 1403 | * is left intact. |
| 1404 | */ |
| 1405 | if (bp->b_flags & B_VMIO) { |
| 1406 | /* |
| 1407 | * Rundown for VMIO buffers which are not dirty NFS buffers. |
| 1408 | */ |
| 1409 | int i, j, resid; |
| 1410 | vm_page_t m; |
| 1411 | off_t foff; |
| 1412 | vm_pindex_t poff; |
| 1413 | vm_object_t obj; |
| 1414 | struct vnode *vp; |
| 1415 | |
| 1416 | vp = bp->b_vp; |
| 1417 | |
| 1418 | /* |
| 1419 | * Get the base offset and length of the buffer. Note that |
| 1420 | * in the VMIO case if the buffer block size is not |
		 * page-aligned then the b_data pointer may not be page-aligned.
| 1422 | * But our b_xio.xio_pages array *IS* page aligned. |
| 1423 | * |
		 * block sizes less than DEV_BSIZE (usually 512) are not
| 1425 | * supported due to the page granularity bits (m->valid, |
| 1426 | * m->dirty, etc...). |
| 1427 | * |
| 1428 | * See man buf(9) for more information |
| 1429 | */ |
| 1430 | |
| 1431 | resid = bp->b_bufsize; |
| 1432 | foff = bp->b_loffset; |
| 1433 | |
| 1434 | for (i = 0; i < bp->b_xio.xio_npages; i++) { |
| 1435 | m = bp->b_xio.xio_pages[i]; |
| 1436 | vm_page_flag_clear(m, PG_ZERO); |
| 1437 | /* |
| 1438 | * If we hit a bogus page, fixup *all* of them |
| 1439 | * now. Note that we left these pages wired |
| 1440 | * when we removed them so they had better exist, |
| 1441 | * and they cannot be ripped out from under us so |
| 1442 | * no critical section protection is necessary. |
| 1443 | */ |
| 1444 | if (m == bogus_page) { |
| 1445 | obj = vp->v_object; |
| 1446 | poff = OFF_TO_IDX(bp->b_loffset); |
| 1447 | |
| 1448 | vm_object_hold(obj); |
| 1449 | for (j = i; j < bp->b_xio.xio_npages; j++) { |
| 1450 | vm_page_t mtmp; |
| 1451 | |
| 1452 | mtmp = bp->b_xio.xio_pages[j]; |
| 1453 | if (mtmp == bogus_page) { |
| 1454 | mtmp = vm_page_lookup(obj, poff + j); |
| 1455 | if (!mtmp) { |
| 1456 | panic("brelse: page missing"); |
| 1457 | } |
| 1458 | bp->b_xio.xio_pages[j] = mtmp; |
| 1459 | } |
| 1460 | } |
| 1461 | bp->b_flags &= ~B_HASBOGUS; |
| 1462 | vm_object_drop(obj); |
| 1463 | |
| 1464 | if ((bp->b_flags & B_INVAL) == 0) { |
| 1465 | pmap_qenter(trunc_page((vm_offset_t)bp->b_data), |
| 1466 | bp->b_xio.xio_pages, bp->b_xio.xio_npages); |
| 1467 | } |
| 1468 | m = bp->b_xio.xio_pages[i]; |
| 1469 | } |
| 1470 | |
| 1471 | /* |
| 1472 | * Invalidate the backing store if B_NOCACHE is set |
| 1473 | * (e.g. used with vinvalbuf()). If this is NFS |
| 1474 | * we impose a requirement that the block size be |
| 1475 | * a multiple of PAGE_SIZE and create a temporary |
| 1476 | * hack to basically invalidate the whole page. The |
| 1477 | * problem is that NFS uses really odd buffer sizes |
| 1478 | * especially when tracking piecemeal writes and |
| 1479 | * it also vinvalbuf()'s a lot, which would result |
| 1480 | * in only partial page validation and invalidation |
| 1481 | * here. If the file page is mmap()'d, however, |
| 1482 | * all the valid bits get set so after we invalidate |
| 1483 | * here we would end up with weird m->valid values |
| 1484 | * like 0xfc. nfs_getpages() can't handle this so |
| 1485 | * we clear all the valid bits for the NFS case |
| 1486 | * instead of just some of them. |
| 1487 | * |
| 1488 | * The real bug is the VM system having to set m->valid |
| 1489 | * to VM_PAGE_BITS_ALL for faulted-in pages, which |
| 1490 | * itself is an artifact of the whole 512-byte |
| 1491 | * granular mess that exists to support odd block |
| 1492 | * sizes and UFS meta-data block sizes (e.g. 6144). |
| 1493 | * A complete rewrite is required. |
| 1494 | * |
| 1495 | * XXX |
| 1496 | */ |
| 1497 | if (bp->b_flags & (B_NOCACHE|B_ERROR)) { |
| 1498 | int poffset = foff & PAGE_MASK; |
| 1499 | int presid; |
| 1500 | |
| 1501 | presid = PAGE_SIZE - poffset; |
| 1502 | if (bp->b_vp->v_tag == VT_NFS && |
| 1503 | bp->b_vp->v_type == VREG) { |
| 1504 | ; /* entire page */ |
| 1505 | } else if (presid > resid) { |
| 1506 | presid = resid; |
| 1507 | } |
| 1508 | KASSERT(presid >= 0, ("brelse: extra page")); |
| 1509 | vm_page_set_invalid(m, poffset, presid); |
| 1510 | |
| 1511 | /* |
| 1512 | * Also make sure any swap cache is removed |
| 1513 | * as it is now stale (HAMMER in particular |
| 1514 | * uses B_NOCACHE to deal with buffer |
| 1515 | * aliasing). |
| 1516 | */ |
| 1517 | swap_pager_unswapped(m); |
| 1518 | } |
| 1519 | resid -= PAGE_SIZE - (foff & PAGE_MASK); |
| 1520 | foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK; |
| 1521 | } |
| 1522 | if (bp->b_flags & (B_INVAL | B_RELBUF)) |
| 1523 | vfs_vmio_release(bp); |
| 1524 | } else { |
| 1525 | /* |
| 1526 | * Rundown for non-VMIO buffers. |
| 1527 | */ |
| 1528 | if (bp->b_flags & (B_INVAL | B_RELBUF)) { |
| 1529 | if (bp->b_bufsize) |
| 1530 | allocbuf(bp, 0); |
| 1531 | KKASSERT (LIST_FIRST(&bp->b_dep) == NULL); |
| 1532 | if (bp->b_vp) |
| 1533 | brelvp(bp); |
| 1534 | } |
| 1535 | } |
| 1536 | |
| 1537 | if (bp->b_qindex != BQUEUE_NONE) |
| 1538 | panic("brelse: free buffer onto another queue???"); |
| 1539 | if (BUF_REFCNTNB(bp) > 1) { |
| 1540 | /* Temporary panic to verify exclusive locking */ |
| 1541 | /* This panic goes away when we allow shared refs */ |
| 1542 | panic("brelse: multiple refs"); |
| 1543 | /* NOT REACHED */ |
| 1544 | return; |
| 1545 | } |
| 1546 | |
| 1547 | /* |
| 1548 | * Figure out the correct queue to place the cleaned up buffer on. |
	 * Buffers placed on the EMPTY or EMPTYKVA queues had better
	 * already be disassociated from their vnode.
| 1551 | */ |
| 1552 | spin_lock(&bufqspin); |
| 1553 | if (bp->b_flags & B_LOCKED) { |
| 1554 | /* |
| 1555 | * Buffers that are locked are placed in the locked queue |
| 1556 | * immediately, regardless of their state. |
| 1557 | */ |
| 1558 | bp->b_qindex = BQUEUE_LOCKED; |
| 1559 | TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_LOCKED], bp, b_freelist); |
| 1560 | } else if (bp->b_bufsize == 0) { |
| 1561 | /* |
| 1562 | * Buffers with no memory. Due to conditionals near the top |
| 1563 | * of brelse() such buffers should probably already be |
| 1564 | * marked B_INVAL and disassociated from their vnode. |
| 1565 | */ |
| 1566 | bp->b_flags |= B_INVAL; |
		KASSERT(bp->b_vp == NULL, ("bp1 %p flags %08x/%08x vnode %p unexpectedly still associated!", bp, saved_flags, bp->b_flags, bp->b_vp));
| 1568 | KKASSERT((bp->b_flags & B_HASHED) == 0); |
| 1569 | if (bp->b_kvasize) { |
| 1570 | bp->b_qindex = BQUEUE_EMPTYKVA; |
| 1571 | } else { |
| 1572 | bp->b_qindex = BQUEUE_EMPTY; |
| 1573 | } |
| 1574 | TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist); |
| 1575 | } else if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF)) { |
| 1576 | /* |
| 1577 | * Buffers with junk contents. Again these buffers had better |
| 1578 | * already be disassociated from their vnode. |
| 1579 | */ |
| 1580 | KASSERT(bp->b_vp == NULL, ("bp2 %p flags %08x/%08x vnode %p unexpectedly still associated!", bp, saved_flags, bp->b_flags, bp->b_vp));
| 1581 | KKASSERT((bp->b_flags & B_HASHED) == 0); |
| 1582 | bp->b_flags |= B_INVAL; |
| 1583 | bp->b_qindex = BQUEUE_CLEAN; |
| 1584 | TAILQ_INSERT_HEAD(&bufqueues[BQUEUE_CLEAN], bp, b_freelist); |
| 1585 | } else { |
| 1586 | /* |
| 1587 | * Remaining buffers. These buffers are still associated with |
| 1588 | * their vnode. |
| 1589 | */ |
| 1590 | switch(bp->b_flags & (B_DELWRI|B_HEAVY)) { |
| 1591 | case B_DELWRI: |
| 1592 | bp->b_qindex = BQUEUE_DIRTY; |
| 1593 | TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_DIRTY], bp, b_freelist); |
| 1594 | break; |
| 1595 | case B_DELWRI | B_HEAVY: |
| 1596 | bp->b_qindex = BQUEUE_DIRTY_HW; |
| 1597 | TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_DIRTY_HW], bp, |
| 1598 | b_freelist); |
| 1599 | break; |
| 1600 | default: |
| 1601 | /* |
| 1602 | * NOTE: Buffers are always placed at the end of the |
| 1603 | * queue. If B_AGE is not set the buffer will cycle |
| 1604 | * through the queue twice. |
| 1605 | */ |
| 1606 | bp->b_qindex = BQUEUE_CLEAN; |
| 1607 | TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_CLEAN], bp, b_freelist); |
| 1608 | break; |
| 1609 | } |
| 1610 | } |
| 1611 | spin_unlock(&bufqspin); |
| 1612 | |
| 1613 | /* |
| 1614 | * If B_INVAL, clear B_DELWRI. We've already placed the buffer |
| 1615 | * on the correct queue. |
| 1616 | */ |
| 1617 | if ((bp->b_flags & (B_INVAL|B_DELWRI)) == (B_INVAL|B_DELWRI)) |
| 1618 | bundirty(bp); |
| 1619 | |
| 1620 | /* |
| 1621 | * The bp is on an appropriate queue unless locked. If it is not |
| 1622 | * locked or dirty we can wakeup threads waiting for buffer space. |
| 1623 | * |
| 1624 | * We've already handled the B_INVAL case ( B_DELWRI will be clear |
| 1625 | * if B_INVAL is set ). |
| 1626 | */ |
| 1627 | if ((bp->b_flags & (B_LOCKED|B_DELWRI)) == 0) |
| 1628 | bufcountwakeup(); |
| 1629 | |
| 1630 | /* |
| 1631 | * Something we can maybe free or reuse |
| 1632 | */ |
| 1633 | if (bp->b_bufsize || bp->b_kvasize) |
| 1634 | bufspacewakeup(); |
| 1635 | |
| 1636 | /* |
| 1637 | * Clean up temporary flags and unlock the buffer. |
| 1638 | */ |
| 1639 | bp->b_flags &= ~(B_ORDERED | B_NOCACHE | B_RELBUF | B_DIRECT); |
| 1640 | BUF_UNLOCK(bp); |
| 1641 | } |
| 1642 | |
| 1643 | /* |
| 1644 | * bqrelse: |
| 1645 | * |
| 1646 | * Release a buffer back to the appropriate queue but do not try to free |
| 1647 | * it. The buffer is expected to be used again soon. |
| 1648 | * |
| 1649 | * bqrelse() is used by bdwrite() to requeue a delayed write, and used by |
| 1650 | * biodone() to requeue an async I/O on completion. It is also used when |
| 1651 | * known good buffers need to be requeued but we think we may need the data |
| 1652 | * again soon. |
| 1653 | * |
| 1654 | * XXX we should be able to leave the B_RELBUF hint set on completion. |
| 1655 | * |
| 1656 | * MPSAFE |
| 1657 | */ |
| 1658 | void |
| 1659 | bqrelse(struct buf *bp) |
| 1660 | { |
| 1661 | KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp)); |
| 1662 | |
| 1663 | if (bp->b_qindex != BQUEUE_NONE) |
| 1664 | panic("bqrelse: free buffer onto another queue???"); |
| 1665 | if (BUF_REFCNTNB(bp) > 1) { |
| 1666 | /* do not release to free list */ |
| 1667 | panic("bqrelse: multiple refs"); |
| 1668 | return; |
| 1669 | } |
| 1670 | |
| 1671 | buf_act_advance(bp); |
| 1672 | |
| 1673 | spin_lock(&bufqspin); |
| 1674 | if (bp->b_flags & B_LOCKED) { |
| 1675 | /* |
| 1676 | * Locked buffers are released to the locked queue. However, |
| 1677 | * if the buffer is dirty it will first go into the dirty |
| 1678 | * queue and later on after the I/O completes successfully it |
| 1679 | * will be released to the locked queue. |
| 1680 | */ |
| 1681 | bp->b_qindex = BQUEUE_LOCKED; |
| 1682 | TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_LOCKED], bp, b_freelist); |
| 1683 | } else if (bp->b_flags & B_DELWRI) { |
| 1684 | bp->b_qindex = (bp->b_flags & B_HEAVY) ? |
| 1685 | BQUEUE_DIRTY_HW : BQUEUE_DIRTY; |
| 1686 | TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist); |
| 1687 | } else if (vm_page_count_min(0)) { |
| 1688 | /* |
| 1689 | * We are too low on memory, we have to try to free the |
| 1690 | * buffer (most importantly: the wired pages making up its |
| 1691 | * backing store) *now*. |
| 1692 | */ |
| 1693 | spin_unlock(&bufqspin); |
| 1694 | brelse(bp); |
| 1695 | return; |
| 1696 | } else { |
| 1697 | bp->b_qindex = BQUEUE_CLEAN; |
| 1698 | TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_CLEAN], bp, b_freelist); |
| 1699 | } |
| 1700 | spin_unlock(&bufqspin); |
| 1701 | |
| 1702 | if ((bp->b_flags & B_LOCKED) == 0 && |
| 1703 | ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0)) { |
| 1704 | bufcountwakeup(); |
| 1705 | } |
| 1706 | |
| 1707 | /* |
| 1708 | * Something we can maybe free or reuse. |
| 1709 | */ |
| 1710 | if (bp->b_bufsize && !(bp->b_flags & B_DELWRI)) |
| 1711 | bufspacewakeup(); |
| 1712 | |
| 1713 | /* |
| 1714 | * Final cleanup and unlock. Clear bits that are only used while a |
| 1715 | * buffer is actively locked. |
| 1716 | */ |
| 1717 | bp->b_flags &= ~(B_ORDERED | B_NOCACHE | B_RELBUF); |
| 1718 | dsched_exit_buf(bp); |
| 1719 | BUF_UNLOCK(bp); |
| 1720 | } |
| 1721 | |
| 1722 | /* |
| 1723 | * Hold a buffer, preventing it from being reused. This will prevent |
| 1724 | * normal B_RELBUF operations on the buffer but will not prevent B_INVAL |
| 1725 | * operations. If a B_INVAL operation occurs the buffer will remain held |
| 1726 | * but the underlying pages may get ripped out. |
| 1727 | * |
| 1728 | * These functions are typically used in VOP_READ/VOP_WRITE functions |
| 1729 | * to hold a buffer during a copyin or copyout, preventing deadlocks |
| 1730 | * or recursive lock panics when read()/write() is used over mmap()'d |
| 1731 | * space. |
| 1732 | * |
| 1733 | * NOTE: bqhold() requires that the buffer be locked at the time of the |
| 1734 | * hold. bqdrop() has no requirements other than the buffer having |
| 1735 | * previously been held. |
| 1736 | */ |
| 1737 | void |
| 1738 | bqhold(struct buf *bp) |
| 1739 | { |
| 1740 | atomic_add_int(&bp->b_refs, 1); |
| 1741 | } |
| 1742 | |
| 1743 | void |
| 1744 | bqdrop(struct buf *bp) |
| 1745 | { |
| 1746 | KKASSERT(bp->b_refs > 0); |
| 1747 | atomic_add_int(&bp->b_refs, -1); |
| 1748 | } |
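|      | 
|      | /*
|      |  * Illustrative usage sketch (not part of the original code): a VOP_READ
|      |  * implementation might wrap an unlocked uiomove() with bqhold()/bqdrop()
|      |  * to prevent the buffer from being reused by getnewbuf() while the
|      |  * copyout is in progress.  'offset', 'n', and 'uio' are hypothetical
|      |  * locals belonging to the caller.
|      |  *
|      |  *	bqhold(bp);
|      |  *	error = uiomove(bp->b_data + offset, n, uio);
|      |  *	bqdrop(bp);
|      |  *
|      |  * As noted above, the hold prevents normal B_RELBUF reuse but not
|      |  * B_INVAL disassociation, so the caller must be prepared for that.
|      |  */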
| 1749 | |
| 1750 | /* |
| 1751 | * Return backing pages held by the buffer 'bp' back to the VM system. |
| 1752 | * This routine is called when the bp is invalidated, released, or |
| 1753 | * reused. |
| 1754 | * |
| 1755 | * The KVA mapping (b_data) for the underlying pages is removed by |
| 1756 | * this function. |
| 1757 | * |
| 1758 | * WARNING! This routine is integral to the low memory critical path |
| 1759 | * when a buffer is B_RELBUF'd. If the system has a severe page |
| 1760 | * deficit we need to get the page(s) onto the PQ_FREE or PQ_CACHE |
| 1761 | * queues so they can be reused in the current pageout daemon |
| 1762 | * pass. |
| 1763 | */ |
| 1764 | static void |
| 1765 | vfs_vmio_release(struct buf *bp) |
| 1766 | { |
| 1767 | int i; |
| 1768 | vm_page_t m; |
| 1769 | |
| 1770 | for (i = 0; i < bp->b_xio.xio_npages; i++) { |
| 1771 | m = bp->b_xio.xio_pages[i]; |
| 1772 | bp->b_xio.xio_pages[i] = NULL; |
| 1773 | |
| 1774 | /* |
| 1775 | * We need to own the page in order to safely unwire it. |
| 1776 | */ |
| 1777 | vm_page_busy_wait(m, FALSE, "vmiopg"); |
| 1778 | |
| 1779 | /* |
| 1780 | * The VFS is telling us this is not a meta-data buffer |
| 1781 | * even if it is backed by a block device. |
| 1782 | */ |
| 1783 | if (bp->b_flags & B_NOTMETA) |
| 1784 | vm_page_flag_set(m, PG_NOTMETA); |
| 1785 | |
| 1786 | /* |
| 1787 | * This is a very important bit of code. We try to track |
| 1788 | * VM page use whether the pages are wired into the buffer |
| 1789 | * cache or not. While wired into the buffer cache the |
| 1790 | * bp tracks the act_count. |
| 1791 | * |
| 1792 | * We can choose to place unwired pages on the inactive |
| 1793 | * queue (0) or active queue (1). If we place too many |
| 1794 | * on the active queue the queue will cycle the act_count |
| 1795 | * on pages we'd like to keep, just from single-use pages |
| 1796 | * (such as when doing a tar-up or file scan). |
| 1797 | */ |
| 1798 | if (bp->b_act_count < vm_cycle_point) |
| 1799 | vm_page_unwire(m, 0); |
| 1800 | else |
| 1801 | vm_page_unwire(m, 1); |
| 1802 | |
| 1803 | /* |
| 1804 | * If the wire_count has dropped to 0 we may need to take |
| 1805 | * further action before unbusying the page. |
| 1806 | * |
| 1807 | * WARNING: vm_page_try_*() also checks PG_NEED_COMMIT for us. |
| 1808 | */ |
| 1809 | if (m->wire_count == 0) { |
| 1810 | vm_page_flag_clear(m, PG_ZERO); |
| 1811 | |
| 1812 | if (bp->b_flags & B_DIRECT) { |
| 1813 | /* |
| 1814 | * Attempt to free the page if B_DIRECT is |
| 1815 | * set, the caller does not desire the page |
| 1816 | * to be cached. |
| 1817 | */ |
| 1818 | vm_page_wakeup(m); |
| 1819 | vm_page_try_to_free(m); |
| 1820 | } else if ((bp->b_flags & B_NOTMETA) || |
| 1821 | vm_page_count_min(0)) { |
| 1822 | /* |
| 1823 | * Attempt to move the page to PQ_CACHE |
| 1824 | * if B_NOTMETA is set. This flag is set |
| 1825 | * by HAMMER to remove one of the two pages |
| 1826 | * present when double buffering is enabled. |
| 1827 | * |
| 1828 | * Attempt to move the page to PQ_CACHE
| 1829 | * if we have a severe page deficit. This
| 1830 | * will cause buffer cache operations related |
| 1831 | * to pageouts to recycle the related pages |
| 1832 | * in order to avoid a low memory deadlock. |
| 1833 | */ |
| 1834 | m->act_count = bp->b_act_count; |
| 1835 | vm_page_wakeup(m); |
| 1836 | vm_page_try_to_cache(m); |
| 1837 | } else { |
| 1838 | /* |
| 1839 | * Nominal case, leave the page on the |
| 1840 | * queue the original unwiring placed it on |
| 1841 | * (active or inactive). |
| 1842 | */ |
| 1843 | m->act_count = bp->b_act_count; |
| 1844 | vm_page_wakeup(m); |
| 1845 | } |
| 1846 | } else { |
| 1847 | vm_page_wakeup(m); |
| 1848 | } |
| 1849 | } |
| 1850 | |
| 1851 | pmap_qremove(trunc_page((vm_offset_t) bp->b_data), |
| 1852 | bp->b_xio.xio_npages); |
| 1853 | if (bp->b_bufsize) { |
| 1854 | bufspacewakeup(); |
| 1855 | bp->b_bufsize = 0; |
| 1856 | } |
| 1857 | bp->b_xio.xio_npages = 0; |
| 1858 | bp->b_flags &= ~B_VMIO; |
| 1859 | KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
| 1860 | if (bp->b_vp) |
| 1861 | brelvp(bp); |
| 1862 | } |
| 1863 | |
| 1864 | /* |
| 1865 | * getnewbuf: |
| 1866 | * |
| 1867 | * Find and initialize a new buffer header, freeing up existing buffers |
| 1868 | * in the bufqueues as necessary. The new buffer is returned locked. |
| 1869 | * |
| 1870 | * Important: B_INVAL is not set. If the caller wishes to throw the |
| 1871 | * buffer away, the caller must set B_INVAL prior to calling brelse(). |
| 1872 | * |
| 1873 | * We block if: |
| 1874 | * We have insufficient buffer headers |
| 1875 | * We have insufficient buffer space |
| 1876 | * buffer_map is too fragmented ( space reservation fails ) |
| 1877 | * If we have to flush dirty buffers ( but we try to avoid this ) |
| 1878 | * |
| 1879 | * To avoid VFS layer recursion we do not flush dirty buffers ourselves. |
| 1880 | * Instead we ask the buf daemon to do it for us. We attempt to |
| 1881 | * avoid piecemeal wakeups of the pageout daemon. |
| 1882 | * |
| 1883 | * MPALMOSTSAFE |
| 1884 | */ |
| 1885 | struct buf * |
| 1886 | getnewbuf(int blkflags, int slptimeo, int size, int maxsize) |
| 1887 | { |
| 1888 | struct buf *bp; |
| 1889 | struct buf *nbp; |
| 1890 | int defrag = 0; |
| 1891 | int nqindex; |
| 1892 | int slpflags = (blkflags & GETBLK_PCATCH) ? PCATCH : 0; |
| 1893 | static int flushingbufs; |
| 1894 | |
| 1895 | /* |
| 1896 | * We can't afford to block since we might be holding a vnode lock, |
| 1897 | * which may prevent system daemons from running. We deal with |
| 1898 | * low-memory situations by proactively returning memory and running |
| 1899 | * async I/O rather than sync I/O.
| 1900 | */ |
| 1901 | |
| 1902 | ++getnewbufcalls; |
| 1903 | --getnewbufrestarts; /* cancel the initial ++ at the restart label */
| 1904 | restart: |
| 1905 | ++getnewbufrestarts; |
| 1906 | |
| 1907 | /* |
| 1908 | * Setup for scan. If we do not have enough free buffers, |
| 1909 | * we setup a degenerate case that immediately fails. Note |
| 1910 | * that if we are a specially marked process, we are allowed to
| 1911 | * dip into our reserves. |
| 1912 | * |
| 1913 | * The scanning sequence is nominally: EMPTY->EMPTYKVA->CLEAN |
| 1914 | * |
| 1915 | * We start with EMPTYKVA. If the list is empty we backup to EMPTY. |
| 1916 | * However, there are a number of cases (defragging, reusing, ...) |
| 1917 | * where we cannot backup. |
| 1918 | */ |
| 1919 | nqindex = BQUEUE_EMPTYKVA; |
| 1920 | spin_lock(&bufqspin); |
| 1921 | nbp = TAILQ_FIRST(&bufqueues[BQUEUE_EMPTYKVA]); |
| 1922 | |
| 1923 | if (nbp == NULL) { |
| 1924 | /* |
| 1925 | * If no EMPTYKVA buffers and we are either |
| 1926 | * defragging or reusing, locate a CLEAN buffer |
| 1927 | * to free or reuse. If bufspace usage is low
| 1928 | * skip this step so we can allocate a new buffer. |
| 1929 | */ |
| 1930 | if (defrag || bufspace >= lobufspace) { |
| 1931 | nqindex = BQUEUE_CLEAN; |
| 1932 | nbp = TAILQ_FIRST(&bufqueues[BQUEUE_CLEAN]); |
| 1933 | } |
| 1934 | |
| 1935 | /* |
| 1936 | * If we could not find or were not allowed to reuse a |
| 1937 | * CLEAN buffer, check to see if it is ok to use an EMPTY |
| 1938 | * buffer. We can only use an EMPTY buffer if allocating |
| 1939 | * its KVA would not otherwise run us out of buffer space. |
| 1940 | */ |
| 1941 | if (nbp == NULL && defrag == 0 && |
| 1942 | bufspace + maxsize < hibufspace) { |
| 1943 | nqindex = BQUEUE_EMPTY; |
| 1944 | nbp = TAILQ_FIRST(&bufqueues[BQUEUE_EMPTY]); |
| 1945 | } |
| 1946 | } |
| 1947 | |
| 1948 | /* |
| 1949 | * Run scan, possibly freeing data and/or kva mappings on the fly |
| 1950 | * depending. |
| 1951 | * |
| 1952 | * WARNING! bufqspin is held! |
| 1953 | */ |
| 1954 | while ((bp = nbp) != NULL) { |
| 1955 | int qindex = nqindex; |
| 1956 | |
| 1957 | nbp = TAILQ_NEXT(bp, b_freelist); |
| 1958 | |
| 1959 | /* |
| 1960 | * BQUEUE_CLEAN - B_AGE special case. If not set the bp |
| 1961 | * cycles through the queue twice before being selected. |
| 1962 | */ |
| 1963 | if (qindex == BQUEUE_CLEAN && |
| 1964 | (bp->b_flags & B_AGE) == 0 && nbp) { |
| 1965 | bp->b_flags |= B_AGE; |
| 1966 | TAILQ_REMOVE(&bufqueues[qindex], bp, b_freelist); |
| 1967 | TAILQ_INSERT_TAIL(&bufqueues[qindex], bp, b_freelist); |
| 1968 | continue; |
| 1969 | } |
| 1970 | |
| 1971 | /* |
| 1972 | * Calculate next bp ( we can only use it if we do not block |
| 1973 | * or do other fancy things ). |
| 1974 | */ |
| 1975 | if (nbp == NULL) { |
| 1976 | switch(qindex) { |
| 1977 | case BQUEUE_EMPTY: |
| 1978 | nqindex = BQUEUE_EMPTYKVA; |
| 1979 | if ((nbp = TAILQ_FIRST(&bufqueues[BQUEUE_EMPTYKVA]))) |
| 1980 | break; |
| 1981 | /* fall through */ |
| 1982 | case BQUEUE_EMPTYKVA: |
| 1983 | nqindex = BQUEUE_CLEAN; |
| 1984 | if ((nbp = TAILQ_FIRST(&bufqueues[BQUEUE_CLEAN]))) |
| 1985 | break; |
| 1986 | /* fall through */ |
| 1987 | case BQUEUE_CLEAN: |
| 1988 | /* |
| 1989 | * nbp is NULL. |
| 1990 | */ |
| 1991 | break; |
| 1992 | } |
| 1993 | } |
| 1994 | |
| 1995 | /* |
| 1996 | * Sanity Checks |
| 1997 | */ |
| 1998 | KASSERT(bp->b_qindex == qindex, |
| 1999 | ("getnewbuf: inconsistent queue %d bp %p", qindex, bp)); |
| 2000 | |
| 2001 | /* |
| 2002 | * Note: we no longer distinguish between VMIO and non-VMIO |
| 2003 | * buffers. |
| 2004 | */ |
| 2005 | KASSERT((bp->b_flags & B_DELWRI) == 0, |
| 2006 | ("delwri buffer %p found in queue %d", bp, qindex)); |
| 2007 | |
| 2008 | /* |
| 2009 | * Do not try to reuse a buffer with a non-zero b_refs. |
| 2010 | * This is an unsynchronized test. A synchronized test |
| 2011 | * is also performed after we lock the buffer. |
| 2012 | */ |
| 2013 | if (bp->b_refs) |
| 2014 | continue; |
| 2015 | |
| 2016 | /* |
| 2017 | * If we are defragging then we need a buffer with |
| 2018 | * b_kvasize != 0. XXX this situation should no longer |
| 2019 | * occur, if defrag is non-zero the buffer's b_kvasize |
| 2020 | * should also be non-zero at this point. XXX |
| 2021 | */ |
| 2022 | if (defrag && bp->b_kvasize == 0) { |
| 2023 | kprintf("Warning: defrag empty buffer %p\n", bp); |
| 2024 | continue; |
| 2025 | } |
| 2026 | |
| 2027 | /* |
| 2028 | * Start freeing the bp. This is somewhat involved. nbp |
| 2029 | * remains valid only for BQUEUE_EMPTY[KVA] bp's. Buffers |
| 2030 | * on the clean list must be disassociated from their |
| 2031 | * current vnode. Buffers on the empty[kva] lists have |
| 2032 | * already been disassociated. |
| 2033 | * |
| 2034 | * b_refs is checked after locking along with queue changes. |
| 2035 | * We must check here to deal with zero->nonzero transitions |
| 2036 | * made by the owner of the buffer lock, which is used by |
| 2037 | * VFSs to hold the buffer while issuing unlocked
| 2038 | * uiomove()s. We cannot invalidate the buffer's pages |
| 2039 | * for this case. Once we successfully lock a buffer the |
| 2040 | * only 0->1 transitions of b_refs will occur via findblk(). |
| 2041 | * |
| 2042 | * We must also check for queue changes after successful |
| 2043 | * locking as the current lock holder may dispose of the |
| 2044 | * buffer and change its queue. |
| 2045 | */ |
| 2046 | if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0) { |
| 2047 | spin_unlock(&bufqspin); |
| 2048 | tsleep(&bd_request, 0, "gnbxxx", (hz + 99) / 100); |
| 2049 | goto restart; |
| 2050 | } |
| 2051 | if (bp->b_qindex != qindex || bp->b_refs) { |
| 2052 | spin_unlock(&bufqspin); |
| 2053 | BUF_UNLOCK(bp); |
| 2054 | goto restart; |
| 2055 | } |
| 2056 | bremfree_locked(bp); |
| 2057 | spin_unlock(&bufqspin); |
| 2058 | |
| 2059 | /* |
| 2060 | * Dependencies must be handled before we disassociate the
| 2061 | * vnode. |
| 2062 | * |
| 2063 | * NOTE: HAMMER will set B_LOCKED if the buffer cannot |
| 2064 | * be immediately disassociated. HAMMER then becomes |
| 2065 | * responsible for releasing the buffer. |
| 2066 | * |
| 2067 | * NOTE: bufqspin is UNLOCKED now. |
| 2068 | */ |
| 2069 | if (LIST_FIRST(&bp->b_dep) != NULL) { |
| 2070 | buf_deallocate(bp); |
| 2071 | if (bp->b_flags & B_LOCKED) { |
| 2072 | bqrelse(bp); |
| 2073 | goto restart; |
| 2074 | } |
| 2075 | KKASSERT(LIST_FIRST(&bp->b_dep) == NULL); |
| 2076 | } |
| 2077 | |
| 2078 | if (qindex == BQUEUE_CLEAN) { |
| 2079 | if (bp->b_flags & B_VMIO) |
| 2080 | vfs_vmio_release(bp); |
| 2081 | if (bp->b_vp) |
| 2082 | brelvp(bp); |
| 2083 | } |
| 2084 | |
| 2085 | /* |
| 2086 | * NOTE: nbp is now entirely invalid. We can only restart |
| 2087 | * the scan from this point on. |
| 2088 | * |
| 2089 | * Get the rest of the buffer freed up. b_kva* is still |
| 2090 | * valid after this operation. |
| 2091 | */ |
| 2092 | KASSERT(bp->b_vp == NULL, |
| 2093 | ("bp3 %p flags %08x vnode %p qindex %d " |
| 2094 | "unexpectededly still associated!", |
| 2095 | bp, bp->b_flags, bp->b_vp, qindex)); |
| 2096 | KKASSERT((bp->b_flags & B_HASHED) == 0); |
| 2097 | |
| 2098 | /* |
| 2099 | * critical section protection is not required when |
| 2100 | * scrapping a buffer's contents because it is already |
| 2101 | * wired. |
| 2102 | */ |
| 2103 | if (bp->b_bufsize) |
| 2104 | allocbuf(bp, 0); |
| 2105 | |
| 2106 | bp->b_flags = B_BNOCLIP; |
| 2107 | bp->b_cmd = BUF_CMD_DONE; |
| 2108 | bp->b_vp = NULL; |
| 2109 | bp->b_error = 0; |
| 2110 | bp->b_resid = 0; |
| 2111 | bp->b_bcount = 0; |
| 2112 | bp->b_xio.xio_npages = 0; |
| 2113 | bp->b_dirtyoff = bp->b_dirtyend = 0; |
| 2114 | bp->b_act_count = ACT_INIT; |
| 2115 | reinitbufbio(bp); |
| 2116 | KKASSERT(LIST_FIRST(&bp->b_dep) == NULL); |
| 2117 | buf_dep_init(bp); |
| 2118 | if (blkflags & GETBLK_BHEAVY) |
| 2119 | bp->b_flags |= B_HEAVY; |
| 2120 | |
| 2121 | /* |
| 2122 | * If we are defragging then free the buffer. |
| 2123 | */ |
| 2124 | if (defrag) { |
| 2125 | bp->b_flags |= B_INVAL; |
| 2126 | bfreekva(bp); |
| 2127 | brelse(bp); |
| 2128 | defrag = 0; |
| 2129 | goto restart; |
| 2130 | } |
| 2131 | |
| 2132 | /* |
| 2133 | * If we are overcommitted then recover the buffer and its
| 2134 | * KVM space. This occurs in rare situations when multiple |
| 2135 | * processes are blocked in getnewbuf() or allocbuf(). |
| 2136 | */ |
| 2137 | if (bufspace >= hibufspace) |
| 2138 | flushingbufs = 1; |
| 2139 | if (flushingbufs && bp->b_kvasize != 0) { |
| 2140 | bp->b_flags |= B_INVAL; |
| 2141 | bfreekva(bp); |
| 2142 | brelse(bp); |
| 2143 | goto restart; |
| 2144 | } |
| 2145 | if (bufspace < lobufspace) |
| 2146 | flushingbufs = 0; |
| 2147 | |
| 2148 | /* |
| 2149 | * b_refs can transition to a non-zero value while we hold |
| 2150 | * the buffer locked due to a findblk(). Our brelvp() above |
| 2151 | * interlocked any future possible transitions due to |
| 2152 | * findblk()s. |
| 2153 | * |
| 2154 | * If we find b_refs to be non-zero we can destroy the |
| 2155 | * buffer's contents but we cannot yet reuse the buffer. |
| 2156 | */ |
| 2157 | if (bp->b_refs) { |
| 2158 | bp->b_flags |= B_INVAL; |
| 2159 | bfreekva(bp); |
| 2160 | brelse(bp); |
| 2161 | goto restart; |
| 2162 | } |
| 2163 | break; |
| 2164 | /* NOT REACHED, bufqspin not held */ |
| 2165 | } |
| 2166 | |
| 2167 | /* |
| 2168 | * If we exhausted our list, sleep as appropriate. We may have to |
| 2169 | * wakeup various daemons and write out some dirty buffers. |
| 2170 | * |
| 2171 | * Generally we are sleeping due to insufficient buffer space. |
| 2172 | * |
| 2173 | * NOTE: bufqspin is held if bp is NULL, else it is not held. |
| 2174 | */ |
| 2175 | if (bp == NULL) { |
| 2176 | int flags; |
| 2177 | char *waitmsg; |
| 2178 | |
| 2179 | spin_unlock(&bufqspin); |
| 2180 | if (defrag) { |
| 2181 | flags = VFS_BIO_NEED_BUFSPACE; |
| 2182 | waitmsg = "nbufkv"; |
| 2183 | } else if (bufspace >= hibufspace) { |
| 2184 | waitmsg = "nbufbs"; |
| 2185 | flags = VFS_BIO_NEED_BUFSPACE; |
| 2186 | } else { |
| 2187 | waitmsg = "newbuf"; |
| 2188 | flags = VFS_BIO_NEED_ANY; |
| 2189 | } |
| 2190 | |
| 2191 | bd_speedup(); /* heeeelp */ |
| 2192 | spin_lock(&bufcspin); |
| 2193 | needsbuffer |= flags; |
| 2194 | while (needsbuffer & flags) { |
| 2195 | if (ssleep(&needsbuffer, &bufcspin, |
| 2196 | slpflags, waitmsg, slptimeo)) { |
| 2197 | spin_unlock(&bufcspin); |
| 2198 | return (NULL); |
| 2199 | } |
| 2200 | } |
| 2201 | spin_unlock(&bufcspin); |
| 2202 | } else { |
| 2203 | /* |
| 2204 | * We finally have a valid bp. We aren't quite out of the |
| 2205 | * woods, we still have to reserve kva space. In order |
| 2206 | * to keep fragmentation sane we only allocate kva in |
| 2207 | * BKVASIZE chunks. |
| 2208 | * |
| 2209 | * (bufqspin is not held) |
| 2210 | */ |
| 2211 | maxsize = (maxsize + BKVAMASK) & ~BKVAMASK; |
| 2212 | |
| 2213 | if (maxsize != bp->b_kvasize) { |
| 2214 | vm_offset_t addr = 0; |
| 2215 | int count; |
| 2216 | |
| 2217 | bfreekva(bp); |
| 2218 | |
| 2219 | count = vm_map_entry_reserve(MAP_RESERVE_COUNT); |
| 2220 | vm_map_lock(&buffer_map); |
| 2221 | |
| 2222 | if (vm_map_findspace(&buffer_map, |
| 2223 | vm_map_min(&buffer_map), maxsize, |
| 2224 | maxsize, 0, &addr)) { |
| 2225 | /* |
| 2226 | * Uh oh. Buffer map is too fragmented. We |
| 2227 | * must defragment the map. |
| 2228 | */ |
| 2229 | vm_map_unlock(&buffer_map); |
| 2230 | vm_map_entry_release(count); |
| 2231 | ++bufdefragcnt; |
| 2232 | defrag = 1; |
| 2233 | bp->b_flags |= B_INVAL; |
| 2234 | brelse(bp); |
| 2235 | goto restart; |
| 2236 | } |
| 2237 | if (addr) { |
| 2238 | vm_map_insert(&buffer_map, &count, |
| 2239 | NULL, 0, |
| 2240 | addr, addr + maxsize, |
| 2241 | VM_MAPTYPE_NORMAL, |
| 2242 | VM_PROT_ALL, VM_PROT_ALL, |
| 2243 | MAP_NOFAULT); |
| 2244 | |
| 2245 | bp->b_kvabase = (caddr_t) addr; |
| 2246 | bp->b_kvasize = maxsize; |
| 2247 | bufspace += bp->b_kvasize; |
| 2248 | ++bufreusecnt; |
| 2249 | } |
| 2250 | vm_map_unlock(&buffer_map); |
| 2251 | vm_map_entry_release(count); |
| 2252 | } |
| 2253 | bp->b_data = bp->b_kvabase; |
| 2254 | } |
| 2255 | return(bp); |
| 2256 | } |
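|      | 
|      | /*
|      |  * Illustrative sketch of the getnewbuf() contract documented above
|      |  * (hypothetical caller): the returned buffer is locked and NOT marked
|      |  * B_INVAL, so a caller that decides to throw it away must set B_INVAL
|      |  * itself before the brelse().
|      |  *
|      |  *	bp = getnewbuf(0, 0, size, maxsize);
|      |  *	if (bp != NULL && some_error_condition) {
|      |  *		bp->b_flags |= B_INVAL;
|      |  *		brelse(bp);
|      |  *	}
|      |  */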
| 2257 | |
| 2258 | #if 0 |
| 2259 | /* |
| 2260 | * This routine is called in an emergency to recover VM pages from the |
| 2261 | * buffer cache by cashing in clean buffers. The idea is to recover |
| 2262 | * enough pages to be able to satisfy a stuck bio_page_alloc(). |
| 2263 | * |
| 2264 | * XXX Currently not implemented. This function can wind up deadlocking |
| 2265 | * against another thread holding one or more of the backing pages busy. |
| 2266 | */ |
| 2267 | static int |
| 2268 | recoverbufpages(void) |
| 2269 | { |
| 2270 | struct buf *bp; |
| 2271 | int bytes = 0; |
| 2272 | |
| 2273 | ++recoverbufcalls; |
| 2274 | |
| 2275 | spin_lock(&bufqspin); |
| 2276 | while (bytes < MAXBSIZE) { |
| 2277 | bp = TAILQ_FIRST(&bufqueues[BQUEUE_CLEAN]); |
| 2278 | if (bp == NULL) |
| 2279 | break; |
| 2280 | |
| 2281 | /* |
| 2282 | * BQUEUE_CLEAN - B_AGE special case. If not set the bp |
| 2283 | * cycles through the queue twice before being selected. |
| 2284 | */ |
| 2285 | if ((bp->b_flags & B_AGE) == 0 && TAILQ_NEXT(bp, b_freelist)) { |
| 2286 | bp->b_flags |= B_AGE; |
| 2287 | TAILQ_REMOVE(&bufqueues[BQUEUE_CLEAN], bp, b_freelist); |
| 2288 | TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_CLEAN], |
| 2289 | bp, b_freelist); |
| 2290 | continue; |
| 2291 | } |
| 2292 | |
| 2293 | /* |
| 2294 | * Sanity Checks |
| 2295 | */ |
| 2296 | KKASSERT(bp->b_qindex == BQUEUE_CLEAN); |
| 2297 | KKASSERT((bp->b_flags & B_DELWRI) == 0); |
| 2298 | |
| 2299 | /* |
| 2300 | * Start freeing the bp. This is somewhat involved. |
| 2301 | * |
| 2302 | * Buffers on the clean list must be disassociated from |
| 2303 | * their current vnode |
| 2304 | */ |
| 2305 | |
| 2306 | if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0) { |
| 2307 | kprintf("recoverbufpages: warning, locked buf %p, " |
| 2308 | "race corrected\n", |
| 2309 | bp); |
| 2310 | ssleep(&bd_request, &bufqspin, 0, "gnbxxx", hz / 100); |
| 2311 | continue; |
| 2312 | } |
| 2313 | if (bp->b_qindex != BQUEUE_CLEAN) { |
| 2314 | kprintf("recoverbufpages: warning, BUF_LOCK blocked " |
| 2315 | "unexpectedly on buf %p index %d, race " |
| 2316 | "corrected\n", |
| 2317 | bp, bp->b_qindex); |
| 2318 | BUF_UNLOCK(bp); |
| 2319 | continue; |
| 2320 | } |
| 2321 | bremfree_locked(bp); |
| 2322 | spin_unlock(&bufqspin); |
| 2323 | |
| 2324 | /* |
| 2325 | * Sanity check. Only BQUEUE_DIRTY[_HW] employs markers. |
| 2326 | */ |
| 2327 | KKASSERT((bp->b_flags & B_MARKER) == 0); |
| 2328 | |
| 2329 | /* |
| 2330 | * Dependencies must be handled before we disassociate the
| 2331 | * vnode. |
| 2332 | * |
| 2333 | * NOTE: HAMMER will set B_LOCKED if the buffer cannot |
| 2334 | * be immediately disassociated. HAMMER then becomes |
| 2335 | * responsible for releasing the buffer. |
| 2336 | */ |
| 2337 | if (LIST_FIRST(&bp->b_dep) != NULL) { |
| 2338 | buf_deallocate(bp); |
| 2339 | if (bp->b_flags & B_LOCKED) { |
| 2340 | bqrelse(bp); |
| 2341 | spin_lock(&bufqspin); |
| 2342 | continue; |
| 2343 | } |
| 2344 | KKASSERT(LIST_FIRST(&bp->b_dep) == NULL); |
| 2345 | } |
| 2346 | |
| 2347 | bytes += bp->b_bufsize; |
| 2348 | |
| 2349 | if (bp->b_flags & B_VMIO) { |
| 2350 | bp->b_flags |= B_DIRECT; /* try to free pages */ |
| 2351 | vfs_vmio_release(bp); |
| 2352 | } |
| 2353 | if (bp->b_vp) |
| 2354 | brelvp(bp); |
| 2355 | |
| 2356 | KKASSERT(bp->b_vp == NULL); |
| 2357 | KKASSERT((bp->b_flags & B_HASHED) == 0); |
| 2358 | |
| 2359 | /* |
| 2360 | * critical section protection is not required when |
| 2361 | * scrapping a buffer's contents because it is already |
| 2362 | * wired. |
| 2363 | */ |
| 2364 | if (bp->b_bufsize) |
| 2365 | allocbuf(bp, 0); |
| 2366 | |
| 2367 | bp->b_flags = B_BNOCLIP; |
| 2368 | bp->b_cmd = BUF_CMD_DONE; |
| 2369 | bp->b_vp = NULL; |
| 2370 | bp->b_error = 0; |
| 2371 | bp->b_resid = 0; |
| 2372 | bp->b_bcount = 0; |
| 2373 | bp->b_xio.xio_npages = 0; |
| 2374 | bp->b_dirtyoff = bp->b_dirtyend = 0; |
| 2375 | reinitbufbio(bp); |
| 2376 | KKASSERT(LIST_FIRST(&bp->b_dep) == NULL); |
| 2377 | buf_dep_init(bp); |
| 2378 | bp->b_flags |= B_INVAL; |
| 2379 | /* bfreekva(bp); */ |
| 2380 | brelse(bp); |
| 2381 | spin_lock(&bufqspin); |
| 2382 | } |
| 2383 | spin_unlock(&bufqspin); |
| 2384 | return(bytes); |
| 2385 | } |
| 2386 | #endif |
| 2387 | |
| 2388 | /* |
| 2389 | * buf_daemon: |
| 2390 | * |
| 2391 | * Buffer flushing daemon. Buffers are normally flushed by the |
| 2392 | * update daemon but if it cannot keep up this process starts to |
| 2393 | * take the load in an attempt to prevent getnewbuf() from blocking. |
| 2394 | * |
| 2395 | * Once a flush is initiated it does not stop until the amount of
| 2396 | * dirty buffer space falls below lodirtybufspace, but we will wake
| 2397 | * up anyone waiting at the mid-point.
| 2398 | */ |
| 2399 | static struct kproc_desc buf_kp = { |
| 2400 | "bufdaemon", |
| 2401 | buf_daemon, |
| 2402 | &bufdaemon_td |
| 2403 | }; |
| 2404 | SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, |
| 2405 | kproc_start, &buf_kp) |
| 2406 | |
| 2407 | static struct kproc_desc bufhw_kp = { |
| 2408 | "bufdaemon_hw", |
| 2409 | buf_daemon_hw, |
| 2410 | &bufdaemonhw_td |
| 2411 | }; |
| 2412 | SYSINIT(bufdaemon_hw, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, |
| 2413 | kproc_start, &bufhw_kp) |
| 2414 | |
| 2415 | /* |
| 2416 | * MPSAFE thread |
| 2417 | */ |
| 2418 | static void |
| 2419 | buf_daemon1(struct thread *td, int queue, int (*buf_limit_fn)(long), |
| 2420 | int *bd_req) |
| 2421 | { |
| 2422 | long limit; |
| 2423 | struct buf *marker; |
| 2424 | |
| 2425 | marker = kmalloc(sizeof(*marker), M_BIOBUF, M_WAITOK | M_ZERO); |
| 2426 | marker->b_flags |= B_MARKER; |
| 2427 | marker->b_qindex = BQUEUE_NONE; |
| 2428 | |
| 2429 | /* |
| 2430 | * This process needs to be suspended prior to shutdown sync. |
| 2431 | */ |
| 2432 | EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, |
| 2433 | td, SHUTDOWN_PRI_LAST); |
| 2434 | curthread->td_flags |= TDF_SYSTHREAD; |
| 2435 | |
| 2436 | /* |
| 2437 | * This process is allowed to take the buffer cache to the limit |
| 2438 | */ |
| 2439 | for (;;) { |
| 2440 | kproc_suspend_loop(); |
| 2441 | |
| 2442 | /* |
| 2443 | * Do the flush as long as the number of dirty buffers |
| 2444 | * (including those running) exceeds lodirtybufspace. |
| 2445 | * |
| 2446 | * When flushing, limit the amount of in-transit I/O to
| 2447 | * hirunningspace so that we do not completely saturate
| 2449 | * the I/O system. Wakeup any waiting processes before we |
| 2450 | * normally would so they can run in parallel with our drain. |
| 2451 | * |
| 2452 | * Our aggregate normal+HW lo water mark is lodirtybufspace, |
| 2453 | * but because we split the operation into two threads we |
| 2454 | * have to cut it in half for each thread. |
| 2455 | */ |
| 2456 | waitrunningbufspace(); |
| 2457 | limit = lodirtybufspace / 2; |
| 2458 | while (buf_limit_fn(limit)) { |
| 2459 | if (flushbufqueues(marker, queue) == 0) |
| 2460 | break; |
| 2461 | if (runningbufspace < hirunningspace) |
| 2462 | continue; |
| 2463 | waitrunningbufspace(); |
| 2464 | } |
| 2465 | |
| 2466 | /* |
| 2467 | * We reached our low water mark, reset the |
| 2468 | * request and sleep until we are needed again. |
| 2469 | * The sleep is just so the suspend code works. |
| 2470 | */ |
| 2471 | spin_lock(&bufcspin); |
| 2472 | if (*bd_req == 0) |
| 2473 | ssleep(bd_req, &bufcspin, 0, "psleep", hz); |
| 2474 | *bd_req = 0; |
| 2475 | spin_unlock(&bufcspin); |
| 2476 | } |
| 2477 | /* NOT REACHED */ |
| 2478 | /*kfree(marker, M_BIOBUF);*/ |
| 2479 | } |
| 2480 | |
| 2481 | static int |
| 2482 | buf_daemon_limit(long limit) |
| 2483 | { |
| 2484 | return (runningbufspace + dirtybufspace > limit || |
| 2485 | dirtybufcount - dirtybufcounthw >= nbuf / 2); |
| 2486 | } |
| 2487 | |
| 2488 | static int |
| 2489 | buf_daemon_hw_limit(long limit) |
| 2490 | { |
| 2491 | return (runningbufspace + dirtybufspacehw > limit || |
| 2492 | dirtybufcounthw >= nbuf / 2); |
| 2493 | } |
| 2494 | |
| 2495 | static void |
| 2496 | buf_daemon(void) |
| 2497 | { |
| 2498 | buf_daemon1(bufdaemon_td, BQUEUE_DIRTY, buf_daemon_limit, |
| 2499 | &bd_request); |
| 2500 | } |
| 2501 | |
| 2502 | static void |
| 2503 | buf_daemon_hw(void) |
| 2504 | { |
| 2505 | buf_daemon1(bufdaemonhw_td, BQUEUE_DIRTY_HW, buf_daemon_hw_limit, |
| 2506 | &bd_request_hw); |
| 2507 | } |
| 2508 | |
| 2509 | /* |
| 2510 | * flushbufqueues: |
| 2511 | * |
| 2512 | * Try to flush a buffer in the dirty queue. We must be careful to |
| 2513 | * free up B_INVAL buffers instead of writing them, which NFS is
| 2514 | * particularly sensitive to. |
| 2515 | * |
| 2516 | * B_RELBUF may only be set by VFSs. We do set B_AGE to indicate |
| 2517 | * that we really want to try to get the buffer out and reuse it |
| 2518 | * due to the write load on the machine. |
| 2519 | * |
| 2520 | * We must lock the buffer in order to check its validity before we |
| 2521 | * can mess with its contents. bufqspin isn't enough. |
| 2522 | */ |
| 2523 | static int |
| 2524 | flushbufqueues(struct buf *marker, bufq_type_t q) |
| 2525 | { |
| 2526 | struct buf *bp; |
| 2527 | int r = 0; |
| 2528 | |
| 2529 | KKASSERT(marker->b_qindex == BQUEUE_NONE); |
| 2530 | KKASSERT(marker->b_flags & B_MARKER); |
| 2531 | |
| 2532 | /* |
| 2533 | * Spinlock needed to perform operations on the queue and may be |
| 2534 | * held through a non-blocking BUF_LOCK(), but cannot be held when |
| 2535 | * BUF_UNLOCK()ing or through any other major operation. |
| 2536 | */ |
| 2537 | spin_lock(&bufqspin); |
| 2538 | marker->b_qindex = q; |
| 2539 | TAILQ_INSERT_HEAD(&bufqueues[q], marker, b_freelist); |
| 2540 | bp = marker; |
| 2541 | |
| 2542 | while ((bp = TAILQ_NEXT(bp, b_freelist)) != NULL) { |
| 2543 | /* |
| 2544 | * NOTE: spinlock is always held at the top of the loop |
| 2545 | */ |
| 2546 | if (bp->b_flags & B_MARKER) |
| 2547 | continue; |
| 2548 | if ((bp->b_flags & B_DELWRI) == 0) { |
| 2549 | kprintf("Unexpected clean buffer %p\n", bp); |
| 2550 | continue; |
| 2551 | } |
| 2552 | if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) |
| 2553 | continue; |
| 2554 | KKASSERT(bp->b_qindex == q); |
| 2555 | |
| 2556 | /* |
| 2557 | * Once the buffer is locked we will have no choice but to |
| 2558 | * unlock the spinlock around a later BUF_UNLOCK and re-set |
| 2559 | * bp = marker when looping. Move the marker now to make |
| 2560 | * things easier. |
| 2561 | */ |
| 2562 | TAILQ_REMOVE(&bufqueues[q], marker, b_freelist); |
| 2563 | TAILQ_INSERT_AFTER(&bufqueues[q], bp, marker, b_freelist); |
| 2564 | |
| 2565 | /* |
| 2566 | * Must recheck B_DELWRI after successfully locking |
| 2567 | * the buffer. |
| 2568 | */ |
| 2569 | if ((bp->b_flags & B_DELWRI) == 0) { |
| 2570 | spin_unlock(&bufqspin); |
| 2571 | BUF_UNLOCK(bp); |
| 2572 | spin_lock(&bufqspin); |
| 2573 | bp = marker; |
| 2574 | continue; |
| 2575 | } |
| 2576 | |
| 2577 | /* |
| 2578 | * Remove the buffer from its queue. We still own the |
| 2579 | * spinlock here. |
| 2580 | */ |
| 2581 | _bremfree(bp); |
| 2582 | |
| 2583 | /* |
| 2584 | * Disposing of an invalid buffer counts as a flush op |
| 2585 | */ |
| 2586 | if (bp->b_flags & B_INVAL) { |
| 2587 | spin_unlock(&bufqspin); |
| 2588 | brelse(bp); |
| 2589 | spin_lock(&bufqspin); |
| 2590 | ++r; |
| 2591 | break; |
| 2592 | } |
| 2593 | |
| 2594 | /* |
| 2595 | * Release the spinlock for the more complex ops we |
| 2596 | * are now going to do. |
| 2597 | */ |
| 2598 | spin_unlock(&bufqspin); |
| 2599 | lwkt_yield(); |
| 2600 | |
| 2601 | /* |
| 2602 | * This is a bit messy |
| 2603 | */ |
| 2604 | if (LIST_FIRST(&bp->b_dep) != NULL && |
| 2605 | (bp->b_flags & B_DEFERRED) == 0 && |
| 2606 | buf_countdeps(bp, 0)) { |
| 2607 | spin_lock(&bufqspin); |
| 2608 | TAILQ_INSERT_TAIL(&bufqueues[q], bp, b_freelist); |
| 2609 | bp->b_qindex = q; |
| 2610 | bp->b_flags |= B_DEFERRED; |
| 2611 | spin_unlock(&bufqspin); |
| 2612 | BUF_UNLOCK(bp); |
| 2613 | spin_lock(&bufqspin); |
| 2614 | bp = marker; |
| 2615 | continue; |
| 2616 | } |
| 2617 | |
| 2618 | /* |
| 2619 | * spinlock not held here. |
| 2620 | * |
| 2621 | * If the buffer has a dependency, buf_checkwrite() must
| 2622 | * also return 0 for us to be able to initiate the write.
| 2623 | * |
| 2624 | * If the buffer is flagged B_ERROR it may be requeued |
| 2625 | * over and over again, we try to avoid a live lock. |
| 2626 | * |
| 2627 | * NOTE: buf_checkwrite is MPSAFE. |
| 2628 | */ |
| 2629 | bremfree(bp); |
| 2630 | if (LIST_FIRST(&bp->b_dep) != NULL && buf_checkwrite(bp)) { |
| 2631 | brelse(bp); |
| 2632 | } else if (bp->b_flags & B_ERROR) { |
| 2633 | tsleep(bp, 0, "bioer", 1); |
| 2634 | bp->b_flags &= ~B_AGE; |
| 2635 | cluster_awrite(bp); |
| 2636 | } else { |
| 2637 | bp->b_flags |= B_AGE; |
| 2638 | cluster_awrite(bp); |
| 2639 | } |
| 2640 | spin_lock(&bufqspin); |
| 2641 | ++r; |
| 2642 | break; |
| 2643 | } |
| 2644 | TAILQ_REMOVE(&bufqueues[q], marker, b_freelist); |
| 2645 | marker->b_qindex = BQUEUE_NONE; |
| 2646 | spin_unlock(&bufqspin); |
| 2647 | |
| 2648 | return (r); |
| 2649 | } |
| 2650 | |
| 2651 | /* |
| 2652 | * inmem: |
| 2653 | * |
| 2654 | * Returns true if no I/O is needed to access the associated VM object. |
| 2655 | * This is like findblk except it also hunts around in the VM system for |
| 2656 | * the data. |
| 2657 | * |
| 2658 | * Note that we ignore vm_page_free() races from interrupts against our |
| 2659 | * lookup, since if the caller is not protected our return value will not |
| 2660 | * be any more valid than otherwise once we exit the critical section.
| 2661 | */ |
| 2662 | int |
| 2663 | inmem(struct vnode *vp, off_t loffset) |
| 2664 | { |
| 2665 | vm_object_t obj; |
| 2666 | vm_offset_t toff, tinc, size; |
| 2667 | vm_page_t m; |
| 2668 | int res = 1; |
| 2669 | |
| 2670 | if (findblk(vp, loffset, FINDBLK_TEST)) |
| 2671 | return 1; |
| 2672 | if (vp->v_mount == NULL) |
| 2673 | return 0; |
| 2674 | if ((obj = vp->v_object) == NULL) |
| 2675 | return 0; |
| 2676 | |
| 2677 | size = PAGE_SIZE; |
| 2678 | if (size > vp->v_mount->mnt_stat.f_iosize) |
| 2679 | size = vp->v_mount->mnt_stat.f_iosize; |
| 2680 | |
| 2681 | vm_object_hold(obj); |
| 2682 | for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) { |
| 2683 | m = vm_page_lookup(obj, OFF_TO_IDX(loffset + toff)); |
| 2684 | if (m == NULL) { |
| 2685 | res = 0; |
| 2686 | break; |
| 2687 | } |
| 2688 | tinc = size; |
| 2689 | if (tinc > PAGE_SIZE - ((toff + loffset) & PAGE_MASK)) |
| 2690 | tinc = PAGE_SIZE - ((toff + loffset) & PAGE_MASK); |
| 2691 | if (vm_page_is_valid(m, |
| 2692 | (vm_offset_t) ((toff + loffset) & PAGE_MASK), tinc) == 0) { |
| 2693 | res = 0; |
| 2694 | break; |
| 2695 | } |
| 2696 | } |
| 2697 | vm_object_drop(obj); |
| 2698 | return (res); |
| 2699 | } |
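|      | 
|      | /*
|      |  * Illustrative sketch (hypothetical): a read-ahead heuristic might use
|      |  * inmem() to avoid issuing I/O for data that is already resident.
|      |  *
|      |  *	if (!inmem(vp, next_loffset))
|      |  *		(issue an asynchronous read-ahead for next_loffset)
|      |  */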
| 2700 | |
| 2701 | /* |
| 2702 | * findblk: |
| 2703 | * |
| 2704 | * Locate and return the specified buffer. Unless flagged otherwise, |
| 2705 | * a locked buffer will be returned if it exists or NULL if it does not. |
| 2706 | * |
| 2707 | * findblk()'d buffers are still on the bufqueues and if you intend |
| 2708 | * to use your (locked NON-TEST) buffer you need to bremfree(bp) |
| 2709 | * and possibly do other stuff to it. |
| 2710 | * |
| 2711 | * FINDBLK_TEST - Do not lock the buffer. The caller is responsible |
| 2712 | * for locking the buffer and ensuring that it remains |
| 2713 | * the desired buffer after locking. |
| 2714 | * |
| 2715 | * FINDBLK_NBLOCK - Lock the buffer non-blocking. If we are unable |
| 2716 | * to acquire the lock we return NULL, even if the |
| 2717 | * buffer exists. |
| 2718 | * |
| 2719 | * FINDBLK_REF - Returns the buffer ref'd, which prevents normal |
| 2720 | * reuse by getnewbuf() but does not prevent |
| 2721 | * disassociation (B_INVAL). Used to avoid deadlocks |
| 2722 | * against random (vp,loffset)s due to reassignment. |
| 2723 | * |
| 2724 | * (0) - Lock the buffer blocking. |
| 2725 | * |
| 2726 | * MPSAFE |
| 2727 | */ |
| 2728 | struct buf * |
| 2729 | findblk(struct vnode *vp, off_t loffset, int flags) |
| 2730 | { |
| 2731 | struct buf *bp; |
| 2732 | int lkflags; |
| 2733 | |
| 2734 | lkflags = LK_EXCLUSIVE; |
| 2735 | if (flags & FINDBLK_NBLOCK) |
| 2736 | lkflags |= LK_NOWAIT; |
| 2737 | |
| 2738 | for (;;) { |
| 2739 | /* |
| 2740 | * Lookup. Ref the buf while holding v_token to prevent |
| 2741 | * reuse (this does not prevent disassociation).
| 2742 | */ |
| 2743 | lwkt_gettoken_shared(&vp->v_token); |
| 2744 | bp = buf_rb_hash_RB_LOOKUP(&vp->v_rbhash_tree, loffset); |
| 2745 | if (bp == NULL) { |
| 2746 | lwkt_reltoken(&vp->v_token); |
| 2747 | return(NULL); |
| 2748 | } |
| 2749 | bqhold(bp); |
| 2750 | lwkt_reltoken(&vp->v_token); |
| 2751 | |
| 2752 | /* |
| 2753 | * If testing only break and return bp, do not lock. |
| 2754 | */ |
| 2755 | if (flags & FINDBLK_TEST) |
| 2756 | break; |
| 2757 | |
| 2758 | /* |
| 2759 | * Lock the buffer, return an error if the lock fails. |
| 2760 | * (only FINDBLK_NBLOCK can cause the lock to fail). |
| 2761 | */ |
| 2762 | if (BUF_LOCK(bp, lkflags)) { |
| 2763 | atomic_subtract_int(&bp->b_refs, 1); |
| 2764 | /* bp = NULL; not needed */ |
| 2765 | return(NULL); |
| 2766 | } |
| 2767 | |
| 2768 | /* |
| 2769 | * Revalidate the locked buf before allowing it to be |
| 2770 | * returned. |
| 2771 | */ |
| 2772 | if (bp->b_vp == vp && bp->b_loffset == loffset) |
| 2773 | break; |
| 2774 | atomic_subtract_int(&bp->b_refs, 1); |
| 2775 | BUF_UNLOCK(bp); |
| 2776 | } |
| 2777 | |
| 2778 | /* |
| 2779 | * Success |
| 2780 | */ |
| 2781 | if ((flags & FINDBLK_REF) == 0) |
| 2782 | atomic_subtract_int(&bp->b_refs, 1); |
| 2783 | return(bp); |
| 2784 | } |
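|      | 
|      | /*
|      |  * Illustrative sketches of the findblk() modes documented above; the
|      |  * surrounding logic is hypothetical.
|      |  *
|      |  * Existence test only, the buffer is NOT locked:
|      |  *
|      |  *	if (findblk(vp, loffset, FINDBLK_TEST) != NULL)
|      |  *		(the block is at least partially instantiated)
|      |  *
|      |  * Blocking lookup; the buffer comes back locked but still on its
|      |  * queue, so it must be bremfree()'d before it is used:
|      |  *
|      |  *	bp = findblk(vp, loffset, 0);
|      |  *	if (bp) {
|      |  *		bremfree(bp);
|      |  *		(use or modify the buffer)
|      |  *		brelse(bp);
|      |  *	}
|      |  */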
| 2785 | |
| 2786 | /* |
| 2787 | * getcacheblk: |
| 2788 | * |
| 2789 | * Similar to getblk() except only returns the buffer if it is |
| 2790 | * B_CACHE and requires no other manipulation. Otherwise NULL |
| 2791 | * is returned. |
| 2792 | * |
| 2793 | * If B_RAM is set the buffer might be just fine, but we return |
| 2794 | * NULL anyway because we want the code to fall through to the |
| 2795 | * cluster read. Otherwise read-ahead breaks. |
| 2796 | * |
| 2797 | * If blksize is 0 the buffer cache buffer must already be fully |
| 2798 | * cached. |
| 2799 | * |
| 2800 | * If blksize is non-zero getblk() will be used, allowing a buffer |
| 2801 | * to be reinstantiated from its VM backing store. The buffer must |
| 2802 | * still be fully cached after reinstantiation to be returned. |
| 2803 | */ |
| 2804 | struct buf * |
| 2805 | getcacheblk(struct vnode *vp, off_t loffset, int blksize, int blkflags) |
| 2806 | { |
| 2807 | struct buf *bp; |
| 2808 | int fndflags = (blkflags & GETBLK_NOWAIT) ? FINDBLK_NBLOCK : 0; |
| 2809 | |
| 2810 | if (blksize) { |
| 2811 | bp = getblk(vp, loffset, blksize, blkflags, 0); |
| 2812 | if (bp) { |
| 2813 | if ((bp->b_flags & (B_INVAL | B_CACHE | B_RAM)) == |
| 2814 | B_CACHE) { |
| 2815 | bp->b_flags &= ~B_AGE; |
| 2816 | } else { |
| 2817 | brelse(bp); |
| 2818 | bp = NULL; |
| 2819 | } |
| 2820 | } |
| 2821 | } else { |
| 2822 | bp = findblk(vp, loffset, fndflags); |
| 2823 | if (bp) { |
| 2824 | if ((bp->b_flags & (B_INVAL | B_CACHE | B_RAM)) == |
| 2825 | B_CACHE) { |
| 2826 | bp->b_flags &= ~B_AGE; |
| 2827 | bremfree(bp); |
| 2828 | } else { |
| 2829 | BUF_UNLOCK(bp); |
| 2830 | bp = NULL; |
| 2831 | } |
| 2832 | } |
| 2833 | } |
| 2834 | return (bp); |
| 2835 | } |
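|      | 
|      | /*
|      |  * Illustrative sketch (hypothetical caller): try the cheap fully-cached
|      |  * path first and fall back to a normal read when it misses.
|      |  *
|      |  *	bp = getcacheblk(vp, loffset, blksize, 0);
|      |  *	if (bp == NULL) {
|      |  *		(fall back to bread()/cluster_read() to instantiate
|      |  *		 and validate the buffer)
|      |  *	}
|      |  */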
| 2836 | |
| 2837 | /* |
| 2838 | * getblk: |
| 2839 | * |
| 2840 | * Get a block given a specified block and offset into a file/device. |
| 2841 | * B_INVAL may or may not be set on return. The caller should clear |
| 2842 | * B_INVAL prior to initiating a READ. |
| 2843 | * |
| 2844 | * IT IS IMPORTANT TO UNDERSTAND THAT IF YOU CALL GETBLK() AND B_CACHE |
| 2845 | * IS NOT SET, YOU MUST INITIALIZE THE RETURNED BUFFER, ISSUE A READ, |
| 2846 | * OR SET B_INVAL BEFORE RETIRING IT. If you retire a getblk'd buffer |
| 2847 | * without doing any of those things the system will likely believe |
| 2848 | * the buffer to be valid (especially if it is not B_VMIO), and the |
| 2849 | * next getblk() will return the buffer with B_CACHE set. |
| 2850 | * |
| 2851 | * For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for |
| 2852 | * an existing buffer. |
| 2853 | * |
| 2854 | * For a VMIO buffer, B_CACHE is modified according to the backing VM. |
| 2855 | * If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set |
| 2856 | * and then cleared based on the backing VM. If the previous buffer is |
| 2857 | * non-0-sized but invalid, B_CACHE will be cleared. |
| 2858 | * |
| 2859 | * If getblk() must create a new buffer, the new buffer is returned with |
| 2860 | * both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which |
| 2861 | * case it is returned with B_INVAL clear and B_CACHE set based on the |
| 2862 | * backing VM. |
| 2863 | * |
| 2864 | * getblk() also forces a bwrite() for any B_DELWRI buffer whose
| 2865 | * B_CACHE bit is clear. |
| 2866 | * |
| 2867 | * What this means, basically, is that the caller should use B_CACHE to |
| 2868 | * determine whether the buffer is fully valid or not and should clear |
| 2869 | * B_INVAL prior to issuing a read. If the caller intends to validate |
| 2870 | * the buffer by loading its data area with something, the caller needs |
| 2871 | * to clear B_INVAL. If the caller does this without issuing an I/O, |
| 2872 | * the caller should set B_CACHE ( as an optimization ), else the caller |
| 2873 | * should issue the I/O and biodone() will set B_CACHE if the I/O was |
| 2874 | * a write attempt or if it was a successful read. If the caller
| 2875 | * intends to issue a READ, the caller must clear B_INVAL and B_ERROR |
| 2876 | * prior to issuing the READ. biodone() will *not* clear B_INVAL. |
| 2877 | * |
| 2878 | * getblk flags: |
| 2879 | * |
| 2880 | * GETBLK_PCATCH - catch signal if blocked, can cause NULL return |
| 2881 | * GETBLK_BHEAVY - heavy-weight buffer cache buffer |
| 2882 | * |
| 2883 | * MPALMOSTSAFE |
| 2884 | */ |
| 2885 | struct buf * |
| 2886 | getblk(struct vnode *vp, off_t loffset, int size, int blkflags, int slptimeo) |
| 2887 | { |
| 2888 | struct buf *bp; |
| 2889 | int slpflags = (blkflags & GETBLK_PCATCH) ? PCATCH : 0; |
| 2890 | int error; |
| 2891 | int lkflags; |
| 2892 | |
| 2893 | if (size > MAXBSIZE) |
| 2894 | panic("getblk: size(%d) > MAXBSIZE(%d)", size, MAXBSIZE); |
| 2895 | if (vp->v_object == NULL) |
| 2896 | panic("getblk: vnode %p has no object!", vp); |
| 2897 | |
| 2898 | loop: |
| 2899 | if ((bp = findblk(vp, loffset, FINDBLK_REF | FINDBLK_TEST)) != NULL) { |
| 2900 | /* |
| 2901 | * The buffer was found in the cache, but we need to lock it. |
| 2902 | * We must acquire a ref on the bp to prevent reuse, but |
| 2903 | * this will not prevent disassociation (brelvp()) so we |
| 2904 | * must recheck (vp,loffset) after acquiring the lock. |
| 2905 | * |
| 2906 | * Without the ref the buffer could potentially be reused |
| 2907 | * before we acquire the lock and create a deadlock |
| 2908 | * situation between the thread trying to reuse the buffer |
| 2909 | * and us due to the fact that we would wind up blocking |
| 2910 | * on a random (vp,loffset). |
| 2911 | */ |
| 2912 | if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) { |
| 2913 | if (blkflags & GETBLK_NOWAIT) { |
| 2914 | bqdrop(bp); |
| 2915 | return(NULL); |
| 2916 | } |
| 2917 | lkflags = LK_EXCLUSIVE | LK_SLEEPFAIL; |
| 2918 | if (blkflags & GETBLK_PCATCH) |
| 2919 | lkflags |= LK_PCATCH; |
| 2920 | error = BUF_TIMELOCK(bp, lkflags, "getblk", slptimeo); |
| 2921 | if (error) { |
| 2922 | bqdrop(bp); |
| 2923 | if (error == ENOLCK) |
| 2924 | goto loop; |
| 2925 | return (NULL); |
| 2926 | } |
| 2927 | /* buffer may have changed on us */ |
| 2928 | } |
| 2929 | bqdrop(bp); |
| 2930 | |
| 2931 | /* |
| 2932 | * Once the buffer has been locked, make sure we didn't race |
| 2933 | * a buffer recycling operation. Buffers that are no longer hashed
| 2934 | * will have b_vp == NULL, so this takes care of that check |
| 2935 | * as well. |
| 2936 | */ |
| 2937 | if (bp->b_vp != vp || bp->b_loffset != loffset) { |
| 2938 | kprintf("Warning buffer %p (vp %p loffset %lld) " |
| 2939 | "was recycled\n", |
| 2940 | bp, vp, (long long)loffset); |
| 2941 | BUF_UNLOCK(bp); |
| 2942 | goto loop; |
| 2943 | } |
| 2944 | |
| 2945 | /* |
| 2946 | * If SZMATCH any pre-existing buffer must be of the requested |
| 2947 | * size or NULL is returned. The caller absolutely does not |
| 2948 | * want getblk() to bwrite() the buffer on a size mismatch. |
| 2949 | */ |
| 2950 | if ((blkflags & GETBLK_SZMATCH) && size != bp->b_bcount) { |
| 2951 | BUF_UNLOCK(bp); |
| 2952 | return(NULL); |
| 2953 | } |
| 2954 | |
| 2955 | /* |
| 2956 | * All vnode-based buffers must be backed by a VM object. |
| 2957 | */ |
| 2958 | KKASSERT(bp->b_flags & B_VMIO); |
| 2959 | KKASSERT(bp->b_cmd == BUF_CMD_DONE); |
| 2960 | bp->b_flags &= ~B_AGE; |
| 2961 | |
| 2962 | /* |
| 2963 | * Make sure that B_INVAL buffers do not have a cached |
| 2964 | * block number translation. |
| 2965 | */ |
| 2966 | if ((bp->b_flags & B_INVAL) && (bp->b_bio2.bio_offset != NOOFFSET)) { |
| 2967 | kprintf("Warning invalid buffer %p (vp %p loffset %lld)" |
| 2968 | " did not have cleared bio_offset cache\n", |
| 2969 | bp, vp, (long long)loffset); |
| 2970 | clearbiocache(&bp->b_bio2); |
| 2971 | } |
| 2972 | |
| 2973 | /* |
| 2974 | * The buffer is locked. B_CACHE is cleared if the buffer is |
| 2975 | * invalid. |
| 2976 | */ |
| 2977 | if (bp->b_flags & B_INVAL) |
| 2978 | bp->b_flags &= ~B_CACHE; |
| 2979 | bremfree(bp); |
| 2980 | |
| 2981 | /* |
| 2982 | * Any size inconsistency with a dirty buffer or a buffer
| 2983 | * with a softupdates dependency must be resolved. Resizing
| 2984 | * the buffer in such circumstances can lead to problems. |
| 2985 | * |
| 2986 | * Dirty or dependent buffers are written synchronously.
| 2987 | * Other types of buffers are simply released and |
| 2988 | * reconstituted as they may be backed by valid, dirty VM |
| 2989 | * pages (but not marked B_DELWRI). |
| 2990 | * |
| 2991 | * NFS NOTE: NFS buffers which straddle EOF are oddly-sized |
| 2992 | * and may be left over from a prior truncation (and thus |
| 2993 | * no longer represent the actual EOF point), so we |
| 2994 | * definitely do not want to B_NOCACHE the backing store. |
| 2995 | */ |
| 2996 | if (size != bp->b_bcount) { |
| 2997 | if (bp->b_flags & B_DELWRI) { |
| 2998 | bp->b_flags |= B_RELBUF; |
| 2999 | bwrite(bp); |
| 3000 | } else if (LIST_FIRST(&bp->b_dep)) { |
| 3001 | bp->b_flags |= B_RELBUF; |
| 3002 | bwrite(bp); |
| 3003 | } else { |
| 3004 | bp->b_flags |= B_RELBUF; |
| 3005 | brelse(bp); |
| 3006 | } |
| 3007 | goto loop; |
| 3008 | } |
| 3009 | KKASSERT(size <= bp->b_kvasize); |
| 3010 | KASSERT(bp->b_loffset != NOOFFSET, |
| 3011 | ("getblk: no buffer offset")); |
| 3012 | |
| 3013 | /* |
| 3014 | * A buffer with B_DELWRI set and B_CACHE clear must |
| 3015 | * be committed before we can return the buffer in |
| 3016 | * order to prevent the caller from issuing a read |
| 3017 | * ( due to B_CACHE not being set ) and overwriting |
| 3018 | * it. |
| 3019 | * |
| 3020 | * Most callers, including NFS and FFS, need this to |
| 3021 | * operate properly either because they assume they |
| 3022 | * can issue a read if B_CACHE is not set, or because |
| 3023 | * ( for example ) an uncached B_DELWRI might loop due |
| 3024 | * to softupdates re-dirtying the buffer. In the latter |
| 3025 | * case, B_CACHE is set after the first write completes, |
| 3026 | * preventing further loops. |
| 3027 | * |
| 3028 | * NOTE! b*write() sets B_CACHE. If we cleared B_CACHE |
| 3029 | * above while extending the buffer, we cannot allow the |
| 3030 | * buffer to remain with B_CACHE set after the write |
| 3031 | * completes or it will represent a corrupt state. To |
| 3032 | * deal with this we set B_NOCACHE to scrap the buffer |
| 3033 | * after the write. |
| 3034 | * |
| 3035 | * XXX Should this be B_RELBUF instead of B_NOCACHE? |
| 3036 | * I'm not even sure this state is still possible |
| 3037 | * now that getblk() writes out any dirty buffers |
| 3038 | * on size changes. |
| 3039 | * |
| 3040 | * We might be able to do something fancy, like setting |
| 3041 | * B_CACHE in bwrite() except if B_DELWRI is already set, |
| 3042 | * so the below call doesn't set B_CACHE, but that gets real |
| 3043 | * confusing. This is much easier. |
| 3044 | */ |
| 3045 | |
| 3046 | if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) { |
| 3047 | kprintf("getblk: Warning, bp %p loff=%jx DELWRI set " |
| 3048 | "and CACHE clear, b_flags %08x\n", |
| 3049 | bp, (intmax_t)bp->b_loffset, bp->b_flags); |
| 3050 | bp->b_flags |= B_NOCACHE; |
| 3051 | bwrite(bp); |
| 3052 | goto loop; |
| 3053 | } |
| 3054 | } else { |
| 3055 | /* |
| 3056 | * Buffer is not in-core, create new buffer. The buffer |
| 3057 | * returned by getnewbuf() is locked. Note that the returned |
| 3058 | * buffer is also considered valid (not marked B_INVAL). |
| 3059 | * |
| 3060 | * Calculating the offset for the I/O requires figuring out |
| 3061 | * the block size. We use DEV_BSIZE for VBLK or VCHR and |
| 3062 | * the mount's f_iosize otherwise. If the vnode does not |
| 3063 | * have an associated mount we assume that the passed size is |
| 3064 | * the block size. |
| 3065 | * |
| 3066 | * Note that vn_isdisk() cannot be used here since it may |
| 3067 | * return a failure for numerous reasons. Note that the |
| 3068 | * buffer size may be larger than the block size (the caller
| 3069 | * will use block numbers with the proper multiple). Beware |
| 3070 | * of using any v_* fields which are part of unions. In |
| 3071 | * particular, in DragonFly the mount point overloading |
| 3072 | * mechanism uses the namecache only and the underlying |
| 3073 | * directory vnode is not a special case. |
| 3074 | */ |
| 3075 | int bsize, maxsize; |
| 3076 | |
| 3077 | if (vp->v_type == VBLK || vp->v_type == VCHR) |
| 3078 | bsize = DEV_BSIZE; |
| 3079 | else if (vp->v_mount) |
| 3080 | bsize = vp->v_mount->mnt_stat.f_iosize; |
| 3081 | else |
| 3082 | bsize = size; |
| 3083 | |
| 3084 | maxsize = size + (loffset & PAGE_MASK); |
| 3085 | maxsize = imax(maxsize, bsize); |
| 3086 | |
| 3087 | bp = getnewbuf(blkflags, slptimeo, size, maxsize); |
| 3088 | if (bp == NULL) { |
| 3089 | if (slpflags || slptimeo) |
| 3090 | return NULL; |
| 3091 | goto loop; |
| 3092 | } |
| 3093 | |
| 3094 | /* |
| 3095 | * Atomically insert the buffer into the hash, so that it can |
| 3096 | * be found by findblk(). |
| 3097 | * |
| 3098 | * If bgetvp() returns non-zero a collision occurred, and the
| 3099 | * bp will not be associated with the vnode. |
| 3100 | * |
| 3101 | * Make sure the translation layer has been cleared. |
| 3102 | */ |
| 3103 | bp->b_loffset = loffset; |
| 3104 | bp->b_bio2.bio_offset = NOOFFSET; |
| 3105 | /* bp->b_bio2.bio_next = NULL; */ |
| 3106 | |
| 3107 | if (bgetvp(vp, bp, size)) { |
| 3108 | bp->b_flags |= B_INVAL; |
| 3109 | brelse(bp); |
| 3110 | goto loop; |
| 3111 | } |
| 3112 | |
| 3113 | /* |
| 3114 | * All vnode-based buffers must be backed by a VM object. |
| 3115 | */ |
| 3116 | KKASSERT(vp->v_object != NULL); |
| 3117 | bp->b_flags |= B_VMIO; |
| 3118 | KKASSERT(bp->b_cmd == BUF_CMD_DONE); |
| 3119 | |
| 3120 | allocbuf(bp, size); |
| 3121 | } |
| 3122 | KKASSERT(dsched_is_clear_buf_priv(bp)); |
| 3123 | return (bp); |
| 3124 | } |
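|      | 
|      | /*
|      |  * Illustrative sketch of the B_CACHE protocol documented above.  This
|      |  * is roughly what a bread()-style caller does; the strategy/biowait
|      |  * details are elided and the sketch is not meant to be authoritative.
|      |  *
|      |  *	bp = getblk(vp, loffset, size, 0, 0);
|      |  *	if ((bp->b_flags & B_CACHE) == 0) {
|      |  *		bp->b_flags &= ~(B_INVAL | B_ERROR);
|      |  *		bp->b_cmd = BUF_CMD_READ;
|      |  *		(issue the read via vn_strategy() and wait for it)
|      |  *	}
|      |  *	(use bp->b_data)
|      |  *	bqrelse(bp);
|      |  */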
| 3125 | |
| 3126 | /* |
| 3127 | * regetblk(bp) |
| 3128 | * |
| 3129 | * Reacquire a buffer that was previously released to the locked queue, |
| 3130 | * or reacquire a buffer which is interlocked by having bioops->io_deallocate |
| 3131 | * set B_LOCKED (which handles the acquisition race). |
| 3132 | * |
| 3133 | * To this end, either B_LOCKED must be set or the dependancy list must be |
| 3134 | * non-empty. |
| 3135 | * |
| 3136 | * MPSAFE |
| 3137 | */ |
| 3138 | void |
| 3139 | regetblk(struct buf *bp) |
| 3140 | { |
| 3141 | KKASSERT((bp->b_flags & B_LOCKED) || LIST_FIRST(&bp->b_dep) != NULL); |
| 3142 | BUF_LOCK(bp, LK_EXCLUSIVE | LK_RETRY); |
| 3143 | bremfree(bp); |
| 3144 | } |
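|      | 
|      | /*
|      |  * Illustrative sketch (hypothetical caller): park a buffer on the
|      |  * locked queue and pick it up again later.
|      |  *
|      |  *	bp->b_flags |= B_LOCKED;
|      |  *	bqrelse(bp);			(buffer goes to BQUEUE_LOCKED)
|      |  *	...
|      |  *	regetblk(bp);			(relock and remove from the queue)
|      |  *	bp->b_flags &= ~B_LOCKED;
|      |  *	brelse(bp);
|      |  */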
| 3145 | |
| 3146 | /* |
| 3147 | * geteblk: |
| 3148 | * |
| 3149 | * Get an empty, disassociated buffer of given size. The buffer is |
| 3150 | * initially set to B_INVAL. |
| 3151 | * |
| 3152 | * critical section protection is not required for the allocbuf() |
| 3153 | * call because races are impossible here. |
| 3154 | * |
| 3155 | * MPALMOSTSAFE |
| 3156 | */ |
| 3157 | struct buf * |
| 3158 | geteblk(int size) |
| 3159 | { |
| 3160 | struct buf *bp; |
| 3161 | int maxsize; |
| 3162 | |
| 3163 | maxsize = (size + BKVAMASK) & ~BKVAMASK; |
| 3164 | |
| 3165 | while ((bp = getnewbuf(0, 0, size, maxsize)) == NULL) |
| 3166 | ; |
| 3167 | allocbuf(bp, size); |
| 3168 | bp->b_flags |= B_INVAL; /* b_dep cleared by getnewbuf() */ |
| 3169 | KKASSERT(dsched_is_clear_buf_priv(bp)); |
| 3170 | return (bp); |
| 3171 | } |
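| | 
| | /*
| | * Illustrative geteblk() usage (hypothetical caller sketch): the returned
| | * buffer is exclusively locked, disassociated and marked B_INVAL, so it can
| | * simply be filled and released when done:
| | *
| | *	bp = geteblk(8192);
| | *	bzero(bp->b_data, bp->b_bcount);
| | *	// ... fill bp->b_data as desired ...
| | *	brelse(bp);
| | */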
| 3172 | |
| 3173 | |
| 3174 | /* |
| 3175 | * allocbuf: |
| 3176 | * |
| 3177 | * This code constructs the buffer's memory from either anonymous system
| 3178 | * memory (in the case of non-VMIO operations) or from an associated |
| 3179 | * VM object (in the case of VMIO operations). This code is able to |
| 3180 | * resize a buffer up or down. |
| 3181 | * |
| 3182 | * Note that this code is tricky, and has many complications to resolve |
| 3183 | * deadlock or inconsistent data situations. Tread lightly!!!
| 3184 | * There are B_CACHE and B_DELWRI interactions that must be dealt with by |
| 3185 | * the caller. Calling this code willy nilly can result in the loss of |
| 3186 | * data. |
| 3187 | * |
| 3188 | * allocbuf() only adjusts B_CACHE for VMIO buffers. getblk() deals with |
| 3189 | * B_CACHE for the non-VMIO case. |
| 3190 | * |
| 3191 | * This routine does not need to be called from a critical section but you |
| 3192 | * must own the buffer. |
| 3193 | * |
| 3194 | * MPSAFE |
| 3195 | */ |
| 3196 | int |
| 3197 | allocbuf(struct buf *bp, int size) |
| 3198 | { |
| 3199 | int newbsize, mbsize; |
| 3200 | int i; |
| 3201 | |
| 3202 | if (BUF_REFCNT(bp) == 0) |
| 3203 | panic("allocbuf: buffer not busy"); |
| 3204 | |
| 3205 | if (bp->b_kvasize < size) |
| 3206 | panic("allocbuf: buffer too small"); |
| 3207 | |
| 3208 | if ((bp->b_flags & B_VMIO) == 0) { |
| 3209 | caddr_t origbuf; |
| 3210 | int origbufsize; |
| 3211 | /* |
| 3212 | * Just get anonymous memory from the kernel. Don't |
| 3213 | * mess with B_CACHE. |
| 3214 | */ |
| 3215 | mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); |
| 3216 | if (bp->b_flags & B_MALLOC) |
| 3217 | newbsize = mbsize; |
| 3218 | else |
| 3219 | newbsize = round_page(size); |
| 3220 | |
| 3221 | if (newbsize < bp->b_bufsize) { |
| 3222 | /* |
| 3223 | * Malloced buffers are not shrunk |
| 3224 | */ |
| 3225 | if (bp->b_flags & B_MALLOC) { |
| 3226 | if (newbsize) { |
| 3227 | bp->b_bcount = size; |
| 3228 | } else { |
| 3229 | kfree(bp->b_data, M_BIOBUF); |
| 3230 | if (bp->b_bufsize) { |
| 3231 | atomic_subtract_long(&bufmallocspace, bp->b_bufsize); |
| 3232 | bufspacewakeup(); |
| 3233 | bp->b_bufsize = 0; |
| 3234 | } |
| 3235 | bp->b_data = bp->b_kvabase; |
| 3236 | bp->b_bcount = 0; |
| 3237 | bp->b_flags &= ~B_MALLOC; |
| 3238 | } |
| 3239 | return 1; |
| 3240 | } |
| 3241 | vm_hold_free_pages( |
| 3242 | bp, |
| 3243 | (vm_offset_t) bp->b_data + newbsize, |
| 3244 | (vm_offset_t) bp->b_data + bp->b_bufsize); |
| 3245 | } else if (newbsize > bp->b_bufsize) { |
| 3246 | /* |
| 3247 | * We only use malloced memory on the first allocation,
| 3248 | * and revert to page-allocated memory when the buffer |
| 3249 | * grows. |
| 3250 | */ |
| 3251 | if ((bufmallocspace < maxbufmallocspace) && |
| 3252 | (bp->b_bufsize == 0) && |
| 3253 | (mbsize <= PAGE_SIZE/2)) { |
| 3254 | |
| 3255 | bp->b_data = kmalloc(mbsize, M_BIOBUF, M_WAITOK); |
| 3256 | bp->b_bufsize = mbsize; |
| 3257 | bp->b_bcount = size; |
| 3258 | bp->b_flags |= B_MALLOC; |
| 3259 | atomic_add_long(&bufmallocspace, mbsize); |
| 3260 | return 1; |
| 3261 | } |
| 3262 | origbuf = NULL; |
| 3263 | origbufsize = 0; |
| 3264 | /* |
| 3265 | * If the buffer is growing on its other-than-first |
| 3266 | * allocation, then we revert to the page-allocation |
| 3267 | * scheme. |
| 3268 | */ |
| 3269 | if (bp->b_flags & B_MALLOC) { |
| 3270 | origbuf = bp->b_data; |
| 3271 | origbufsize = bp->b_bufsize; |
| 3272 | bp->b_data = bp->b_kvabase; |
| 3273 | if (bp->b_bufsize) { |
| 3274 | atomic_subtract_long(&bufmallocspace, |
| 3275 | bp->b_bufsize); |
| 3276 | bufspacewakeup(); |
| 3277 | bp->b_bufsize = 0; |
| 3278 | } |
| 3279 | bp->b_flags &= ~B_MALLOC; |
| 3280 | newbsize = round_page(newbsize); |
| 3281 | } |
| 3282 | vm_hold_load_pages( |
| 3283 | bp, |
| 3284 | (vm_offset_t) bp->b_data + bp->b_bufsize, |
| 3285 | (vm_offset_t) bp->b_data + newbsize); |
| 3286 | if (origbuf) { |
| 3287 | bcopy(origbuf, bp->b_data, origbufsize); |
| 3288 | kfree(origbuf, M_BIOBUF); |
| 3289 | } |
| 3290 | } |
| 3291 | } else { |
| 3292 | vm_page_t m; |
| 3293 | int desiredpages; |
| 3294 | |
| 3295 | newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); |
| 3296 | desiredpages = ((int)(bp->b_loffset & PAGE_MASK) + |
| 3297 | newbsize + PAGE_MASK) >> PAGE_SHIFT; |
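| | /*
| | * Worked example (hypothetical values, assuming 4K pages): sizing a
| | * buffer at loffset 0x1200 to 8192 bytes gives newbsize = 8192 with a
| | * 0x200 offset into the first page, so desiredpages =
| | * (0x200 + 8192 + 0xfff) >> 12 = 3 backing pages.
| | */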
| 3298 | KKASSERT(desiredpages <= XIO_INTERNAL_PAGES); |
| 3299 | |
| 3300 | if (bp->b_flags & B_MALLOC) |
| 3301 | panic("allocbuf: VMIO buffer can't be malloced"); |
| 3302 | /* |
| 3303 | * Set B_CACHE initially if buffer is 0 length or will become |
| 3304 | * 0-length. |
| 3305 | */ |
| 3306 | if (size == 0 || bp->b_bufsize == 0) |
| 3307 | bp->b_flags |= B_CACHE; |
| 3308 | |
| 3309 | if (newbsize < bp->b_bufsize) { |
| 3310 | /* |
| 3311 | * DEV_BSIZE aligned new buffer size is less than the
| 3312 | * DEV_BSIZE aligned existing buffer size. Figure out |
| 3313 | * if we have to remove any pages. |
| 3314 | */ |
| 3315 | if (desiredpages < bp->b_xio.xio_npages) { |
| 3316 | for (i = desiredpages; i < bp->b_xio.xio_npages; i++) { |
| 3317 | /* |
| 3318 | * the page is not freed here -- it |
| 3319 | * is the responsibility of |
| 3320 | * vnode_pager_setsize |
| 3321 | */ |
| 3322 | m = bp->b_xio.xio_pages[i]; |
| 3323 | KASSERT(m != bogus_page, |
| 3324 | ("allocbuf: bogus page found")); |
| 3325 | vm_page_busy_wait(m, TRUE, "biodep"); |
| 3326 | bp->b_xio.xio_pages[i] = NULL; |
| 3327 | vm_page_unwire(m, 0); |
| 3328 | vm_page_wakeup(m); |
| 3329 | } |
| 3330 | pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) + |
| 3331 | (desiredpages << PAGE_SHIFT), (bp->b_xio.xio_npages - desiredpages)); |
| 3332 | bp->b_xio.xio_npages = desiredpages; |
| 3333 | } |
| 3334 | } else if (size > bp->b_bcount) { |
| 3335 | /* |
| 3336 | * We are growing the buffer, possibly in a |
| 3337 | * byte-granular fashion. |
| 3338 | */ |
| 3339 | struct vnode *vp; |
| 3340 | vm_object_t obj; |
| 3341 | vm_offset_t toff; |
| 3342 | vm_offset_t tinc; |
| 3343 | |
| 3344 | /* |
| 3345 | * Step 1, bring in the VM pages from the object, |
| 3346 | * allocating them if necessary. We must clear |
| 3347 | * B_CACHE if these pages are not valid for the |
| 3348 | * range covered by the buffer. |
| 3349 | * |
| 3350 | * critical section protection is required to protect |
| 3351 | * against interrupts unbusying and freeing pages |
| 3352 | * between our vm_page_lookup() and our |
| 3353 | * busycheck/wiring call. |
| 3354 | */ |
| 3355 | vp = bp->b_vp; |
| 3356 | obj = vp->v_object; |
| 3357 | |
| 3358 | vm_object_hold(obj); |
| 3359 | while (bp->b_xio.xio_npages < desiredpages) { |
| 3360 | vm_page_t m; |
| 3361 | vm_pindex_t pi; |
| 3362 | int error; |
| 3363 | |
| 3364 | pi = OFF_TO_IDX(bp->b_loffset) + |
| 3365 | bp->b_xio.xio_npages; |
| 3366 | |
| 3367 | /* |
| 3368 | * Blocking on m->busy might lead to a |
| 3369 | * deadlock: |
| 3370 | * |
| 3371 | * vm_fault->getpages->cluster_read->allocbuf |
| 3372 | */ |
| 3373 | m = vm_page_lookup_busy_try(obj, pi, FALSE, |
| 3374 | &error); |
| 3375 | if (error) { |
| 3376 | vm_page_sleep_busy(m, FALSE, "pgtblk"); |
| 3377 | continue; |
| 3378 | } |
| 3379 | if (m == NULL) { |
| 3380 | /* |
| 3381 | * note: must allocate system pages |
| 3382 | * since blocking here could interfere
| 3383 | * with paging I/O, no matter which |
| 3384 | * process we are. |
| 3385 | */ |
| 3386 | m = bio_page_alloc(obj, pi, desiredpages - bp->b_xio.xio_npages); |
| 3387 | if (m) { |
| 3388 | vm_page_wire(m); |
| 3389 | vm_page_flag_clear(m, PG_ZERO); |
| 3390 | vm_page_wakeup(m); |
| 3391 | bp->b_flags &= ~B_CACHE; |
| 3392 | bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m; |
| 3393 | ++bp->b_xio.xio_npages; |
| 3394 | } |
| 3395 | continue; |
| 3396 | } |
| 3397 | |
| 3398 | /* |
| 3399 | * We found a page and were able to busy it. |
| 3400 | */ |
| 3401 | vm_page_flag_clear(m, PG_ZERO); |
| 3402 | vm_page_wire(m); |
| 3403 | vm_page_wakeup(m); |
| 3404 | bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m; |
| 3405 | ++bp->b_xio.xio_npages; |
| 3406 | if (bp->b_act_count < m->act_count) |
| 3407 | bp->b_act_count = m->act_count; |
| 3408 | } |
| 3409 | vm_object_drop(obj); |
| 3410 | |
| 3411 | /* |
| 3412 | * Step 2. We've loaded the pages into the buffer, |
| 3413 | * we have to figure out if we can still have B_CACHE |
| 3414 | * set. Note that B_CACHE is set according to the |
| 3415 | * byte-granular range ( bcount and size ), not the |
| 3416 | * aligned range ( newbsize ). |
| 3417 | * |
| 3418 | * The VM test is against m->valid, which is DEV_BSIZE |
| 3419 | * aligned. Needless to say, the validity of the data |
| 3420 | * needs to also be DEV_BSIZE aligned. Note that this |
| 3421 | * fails with NFS if the server or some other client |
| 3422 | * extends the file's EOF. If our buffer is resized, |
| 3423 | * B_CACHE may remain set! XXX |
| 3424 | */ |
| 3425 | |
| 3426 | toff = bp->b_bcount; |
| 3427 | tinc = PAGE_SIZE - ((bp->b_loffset + toff) & PAGE_MASK); |
| 3428 | |
| 3429 | while ((bp->b_flags & B_CACHE) && toff < size) { |
| 3430 | vm_pindex_t pi; |
| 3431 | |
| 3432 | if (tinc > (size - toff)) |
| 3433 | tinc = size - toff; |
| 3434 | |
| 3435 | pi = ((bp->b_loffset & PAGE_MASK) + toff) >> |
| 3436 | PAGE_SHIFT; |
| 3437 | |
| 3438 | vfs_buf_test_cache( |
| 3439 | bp, |
| 3440 | bp->b_loffset, |
| 3441 | toff, |
| 3442 | tinc, |
| 3443 | bp->b_xio.xio_pages[pi] |
| 3444 | ); |
| 3445 | toff += tinc; |
| 3446 | tinc = PAGE_SIZE; |
| 3447 | } |
| 3448 | |
| 3449 | /* |
| 3450 | * Step 3, fixup the KVM pmap. Remember that |
| 3451 | * bp->b_data is relative to bp->b_loffset, but |
| 3452 | * bp->b_loffset may be offset into the first page. |
| 3453 | */ |
| 3454 | |
| 3455 | bp->b_data = (caddr_t) |
| 3456 | trunc_page((vm_offset_t)bp->b_data); |
| 3457 | pmap_qenter( |
| 3458 | (vm_offset_t)bp->b_data, |
| 3459 | bp->b_xio.xio_pages, |
| 3460 | bp->b_xio.xio_npages |
| 3461 | ); |
| 3462 | bp->b_data = (caddr_t)((vm_offset_t)bp->b_data | |
| 3463 | (vm_offset_t)(bp->b_loffset & PAGE_MASK)); |
| 3464 | } |
| 3465 | } |
| 3466 | |
| 3467 | /* adjust space use on already-dirty buffer */ |
| 3468 | if (bp->b_flags & B_DELWRI) { |
| 3469 | spin_lock(&bufcspin); |
| 3470 | dirtybufspace += newbsize - bp->b_bufsize; |
| 3471 | if (bp->b_flags & B_HEAVY) |
| 3472 | dirtybufspacehw += newbsize - bp->b_bufsize; |
| 3473 | spin_unlock(&bufcspin); |
| 3474 | } |
| 3475 | if (newbsize < bp->b_bufsize) |
| 3476 | bufspacewakeup(); |
| 3477 | bp->b_bufsize = newbsize; /* actual buffer allocation */ |
| 3478 | bp->b_bcount = size; /* requested buffer size */ |
| 3479 | return 1; |
| 3480 | } |
| 3481 | |
| 3482 | /* |
| 3483 | * biowait: |
| 3484 | * |
| 3485 | * Wait for buffer I/O completion, returning error status. B_EINTR |
| 3486 | * is converted into an EINTR error but not cleared (since a chain |
| 3487 | * of biowait() calls may occur). |
| 3488 | * |
| 3489 | * On return bpdone() will have been called but the buffer will remain |
| 3490 | * locked and will not have been brelse()'d. |
| 3491 | * |
| 3492 | * NOTE! If a timeout is specified and ETIMEDOUT occurs the I/O is |
| 3493 | * likely still in progress on return. |
| 3494 | * |
| 3495 | * NOTE! This operation is on a BIO, not a BUF. |
| 3496 | * |
| 3497 | * NOTE! BIO_DONE is cleared by vn_strategy() |
| 3498 | * |
| 3499 | * MPSAFE |
| 3500 | */ |
| 3501 | static __inline int |
| 3502 | _biowait(struct bio *bio, const char *wmesg, int to) |
| 3503 | { |
| 3504 | struct buf *bp = bio->bio_buf; |
| 3505 | u_int32_t flags; |
| 3506 | u_int32_t nflags; |
| 3507 | int error; |
| 3508 | |
| 3509 | KKASSERT(bio == &bp->b_bio1); |
| 3510 | for (;;) { |
| 3511 | flags = bio->bio_flags; |
| 3512 | if (flags & BIO_DONE) |
| 3513 | break; |
| 3514 | nflags = flags | BIO_WANT; |
| 3515 | tsleep_interlock(bio, 0); |
| 3516 | if (atomic_cmpset_int(&bio->bio_flags, flags, nflags)) { |
| 3517 | if (wmesg) |
| 3518 | error = tsleep(bio, PINTERLOCKED, wmesg, to); |
| 3519 | else if (bp->b_cmd == BUF_CMD_READ) |
| 3520 | error = tsleep(bio, PINTERLOCKED, "biord", to); |
| 3521 | else |
| 3522 | error = tsleep(bio, PINTERLOCKED, "biowr", to); |
| 3523 | if (error) { |
| 3524 | kprintf("tsleep error biowait %d\n", error); |
| 3525 | return (error); |
| 3526 | } |
| 3527 | } |
| 3528 | } |
| 3529 | |
| 3530 | /* |
| 3531 | * Finish up. |
| 3532 | */ |
| 3533 | KKASSERT(bp->b_cmd == BUF_CMD_DONE); |
| 3534 | bio->bio_flags &= ~(BIO_DONE | BIO_SYNC); |
| 3535 | if (bp->b_flags & B_EINTR) |
| 3536 | return (EINTR); |
| 3537 | if (bp->b_flags & B_ERROR) |
| 3538 | return (bp->b_error ? bp->b_error : EIO); |
| 3539 | return (0); |
| 3540 | } |
| 3541 | |
| 3542 | int |
| 3543 | biowait(struct bio *bio, const char *wmesg) |
| 3544 | { |
| 3545 | return(_biowait(bio, wmesg, 0)); |
| 3546 | } |
| 3547 | |
| 3548 | int |
| 3549 | biowait_timeout(struct bio *bio, const char *wmesg, int to) |
| 3550 | { |
| 3551 | return(_biowait(bio, wmesg, to)); |
| 3552 | } |
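| | 
| | /*
| | * Illustrative synchronous read using vn_strategy()/biowait() (hypothetical
| | * caller sketch only; normal filesystem code goes through bread()):
| | *
| | *	bp = getblk(vp, loffset, size, 0, 0);
| | *	if ((bp->b_flags & B_CACHE) == 0) {
| | *		bp->b_cmd = BUF_CMD_READ;
| | *		bp->b_bio1.bio_done = biodone_sync;
| | *		bp->b_bio1.bio_flags |= BIO_SYNC;
| | *		vfs_busy_pages(vp, bp);
| | *		vn_strategy(vp, &bp->b_bio1);
| | *		error = biowait(&bp->b_bio1, "rdwait");
| | *	}
| | */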
| 3553 | |
| 3554 | /* |
| 3555 | * This associates a tracking count with an I/O. vn_strategy() and |
| 3556 | * dev_dstrategy() do this automatically but there are a few cases |
| 3557 | * where a vnode or device layer is bypassed when a block translation |
| 3558 | * is cached. In such cases bio_start_transaction() may be called on |
| 3559 | * the bypassed layers so the system gets an I/O in progress indication |
| 3560 | * for those higher layers. |
| 3561 | */ |
| 3562 | void |
| 3563 | bio_start_transaction(struct bio *bio, struct bio_track *track) |
| 3564 | { |
| 3565 | bio->bio_track = track; |
| 3566 | if (dsched_is_clear_buf_priv(bio->bio_buf)) |
| 3567 | dsched_new_buf(bio->bio_buf); |
| 3568 | bio_track_ref(track); |
| 3569 | } |
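| | 
| | /*
| | * Illustrative bypass sketch (hypothetical caller): a filesystem that has
| | * a cached block translation and issues the I/O directly on the underlying
| | * device can still account the transaction on the vnode's track:
| | *
| | *	bio_start_transaction(&bp->b_bio1, &vp->v_track_read);
| | *	// ... issue the I/O on the bypassed layer, e.g. via dev_dstrategy()
| | */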
| 3570 | |
| 3571 | /* |
| 3572 | * Initiate I/O on a vnode. |
| 3573 | * |
| 3574 | * SWAPCACHE OPERATION: |
| 3575 | * |
| 3576 | * Real buffer cache buffers have a non-NULL bp->b_vp. Unfortunately |
| 3577 | * devfs also uses b_vp for fake buffers so we also have to check |
| 3578 | * that B_PAGING is 0. In this case the passed 'vp' is probably the |
| 3579 | * underlying block device. The swap assignments are related to the |
| 3580 | * buffer cache buffer's b_vp, not the passed vp. |
| 3581 | * |
| 3582 | * The passed vp == bp->b_vp only in the case where the strategy call |
| 3583 | * is made on the vp itself for its own buffers (a regular file or |
| 3584 | * block device vp). The filesystem usually then re-calls vn_strategy() |
| 3585 | * after translating the request to an underlying device. |
| 3586 | * |
| 3587 | * Cluster buffers set B_CLUSTER and the passed vp is the vp of the |
| 3588 | * underlying buffer cache buffers. |
| 3589 | * |
| 3590 | * We can only deal with page-aligned buffers at the moment, because |
| 3591 | * we can't tell what the real dirty state of pages straddling a buffer
| 3592 | * is.
| 3593 | * |
| 3594 | * In order to call swap_pager_strategy() we must provide the VM object |
| 3595 | * and base offset for the underlying buffer cache pages so it can find |
| 3596 | * the swap blocks. |
| 3597 | */ |
| 3598 | void |
| 3599 | vn_strategy(struct vnode *vp, struct bio *bio) |
| 3600 | { |
| 3601 | struct bio_track *track; |
| 3602 | struct buf *bp = bio->bio_buf; |
| 3603 | |
| 3604 | KKASSERT(bp->b_cmd != BUF_CMD_DONE); |
| 3605 | |
| 3606 | /* |
| 3607 | * Set when an I/O is issued on the bp. Cleared by consumers |
| 3608 | * (e.g. HAMMER), allowing the consumer to determine if I/O had
| 3609 | * actually occurred. |
| 3610 | */ |
| 3611 | bp->b_flags |= B_IODEBUG; |
| 3612 | |
| 3613 | /* |
| 3614 | * Handle the swap cache intercept. |
| 3615 | */ |
| 3616 | if (vn_cache_strategy(vp, bio)) |
| 3617 | return; |
| 3618 | |
| 3619 | /* |
| 3620 | * Otherwise do the operation through the filesystem |
| 3621 | */ |
| 3622 | if (bp->b_cmd == BUF_CMD_READ) |
| 3623 | track = &vp->v_track_read; |
| 3624 | else |
| 3625 | track = &vp->v_track_write; |
| 3626 | KKASSERT((bio->bio_flags & BIO_DONE) == 0); |
| 3627 | bio->bio_track = track; |
| 3628 | if (dsched_is_clear_buf_priv(bio->bio_buf)) |
| 3629 | dsched_new_buf(bio->bio_buf); |
| 3630 | bio_track_ref(track); |
| 3631 | vop_strategy(*vp->v_ops, vp, bio); |
| 3632 | } |
| 3633 | |
| 3634 | static void vn_cache_strategy_callback(struct bio *bio); |
| 3635 | |
| 3636 | int |
| 3637 | vn_cache_strategy(struct vnode *vp, struct bio *bio) |
| 3638 | { |
| 3639 | struct buf *bp = bio->bio_buf; |
| 3640 | struct bio *nbio; |
| 3641 | vm_object_t object; |
| 3642 | vm_page_t m; |
| 3643 | int i; |
| 3644 | |
| 3645 | /* |
| 3646 | * Is this buffer cache buffer suitable for reading from |
| 3647 | * the swap cache? |
| 3648 | */ |
| 3649 | if (vm_swapcache_read_enable == 0 || |
| 3650 | bp->b_cmd != BUF_CMD_READ || |
| 3651 | ((bp->b_flags & B_CLUSTER) == 0 && |
| 3652 | (bp->b_vp == NULL || (bp->b_flags & B_PAGING))) || |
| 3653 | ((int)bp->b_loffset & PAGE_MASK) != 0 || |
| 3654 | (bp->b_bcount & PAGE_MASK) != 0) { |
| 3655 | return(0); |
| 3656 | } |
| 3657 | |
| 3658 | /* |
| 3659 | * Figure out the original VM object (it will match the underlying |
| 3660 | * VM pages). Note that swap cached data uses page indices relative |
| 3661 | * to that object, not relative to bio->bio_offset. |
| 3662 | */ |
| 3663 | if (bp->b_flags & B_CLUSTER) |
| 3664 | object = vp->v_object; |
| 3665 | else |
| 3666 | object = bp->b_vp->v_object; |
| 3667 | |
| 3668 | /* |
| 3669 | * In order to be able to use the swap cache all underlying VM |
| 3670 | * pages must be marked as such, and we can't have any bogus pages. |
| 3671 | */ |
| 3672 | for (i = 0; i < bp->b_xio.xio_npages; ++i) { |
| 3673 | m = bp->b_xio.xio_pages[i]; |
| 3674 | if ((m->flags & PG_SWAPPED) == 0) |
| 3675 | break; |
| 3676 | if (m == bogus_page) |
| 3677 | break; |
| 3678 | } |
| 3679 | |
| 3680 | /* |
| 3681 | * If we are good then issue the I/O using swap_pager_strategy(). |
| 3682 | * |
| 3683 | * We can only do this if the buffer actually supports object-backed |
| 3684 | * I/O. If it doesn't, npages will be 0.
| 3685 | */ |
| 3686 | if (i && i == bp->b_xio.xio_npages) { |
| 3687 | m = bp->b_xio.xio_pages[0]; |
| 3688 | nbio = push_bio(bio); |
| 3689 | nbio->bio_done = vn_cache_strategy_callback; |
| 3690 | nbio->bio_offset = ptoa(m->pindex); |
| 3691 | KKASSERT(m->object == object); |
| 3692 | swap_pager_strategy(object, nbio); |
| 3693 | return(1); |
| 3694 | } |
| 3695 | return(0); |
| 3696 | } |
| 3697 | |
| 3698 | /* |
| 3699 | * This is a bit of a hack but since the vn_cache_strategy() function can |
| 3700 | * override a VFS's strategy function we must make sure that the bio, which |
| 3701 | * is probably bio2, doesn't leak an unexpected offset value back to the |
| 3702 | * filesystem. The filesystem (e.g. UFS) might otherwise assume that the |
| 3703 | * bio went through its own file strategy function and the bio2 offset
| 3704 | * is a cached disk offset when, in fact, it isn't. |
| 3705 | */ |
| 3706 | static void |
| 3707 | vn_cache_strategy_callback(struct bio *bio) |
| 3708 | { |
| 3709 | bio->bio_offset = NOOFFSET; |
| 3710 | biodone(pop_bio(bio)); |
| 3711 | } |
| 3712 | |
| 3713 | /* |
| 3714 | * bpdone: |
| 3715 | * |
| 3716 | * Finish I/O on a buffer after all BIOs have been processed. |
| 3717 | * Called when the bio chain is exhausted or by biowait. If called |
| 3718 | * by biowait, elseit is typically 0. |
| 3719 | * |
| 3720 | * bpdone is also responsible for setting B_CACHE in a B_VMIO bp. |
| 3721 | * In a non-VMIO bp, B_CACHE will be set on the next getblk() |
| 3722 | * assuming B_INVAL is clear. |
| 3723 | * |
| 3724 | * For the VMIO case, we set B_CACHE if the op was a read and no |
| 3725 | * read error occured, or if the op was a write. B_CACHE is never |
| 3726 | * set if the buffer is invalid or otherwise uncacheable. |
| 3727 | * |
| 3728 | * bpdone does not mess with B_INVAL, allowing the I/O routine or the |
| 3729 | * initiator to leave B_INVAL set to brelse the buffer out of existence
| 3730 | * in the biodone routine. |
| 3731 | */ |
| 3732 | void |
| 3733 | bpdone(struct buf *bp, int elseit) |
| 3734 | { |
| 3735 | buf_cmd_t cmd; |
| 3736 | |
| 3737 | KASSERT(BUF_REFCNTNB(bp) > 0, |
| 3738 | ("biodone: bp %p not busy %d", bp, BUF_REFCNTNB(bp))); |
| 3739 | KASSERT(bp->b_cmd != BUF_CMD_DONE, |
| 3740 | ("biodone: bp %p already done!", bp)); |
| 3741 | |
| 3742 | /* |
| 3743 | * No more BIOs are left. All completion functions have been dealt |
| 3744 | * with, now we clean up the buffer. |
| 3745 | */ |
| 3746 | cmd = bp->b_cmd; |
| 3747 | bp->b_cmd = BUF_CMD_DONE; |
| 3748 | |
| 3749 | /* |
| 3750 | * Only reads and writes are processed past this point. |
| 3751 | */ |
| 3752 | if (cmd != BUF_CMD_READ && cmd != BUF_CMD_WRITE) { |
| 3753 | if (cmd == BUF_CMD_FREEBLKS) |
| 3754 | bp->b_flags |= B_NOCACHE; |
| 3755 | if (elseit) |
| 3756 | brelse(bp); |
| 3757 | return; |
| 3758 | } |
| 3759 | |
| 3760 | /* |
| 3761 | * Warning: softupdates may re-dirty the buffer, and HAMMER can do |
| 3762 | * a lot worse. XXX - move this above the clearing of b_cmd |
| 3763 | */ |
| 3764 | if (LIST_FIRST(&bp->b_dep) != NULL) |
| 3765 | buf_complete(bp); /* MPSAFE */ |
| 3766 | |
| 3767 | /* |
| 3768 | * A failed write must re-dirty the buffer unless B_INVAL |
| 3769 | * was set. Only applicable to normal buffers (with VPs). |
| 3770 | * vinum buffers may not have a vp. |
| 3771 | */ |
| 3772 | if (cmd == BUF_CMD_WRITE && |
| 3773 | (bp->b_flags & (B_ERROR | B_INVAL)) == B_ERROR) { |
| 3774 | bp->b_flags &= ~B_NOCACHE; |
| 3775 | if (bp->b_vp) |
| 3776 | bdirty(bp); |
| 3777 | } |
| 3778 | |
| 3779 | if (bp->b_flags & B_VMIO) { |
| 3780 | int i; |
| 3781 | vm_ooffset_t foff; |
| 3782 | vm_page_t m; |
| 3783 | vm_object_t obj; |
| 3784 | int iosize; |
| 3785 | struct vnode *vp = bp->b_vp; |
| 3786 | |
| 3787 | obj = vp->v_object; |
| 3788 | |
| 3789 | #if defined(VFS_BIO_DEBUG) |
| 3790 | if (vp->v_auxrefs == 0) |
| 3791 | panic("biodone: zero vnode hold count"); |
| 3792 | if ((vp->v_flag & VOBJBUF) == 0) |
| 3793 | panic("biodone: vnode is not setup for merged cache"); |
| 3794 | #endif |
| 3795 | |
| 3796 | foff = bp->b_loffset; |
| 3797 | KASSERT(foff != NOOFFSET, ("biodone: no buffer offset")); |
| 3798 | KASSERT(obj != NULL, ("biodone: missing VM object")); |
| 3799 | |
| 3800 | #if defined(VFS_BIO_DEBUG) |
| 3801 | if (obj->paging_in_progress < bp->b_xio.xio_npages) { |
| 3802 | kprintf("biodone: paging in progress(%d) < " |
| 3803 | "bp->b_xio.xio_npages(%d)\n", |
| 3804 | obj->paging_in_progress, |
| 3805 | bp->b_xio.xio_npages); |
| 3806 | } |
| 3807 | #endif |
| 3808 | |
| 3809 | /* |
| 3810 | * Set B_CACHE if the op was a normal read and no error |
| 3811 | * occurred. B_CACHE is set for writes in the b*write()
| 3812 | * routines. |
| 3813 | */ |
| 3814 | iosize = bp->b_bcount - bp->b_resid; |
| 3815 | if (cmd == BUF_CMD_READ && |
| 3816 | (bp->b_flags & (B_INVAL|B_NOCACHE|B_ERROR)) == 0) { |
| 3817 | bp->b_flags |= B_CACHE; |
| 3818 | } |
| 3819 | |
| 3820 | vm_object_hold(obj); |
| 3821 | for (i = 0; i < bp->b_xio.xio_npages; i++) { |
| 3822 | int bogusflag = 0; |
| 3823 | int resid; |
| 3824 | |
| 3825 | resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff; |
| 3826 | if (resid > iosize) |
| 3827 | resid = iosize; |
| 3828 | |
| 3829 | /* |
| 3830 | * cleanup bogus pages, restoring the originals. Since |
| 3831 | * the originals should still be wired, we don't have |
| 3832 | * to worry about interrupt/freeing races destroying |
| 3833 | * the VM object association. |
| 3834 | */ |
| 3835 | m = bp->b_xio.xio_pages[i]; |
| 3836 | if (m == bogus_page) { |
| 3837 | bogusflag = 1; |
| 3838 | m = vm_page_lookup(obj, OFF_TO_IDX(foff)); |
| 3839 | if (m == NULL) |
| 3840 | panic("biodone: page disappeared"); |
| 3841 | bp->b_xio.xio_pages[i] = m; |
| 3842 | pmap_qenter(trunc_page((vm_offset_t)bp->b_data), |
| 3843 | bp->b_xio.xio_pages, bp->b_xio.xio_npages); |
| 3844 | } |
| 3845 | #if defined(VFS_BIO_DEBUG) |
| 3846 | if (OFF_TO_IDX(foff) != m->pindex) { |
| 3847 | kprintf("biodone: foff(%lu)/m->pindex(%ld) " |
| 3848 | "mismatch\n", |
| 3849 | (unsigned long)foff, (long)m->pindex); |
| 3850 | } |
| 3851 | #endif |
| 3852 | |
| 3853 | /* |
| 3854 | * In the write case, the valid and clean bits are |
| 3855 | * already changed correctly (see bdwrite()), so we |
| 3856 | * only need to do this here in the read case. |
| 3857 | */ |
| 3858 | vm_page_busy_wait(m, FALSE, "bpdpgw"); |
| 3859 | if (cmd == BUF_CMD_READ && !bogusflag && resid > 0) { |
| 3860 | vfs_clean_one_page(bp, i, m); |
| 3861 | } |
| 3862 | vm_page_flag_clear(m, PG_ZERO); |
| 3863 | |
| 3864 | /* |
| 3865 | * when debugging new filesystems or buffer I/O |
| 3866 | * methods, this is the most common error that pops |
| 3867 | * up. if you see this, you have not set the page |
| 3868 | * busy flag correctly!!! |
| 3869 | */ |
| 3870 | if (m->busy == 0) { |
| 3871 | kprintf("biodone: page busy < 0, " |
| 3872 | "pindex: %d, foff: 0x(%x,%x), " |
| 3873 | "resid: %d, index: %d\n", |
| 3874 | (int) m->pindex, (int)(foff >> 32), |
| 3875 | (int) foff & 0xffffffff, resid, i); |
| 3876 | if (!vn_isdisk(vp, NULL)) |
| 3877 | kprintf(" iosize: %ld, loffset: %lld, " |
| 3878 | "flags: 0x%08x, npages: %d\n", |
| 3879 | bp->b_vp->v_mount->mnt_stat.f_iosize, |
| 3880 | (long long)bp->b_loffset, |
| 3881 | bp->b_flags, bp->b_xio.xio_npages); |
| 3882 | else |
| 3883 | kprintf(" VDEV, loffset: %lld, flags: 0x%08x, npages: %d\n", |
| 3884 | (long long)bp->b_loffset, |
| 3885 | bp->b_flags, bp->b_xio.xio_npages); |
| 3886 | kprintf(" valid: 0x%x, dirty: 0x%x, " |
| 3887 | "wired: %d\n", |
| 3888 | m->valid, m->dirty, |
| 3889 | m->wire_count); |
| 3890 | panic("biodone: page busy < 0"); |
| 3891 | } |
| 3892 | vm_page_io_finish(m); |
| 3893 | vm_page_wakeup(m); |
| 3894 | vm_object_pip_wakeup(obj); |
| 3895 | foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK; |
| 3896 | iosize -= resid; |
| 3897 | } |
| 3898 | bp->b_flags &= ~B_HASBOGUS; |
| 3899 | vm_object_drop(obj); |
| 3900 | } |
| 3901 | |
| 3902 | /* |
| 3903 | * Finish up by releasing the buffer. There are no more synchronous |
| 3904 | * or asynchronous completions, those were handled by bio_done |
| 3905 | * callbacks. |
| 3906 | */ |
| 3907 | if (elseit) { |
| 3908 | if (bp->b_flags & (B_NOCACHE|B_INVAL|B_ERROR|B_RELBUF)) |
| 3909 | brelse(bp); |
| 3910 | else |
| 3911 | bqrelse(bp); |
| 3912 | } |
| 3913 | } |
| 3914 | |
| 3915 | /* |
| 3916 | * Normal biodone. |
| 3917 | */ |
| 3918 | void |
| 3919 | biodone(struct bio *bio) |
| 3920 | { |
| 3921 | struct buf *bp = bio->bio_buf; |
| 3922 | |
| 3923 | runningbufwakeup(bp); |
| 3924 | |
| 3925 | /* |
| 3926 | * Run up the chain of BIO's. Leave b_cmd intact for the duration. |
| 3927 | */ |
| 3928 | while (bio) { |
| 3929 | biodone_t *done_func; |
| 3930 | struct bio_track *track; |
| 3931 | |
| 3932 | /* |
| 3933 | * BIO tracking. Most but not all BIOs are tracked. |
| 3934 | */ |
| 3935 | if ((track = bio->bio_track) != NULL) { |
| 3936 | bio_track_rel(track); |
| 3937 | bio->bio_track = NULL; |
| 3938 | } |
| 3939 | |
| 3940 | /* |
| 3941 | * A bio_done function terminates the loop. The function |
| 3942 | * will be responsible for any further chaining and/or |
| 3943 | * buffer management. |
| 3944 | * |
| 3945 | * WARNING! The done function can deallocate the buffer! |
| 3946 | */ |
| 3947 | if ((done_func = bio->bio_done) != NULL) { |
| 3948 | bio->bio_done = NULL; |
| 3949 | done_func(bio); |
| 3950 | return; |
| 3951 | } |
| 3952 | bio = bio->bio_prev; |
| 3953 | } |
| 3954 | |
| 3955 | /* |
| 3956 | * If we've run out of bio's do normal [a]synchronous completion. |
| 3957 | */ |
| 3958 | bpdone(bp, 1); |
| 3959 | } |
| 3960 | |
| 3961 | /* |
| 3962 | * Synchronous biodone - this terminates a synchronous BIO. |
| 3963 | * |
| 3964 | * bpdone() is called with elseit=FALSE, leaving the buffer completed |
| 3965 | * but still locked. The caller must brelse() the buffer after waiting |
| 3966 | * for completion. |
| 3967 | */ |
| 3968 | void |
| 3969 | biodone_sync(struct bio *bio) |
| 3970 | { |
| 3971 | struct buf *bp = bio->bio_buf; |
| 3972 | int flags; |
| 3973 | int nflags; |
| 3974 | |
| 3975 | KKASSERT(bio == &bp->b_bio1); |
| 3976 | bpdone(bp, 0); |
| 3977 | |
| 3978 | for (;;) { |
| 3979 | flags = bio->bio_flags; |
| 3980 | nflags = (flags | BIO_DONE) & ~BIO_WANT; |
| 3981 | |
| 3982 | if (atomic_cmpset_int(&bio->bio_flags, flags, nflags)) { |
| 3983 | if (flags & BIO_WANT) |
| 3984 | wakeup(bio); |
| 3985 | break; |
| 3986 | } |
| 3987 | } |
| 3988 | } |
| 3989 | |
| 3990 | /* |
| 3991 | * vfs_unbusy_pages: |
| 3992 | * |
| 3993 | * This routine is called in lieu of iodone in the case of |
| 3994 | * incomplete I/O. This keeps the busy status for pages |
| 3995 | * consistent.
| 3996 | */ |
| 3997 | void |
| 3998 | vfs_unbusy_pages(struct buf *bp) |
| 3999 | { |
| 4000 | int i; |
| 4001 | |
| 4002 | runningbufwakeup(bp); |
| 4003 | |
| 4004 | if (bp->b_flags & B_VMIO) { |
| 4005 | struct vnode *vp = bp->b_vp; |
| 4006 | vm_object_t obj; |
| 4007 | |
| 4008 | obj = vp->v_object; |
| 4009 | vm_object_hold(obj); |
| 4010 | |
| 4011 | for (i = 0; i < bp->b_xio.xio_npages; i++) { |
| 4012 | vm_page_t m = bp->b_xio.xio_pages[i]; |
| 4013 | |
| 4014 | /* |
| 4015 | * When restoring bogus changes the original pages |
| 4016 | * should still be wired, so we are in no danger of |
| 4017 | * losing the object association and do not particularly
| 4018 | * need critical section protection.
| 4019 | */ |
| 4020 | if (m == bogus_page) { |
| 4021 | m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_loffset) + i); |
| 4022 | if (!m) { |
| 4023 | panic("vfs_unbusy_pages: page missing"); |
| 4024 | } |
| 4025 | bp->b_xio.xio_pages[i] = m; |
| 4026 | pmap_qenter(trunc_page((vm_offset_t)bp->b_data), |
| 4027 | bp->b_xio.xio_pages, bp->b_xio.xio_npages); |
| 4028 | } |
| 4029 | vm_page_busy_wait(m, FALSE, "bpdpgw"); |
| 4030 | vm_page_flag_clear(m, PG_ZERO); |
| 4031 | vm_page_io_finish(m); |
| 4032 | vm_page_wakeup(m); |
| 4033 | vm_object_pip_wakeup(obj); |
| 4034 | } |
| 4035 | bp->b_flags &= ~B_HASBOGUS; |
| 4036 | vm_object_drop(obj); |
| 4037 | } |
| 4038 | } |
| 4039 | |
| 4040 | /* |
| 4041 | * vfs_busy_pages: |
| 4042 | * |
| 4043 | * This routine is called before a device strategy routine. |
| 4044 | * It is used to tell the VM system that paging I/O is in |
| 4045 | * progress, and treat the pages associated with the buffer |
| 4046 | * almost as being PG_BUSY. Also the object 'paging_in_progress' |
| 4047 | * flag is handled to make sure that the object doesn't become |
| 4048 | * inconsistent.
| 4049 | * |
| 4050 | * Since I/O has not been initiated yet, certain buffer flags |
| 4051 | * such as B_ERROR or B_INVAL may be in an inconsistent state
| 4052 | * and should be ignored. |
| 4053 | * |
| 4054 | * MPSAFE |
| 4055 | */ |
| 4056 | void |
| 4057 | vfs_busy_pages(struct vnode *vp, struct buf *bp) |
| 4058 | { |
| 4059 | int i, bogus; |
| 4060 | struct lwp *lp = curthread->td_lwp; |
| 4061 | |
| 4062 | /* |
| 4063 | * The buffer's I/O command must already be set. If reading, |
| 4064 | * B_CACHE must be 0 (double check against callers only doing |
| 4065 | * I/O when B_CACHE is 0). |
| 4066 | */ |
| 4067 | KKASSERT(bp->b_cmd != BUF_CMD_DONE); |
| 4068 | KKASSERT(bp->b_cmd == BUF_CMD_WRITE || (bp->b_flags & B_CACHE) == 0); |
| 4069 | |
| 4070 | if (bp->b_flags & B_VMIO) { |
| 4071 | vm_object_t obj; |
| 4072 | |
| 4073 | obj = vp->v_object; |
| 4074 | KASSERT(bp->b_loffset != NOOFFSET, |
| 4075 | ("vfs_busy_pages: no buffer offset")); |
| 4076 | |
| 4077 | /* |
| 4078 | * Busy all the pages. We have to busy them all at once |
| 4079 | * to avoid deadlocks. |
| 4080 | */ |
| 4081 | retry: |
| 4082 | for (i = 0; i < bp->b_xio.xio_npages; i++) { |
| 4083 | vm_page_t m = bp->b_xio.xio_pages[i]; |
| 4084 | |
| 4085 | if (vm_page_busy_try(m, FALSE)) { |
| 4086 | vm_page_sleep_busy(m, FALSE, "vbpage"); |
| 4087 | while (--i >= 0) |
| 4088 | vm_page_wakeup(bp->b_xio.xio_pages[i]); |
| 4089 | goto retry; |
| 4090 | } |
| 4091 | } |
| 4092 | |
| 4093 | /* |
| 4094 | * Setup for I/O, soft-busy the page right now because |
| 4095 | * the next loop may block. |
| 4096 | */ |
| 4097 | for (i = 0; i < bp->b_xio.xio_npages; i++) { |
| 4098 | vm_page_t m = bp->b_xio.xio_pages[i]; |
| 4099 | |
| 4100 | vm_page_flag_clear(m, PG_ZERO); |
| 4101 | if ((bp->b_flags & B_CLUSTER) == 0) { |
| 4102 | vm_object_pip_add(obj, 1); |
| 4103 | vm_page_io_start(m); |
| 4104 | } |
| 4105 | } |
| 4106 | |
| 4107 | /* |
| 4108 | * Adjust protections for I/O and do bogus-page mapping. |
| 4109 | * Assume that vm_page_protect() can block (it can block |
| 4110 | * if VM_PROT_NONE, don't take any chances regardless). |
| 4111 | * |
| 4112 | * In particular note that for writes we must incorporate |
| 4113 | * page dirtiness from the VM system into the buffer's
| 4114 | * dirty range. |
| 4115 | * |
| 4116 | * For reads we theoretically must incorporate page dirtiness
| 4117 | * from the VM system to determine if the page needs bogus |
| 4118 | * replacement, but we shortcut the test by simply checking |
| 4119 | * that all m->valid bits are set, indicating that the page |
| 4120 | * is fully valid and does not need to be re-read. For any |
| 4121 | * VM system dirtiness the page will also be fully valid
| 4122 | * since it was mapped at one point. |
| 4123 | */ |
| 4124 | bogus = 0; |
| 4125 | for (i = 0; i < bp->b_xio.xio_npages; i++) { |
| 4126 | vm_page_t m = bp->b_xio.xio_pages[i]; |
| 4127 | |
| 4128 | vm_page_flag_clear(m, PG_ZERO); /* XXX */ |
| 4129 | if (bp->b_cmd == BUF_CMD_WRITE) { |
| 4130 | /* |
| 4131 | * When readying a vnode-backed buffer for |
| 4132 | * a write we must zero-fill any invalid |
| 4133 | * portions of the backing VM pages, mark |
| 4134 | * it valid and clear related dirty bits. |
| 4135 | * |
| 4136 | * vfs_clean_one_page() incorporates any |
| 4137 | * VM dirtyness and updates the b_dirtyoff |
| 4138 | * range (after we've made the page RO). |
| 4139 | * |
| 4140 | * It is also expected that the pmap modified |
| 4141 | * bit has already been cleared by the |
| 4142 | * vm_page_protect(). We may not be able |
| 4143 | * to clear all dirty bits for a page if it |
| 4144 | * was also memory mapped (NFS). |
| 4145 | * |
| 4146 | * Finally be sure to unassign any swap-cache |
| 4147 | * backing store as it is now stale. |
| 4148 | */ |
| 4149 | vm_page_protect(m, VM_PROT_READ); |
| 4150 | vfs_clean_one_page(bp, i, m); |
| 4151 | swap_pager_unswapped(m); |
| 4152 | } else if (m->valid == VM_PAGE_BITS_ALL) { |
| 4153 | /* |
| 4154 | * When readying a vnode-backed buffer for |
| 4155 | * read we must replace any dirty pages with |
| 4156 | * a bogus page so dirty data is not destroyed |
| 4157 | * when filling gaps. |
| 4158 | * |
| 4159 | * To avoid testing whether the page is |
| 4160 | * dirty we instead test that the page was |
| 4161 | * at some point mapped (m->valid fully |
| 4162 | * valid) with the understanding that |
| 4163 | * this also covers the dirty case. |
| 4164 | */ |
| 4165 | bp->b_xio.xio_pages[i] = bogus_page; |
| 4166 | bp->b_flags |= B_HASBOGUS; |
| 4167 | bogus++; |
| 4168 | } else if (m->valid & m->dirty) { |
| 4169 | /* |
| 4170 | * This case should not occur as partial |
| 4171 | * dirtiness can only happen if the buffer
| 4172 | * is B_CACHE, and this code is not entered |
| 4173 | * if the buffer is B_CACHE. |
| 4174 | */ |
| 4175 | kprintf("Warning: vfs_busy_pages - page not " |
| 4176 | "fully valid! loff=%jx bpf=%08x " |
| 4177 | "idx=%d val=%02x dir=%02x\n", |
| 4178 | (intmax_t)bp->b_loffset, bp->b_flags, |
| 4179 | i, m->valid, m->dirty); |
| 4180 | vm_page_protect(m, VM_PROT_NONE); |
| 4181 | } else { |
| 4182 | /* |
| 4183 | * The page is not valid and can be made |
| 4184 | * part of the read. |
| 4185 | */ |
| 4186 | vm_page_protect(m, VM_PROT_NONE); |
| 4187 | } |
| 4188 | vm_page_wakeup(m); |
| 4189 | } |
| 4190 | if (bogus) { |
| 4191 | pmap_qenter(trunc_page((vm_offset_t)bp->b_data), |
| 4192 | bp->b_xio.xio_pages, bp->b_xio.xio_npages); |
| 4193 | } |
| 4194 | } |
| 4195 | |
| 4196 | /* |
| 4197 | * This is the easiest place to put the process accounting for the I/O |
| 4198 | * for now. |
| 4199 | */ |
| 4200 | if (lp != NULL) { |
| 4201 | if (bp->b_cmd == BUF_CMD_READ) |
| 4202 | lp->lwp_ru.ru_inblock++; |
| 4203 | else |
| 4204 | lp->lwp_ru.ru_oublock++; |
| 4205 | } |
| 4206 | } |
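| | 
| | /*
| | * Typical call ordering for vfs_busy_pages() (illustrative sketch only,
| | * not taken from a specific caller): the command is set first, the pages
| | * are busied immediately before the strategy call, and vfs_unbusy_pages()
| | * is used in place of the normal completion path if the I/O cannot be
| | * initiated:
| | *
| | *	bp->b_cmd = BUF_CMD_WRITE;
| | *	vfs_busy_pages(vp, bp);
| | *	vn_strategy(vp, &bp->b_bio1);
| | */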
| 4207 | |
| 4208 | /* |
| 4209 | * Tell the VM system that the pages associated with this buffer |
| 4210 | * are clean. This is used for delayed writes where the data is |
| 4211 | * going to go to disk eventually without additional VM intervention.
| 4212 | * |
| 4213 | * NOTE: While we only really need to clean through to b_bcount, we |
| 4214 | * just go ahead and clean through to b_bufsize. |
| 4215 | */ |
| 4216 | static void |
| 4217 | vfs_clean_pages(struct buf *bp) |
| 4218 | { |
| 4219 | vm_page_t m; |
| 4220 | int i; |
| 4221 | |
| 4222 | if ((bp->b_flags & B_VMIO) == 0) |
| 4223 | return; |
| 4224 | |
| 4225 | KASSERT(bp->b_loffset != NOOFFSET, |
| 4226 | ("vfs_clean_pages: no buffer offset")); |
| 4227 | |
| 4228 | for (i = 0; i < bp->b_xio.xio_npages; i++) { |
| 4229 | m = bp->b_xio.xio_pages[i]; |
| 4230 | vfs_clean_one_page(bp, i, m); |
| 4231 | } |
| 4232 | } |
| 4233 | |
| 4234 | /* |
| 4235 | * vfs_clean_one_page: |
| 4236 | * |
| 4237 | * Set the valid bits and clear the dirty bits in a page within a |
| 4238 | * buffer. The range is restricted to the buffer's size and the |
| 4239 | * buffer's logical offset might index into the first page. |
| 4240 | * |
| 4241 | * The caller has busied or soft-busied the page and it is not mapped.
| 4242 | * Test and incorporate the dirty bits into b_dirtyoff/end before
| 4243 | * clearing them. Note that we need to clear the pmap modified bits
| 4244 | * after determining that the page was dirty; vm_page_set_validclean()
| 4245 | * does not do it for us.
| 4246 | * |
| 4247 | * This routine is typically called after a read completes (dirty should |
| 4248 | * be zero in that case as we are not called on bogus-replace pages), |
| 4249 | * or before a write is initiated. |
| 4250 | */ |
| 4251 | static void |
| 4252 | vfs_clean_one_page(struct buf *bp, int pageno, vm_page_t m) |
| 4253 | { |
| 4254 | int bcount; |
| 4255 | int xoff; |
| 4256 | int soff; |
| 4257 | int eoff; |
| 4258 | |
| 4259 | /* |
| 4260 | * Calculate offset range within the page but relative to buffer's |
| 4261 | * loffset. loffset might be offset into the first page. |
| 4262 | */ |
| 4263 | xoff = (int)bp->b_loffset & PAGE_MASK; /* loffset offset into pg 0 */ |
| 4264 | bcount = bp->b_bcount + xoff; /* offset adjusted */ |
| 4265 | |
| 4266 | if (pageno == 0) { |
| 4267 | soff = xoff; |
| 4268 | eoff = PAGE_SIZE; |
| 4269 | } else { |
| 4270 | soff = (pageno << PAGE_SHIFT); |
| 4271 | eoff = soff + PAGE_SIZE; |
| 4272 | } |
| 4273 | if (eoff > bcount) |
| 4274 | eoff = bcount; |
| 4275 | if (soff >= eoff) |
| 4276 | return; |
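| | /*
| | * Worked example (hypothetical values, 4K pages): for a buffer at
| | * loffset 0x1200 with b_bcount 8192, xoff = 0x200 and bcount = 0x2200.
| | * Page 0 then covers [0x200, 0x1000), page 1 [0x1000, 0x2000), and
| | * page 2 is clamped to [0x2000, 0x2200); subtracting xoff maps these
| | * ranges back into buffer-relative offsets for b_dirtyoff/end.
| | */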
| 4277 | |
| 4278 | /* |
| 4279 | * Test dirty bits and adjust b_dirtyoff/end. |
| 4280 | * |
| 4281 | * If dirty pages are incorporated into the bp any prior |
| 4282 | * B_NEEDCOMMIT state (NFS) must be cleared because the |
| 4283 | * caller has not taken into account the new dirty data. |
| 4284 | * |
| 4285 | * If the page was memory mapped the dirty bits might go beyond the |
| 4286 | * end of the buffer, but we can't really make the assumption that |
| 4287 | * a file EOF straddles the buffer (even though this is the case for |
| 4288 | * NFS if B_NEEDCOMMIT is also set). So for the purposes of clearing |
| 4289 | * B_NEEDCOMMIT we only test the dirty bits covered by the buffer. |
| 4290 | * This also saves some console spam. |
| 4291 | * |
| 4292 | * When clearing B_NEEDCOMMIT we must also clear B_CLUSTEROK, |
| 4293 | * NFS can handle huge commits but not huge writes. |
| 4294 | */ |
| 4295 | vm_page_test_dirty(m); |
| 4296 | if (m->dirty) { |
| 4297 | if ((bp->b_flags & B_NEEDCOMMIT) && |
| 4298 | (m->dirty & vm_page_bits(soff & PAGE_MASK, eoff - soff))) { |
| 4299 | if (debug_commit) |
| 4300 | kprintf("Warning: vfs_clean_one_page: bp %p " |
| 4301 | "loff=%jx,%d flgs=%08x clr B_NEEDCOMMIT" |
| 4302 | " cmd %d vd %02x/%02x x/s/e %d %d %d " |
| 4303 | "doff/end %d %d\n", |
| 4304 | bp, (intmax_t)bp->b_loffset, bp->b_bcount, |
| 4305 | bp->b_flags, bp->b_cmd, |
| 4306 | m->valid, m->dirty, xoff, soff, eoff, |
| 4307 | bp->b_dirtyoff, bp->b_dirtyend); |
| 4308 | bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK); |
| 4309 | if (debug_commit) |
| 4310 | print_backtrace(-1); |
| 4311 | } |
| 4312 | /* |
| 4313 | * Only clear the pmap modified bits if ALL the dirty bits |
| 4314 | * are set, otherwise the system might mis-clear portions |
| 4315 | * of a page. |
| 4316 | */ |
| 4317 | if (m->dirty == VM_PAGE_BITS_ALL && |
| 4318 | (bp->b_flags & B_NEEDCOMMIT) == 0) { |
| 4319 | pmap_clear_modify(m); |
| 4320 | } |
| 4321 | if (bp->b_dirtyoff > soff - xoff) |
| 4322 | bp->b_dirtyoff = soff - xoff; |
| 4323 | if (bp->b_dirtyend < eoff - xoff) |
| 4324 | bp->b_dirtyend = eoff - xoff; |
| 4325 | } |
| 4326 | |
| 4327 | /* |
| 4328 | * Set related valid bits, clear related dirty bits. |
| 4329 | * Does not mess with the pmap modified bit. |
| 4330 | * |
| 4331 | * WARNING! We cannot just clear all of m->dirty here as the |
| 4332 | * buffer cache buffers may use a DEV_BSIZE'd aligned |
| 4333 | * block size, or have an odd size (e.g. NFS at file EOF). |
| 4334 | * The putpages code can clear m->dirty to 0. |
| 4335 | * |
| 4336 | * If a VOP_WRITE generates a buffer cache buffer which |
| 4337 | * covers the same space as mapped writable pages the |
| 4338 | * buffer flush might not be able to clear all the dirty |
| 4339 | * bits and still require a putpages from the VM system |
| 4340 | * to finish it off. |
| 4341 | * |
| 4342 | * WARNING! vm_page_set_validclean() currently assumes vm_token |
| 4343 | * is held. The page might not be busied (bdwrite() case). |
| 4344 | * XXX remove this comment once we've validated that this |
| 4345 | * is no longer an issue. |
| 4346 | */ |
| 4347 | vm_page_set_validclean(m, soff & PAGE_MASK, eoff - soff); |
| 4348 | } |
| 4349 | |
| 4350 | #if 0 |
| 4351 | /* |
| 4352 | * Similar to vfs_clean_one_page() but sets the bits to valid and dirty. |
| 4353 | * The page data is assumed to be valid (there is no zeroing here). |
| 4354 | */ |
| 4355 | static void |
| 4356 | vfs_dirty_one_page(struct buf *bp, int pageno, vm_page_t m) |
| 4357 | { |
| 4358 | int bcount; |
| 4359 | int xoff; |
| 4360 | int soff; |
| 4361 | int eoff; |
| 4362 | |
| 4363 | /* |
| 4364 | * Calculate offset range within the page but relative to buffer's |
| 4365 | * loffset. loffset might be offset into the first page. |
| 4366 | */ |
| 4367 | xoff = (int)bp->b_loffset & PAGE_MASK; /* loffset offset into pg 0 */ |
| 4368 | bcount = bp->b_bcount + xoff; /* offset adjusted */ |
| 4369 | |
| 4370 | if (pageno == 0) { |
| 4371 | soff = xoff; |
| 4372 | eoff = PAGE_SIZE; |
| 4373 | } else { |
| 4374 | soff = (pageno << PAGE_SHIFT); |
| 4375 | eoff = soff + PAGE_SIZE; |
| 4376 | } |
| 4377 | if (eoff > bcount) |
| 4378 | eoff = bcount; |
| 4379 | if (soff >= eoff) |
| 4380 | return; |
| 4381 | vm_page_set_validdirty(m, soff & PAGE_MASK, eoff - soff); |
| 4382 | } |
| 4383 | #endif |
| 4384 | |
| 4385 | /* |
| 4386 | * vfs_bio_clrbuf: |
| 4387 | * |
| 4388 | * Clear a buffer. This routine essentially fakes an I/O, so we need |
| 4389 | * to clear B_ERROR and B_INVAL. |
| 4390 | * |
| 4391 | * Note that while we only theoretically need to clear through b_bcount, |
| 4392 | * we go ahead and clear through b_bufsize. |
| 4393 | */ |
| 4394 | |
| 4395 | void |
| 4396 | vfs_bio_clrbuf(struct buf *bp) |
| 4397 | { |
| 4398 | int i, mask = 0; |
| 4399 | caddr_t sa, ea; |
| 4400 | if ((bp->b_flags & (B_VMIO | B_MALLOC)) == B_VMIO) { |
| 4401 | bp->b_flags &= ~(B_INVAL | B_EINTR | B_ERROR); |
| 4402 | if ((bp->b_xio.xio_npages == 1) && (bp->b_bufsize < PAGE_SIZE) && |
| 4403 | (bp->b_loffset & PAGE_MASK) == 0) { |
| 4404 | mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1; |
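| | /*
| | * e.g. (hypothetical) a 2048 byte buffer with a 512 byte
| | * DEV_BSIZE yields mask = 0x0f, covering the first four
| | * DEV_BSIZE chunks of the page's valid bits.
| | */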
| 4405 | if ((bp->b_xio.xio_pages[0]->valid & mask) == mask) { |
| 4406 | bp->b_resid = 0; |
| 4407 | return; |
| 4408 | } |
| 4409 | if (((bp->b_xio.xio_pages[0]->flags & PG_ZERO) == 0) && |
| 4410 | ((bp->b_xio.xio_pages[0]->valid & mask) == 0)) { |
| 4411 | bzero(bp->b_data, bp->b_bufsize); |
| 4412 | bp->b_xio.xio_pages[0]->valid |= mask; |
| 4413 | bp->b_resid = 0; |
| 4414 | return; |
| 4415 | } |
| 4416 | } |
| 4417 | sa = bp->b_data; |
| 4418 | for (i = 0; i < bp->b_xio.xio_npages; i++, sa = ea) {
| 4419 | int j = ((vm_offset_t)sa & PAGE_MASK) / DEV_BSIZE; |
| 4420 | ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE); |
| 4421 | ea = (caddr_t)(vm_offset_t)ulmin( |
| 4422 | (u_long)(vm_offset_t)ea, |
| 4423 | (u_long)(vm_offset_t)bp->b_data + bp->b_bufsize); |
| 4424 | mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j; |
| 4425 | if ((bp->b_xio.xio_pages[i]->valid & mask) == mask) |
| 4426 | continue; |
| 4427 | if ((bp->b_xio.xio_pages[i]->valid & mask) == 0) { |
| 4428 | if ((bp->b_xio.xio_pages[i]->flags & PG_ZERO) == 0) { |
| 4429 | bzero(sa, ea - sa); |
| 4430 | } |
| 4431 | } else { |
| 4432 | for (; sa < ea; sa += DEV_BSIZE, j++) { |
| 4433 | if (((bp->b_xio.xio_pages[i]->flags & PG_ZERO) == 0) && |
| 4434 | (bp->b_xio.xio_pages[i]->valid & (1<<j)) == 0) |
| 4435 | bzero(sa, DEV_BSIZE); |
| 4436 | } |
| 4437 | } |
| 4438 | bp->b_xio.xio_pages[i]->valid |= mask; |
| 4439 | vm_page_flag_clear(bp->b_xio.xio_pages[i], PG_ZERO); |
| 4440 | } |
| 4441 | bp->b_resid = 0; |
| 4442 | } else { |
| 4443 | clrbuf(bp); |
| 4444 | } |
| 4445 | } |
| 4446 | |
| 4447 | /* |
| 4448 | * vm_hold_load_pages: |
| 4449 | * |
| 4450 | * Load pages into the buffer's address space. The pages are |
| 4451 | * allocated from the kernel object in order to reduce interference |
| 4452 | * with any VM paging I/O activity. The range of loaded
| 4453 | * pages will be wired. |
| 4454 | * |
| 4455 | * If a page cannot be allocated, the 'pagedaemon' is woken up to |
| 4456 | * retrieve the full range (to - from) of pages. |
| 4457 | * |
| 4458 | * MPSAFE |
| 4459 | */ |
| 4460 | void |
| 4461 | vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to) |
| 4462 | { |
| 4463 | vm_offset_t pg; |
| 4464 | vm_page_t p; |
| 4465 | int index; |
| 4466 | |
| 4467 | to = round_page(to); |
| 4468 | from = round_page(from); |
| 4469 | index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT; |
| 4470 | |
| 4471 | pg = from; |
| 4472 | while (pg < to) { |
| 4473 | /* |
| 4474 | * Note: must allocate system pages since blocking here |
| 4475 | * could interfere with paging I/O, no matter which
| 4476 | * process we are. |
| 4477 | */ |
| 4478 | vm_object_hold(&kernel_object); |
| 4479 | p = bio_page_alloc(&kernel_object, pg >> PAGE_SHIFT, |
| 4480 | (vm_pindex_t)((to - pg) >> PAGE_SHIFT)); |
| 4481 | vm_object_drop(&kernel_object); |
| 4482 | if (p) { |
| 4483 | vm_page_wire(p); |
| 4484 | p->valid = VM_PAGE_BITS_ALL; |
| 4485 | vm_page_flag_clear(p, PG_ZERO); |
| 4486 | pmap_kenter(pg, VM_PAGE_TO_PHYS(p)); |
| 4487 | bp->b_xio.xio_pages[index] = p; |
| 4488 | vm_page_wakeup(p); |
| 4489 | |
| 4490 | pg += PAGE_SIZE; |
| 4491 | ++index; |
| 4492 | } |
| 4493 | } |
| 4494 | bp->b_xio.xio_npages = index; |
| 4495 | } |
| 4496 | |
| 4497 | /* |
| 4498 | * Allocate a page for a buffer cache buffer. |
| 4499 | * |
| 4500 | * If NULL is returned the caller is expected to retry (typically check if |
| 4501 | * the page already exists on retry before trying to allocate one). |
| 4502 | * |
| 4503 | * NOTE! Low-memory handling is dealt with in b[q]relse(), not here. This |
| 4504 | * function will use the system reserve with the hope that the page |
| 4505 | * allocations can be returned to PQ_CACHE/PQ_FREE when the caller |
| 4506 | * is done with the buffer. |
| 4507 | */ |
| 4508 | static |
| 4509 | vm_page_t |
| 4510 | bio_page_alloc(vm_object_t obj, vm_pindex_t pg, int deficit) |
| 4511 | { |
| 4512 | int vmflags = VM_ALLOC_NORMAL | VM_ALLOC_NULL_OK; |
| 4513 | vm_page_t p; |
| 4514 | |
| 4515 | ASSERT_LWKT_TOKEN_HELD(vm_object_token(obj)); |
| 4516 | |
| 4517 | /* |
| 4518 | * Try a normal allocation first. |
| 4519 | */ |
| 4520 | p = vm_page_alloc(obj, pg, vmflags); |
| 4521 | if (p) |
| 4522 | return(p); |
| 4523 | if (vm_page_lookup(obj, pg)) |
| 4524 | return(NULL); |
| 4525 | vm_pageout_deficit += deficit; |
| 4526 | |
| 4527 | /* |
| 4528 | * Try again, digging into the system reserve. |
| 4529 | * |
| 4530 | * Trying to recover pages from the buffer cache here can deadlock |
| 4531 | * against other threads trying to busy underlying pages so we |
| 4532 | * depend on the code in brelse() and bqrelse() to free/cache the |
| 4533 | * underlying buffer cache pages when memory is low. |
| 4534 | */ |
| 4535 | if (curthread->td_flags & TDF_SYSTHREAD) |
| 4536 | vmflags |= VM_ALLOC_SYSTEM | VM_ALLOC_INTERRUPT; |
| 4537 | else |
| 4538 | vmflags |= VM_ALLOC_SYSTEM; |
| 4539 | |
| 4540 | /*recoverbufpages();*/ |
| 4541 | p = vm_page_alloc(obj, pg, vmflags); |
| 4542 | if (p) |
| 4543 | return(p); |
| 4544 | if (vm_page_lookup(obj, pg)) |
| 4545 | return(NULL); |
| 4546 | |
| 4547 | /* |
| 4548 | * Wait for memory to free up and try again |
| 4549 | */ |
| 4550 | if (vm_page_count_severe()) |
| 4551 | ++lowmempgallocs; |
| 4552 | vm_wait(hz / 20 + 1); |
| 4553 | |
| 4554 | p = vm_page_alloc(obj, pg, vmflags); |
| 4555 | if (p) |
| 4556 | return(p); |
| 4557 | if (vm_page_lookup(obj, pg)) |
| 4558 | return(NULL); |
| 4559 | |
| 4560 | /* |
| 4561 | * Ok, now we are really in trouble. |
| 4562 | */ |
| 4563 | { |
| 4564 | static struct krate biokrate = { .freq = 1 }; |
| 4565 | krateprintf(&biokrate, |
| 4566 | "Warning: bio_page_alloc: memory exhausted " |
| 4567 | "during bufcache page allocation from %s\n", |
| 4568 | curthread->td_comm); |
| 4569 | } |
| 4570 | if (curthread->td_flags & TDF_SYSTHREAD) |
| 4571 | vm_wait(hz / 20 + 1); |
| 4572 | else |
| 4573 | vm_wait(hz / 2 + 1); |
| 4574 | return (NULL); |
| 4575 | } |
| 4576 | |
| 4577 | /* |
| 4578 | * vm_hold_free_pages: |
| 4579 | * |
| 4580 | * Return pages associated with the buffer back to the VM system. |
| 4581 | * |
| 4582 | * The range of pages underlying the buffer's address space will |
| 4583 | * be unmapped and un-wired. |
| 4584 | * |
| 4585 | * MPSAFE |
| 4586 | */ |
| 4587 | void |
| 4588 | vm_hold_free_pages(struct buf *bp, vm_offset_t from, vm_offset_t to) |
| 4589 | { |
| 4590 | vm_offset_t pg; |
| 4591 | vm_page_t p; |
| 4592 | int index, newnpages; |
| 4593 | |
| 4594 | from = round_page(from); |
| 4595 | to = round_page(to); |
| 4596 | index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT; |
| 4597 | newnpages = index; |
| 4598 | |
| 4599 | for (pg = from; pg < to; pg += PAGE_SIZE, index++) { |
| 4600 | p = bp->b_xio.xio_pages[index]; |
| 4601 | if (p && (index < bp->b_xio.xio_npages)) { |
| 4602 | if (p->busy) { |
| 4603 | kprintf("vm_hold_free_pages: doffset: %lld, " |
| 4604 | "loffset: %lld\n", |
| 4605 | (long long)bp->b_bio2.bio_offset, |
| 4606 | (long long)bp->b_loffset); |
| 4607 | } |
| 4608 | bp->b_xio.xio_pages[index] = NULL; |
| 4609 | pmap_kremove(pg); |
| 4610 | vm_page_busy_wait(p, FALSE, "vmhldpg"); |
| 4611 | vm_page_unwire(p, 0); |
| 4612 | vm_page_free(p); |
| 4613 | } |
| 4614 | } |
| 4615 | bp->b_xio.xio_npages = newnpages; |
| 4616 | } |
| 4617 | |
| 4618 | /* |
| 4619 | * vmapbuf: |
| 4620 | * |
| 4621 | * Map a user buffer into KVM via a pbuf. On return the buffer's |
| 4622 | * b_data, b_bufsize, and b_bcount will be set, and its XIO page array |
| 4623 | * initialized. |
| 4624 | */ |
| 4625 | int |
| 4626 | vmapbuf(struct buf *bp, caddr_t udata, int bytes) |
| 4627 | { |
| 4628 | caddr_t addr; |
| 4629 | vm_offset_t va; |
| 4630 | vm_page_t m; |
| 4631 | int vmprot; |
| 4632 | int error; |
| 4633 | int pidx; |
| 4634 | int i; |
| 4635 | |
| 4636 | /* |
| 4637 | * bp had better have a command and it better be a pbuf. |
| 4638 | */ |
| 4639 | KKASSERT(bp->b_cmd != BUF_CMD_DONE); |
| 4640 | KKASSERT(bp->b_flags & B_PAGING); |
| 4641 | KKASSERT(bp->b_kvabase); |
| 4642 | |
| 4643 | if (bytes < 0) |
| 4644 | return (-1); |
| 4645 | |
| 4646 | /* |
| 4647 | * Map the user data into KVM. Mappings have to be page-aligned. |
| 4648 | */ |
| 4649 | addr = (caddr_t)trunc_page((vm_offset_t)udata); |
| 4650 | pidx = 0; |
| 4651 | |
| 4652 | vmprot = VM_PROT_READ; |
| 4653 | if (bp->b_cmd == BUF_CMD_READ) |
| 4654 | vmprot |= VM_PROT_WRITE; |
| 4655 | |
| 4656 | while (addr < udata + bytes) { |
| 4657 | /* |
| 4658 | * Do the vm_fault if needed; do the copy-on-write thing |
| 4659 | * when reading stuff off device into memory. |
| 4660 | * |
| 4661 | * vm_fault_page*() returns a held VM page. |
| 4662 | */ |
| 4663 | va = (addr >= udata) ? (vm_offset_t)addr : (vm_offset_t)udata; |
| 4664 | va = trunc_page(va); |
| 4665 | |
| 4666 | m = vm_fault_page_quick(va, vmprot, &error); |
| 4667 | if (m == NULL) { |
| 4668 | for (i = 0; i < pidx; ++i) { |
| 4669 | vm_page_unhold(bp->b_xio.xio_pages[i]); |
| 4670 | bp->b_xio.xio_pages[i] = NULL; |
| 4671 | } |
| 4672 | return(-1); |
| 4673 | } |
| 4674 | bp->b_xio.xio_pages[pidx] = m; |
| 4675 | addr += PAGE_SIZE; |
| 4676 | ++pidx; |
| 4677 | } |
| 4678 | |
| 4679 | /* |
| 4680 | * Map the page array and set the buffer fields to point to |
| 4681 | * the mapped data buffer. |
| 4682 | */ |
| 4683 | if (pidx > btoc(MAXPHYS)) |
| 4684 | panic("vmapbuf: mapped more than MAXPHYS"); |
| 4685 | pmap_qenter((vm_offset_t)bp->b_kvabase, bp->b_xio.xio_pages, pidx); |
| 4686 | |
| 4687 | bp->b_xio.xio_npages = pidx; |
| 4688 | bp->b_data = bp->b_kvabase + ((int)(intptr_t)udata & PAGE_MASK); |
| 4689 | bp->b_bcount = bytes; |
| 4690 | bp->b_bufsize = bytes; |
| 4691 | return(0); |
| 4692 | } |
| 4693 | |
| 4694 | /* |
| 4695 | * vunmapbuf: |
| 4696 | * |
| 4697 | * Free the io map PTEs associated with this IO operation. |
| 4698 | * We also invalidate the TLB entries and restore the original b_addr. |
| 4699 | */ |
| 4700 | void |
| 4701 | vunmapbuf(struct buf *bp) |
| 4702 | { |
| 4703 | int pidx; |
| 4704 | int npages; |
| 4705 | |
| 4706 | KKASSERT(bp->b_flags & B_PAGING); |
| 4707 | |
| 4708 | npages = bp->b_xio.xio_npages; |
| 4709 | pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages); |
| 4710 | for (pidx = 0; pidx < npages; ++pidx) { |
| 4711 | vm_page_unhold(bp->b_xio.xio_pages[pidx]); |
| 4712 | bp->b_xio.xio_pages[pidx] = NULL; |
| 4713 | } |
| 4714 | bp->b_xio.xio_npages = 0; |
| 4715 | bp->b_data = bp->b_kvabase; |
| 4716 | } |
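| | 
| | /*
| | * Illustrative physio-style pairing of vmapbuf()/vunmapbuf() (hypothetical
| | * sketch; real callers add error handling and accounting):
| | *
| | *	bp = getpbuf(NULL);		// pbuf: B_PAGING set, KVA reserved
| | *	bp->b_cmd = BUF_CMD_READ;
| | *	if (vmapbuf(bp, udata, bytes) == 0) {
| | *		// ... issue the I/O (e.g. dev_dstrategy()) and biowait() ...
| | *		vunmapbuf(bp);
| | *	}
| | *	relpbuf(bp, NULL);
| | */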
| 4717 | |
| 4718 | /* |
| 4719 | * Scan all buffers in the system and issue the callback. |
| 4720 | */ |
| 4721 | int |
| 4722 | scan_all_buffers(int (*callback)(struct buf *, void *), void *info) |
| 4723 | { |
| 4724 | int count = 0; |
| 4725 | int error; |
| 4726 | int n; |
| 4727 | |
| 4728 | for (n = 0; n < nbuf; ++n) { |
| 4729 | if ((error = callback(&buf[n], info)) < 0) { |
| 4730 | count = error; |
| 4731 | break; |
| 4732 | } |
| 4733 | count += error; |
| 4734 | } |
| 4735 | return (count); |
| 4736 | } |
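| | 
| | /*
| | * Illustrative scan_all_buffers() callback (hypothetical helper shown only
| | * as a sketch): count delayed-write buffers. A negative return from the
| | * callback aborts the scan and is passed back to the caller.
| | *
| | *	static int
| | *	count_delwri(struct buf *bp, void *info __unused)
| | *	{
| | *		return ((bp->b_flags & B_DELWRI) ? 1 : 0);
| | *	}
| | *
| | *	n = scan_all_buffers(count_delwri, NULL);
| | */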
| 4737 | |
| 4738 | /* |
| 4739 | * nestiobuf_iodone: biodone callback for nested buffers; propagates
| 4740 | * completion to the master buffer.
| 4741 | */ |
| 4742 | static void |
| 4743 | nestiobuf_iodone(struct bio *bio) |
| 4744 | { |
| 4745 | struct bio *mbio; |
| 4746 | struct buf *mbp, *bp; |
| 4747 | struct devstat *stats; |
| 4748 | int error; |
| 4749 | int donebytes; |
| 4750 | |
| 4751 | bp = bio->bio_buf; |
| 4752 | mbio = bio->bio_caller_info1.ptr; |
| 4753 | stats = bio->bio_caller_info2.ptr; |
| 4754 | mbp = mbio->bio_buf; |
| 4755 | |
| 4756 | KKASSERT(bp->b_bcount <= bp->b_bufsize); |
| 4757 | KKASSERT(mbp != bp); |
| 4758 | |
| 4759 | error = bp->b_error; |
| 4760 | if (bp->b_error == 0 && |
| 4761 | (bp->b_bcount < bp->b_bufsize || bp->b_resid > 0)) { |
| 4762 | /* |
| 4763 | * Not all of the data was transferred, raise an error.  We have no
| 4764 | * other way to propagate these conditions to mbp.
| 4765 | */ |
| 4766 | error = EIO; |
| 4767 | } |
| 4768 | |
| 4769 | donebytes = bp->b_bufsize; |
| 4770 | |
| 4771 | relpbuf(bp, NULL); |
| 4772 | |
| 4773 | nestiobuf_done(mbio, donebytes, error, stats); |
| 4774 | } |
| 4775 | |
| 4776 | void |
| 4777 | nestiobuf_done(struct bio *mbio, int donebytes, int error, struct devstat *stats) |
| 4778 | { |
| 4779 | struct buf *mbp; |
| 4780 | |
| 4781 | mbp = mbio->bio_buf; |
| 4782 | |
| 4783 | KKASSERT((int)(intptr_t)mbio->bio_driver_info > 0); |
| 4784 | |
| 4785 | /* |
| 4786 | * If an error occurred, propagate it to the master buffer.
| 4787 | * |
| 4788 | * Several biodone()s may wind up running concurrently so |
| 4789 | * use an atomic op to adjust b_flags. |
| 4790 | */ |
| 4791 | if (error) { |
| 4792 | mbp->b_error = error; |
| 4793 | atomic_set_int(&mbp->b_flags, B_ERROR); |
| 4794 | } |
| 4795 | |
| 4796 | /* |
| 4797 | * Decrement the operations in progress counter and terminate the |
| 4798 | * I/O if this was the last bit. |
| 4799 | */ |
| 4800 | if (atomic_fetchadd_int((int *)&mbio->bio_driver_info, -1) == 1) { |
| 4801 | mbp->b_resid = 0; |
| 4802 | if (stats) |
| 4803 | devstat_end_transaction_buf(stats, mbp); |
| 4804 | biodone(mbio); |
| 4805 | } |
| 4806 | } |
| 4807 | |
| 4808 | /* |
| 4809 | * Initialize a nestiobuf for use. Set an initial count of 1 to prevent |
| 4810 | * the mbio from being biodone()'d while we are still adding sub-bios to |
| 4811 | * it. |
| 4812 | */ |
| 4813 | void |
| 4814 | nestiobuf_init(struct bio *bio) |
| 4815 | { |
| 4816 | bio->bio_driver_info = (void *)1; |
| 4817 | } |
| 4818 | |
| 4819 | /* |
| 4820 | * The BIOs added to the nested io have already been started.  Remove
| 4821 | * the count that place-held our mbio and biodone() it if the count
| 4822 | * transitions to 0.
| 4823 | */ |
| 4824 | void |
| 4825 | nestiobuf_start(struct bio *mbio) |
| 4826 | { |
| 4827 | struct buf *mbp = mbio->bio_buf; |
| 4828 | |
| 4829 | /* |
| 4830 | * Decrement the operations in progress counter and terminate the |
| 4831 | * I/O if this was the last bit. |
| 4832 | */ |
| 4833 | if (atomic_fetchadd_int((int *)&mbio->bio_driver_info, -1) == 1) { |
| 4834 | if (mbp->b_flags & B_ERROR) |
| 4835 | mbp->b_resid = mbp->b_bcount; |
| 4836 | else |
| 4837 | mbp->b_resid = 0; |
| 4838 | biodone(mbio); |
| 4839 | } |
| 4840 | } |
| 4841 | |
| 4842 | /* |
| 4843 | * Set an intermediate error prior to calling nestiobuf_start() |
| 4844 | */ |
| 4845 | void |
| 4846 | nestiobuf_error(struct bio *mbio, int error) |
| 4847 | { |
| 4848 | struct buf *mbp = mbio->bio_buf; |
| 4849 | |
| 4850 | if (error) { |
| 4851 | mbp->b_error = error; |
| 4852 | atomic_set_int(&mbp->b_flags, B_ERROR); |
| 4853 | } |
| 4854 | } |
| 4855 | |
| 4856 | /* |
| 4857 | * nestiobuf_add: setup a "nested" buffer. |
| 4858 | * |
| 4859 | * => 'mbio' is the bio of the "master" buffer, which is being divided into sub pieces.
| 4860 | * => 'bp' should be a buffer allocated by getiobuf.
| 4861 | * => 'offset' is a byte offset in the master buffer.
| 4862 | * => 'size' is the size in bytes of this nested buffer.
| | * => 'stats' is an optional devstat, updated when the entire nested I/O completes.
| 4863 | */
| 4864 | void |
| 4865 | nestiobuf_add(struct bio *mbio, struct buf *bp, int offset, size_t size, struct devstat *stats) |
| 4866 | { |
| 4867 | struct buf *mbp = mbio->bio_buf; |
| 4868 | struct vnode *vp = mbp->b_vp; |
| 4869 | |
| 4870 | KKASSERT(mbp->b_bcount >= offset + size); |
| 4871 | |
| 4872 | atomic_add_int((int *)&mbio->bio_driver_info, 1); |
| 4873 | |
| 4874 | /* kernel needs to own the lock for it to be released in biodone */ |
| 4875 | BUF_KERNPROC(bp); |
| 4876 | bp->b_vp = vp; |
| 4877 | bp->b_cmd = mbp->b_cmd; |
| 4878 | bp->b_bio1.bio_done = nestiobuf_iodone; |
| 4879 | bp->b_data = (char *)mbp->b_data + offset; |
| 4880 | bp->b_resid = bp->b_bcount = size; |
| 4881 | bp->b_bufsize = bp->b_bcount; |
| 4882 | |
| 4883 | bp->b_bio1.bio_track = NULL; |
| 4884 | bp->b_bio1.bio_caller_info1.ptr = mbio; |
| 4885 | bp->b_bio1.bio_caller_info2.ptr = stats; |
| 4886 | } |
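| | 
| | /*
| | * Illustrative sketch of the usual nestiobuf calling sequence, splitting a
| | * master buffer into chunks.  Only the nestiobuf_*() functions are defined
| | * in this file; the sub-buffer allocator, the chunk size, and the
| | * vn_strategy() target below are assumptions made for the example.
| | *
| | *	nestiobuf_init(mbio);			// count = 1 place-holds mbio
| | *	for (offset = 0; offset < mbp->b_bcount; offset += chunk) {
| | *		chunk = mbp->b_bcount - offset;
| | *		if (chunk > MAXBSIZE)
| | *			chunk = MAXBSIZE;
| | *		bp = getpbuf(NULL);		// nestiobuf_iodone() relpbuf()s it
| | *		nestiobuf_add(mbio, bp, offset, chunk, NULL);
| | *		bp->b_bio1.bio_offset = mbio->bio_offset + offset;
| | *		vn_strategy(mbp->b_vp, &bp->b_bio1);
| | *	}
| | *	nestiobuf_start(mbio);			// drop the place-holding count
| | */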
| 4887 | |
| 4888 | /* |
| 4889 | * Print out statistics on the current state of the buffer pool.
| 4890 | * This output can be toggled with the sysctl debug.syncprt.
| 4891 | */ |
| 4892 | #ifdef DEBUG |
| 4893 | void |
| 4894 | vfs_bufstats(void) |
| 4895 | { |
| 4896 | int i, j, count; |
| 4897 | struct buf *bp; |
| 4898 | struct bqueues *dp; |
| 4899 | int counts[(MAXBSIZE / PAGE_SIZE) + 1]; |
| 4900 | static char *bname[BUFFER_QUEUES] = { "NONE", "LOCKED", "CLEAN", "DIRTY",
| | "DIRTY_HW", "EMPTYKVA", "EMPTY" };
| 4901 | 
| 4902 | for (dp = bufqueues, i = 0; dp < &bufqueues[BUFFER_QUEUES]; dp++, i++) {
| 4903 | count = 0; |
| 4904 | for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++) |
| 4905 | counts[j] = 0; |
| 4906 | |
| 4907 | spin_lock(&bufqspin); |
| 4908 | TAILQ_FOREACH(bp, dp, b_freelist) { |
| 4909 | if (bp->b_flags & B_MARKER) |
| 4910 | continue; |
| 4911 | counts[bp->b_bufsize/PAGE_SIZE]++; |
| 4912 | count++; |
| 4913 | } |
| 4914 | spin_unlock(&bufqspin); |
| 4915 | |
| 4916 | kprintf("%s: total-%d", bname[i], count); |
| 4917 | for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++) |
| 4918 | if (counts[j] != 0) |
| 4919 | kprintf(", %d-%d", j * PAGE_SIZE, counts[j]); |
| 4920 | kprintf("\n"); |
| 4921 | } |
| 4922 | } |
| 4923 | #endif |
| 4924 | |
| 4925 | #ifdef DDB |
| 4926 | |
| 4927 | DB_SHOW_COMMAND(buffer, db_show_buffer) |
| 4928 | { |
| 4929 | /* get args */ |
| 4930 | struct buf *bp = (struct buf *)addr; |
| 4931 | |
| 4932 | if (!have_addr) { |
| 4933 | db_printf("usage: show buffer <addr>\n"); |
| 4934 | return; |
| 4935 | } |
| 4936 | |
| 4937 | db_printf("b_flags = 0x%b\n", (u_int)bp->b_flags, PRINT_BUF_FLAGS); |
| 4938 | db_printf("b_cmd = %d\n", bp->b_cmd); |
| 4939 | db_printf("b_error = %d, b_bufsize = %d, b_bcount = %d, " |
| 4940 | "b_resid = %d\nb_data = %p, "
| 4941 | "bio_offset(disk) = %lld, bio_offset(phys) = %lld\n", |
| 4942 | bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid, |
| 4943 | bp->b_data, |
| 4944 | (long long)bp->b_bio2.bio_offset, |
| 4945 | (long long)(bp->b_bio2.bio_next ? |
| 4946 | bp->b_bio2.bio_next->bio_offset : (off_t)-1)); |
| 4947 | if (bp->b_xio.xio_npages) { |
| 4948 | int i; |
| 4949 | db_printf("b_xio.xio_npages = %d, pages(OBJ, IDX, PA): ", |
| 4950 | bp->b_xio.xio_npages); |
| 4951 | for (i = 0; i < bp->b_xio.xio_npages; i++) { |
| 4952 | vm_page_t m; |
| 4953 | m = bp->b_xio.xio_pages[i]; |
| 4954 | db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object, |
| 4955 | (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m)); |
| 4956 | if ((i + 1) < bp->b_xio.xio_npages) |
| 4957 | db_printf(","); |
| 4958 | } |
| 4959 | db_printf("\n"); |
| 4960 | } |
| 4961 | } |
| 4962 | #endif /* DDB */ |