kernel/ufs: Fix compilation without INVARIANTS.
[dragonfly.git] sys/vfs/ufs/ffs_softdep.c
/*
 * Copyright 1998, 2000 Marshall Kirk McKusick. All Rights Reserved.
 *
 * The soft updates code is derived from the appendix of a University
 * of Michigan technical report (Gregory R. Ganger and Yale N. Patt,
 * "Soft Updates: A Solution to the Metadata Update Problem in File
 * Systems", CSE-TR-254-95, August 1995).
 *
 * Further information about soft updates can be obtained from:
 *
 *	Marshall Kirk McKusick		http://www.mckusick.com/softdep/
 *	1614 Oxford Street		mckusick@mckusick.com
 *	Berkeley, CA 94709-1608		+1-510-843-9542
 *	USA
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY MARSHALL KIRK MCKUSICK ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL MARSHALL KIRK MCKUSICK BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)ffs_softdep.c	9.59 (McKusick) 6/21/00
 * $FreeBSD: src/sys/ufs/ffs/ffs_softdep.c,v 1.57.2.11 2002/02/05 18:46:53 dillon Exp $
 */

/*
 * For now we want the safety net that the DIAGNOSTIC and DEBUG flags provide.
 */
#ifndef DIAGNOSTIC
#define DIAGNOSTIC
#endif
#ifndef DEBUG
#define DEBUG
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/syslog.h>
#include <sys/vnode.h>
#include <sys/conf.h>
#include <machine/inttypes.h>
#include "dir.h"
#include "quota.h"
#include "inode.h"
#include "ufsmount.h"
#include "fs.h"
#include "softdep.h"
#include "ffs_extern.h"
#include "ufs_extern.h"

#include <sys/buf2.h>
#include <sys/mplock2.h>
#include <sys/thread2.h>
#include <sys/lock.h>

/*
 * These definitions need to be adapted to the system to which
 * this file is being ported.
 */
/*
 * malloc types defined for the softdep system.
 */
MALLOC_DEFINE(M_PAGEDEP, "pagedep", "File page dependencies");
MALLOC_DEFINE(M_INODEDEP, "inodedep", "Inode dependencies");
MALLOC_DEFINE(M_NEWBLK, "newblk", "New block allocation");
MALLOC_DEFINE(M_BMSAFEMAP, "bmsafemap", "Block or frag allocated from cyl group map");
MALLOC_DEFINE(M_ALLOCDIRECT, "allocdirect", "Block or frag dependency for an inode");
MALLOC_DEFINE(M_INDIRDEP, "indirdep", "Indirect block dependencies");
MALLOC_DEFINE(M_ALLOCINDIR, "allocindir", "Block dependency for an indirect block");
MALLOC_DEFINE(M_FREEFRAG, "freefrag", "Previously used frag for an inode");
MALLOC_DEFINE(M_FREEBLKS, "freeblks", "Blocks freed from an inode");
MALLOC_DEFINE(M_FREEFILE, "freefile", "Inode deallocated");
MALLOC_DEFINE(M_DIRADD, "diradd", "New directory entry");
MALLOC_DEFINE(M_MKDIR, "mkdir", "New directory");
MALLOC_DEFINE(M_DIRREM, "dirrem", "Directory entry deleted");

#define M_SOFTDEP_FLAGS		(M_WAITOK | M_USE_RESERVE)

#define	D_PAGEDEP	0
#define	D_INODEDEP	1
#define	D_NEWBLK	2
#define	D_BMSAFEMAP	3
#define	D_ALLOCDIRECT	4
#define	D_INDIRDEP	5
#define	D_ALLOCINDIR	6
#define	D_FREEFRAG	7
#define	D_FREEBLKS	8
#define	D_FREEFILE	9
#define	D_DIRADD	10
#define	D_MKDIR		11
#define	D_DIRREM	12
#define	D_LAST		D_DIRREM

/*
 * translate from workitem type to memory type
 * MUST match the defines above, such that memtype[D_XXX] == M_XXX
 */
static struct malloc_type *memtype[] = {
	M_PAGEDEP,
	M_INODEDEP,
	M_NEWBLK,
	M_BMSAFEMAP,
	M_ALLOCDIRECT,
	M_INDIRDEP,
	M_ALLOCINDIR,
	M_FREEFRAG,
	M_FREEBLKS,
	M_FREEFILE,
	M_DIRADD,
	M_MKDIR,
	M_DIRREM
};

#define DtoM(type) (memtype[type])

/*
 * Names of malloc types.  Note that D_LAST itself is a valid index,
 * so the bound check must be inclusive.
 */
#define TYPENAME(type)	\
	((unsigned)(type) <= D_LAST ? memtype[type]->ks_shortdesc : "???")
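
/*
 * Example: a D_PAGEDEP workitem is allocated from and freed back to
 * M_PAGEDEP, since DtoM(D_PAGEDEP) == M_PAGEDEP and TYPENAME(D_PAGEDEP)
 * evaluates to "pagedep".  workitem_free() below depends on this
 * correspondence when it kfree()s an item by its wk_type.
 */
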
/*
 * End system adaptation definitions.
 */

/*
 * Internal function prototypes.
 */
static	void softdep_error(char *, int);
static	void drain_output(struct vnode *, int);
static	int getdirtybuf(struct buf **, int);
static	void clear_remove(struct thread *);
static	void clear_inodedeps(struct thread *);
static	int flush_pagedep_deps(struct vnode *, struct mount *,
	    struct diraddhd *);
static	int flush_inodedep_deps(struct fs *, ino_t);
static	int handle_written_filepage(struct pagedep *, struct buf *);
static	void diradd_inode_written(struct diradd *, struct inodedep *);
static	int handle_written_inodeblock(struct inodedep *, struct buf *);
static	void handle_allocdirect_partdone(struct allocdirect *);
static	void handle_allocindir_partdone(struct allocindir *);
static	void initiate_write_filepage(struct pagedep *, struct buf *);
static	void handle_written_mkdir(struct mkdir *, int);
static	void initiate_write_inodeblock(struct inodedep *, struct buf *);
static	void handle_workitem_freefile(struct freefile *);
static	void handle_workitem_remove(struct dirrem *);
static	struct dirrem *newdirrem(struct buf *, struct inode *,
	    struct inode *, int, struct dirrem **);
static	void free_diradd(struct diradd *);
static	void free_allocindir(struct allocindir *, struct inodedep *);
static	int indir_trunc(struct inode *, off_t, int, ufs_lbn_t, long *);
static	void deallocate_dependencies(struct buf *, struct inodedep *);
static	void free_allocdirect(struct allocdirectlst *,
	    struct allocdirect *, int);
static	int check_inode_unwritten(struct inodedep *);
static	int free_inodedep(struct inodedep *);
static	void handle_workitem_freeblocks(struct freeblks *);
static	void merge_inode_lists(struct inodedep *);
static	void setup_allocindir_phase2(struct buf *, struct inode *,
	    struct allocindir *);
static	struct allocindir *newallocindir(struct inode *, int, ufs_daddr_t,
	    ufs_daddr_t);
static	void handle_workitem_freefrag(struct freefrag *);
static	struct freefrag *newfreefrag(struct inode *, ufs_daddr_t, long);
static	void allocdirect_merge(struct allocdirectlst *,
	    struct allocdirect *, struct allocdirect *);
static	struct bmsafemap *bmsafemap_lookup(struct buf *);
static	int newblk_lookup(struct fs *, ufs_daddr_t, int,
	    struct newblk **);
static	int inodedep_lookup(struct fs *, ino_t, int, struct inodedep **);
static	int pagedep_lookup(struct inode *, ufs_lbn_t, int,
	    struct pagedep **);
static	void pause_timer(void *);
static	int request_cleanup(int, int);
static	int process_worklist_item(struct mount *, int);
static	void add_to_worklist(struct worklist *);

/*
 * Exported softdep operations.
 */
static	void softdep_disk_io_initiation(struct buf *);
static	void softdep_disk_write_complete(struct buf *);
static	void softdep_deallocate_dependencies(struct buf *);
static	int softdep_fsync(struct vnode *);
static	int softdep_process_worklist(struct mount *);
static	void softdep_move_dependencies(struct buf *, struct buf *);
static	int softdep_count_dependencies(struct buf *bp, int);
static	int softdep_checkread(struct buf *bp);
static	int softdep_checkwrite(struct buf *bp);

static struct bio_ops softdep_bioops = {
	.io_start = softdep_disk_io_initiation,
	.io_complete = softdep_disk_write_complete,
	.io_deallocate = softdep_deallocate_dependencies,
	.io_fsync = softdep_fsync,
	.io_sync = softdep_process_worklist,
	.io_movedeps = softdep_move_dependencies,
	.io_countdeps = softdep_count_dependencies,
	.io_checkread = softdep_checkread,
	.io_checkwrite = softdep_checkwrite
};

/*
 * Locking primitives.
 */
static	void acquire_lock(struct lock *);
static	void free_lock(struct lock *);
#ifdef INVARIANTS
static	int lock_held(struct lock *);
#endif
static	int interlocked_sleep(struct lock *, void *, int,
	    const char *, int);

static struct lock lk;

#define ACQUIRE_LOCK(lkp)	acquire_lock(lkp)
#define FREE_LOCK(lkp)		free_lock(lkp)

static void
acquire_lock(struct lock *lkp)
{
	lockmgr(lkp, LK_EXCLUSIVE);
}

static void
free_lock(struct lock *lkp)
{
	lockmgr(lkp, LK_RELEASE);
}

#ifdef INVARIANTS
static int
lock_held(struct lock *lkp)
{
	return lockcountnb(lkp);
}
#endif

static int
interlocked_sleep(struct lock *lkp, void *ident, int flags,
		  const char *wmesg, int timo)
{
	int retval;

	KKASSERT(lock_held(lkp) > 0);
	retval = lksleep(ident, lkp, flags, wmesg, timo);
	return (retval);
}

/*
 * Place holder for real semaphores.
 */
struct sema {
	int	value;
	thread_t holder;
	char	*name;
	int	prio;
	int	timo;
};
static	void sema_init(struct sema *, char *, int, int);
static	int sema_get(struct sema *, struct lock *);
static	void sema_release(struct sema *);

#define NOHOLDER	((struct thread *) -1)

static void
sema_init(struct sema *semap, char *name, int prio, int timo)
{
	semap->holder = NOHOLDER;
	semap->value = 0;
	semap->name = name;
	semap->prio = prio;
	semap->timo = timo;
}

static int
sema_get(struct sema *semap, struct lock *interlock)
{
	if (semap->value++ > 0) {
		if (interlock != NULL) {
			interlocked_sleep(interlock, (caddr_t)semap,
			    semap->prio, semap->name, semap->timo);
			FREE_LOCK(interlock);
		} else {
			tsleep((caddr_t)semap, semap->prio, semap->name,
			    semap->timo);
		}
		return (0);
	}
	semap->holder = curthread;
	if (interlock != NULL)
		FREE_LOCK(interlock);
	return (1);
}

static void
sema_release(struct sema *semap)
{
	if (semap->value <= 0 || semap->holder != curthread) {
		panic("sema_release: not held");
	}
	if (--semap->value > 0) {
		semap->value = 0;
		wakeup(semap);
	}
	semap->holder = NOHOLDER;
}
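
/*
 * Usage note: the *_in_progress semaphores below single-thread the
 * allocation of new dependency structures.  sema_get() returns with the
 * interlock "lk" released in either case: a return of 0 means the caller
 * slept because another thread held the semaphore, so it must re-acquire
 * "lk" and retry its lookup; a return of 1 means the caller now holds
 * the semaphore and may allocate, re-check for a blocking race, and
 * finish with sema_release().  See pagedep_lookup() for the pattern.
 */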

/*
 * Worklist queue management.
 * These routines require that the lock be held.
 */
static	void worklist_insert(struct workhead *, struct worklist *);
static	void worklist_remove(struct worklist *);
static	void workitem_free(struct worklist *, int);

#define WORKLIST_INSERT_BP(bp, item) do {	\
	(bp)->b_ops = &softdep_bioops;		\
	worklist_insert(&(bp)->b_dep, item);	\
} while (0)

#define WORKLIST_INSERT(head, item) worklist_insert(head, item)
#define WORKLIST_REMOVE(item) worklist_remove(item)
#define WORKITEM_FREE(item, type) workitem_free((struct worklist *)item, type)
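
/*
 * WORKLIST_INSERT_BP() both queues the item on the buffer's b_dep list
 * and points b_ops at softdep_bioops, so the bioops callbacks registered
 * above (io_start, io_complete, io_deallocate, ...) are invoked when the
 * buffer is subsequently written or discarded.
 */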

static void
worklist_insert(struct workhead *head, struct worklist *item)
{
	KKASSERT(lock_held(&lk) > 0);

	if (item->wk_state & ONWORKLIST) {
		panic("worklist_insert: already on list");
	}
	item->wk_state |= ONWORKLIST;
	LIST_INSERT_HEAD(head, item, wk_list);
}

static void
worklist_remove(struct worklist *item)
{
	KKASSERT(lock_held(&lk));
	if ((item->wk_state & ONWORKLIST) == 0)
		panic("worklist_remove: not on list");

	item->wk_state &= ~ONWORKLIST;
	LIST_REMOVE(item, wk_list);
}

static void
workitem_free(struct worklist *item, int type)
{
	if (item->wk_state & ONWORKLIST)
		panic("workitem_free: still on list");
	if (item->wk_type != type)
		panic("workitem_free: type mismatch");

	kfree(item, DtoM(type));
}

/*
 * Workitem queue management
 */
static struct workhead softdep_workitem_pending;
static int num_on_worklist;	/* number of worklist items to be processed */
static int softdep_worklist_busy; /* 1 => trying to do unmount */
static int softdep_worklist_req; /* serialized waiters */
static int max_softdeps;	/* maximum number of structs before slowdown */
static int tickdelay = 2;	/* number of ticks to pause during slowdown */
static int *stat_countp;	/* statistic to count in proc_waiting timeout */
static int proc_waiting;	/* tracks whether we have a timeout posted */
static struct callout handle;	/* handle on posted proc_waiting timeout */
static struct thread *filesys_syncer; /* proc of filesystem syncer process */
static int req_clear_inodedeps;	/* syncer process flush some inodedeps */
#define FLUSH_INODES	1
static int req_clear_remove;	/* syncer process flush some freeblks */
#define FLUSH_REMOVE	2
/*
 * runtime statistics
 */
static int stat_worklist_push;	/* number of worklist cleanups */
static int stat_blk_limit_push;	/* number of times block limit neared */
static int stat_ino_limit_push;	/* number of times inode limit neared */
static int stat_blk_limit_hit;	/* number of times block slowdown imposed */
static int stat_ino_limit_hit;	/* number of times inode slowdown imposed */
static int stat_sync_limit_hit;	/* number of synchronous slowdowns imposed */
static int stat_indir_blk_ptrs;	/* bufs redirtied as indir ptrs not written */
static int stat_inode_bitmap;	/* bufs redirtied as inode bitmap not written */
static int stat_direct_blk_ptrs;/* bufs redirtied as direct ptrs not written */
static int stat_dir_entry;	/* bufs redirtied as dir entry cannot write */
#ifdef DEBUG
#include <vm/vm.h>
#include <sys/sysctl.h>
SYSCTL_INT(_debug, OID_AUTO, max_softdeps, CTLFLAG_RW, &max_softdeps, 0,
    "Maximum soft dependencies before slowdown occurs");
SYSCTL_INT(_debug, OID_AUTO, tickdelay, CTLFLAG_RW, &tickdelay, 0,
    "Ticks to delay before allocating during slowdown");
SYSCTL_INT(_debug, OID_AUTO, worklist_push, CTLFLAG_RW, &stat_worklist_push, 0,
    "Number of worklist cleanups");
SYSCTL_INT(_debug, OID_AUTO, blk_limit_push, CTLFLAG_RW, &stat_blk_limit_push, 0,
    "Number of times block limit neared");
SYSCTL_INT(_debug, OID_AUTO, ino_limit_push, CTLFLAG_RW, &stat_ino_limit_push, 0,
    "Number of times inode limit neared");
SYSCTL_INT(_debug, OID_AUTO, blk_limit_hit, CTLFLAG_RW, &stat_blk_limit_hit, 0,
    "Number of times block slowdown imposed");
SYSCTL_INT(_debug, OID_AUTO, ino_limit_hit, CTLFLAG_RW, &stat_ino_limit_hit, 0,
    "Number of times inode slowdown imposed");
SYSCTL_INT(_debug, OID_AUTO, sync_limit_hit, CTLFLAG_RW, &stat_sync_limit_hit, 0,
    "Number of synchronous slowdowns imposed");
SYSCTL_INT(_debug, OID_AUTO, indir_blk_ptrs, CTLFLAG_RW, &stat_indir_blk_ptrs, 0,
    "Bufs redirtied as indir ptrs not written");
SYSCTL_INT(_debug, OID_AUTO, inode_bitmap, CTLFLAG_RW, &stat_inode_bitmap, 0,
    "Bufs redirtied as inode bitmap not written");
SYSCTL_INT(_debug, OID_AUTO, direct_blk_ptrs, CTLFLAG_RW, &stat_direct_blk_ptrs, 0,
    "Bufs redirtied as direct ptrs not written");
SYSCTL_INT(_debug, OID_AUTO, dir_entry, CTLFLAG_RW, &stat_dir_entry, 0,
    "Bufs redirtied as dir entry cannot write");
#endif /* DEBUG */

/*
 * Add an item to the end of the work queue.
 * This routine requires that the lock be held.
 * This is the only routine that adds items to the list.
 * The following routine is the only one that removes items
 * and does so in order from first to last.
 */
static void
add_to_worklist(struct worklist *wk)
{
	static struct worklist *worklist_tail;

	if (wk->wk_state & ONWORKLIST) {
		panic("add_to_worklist: already on list");
	}
	wk->wk_state |= ONWORKLIST;
	if (LIST_FIRST(&softdep_workitem_pending) == NULL)
		LIST_INSERT_HEAD(&softdep_workitem_pending, wk, wk_list);
	else
		LIST_INSERT_AFTER(worklist_tail, wk, wk_list);
	worklist_tail = wk;
	num_on_worklist += 1;
}
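
/*
 * The static worklist_tail pointer caches the most recently appended
 * item so this LIST, which has no tail pointer of its own, can be
 * appended to without walking the whole queue; "lk" must be held, as
 * noted above.
 */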

/*
 * Process that runs once per second to handle items in the background queue.
 *
 * Note that we ensure that items are processed in the order in which they
 * appear in the queue.  The code below depends on this property to ensure
 * that blocks of a file are freed before the inode itself is freed.  This
 * ordering ensures that no new <vfsid, inum, lbn> triples will be generated
 * until all the old ones have been purged from the dependency lists.
 *
 * bioops callback - hold io_token
 */
static int
softdep_process_worklist(struct mount *matchmnt)
{
	thread_t td = curthread;
	int matchcnt, loopcount;
	long starttime;

	get_mplock();

	/*
	 * Record the process identifier of our caller so that we can give
	 * this process preferential treatment in request_cleanup below.
	 */
	filesys_syncer = td;
	matchcnt = 0;

	/*
	 * There is no danger of having multiple processes run this
	 * code, but we have to single-thread it when softdep_flushfiles()
	 * is in operation to get an accurate count of the number of items
	 * related to its mount point that are in the list.
	 */
	if (matchmnt == NULL) {
		if (softdep_worklist_busy < 0) {
			matchcnt = -1;
			goto done;
		}
		softdep_worklist_busy += 1;
	}

	/*
	 * If requested, try removing inode or removal dependencies.
	 */
	if (req_clear_inodedeps) {
		clear_inodedeps(td);
		req_clear_inodedeps -= 1;
		wakeup_one(&proc_waiting);
	}
	if (req_clear_remove) {
		clear_remove(td);
		req_clear_remove -= 1;
		wakeup_one(&proc_waiting);
	}
	loopcount = 1;
	starttime = time_second;
	while (num_on_worklist > 0) {
		matchcnt += process_worklist_item(matchmnt, 0);

		/*
		 * If a umount operation wants to run the worklist
		 * accurately, abort.
		 */
		if (softdep_worklist_req && matchmnt == NULL) {
			matchcnt = -1;
			break;
		}

		/*
		 * If requested, try removing inode or removal dependencies.
		 */
		if (req_clear_inodedeps) {
			clear_inodedeps(td);
			req_clear_inodedeps -= 1;
			wakeup_one(&proc_waiting);
		}
		if (req_clear_remove) {
			clear_remove(td);
			req_clear_remove -= 1;
			wakeup_one(&proc_waiting);
		}
		/*
		 * We do not generally want to stop for buffer space, but if
		 * we are really being a buffer hog, we will stop and wait.
		 */
		if (loopcount++ % 128 == 0)
			bwillinode(1);
		/*
		 * Never allow processing to run for more than one
		 * second.  Otherwise the other syncer tasks may get
		 * excessively backlogged.
		 */
		if (starttime != time_second && matchmnt == NULL) {
			matchcnt = -1;
			break;
		}
	}
	if (matchmnt == NULL) {
		--softdep_worklist_busy;
		if (softdep_worklist_req && softdep_worklist_busy == 0)
			wakeup(&softdep_worklist_req);
	}
done:
	rel_mplock();
	return (matchcnt);
}

/*
 * Process one item on the worklist.
 */
static int
process_worklist_item(struct mount *matchmnt, int flags)
{
	struct worklist *wk;
	struct dirrem *dirrem;
	struct fs *matchfs;
	struct vnode *vp;
	int matchcnt = 0;

	matchfs = NULL;
	if (matchmnt != NULL)
		matchfs = VFSTOUFS(matchmnt)->um_fs;
	ACQUIRE_LOCK(&lk);
	/*
	 * Normally we just process each item on the worklist in order.
	 * However, if we are in a situation where we cannot lock any
	 * inodes, we have to skip over any dirrem requests whose
	 * vnodes are resident and locked.
	 */
	LIST_FOREACH(wk, &softdep_workitem_pending, wk_list) {
		if ((flags & LK_NOWAIT) == 0 || wk->wk_type != D_DIRREM)
			break;
		dirrem = WK_DIRREM(wk);
		vp = ufs_ihashlookup(VFSTOUFS(dirrem->dm_mnt)->um_dev,
		    dirrem->dm_oldinum);
		if (vp == NULL || !vn_islocked(vp))
			break;
	}
	if (wk == NULL) {
		FREE_LOCK(&lk);
		return (0);
	}
	WORKLIST_REMOVE(wk);
	num_on_worklist -= 1;
	FREE_LOCK(&lk);
	switch (wk->wk_type) {
	case D_DIRREM:
		/* removal of a directory entry */
		if (WK_DIRREM(wk)->dm_mnt == matchmnt)
			matchcnt += 1;
		handle_workitem_remove(WK_DIRREM(wk));
		break;

	case D_FREEBLKS:
		/* releasing blocks and/or fragments from a file */
		if (WK_FREEBLKS(wk)->fb_fs == matchfs)
			matchcnt += 1;
		handle_workitem_freeblocks(WK_FREEBLKS(wk));
		break;

	case D_FREEFRAG:
		/* releasing a fragment when replaced as a file grows */
		if (WK_FREEFRAG(wk)->ff_fs == matchfs)
			matchcnt += 1;
		handle_workitem_freefrag(WK_FREEFRAG(wk));
		break;

	case D_FREEFILE:
		/* releasing an inode when its link count drops to 0 */
		if (WK_FREEFILE(wk)->fx_fs == matchfs)
			matchcnt += 1;
		handle_workitem_freefile(WK_FREEFILE(wk));
		break;

	default:
		panic("%s_process_worklist: Unknown type %s",
		    "softdep", TYPENAME(wk->wk_type));
		/* NOTREACHED */
	}
	return (matchcnt);
}

/*
 * Move dependencies from one buffer to another.
 *
 * bioops callback - hold io_token
 */
static void
softdep_move_dependencies(struct buf *oldbp, struct buf *newbp)
{
	struct worklist *wk, *wktail;

	get_mplock();
	if (LIST_FIRST(&newbp->b_dep) != NULL)
		panic("softdep_move_dependencies: need merge code");
	wktail = NULL;
	ACQUIRE_LOCK(&lk);
	while ((wk = LIST_FIRST(&oldbp->b_dep)) != NULL) {
		LIST_REMOVE(wk, wk_list);
		if (wktail == NULL)
			LIST_INSERT_HEAD(&newbp->b_dep, wk, wk_list);
		else
			LIST_INSERT_AFTER(wktail, wk, wk_list);
		wktail = wk;
		newbp->b_ops = &softdep_bioops;
	}
	FREE_LOCK(&lk);
	rel_mplock();
}

/*
 * Purge the work list of all items associated with a particular mount point.
 */
int
softdep_flushfiles(struct mount *oldmnt, int flags)
{
	struct vnode *devvp;
	int error, loopcnt;

	/*
	 * Await our turn to clear out the queue, then serialize access.
	 */
	ACQUIRE_LOCK(&lk);
	while (softdep_worklist_busy != 0) {
		softdep_worklist_req += 1;
		lksleep(&softdep_worklist_req, &lk, 0, "softflush", 0);
		softdep_worklist_req -= 1;
	}
	softdep_worklist_busy = -1;
	FREE_LOCK(&lk);

	if ((error = ffs_flushfiles(oldmnt, flags)) != 0) {
		softdep_worklist_busy = 0;
		if (softdep_worklist_req)
			wakeup(&softdep_worklist_req);
		return (error);
	}
	/*
	 * Alternately flush the block device associated with the mount
	 * point and process any dependencies that the flushing
	 * creates.  In theory, this loop can happen at most twice,
	 * but we give it a few extra just to be sure.
	 */
	devvp = VFSTOUFS(oldmnt)->um_devvp;
	for (loopcnt = 10; loopcnt > 0; ) {
		if (softdep_process_worklist(oldmnt) == 0) {
			loopcnt--;
			/*
			 * Do another flush in case any vnodes were brought in
			 * as part of the cleanup operations.
			 */
			if ((error = ffs_flushfiles(oldmnt, flags)) != 0)
				break;
			/*
			 * If we still found nothing to do, we are really done.
			 */
			if (softdep_process_worklist(oldmnt) == 0)
				break;
		}
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(devvp, MNT_WAIT, 0);
		vn_unlock(devvp);
		if (error)
			break;
	}
	ACQUIRE_LOCK(&lk);
	softdep_worklist_busy = 0;
	if (softdep_worklist_req)
		wakeup(&softdep_worklist_req);
	FREE_LOCK(&lk);

	/*
	 * If we are unmounting then it is an error to fail.  If we
	 * are simply trying to downgrade to read-only, then filesystem
	 * activity can keep us busy forever, so we just fail with EBUSY.
	 */
	if (loopcnt == 0) {
		if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT)
			panic("softdep_flushfiles: looping");
		error = EBUSY;
	}
	return (error);
}

/*
 * Structure hashing.
 *
 * There are three types of structures that can be looked up:
 *	1) pagedep structures identified by mount point, inode number,
 *	   and logical block.
 *	2) inodedep structures identified by mount point and inode number.
 *	3) newblk structures identified by mount point and
 *	   physical block number.
 *
 * The "pagedep" and "inodedep" dependency structures are hashed
 * separately from the file blocks and inodes to which they correspond.
 * This separation helps when the in-memory copy of an inode or
 * file block must be replaced.  It also obviates the need to access
 * an inode or file page when simply updating (or de-allocating)
 * dependency structures.  Lookup of newblk structures is needed to
 * find newly allocated blocks when trying to associate them with
 * their allocdirect or allocindir structure.
 *
 * The lookup routines optionally create and hash a new instance when
 * an existing entry is not found.
 */
#define DEPALLOC	0x0001	/* allocate structure if lookup fails */
#define NODELAY		0x0002	/* cannot do background work */
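
/*
 * For example, softdep_setup_inomapdep() below creates the dependency
 * for a freshly allocated inode with
 *
 *	inodedep_lookup(ip->i_fs, newinum, DEPALLOC | NODELAY, &inodedep);
 *
 * and relies on the return value of 0 to know that the inodedep was
 * newly created rather than found; finding an existing one there is a
 * panic.
 */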

/*
 * Structures and routines associated with pagedep caching.
 */
LIST_HEAD(pagedep_hashhead, pagedep) *pagedep_hashtbl;
u_long pagedep_hash;		/* size of hash table - 1 */
#define PAGEDEP_HASH(mp, inum, lbn) \
	(&pagedep_hashtbl[((((register_t)(mp)) >> 13) + (inum) + (lbn)) & \
	pagedep_hash])
static struct sema pagedep_in_progress;

/*
 * Helper routine for pagedep_lookup()
 */
static __inline
struct pagedep *
pagedep_find(struct pagedep_hashhead *pagedephd, ino_t ino, ufs_lbn_t lbn,
	     struct mount *mp)
{
	struct pagedep *pagedep;

	LIST_FOREACH(pagedep, pagedephd, pd_hash) {
		if (ino == pagedep->pd_ino &&
		    lbn == pagedep->pd_lbn &&
		    mp == pagedep->pd_mnt) {
			return (pagedep);
		}
	}
	return (NULL);
}

/*
 * Look up a pagedep.  Return 1 if found, 0 if not found.
 * If not found, allocate if DEPALLOC flag is passed.
 * Found or allocated entry is returned in pagedeppp.
 * This routine must be called with splbio interrupts blocked.
 */
static int
pagedep_lookup(struct inode *ip, ufs_lbn_t lbn, int flags,
	       struct pagedep **pagedeppp)
{
	struct pagedep *pagedep;
	struct pagedep_hashhead *pagedephd;
	struct mount *mp;
	int i;

	KKASSERT(lock_held(&lk) > 0);

	mp = ITOV(ip)->v_mount;
	pagedephd = PAGEDEP_HASH(mp, ip->i_number, lbn);
top:
	*pagedeppp = pagedep_find(pagedephd, ip->i_number, lbn, mp);
	if (*pagedeppp)
		return (1);
	if ((flags & DEPALLOC) == 0)
		return (0);
	if (sema_get(&pagedep_in_progress, &lk) == 0) {
		ACQUIRE_LOCK(&lk);
		goto top;
	}
	pagedep = kmalloc(sizeof(struct pagedep), M_PAGEDEP,
	    M_SOFTDEP_FLAGS | M_ZERO);

	if (pagedep_find(pagedephd, ip->i_number, lbn, mp)) {
		kprintf("pagedep_lookup: blocking race avoided\n");
		ACQUIRE_LOCK(&lk);
		sema_release(&pagedep_in_progress);
		kfree(pagedep, M_PAGEDEP);
		goto top;
	}

	pagedep->pd_list.wk_type = D_PAGEDEP;
	pagedep->pd_mnt = mp;
	pagedep->pd_ino = ip->i_number;
	pagedep->pd_lbn = lbn;
	LIST_INIT(&pagedep->pd_dirremhd);
	LIST_INIT(&pagedep->pd_pendinghd);
	for (i = 0; i < DAHASHSZ; i++)
		LIST_INIT(&pagedep->pd_diraddhd[i]);
	ACQUIRE_LOCK(&lk);
	LIST_INSERT_HEAD(pagedephd, pagedep, pd_hash);
	sema_release(&pagedep_in_progress);
	*pagedeppp = pagedep;
	return (0);
}

/*
 * Structures and routines associated with inodedep caching.
 */
LIST_HEAD(inodedep_hashhead, inodedep) *inodedep_hashtbl;
static u_long inodedep_hash;	/* size of hash table - 1 */
static long num_inodedep;	/* number of inodedep allocated */
#define INODEDEP_HASH(fs, inum) \
	(&inodedep_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & inodedep_hash])
static struct sema inodedep_in_progress;

/*
 * Helper routine for inodedep_lookup()
 */
static __inline
struct inodedep *
inodedep_find(struct inodedep_hashhead *inodedephd, struct fs *fs, ino_t inum)
{
	struct inodedep *inodedep;

	LIST_FOREACH(inodedep, inodedephd, id_hash) {
		if (inum == inodedep->id_ino && fs == inodedep->id_fs)
			return (inodedep);
	}
	return (NULL);
}

/*
 * Look up an inodedep.  Return 1 if found, 0 if not found.
 * If not found, allocate if DEPALLOC flag is passed.
 * Found or allocated entry is returned in inodedeppp.
 * This routine must be called with splbio interrupts blocked.
 */
static int
inodedep_lookup(struct fs *fs, ino_t inum, int flags,
		struct inodedep **inodedeppp)
{
	struct inodedep *inodedep;
	struct inodedep_hashhead *inodedephd;
	int firsttry;

	KKASSERT(lock_held(&lk) > 0);

	firsttry = 1;
	inodedephd = INODEDEP_HASH(fs, inum);
top:
	*inodedeppp = inodedep_find(inodedephd, fs, inum);
	if (*inodedeppp)
		return (1);
	if ((flags & DEPALLOC) == 0)
		return (0);
	/*
	 * If we are over our limit, try to improve the situation.
	 */
	if (num_inodedep > max_softdeps && firsttry &&
	    speedup_syncer() == 0 && (flags & NODELAY) == 0 &&
	    request_cleanup(FLUSH_INODES, 1)) {
		firsttry = 0;
		goto top;
	}
	if (sema_get(&inodedep_in_progress, &lk) == 0) {
		ACQUIRE_LOCK(&lk);
		goto top;
	}
	inodedep = kmalloc(sizeof(struct inodedep), M_INODEDEP,
	    M_SOFTDEP_FLAGS | M_ZERO);
	if (inodedep_find(inodedephd, fs, inum)) {
		kprintf("inodedep_lookup: blocking race avoided\n");
		ACQUIRE_LOCK(&lk);
		sema_release(&inodedep_in_progress);
		kfree(inodedep, M_INODEDEP);
		goto top;
	}
	inodedep->id_list.wk_type = D_INODEDEP;
	inodedep->id_fs = fs;
	inodedep->id_ino = inum;
	inodedep->id_state = ALLCOMPLETE;
	inodedep->id_nlinkdelta = 0;
	inodedep->id_savedino = NULL;
	inodedep->id_savedsize = -1;
	inodedep->id_buf = NULL;
	LIST_INIT(&inodedep->id_pendinghd);
	LIST_INIT(&inodedep->id_inowait);
	LIST_INIT(&inodedep->id_bufwait);
	TAILQ_INIT(&inodedep->id_inoupdt);
	TAILQ_INIT(&inodedep->id_newinoupdt);
	ACQUIRE_LOCK(&lk);
	num_inodedep += 1;
	LIST_INSERT_HEAD(inodedephd, inodedep, id_hash);
	sema_release(&inodedep_in_progress);
	*inodedeppp = inodedep;
	return (0);
}

/*
 * Structures and routines associated with newblk caching.
 */
LIST_HEAD(newblk_hashhead, newblk) *newblk_hashtbl;
u_long newblk_hash;		/* size of hash table - 1 */
#define NEWBLK_HASH(fs, inum) \
	(&newblk_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & newblk_hash])
static struct sema newblk_in_progress;

/*
 * Helper routine for newblk_lookup()
 */
static __inline
struct newblk *
newblk_find(struct newblk_hashhead *newblkhd, struct fs *fs,
	    ufs_daddr_t newblkno)
{
	struct newblk *newblk;

	LIST_FOREACH(newblk, newblkhd, nb_hash) {
		if (newblkno == newblk->nb_newblkno && fs == newblk->nb_fs)
			return (newblk);
	}
	return (NULL);
}

/*
 * Look up a newblk.  Return 1 if found, 0 if not found.
 * If not found, allocate if DEPALLOC flag is passed.
 * Found or allocated entry is returned in newblkpp.
 */
static int
newblk_lookup(struct fs *fs, ufs_daddr_t newblkno, int flags,
	      struct newblk **newblkpp)
{
	struct newblk *newblk;
	struct newblk_hashhead *newblkhd;

	newblkhd = NEWBLK_HASH(fs, newblkno);
top:
	*newblkpp = newblk_find(newblkhd, fs, newblkno);
	if (*newblkpp)
		return (1);
	if ((flags & DEPALLOC) == 0)
		return (0);
	if (sema_get(&newblk_in_progress, NULL) == 0)
		goto top;
	newblk = kmalloc(sizeof(struct newblk), M_NEWBLK,
	    M_SOFTDEP_FLAGS | M_ZERO);

	if (newblk_find(newblkhd, fs, newblkno)) {
		kprintf("newblk_lookup: blocking race avoided\n");
		/* release the semaphore we actually hold, not pagedep's */
		sema_release(&newblk_in_progress);
		kfree(newblk, M_NEWBLK);
		goto top;
	}
	newblk->nb_state = 0;
	newblk->nb_fs = fs;
	newblk->nb_newblkno = newblkno;
	LIST_INSERT_HEAD(newblkhd, newblk, nb_hash);
	sema_release(&newblk_in_progress);
	*newblkpp = newblk;
	return (0);
}

/*
 * Executed during filesystem initialization before
 * mounting any filesystems.
 */
void
softdep_initialize(void)
{
	callout_init(&handle);

	LIST_INIT(&mkdirlisthd);
	LIST_INIT(&softdep_workitem_pending);
	max_softdeps = min(desiredvnodes * 8,
	    M_INODEDEP->ks_limit / (2 * sizeof(struct inodedep)));
	pagedep_hashtbl = hashinit(desiredvnodes / 5, M_PAGEDEP,
	    &pagedep_hash);
	lockinit(&lk, "ffs_softdep", 0, LK_CANRECURSE);
	sema_init(&pagedep_in_progress, "pagedep", 0, 0);
	inodedep_hashtbl = hashinit(desiredvnodes, M_INODEDEP, &inodedep_hash);
	sema_init(&inodedep_in_progress, "inodedep", 0, 0);
	newblk_hashtbl = hashinit(64, M_NEWBLK, &newblk_hash);
	sema_init(&newblk_in_progress, "newblk", 0, 0);
	add_bio_ops(&softdep_bioops);
}

/*
 * Called at mount time to notify the dependency code that a
 * filesystem wishes to use it.
 */
int
softdep_mount(struct vnode *devvp, struct mount *mp, struct fs *fs)
{
	struct csum cstotal;
	struct cg *cgp;
	struct buf *bp;
	int error, cyl;

	mp->mnt_flag &= ~MNT_ASYNC;
	mp->mnt_flag |= MNT_SOFTDEP;
	mp->mnt_bioops = &softdep_bioops;
	/*
	 * When doing soft updates, the counters in the
	 * superblock may have gotten out of sync, so we have
	 * to scan the cylinder groups and recalculate them.
	 */
	if (fs->fs_clean != 0)
		return (0);
	bzero(&cstotal, sizeof cstotal);
	for (cyl = 0; cyl < fs->fs_ncg; cyl++) {
		if ((error = bread(devvp, fsbtodoff(fs, cgtod(fs, cyl)),
				   fs->fs_cgsize, &bp)) != 0) {
			brelse(bp);
			return (error);
		}
		cgp = (struct cg *)bp->b_data;
		cstotal.cs_nffree += cgp->cg_cs.cs_nffree;
		cstotal.cs_nbfree += cgp->cg_cs.cs_nbfree;
		cstotal.cs_nifree += cgp->cg_cs.cs_nifree;
		cstotal.cs_ndir += cgp->cg_cs.cs_ndir;
		fs->fs_cs(fs, cyl) = cgp->cg_cs;
		brelse(bp);
	}
#ifdef DEBUG
	if (bcmp(&cstotal, &fs->fs_cstotal, sizeof cstotal))
		kprintf("ffs_mountfs: superblock updated for soft updates\n");
#endif
	bcopy(&cstotal, &fs->fs_cstotal, sizeof cstotal);
	return (0);
}

/*
 * Protecting the freemaps (or bitmaps).
 *
 * To eliminate the need to execute fsck before mounting a filesystem
 * after a power failure, one must (conservatively) guarantee that the
 * on-disk copy of the bitmaps never indicate that a live inode or block is
 * free.  So, when a block or inode is allocated, the bitmap should be
 * updated (on disk) before any new pointers.  When a block or inode is
 * freed, the bitmap should not be updated until all pointers have been
 * reset.  The latter dependency is handled by the delayed de-allocation
 * approach described below for block and inode de-allocation.  The former
 * dependency is handled by calling the following procedure when a block or
 * inode is allocated.  When an inode is allocated an "inodedep" is created
 * with its DEPCOMPLETE flag cleared until its bitmap is written to disk.
 * Each "inodedep" is also inserted into the hash indexing structure so
 * that any additional link additions can be made dependent on the inode
 * allocation.
 *
 * The ufs filesystem maintains a number of free block counts (e.g., per
 * cylinder group, per cylinder and per <cylinder, rotational position> pair)
 * in addition to the bitmaps.  These counts are used to improve efficiency
 * during allocation and therefore must be consistent with the bitmaps.
 * There is no convenient way to guarantee post-crash consistency of these
 * counts with simple update ordering, for two main reasons: (1) The counts
 * and bitmaps for a single cylinder group block are not in the same disk
 * sector.  If a disk write is interrupted (e.g., by power failure), one may
 * be written and the other not.  (2) Some of the counts are located in the
 * superblock rather than the cylinder group block.  So, we focus our soft
 * updates implementation on protecting the bitmaps.  When mounting a
 * filesystem, we recompute the auxiliary counts from the bitmaps.
 */

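/*
 * A concrete instance of the rule above: the cylinder group inode
 * allocator in ffs_alloc.c calls softdep_setup_inomapdep() right after
 * setting the bit in the inode map.  The new inodedep starts with
 * DEPCOMPLETE cleared and is linked onto the bmsafemap of the cylinder
 * group buffer, so dependent updates (such as the directory entry that
 * will name the inode) are held back until that bitmap buffer has been
 * written to disk.
 */
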
/*
 * Called just after updating the cylinder group block to allocate an inode.
 *
 * Parameters:
 *	bp: buffer for cylgroup block with inode map
 *	ip: inode related to allocation
 *	newinum: new inode number being allocated
 */
void
softdep_setup_inomapdep(struct buf *bp, struct inode *ip, ino_t newinum)
{
	struct inodedep *inodedep;
	struct bmsafemap *bmsafemap;

	/*
	 * Create a dependency for the newly allocated inode.
	 * Panic if it already exists as something is seriously wrong.
	 * Otherwise add it to the dependency list for the buffer holding
	 * the cylinder group map from which it was allocated.
	 */
	ACQUIRE_LOCK(&lk);
	if (inodedep_lookup(ip->i_fs, newinum, DEPALLOC | NODELAY, &inodedep)) {
		FREE_LOCK(&lk);
		panic("softdep_setup_inomapdep: found inode");
	}
	inodedep->id_buf = bp;
	inodedep->id_state &= ~DEPCOMPLETE;
	bmsafemap = bmsafemap_lookup(bp);
	LIST_INSERT_HEAD(&bmsafemap->sm_inodedephd, inodedep, id_deps);
	FREE_LOCK(&lk);
}

/*
 * Called just after updating the cylinder group block to
 * allocate block or fragment.
 *
 * Parameters:
 *	bp: buffer for cylgroup block with block map
 *	fs: filesystem doing allocation
 *	newblkno: number of newly allocated block
 */
void
softdep_setup_blkmapdep(struct buf *bp, struct fs *fs,
			ufs_daddr_t newblkno)
{
	struct newblk *newblk;
	struct bmsafemap *bmsafemap;

	/*
	 * Create a dependency for the newly allocated block.
	 * Add it to the dependency list for the buffer holding
	 * the cylinder group map from which it was allocated.
	 */
	if (newblk_lookup(fs, newblkno, DEPALLOC, &newblk) != 0)
		panic("softdep_setup_blkmapdep: found block");
	ACQUIRE_LOCK(&lk);
	newblk->nb_bmsafemap = bmsafemap = bmsafemap_lookup(bp);
	LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, nb_deps);
	FREE_LOCK(&lk);
}

/*
 * Find the bmsafemap associated with a cylinder group buffer.
 * If none exists, create one.  The buffer must be locked when
 * this routine is called and this routine must be called with
 * splbio interrupts blocked.
 */
static struct bmsafemap *
bmsafemap_lookup(struct buf *bp)
{
	struct bmsafemap *bmsafemap;
	struct worklist *wk;

	KKASSERT(lock_held(&lk) > 0);

	LIST_FOREACH(wk, &bp->b_dep, wk_list) {
		if (wk->wk_type == D_BMSAFEMAP)
			return (WK_BMSAFEMAP(wk));
	}
	FREE_LOCK(&lk);
	bmsafemap = kmalloc(sizeof(struct bmsafemap), M_BMSAFEMAP,
	    M_SOFTDEP_FLAGS);
	bmsafemap->sm_list.wk_type = D_BMSAFEMAP;
	bmsafemap->sm_list.wk_state = 0;
	bmsafemap->sm_buf = bp;
	LIST_INIT(&bmsafemap->sm_allocdirecthd);
	LIST_INIT(&bmsafemap->sm_allocindirhd);
	LIST_INIT(&bmsafemap->sm_inodedephd);
	LIST_INIT(&bmsafemap->sm_newblkhd);
	ACQUIRE_LOCK(&lk);
	WORKLIST_INSERT_BP(bp, &bmsafemap->sm_list);
	return (bmsafemap);
}

/*
 * Direct block allocation dependencies.
 *
 * When a new block is allocated, the corresponding disk locations must be
 * initialized (with zeros or new data) before the on-disk inode points to
 * them.  Also, the freemap from which the block was allocated must be
 * updated (on disk) before the inode's pointer.  These two dependencies are
 * independent of each other and are needed for all file blocks and indirect
 * blocks that are pointed to directly by the inode.  Just before the
 * "in-core" version of the inode is updated with a newly allocated block
 * number, a procedure (below) is called to setup allocation dependency
 * structures.  These structures are removed when the corresponding
 * dependencies are satisfied or when the block allocation becomes obsolete
 * (i.e., the file is deleted, the block is de-allocated, or the block is a
 * fragment that gets upgraded).  All of these cases are handled in
 * procedures described later.
 *
 * When a file extension causes a fragment to be upgraded, either to a larger
 * fragment or to a full block, the on-disk location may change (if the
 * previous fragment could not simply be extended).  In this case, the old
 * fragment must be de-allocated, but not until after the inode's pointer has
 * been updated.  In most cases, this is handled by later procedures, which
 * will construct a "freefrag" structure to be added to the workitem queue
 * when the inode update is complete (or obsolete).  The main exception to
 * this is when an allocation occurs while a pending allocation dependency
 * (for the same block pointer) remains.  This case is handled in the main
 * allocation dependency setup procedure by immediately freeing the
 * unreferenced fragments.
 *
 * Parameters:
 *	ip: inode to which block is being added
 *	lbn: block pointer within inode
 *	newblkno: disk block number being added
 *	oldblkno: previous block number, 0 unless frag
 *	newsize: size of new block
 *	oldsize: size of old block
 *	bp: bp for allocated block
 */
void
softdep_setup_allocdirect(struct inode *ip, ufs_lbn_t lbn, ufs_daddr_t newblkno,
			  ufs_daddr_t oldblkno, long newsize, long oldsize,
			  struct buf *bp)
{
	struct allocdirect *adp, *oldadp;
	struct allocdirectlst *adphead;
	struct bmsafemap *bmsafemap;
	struct inodedep *inodedep;
	struct pagedep *pagedep;
	struct newblk *newblk;

	adp = kmalloc(sizeof(struct allocdirect), M_ALLOCDIRECT,
	    M_SOFTDEP_FLAGS | M_ZERO);
	adp->ad_list.wk_type = D_ALLOCDIRECT;
	adp->ad_lbn = lbn;
	adp->ad_newblkno = newblkno;
	adp->ad_oldblkno = oldblkno;
	adp->ad_newsize = newsize;
	adp->ad_oldsize = oldsize;
	adp->ad_state = ATTACHED;
	if (newblkno == oldblkno)
		adp->ad_freefrag = NULL;
	else
		adp->ad_freefrag = newfreefrag(ip, oldblkno, oldsize);

	if (newblk_lookup(ip->i_fs, newblkno, 0, &newblk) == 0)
		panic("softdep_setup_allocdirect: lost block");

	ACQUIRE_LOCK(&lk);
	inodedep_lookup(ip->i_fs, ip->i_number, DEPALLOC | NODELAY, &inodedep);
	adp->ad_inodedep = inodedep;

	if (newblk->nb_state == DEPCOMPLETE) {
		adp->ad_state |= DEPCOMPLETE;
		adp->ad_buf = NULL;
	} else {
		bmsafemap = newblk->nb_bmsafemap;
		adp->ad_buf = bmsafemap->sm_buf;
		LIST_REMOVE(newblk, nb_deps);
		LIST_INSERT_HEAD(&bmsafemap->sm_allocdirecthd, adp, ad_deps);
	}
	LIST_REMOVE(newblk, nb_hash);
	kfree(newblk, M_NEWBLK);

	WORKLIST_INSERT_BP(bp, &adp->ad_list);
	if (lbn >= NDADDR) {
		/* allocating an indirect block */
		if (oldblkno != 0) {
			FREE_LOCK(&lk);
			panic("softdep_setup_allocdirect: non-zero indir");
		}
	} else {
		/*
		 * Allocating a direct block.
		 *
		 * If we are allocating a directory block, then we must
		 * allocate an associated pagedep to track additions and
		 * deletions.
		 */
		if ((ip->i_mode & IFMT) == IFDIR &&
		    pagedep_lookup(ip, lbn, DEPALLOC, &pagedep) == 0) {
			WORKLIST_INSERT_BP(bp, &pagedep->pd_list);
		}
	}
	/*
	 * The list of allocdirects must be kept in sorted and ascending
	 * order so that the rollback routines can quickly determine the
	 * first uncommitted block (the size of the file stored on disk
	 * ends at the end of the lowest committed fragment, or if there
	 * are no fragments, at the end of the highest committed block).
	 * Since files generally grow, the typical case is that the new
	 * block is to be added at the end of the list.  We speed this
	 * special case by checking against the last allocdirect in the
	 * list before laboriously traversing the list looking for the
	 * insertion point.
	 */
	adphead = &inodedep->id_newinoupdt;
	oldadp = TAILQ_LAST(adphead, allocdirectlst);
	if (oldadp == NULL || oldadp->ad_lbn <= lbn) {
		/* insert at end of list */
		TAILQ_INSERT_TAIL(adphead, adp, ad_next);
		if (oldadp != NULL && oldadp->ad_lbn == lbn)
			allocdirect_merge(adphead, adp, oldadp);
		FREE_LOCK(&lk);
		return;
	}
	TAILQ_FOREACH(oldadp, adphead, ad_next) {
		if (oldadp->ad_lbn >= lbn)
			break;
	}
	if (oldadp == NULL) {
		FREE_LOCK(&lk);
		panic("softdep_setup_allocdirect: lost entry");
	}
	/* insert in middle of list */
	TAILQ_INSERT_BEFORE(oldadp, adp, ad_next);
	if (oldadp->ad_lbn == lbn)
		allocdirect_merge(adphead, adp, oldadp);
	FREE_LOCK(&lk);
}

/*
 * Replace an old allocdirect dependency with a newer one.
 * This routine must be called with splbio interrupts blocked.
 *
 * Parameters:
 *	adphead: head of list holding allocdirects
 *	newadp: allocdirect being added
 *	oldadp: existing allocdirect being checked
 */
static void
allocdirect_merge(struct allocdirectlst *adphead,
		  struct allocdirect *newadp,
		  struct allocdirect *oldadp)
{
	struct freefrag *freefrag;

	KKASSERT(lock_held(&lk) > 0);

	if (newadp->ad_oldblkno != oldadp->ad_newblkno ||
	    newadp->ad_oldsize != oldadp->ad_newsize ||
	    newadp->ad_lbn >= NDADDR) {
		FREE_LOCK(&lk);
		panic("allocdirect_merge: old %d != new %d || lbn %ld >= %d",
		    newadp->ad_oldblkno, oldadp->ad_newblkno, newadp->ad_lbn,
		    NDADDR);
	}
	newadp->ad_oldblkno = oldadp->ad_oldblkno;
	newadp->ad_oldsize = oldadp->ad_oldsize;
	/*
	 * If the old dependency had a fragment to free or had never
	 * previously had a block allocated, then the new dependency
	 * can immediately post its freefrag and adopt the old freefrag.
	 * This action is done by swapping the freefrag dependencies.
	 * The new dependency gains the old one's freefrag, and the
	 * old one gets the new one and then immediately puts it on
	 * the worklist when it is freed by free_allocdirect.  It is
	 * not possible to do this swap when the old dependency had a
	 * non-zero size but no previous fragment to free.  This condition
	 * arises when the new block is an extension of the old block.
	 * Here, the first part of the fragment allocated to the new
	 * dependency is part of the block currently claimed on disk by
	 * the old dependency, so cannot legitimately be freed until the
	 * conditions for the new dependency are fulfilled.
	 */
	if (oldadp->ad_freefrag != NULL || oldadp->ad_oldblkno == 0) {
		freefrag = newadp->ad_freefrag;
		newadp->ad_freefrag = oldadp->ad_freefrag;
		oldadp->ad_freefrag = freefrag;
	}
	free_allocdirect(adphead, oldadp, 0);
}

/*
 * Allocate a new freefrag structure if needed.
 */
static struct freefrag *
newfreefrag(struct inode *ip, ufs_daddr_t blkno, long size)
{
	struct freefrag *freefrag;
	struct fs *fs;

	if (blkno == 0)
		return (NULL);
	fs = ip->i_fs;
	if (fragnum(fs, blkno) + numfrags(fs, size) > fs->fs_frag)
		panic("newfreefrag: frag size");
	freefrag = kmalloc(sizeof(struct freefrag), M_FREEFRAG,
	    M_SOFTDEP_FLAGS);
	freefrag->ff_list.wk_type = D_FREEFRAG;
	freefrag->ff_state = ip->i_uid & ~ONWORKLIST;	/* XXX - used below */
	freefrag->ff_inum = ip->i_number;
	freefrag->ff_fs = fs;
	freefrag->ff_devvp = ip->i_devvp;
	freefrag->ff_blkno = blkno;
	freefrag->ff_fragsize = size;
	return (freefrag);
}

/*
 * This workitem de-allocates fragments that were replaced during
 * file block allocation.
 */
static void
handle_workitem_freefrag(struct freefrag *freefrag)
{
	struct inode tip;

	tip.i_fs = freefrag->ff_fs;
	tip.i_devvp = freefrag->ff_devvp;
	tip.i_dev = freefrag->ff_devvp->v_rdev;
	tip.i_number = freefrag->ff_inum;
	tip.i_uid = freefrag->ff_state & ~ONWORKLIST;	/* XXX - set above */
	ffs_blkfree(&tip, freefrag->ff_blkno, freefrag->ff_fragsize);
	kfree(freefrag, M_FREEFRAG);
}

/*
 * Indirect block allocation dependencies.
 *
 * The same dependencies that exist for a direct block also exist when
 * a new block is allocated and pointed to by an entry in a block of
 * indirect pointers.  The undo/redo states described above are also
 * used here.  Because an indirect block contains many pointers that
 * may have dependencies, a second copy of the entire in-memory indirect
 * block is kept.  The buffer cache copy is always completely up-to-date.
 * The second copy, which is used only as a source for disk writes,
 * contains only the safe pointers (i.e., those that have no remaining
 * update dependencies).  The second copy is freed when all pointers
 * are safe.  The cache is not allowed to replace indirect blocks with
 * pending update dependencies.  If a buffer containing an indirect
 * block with dependencies is written, these routines will mark it
 * dirty again.  It can only be successfully written once all the
 * dependencies are removed.  The ffs_fsync routine and
 * softdep_sync_metadata work together to get all the dependencies
 * removed so that a file can be successfully written to disk.  Three
 * procedures are used when setting up indirect block pointer
 * dependencies.  The division is necessary because of the organization
 * of the "balloc" routine and because of the distinction between file
 * pages and file metadata blocks.
 */

/*
 * Allocate a new allocindir structure.
 *
 * Parameters:
 *	ip: inode for file being extended
 *	ptrno: offset of pointer in indirect block
 *	newblkno: disk block number being added
 *	oldblkno: previous block number, 0 if none
 */
static struct allocindir *
newallocindir(struct inode *ip, int ptrno, ufs_daddr_t newblkno,
	      ufs_daddr_t oldblkno)
{
	struct allocindir *aip;

	aip = kmalloc(sizeof(struct allocindir), M_ALLOCINDIR,
	    M_SOFTDEP_FLAGS | M_ZERO);
	aip->ai_list.wk_type = D_ALLOCINDIR;
	aip->ai_state = ATTACHED;
	aip->ai_offset = ptrno;
	aip->ai_newblkno = newblkno;
	aip->ai_oldblkno = oldblkno;
	aip->ai_freefrag = newfreefrag(ip, oldblkno, ip->i_fs->fs_bsize);
	return (aip);
}

/*
 * Called just before setting an indirect block pointer
 * to a newly allocated file page.
 *
 * Parameters:
 *	ip: inode for file being extended
 *	lbn: allocated block number within file
 *	bp: buffer with indirect blk referencing page
 *	ptrno: offset of pointer in indirect block
 *	newblkno: disk block number being added
 *	oldblkno: previous block number, 0 if none
 *	nbp: buffer holding allocated page
 */
void
softdep_setup_allocindir_page(struct inode *ip, ufs_lbn_t lbn,
			      struct buf *bp, int ptrno,
			      ufs_daddr_t newblkno, ufs_daddr_t oldblkno,
			      struct buf *nbp)
{
	struct allocindir *aip;
	struct pagedep *pagedep;

	aip = newallocindir(ip, ptrno, newblkno, oldblkno);
	ACQUIRE_LOCK(&lk);
	/*
	 * If we are allocating a directory page, then we must
	 * allocate an associated pagedep to track additions and
	 * deletions.
	 */
	if ((ip->i_mode & IFMT) == IFDIR &&
	    pagedep_lookup(ip, lbn, DEPALLOC, &pagedep) == 0)
		WORKLIST_INSERT_BP(nbp, &pagedep->pd_list);
	WORKLIST_INSERT_BP(nbp, &aip->ai_list);
	FREE_LOCK(&lk);
	setup_allocindir_phase2(bp, ip, aip);
}

/*
 * Called just before setting an indirect block pointer to a
 * newly allocated indirect block.
 *
 * Parameters:
 *	nbp: newly allocated indirect block
 *	ip: inode for file being extended
 *	bp: indirect block referencing allocated block
 *	ptrno: offset of pointer in indirect block
 *	newblkno: disk block number being added
 */
void
softdep_setup_allocindir_meta(struct buf *nbp, struct inode *ip,
			      struct buf *bp, int ptrno,
			      ufs_daddr_t newblkno)
{
	struct allocindir *aip;

	aip = newallocindir(ip, ptrno, newblkno, 0);
	ACQUIRE_LOCK(&lk);
	WORKLIST_INSERT_BP(nbp, &aip->ai_list);
	FREE_LOCK(&lk);
	setup_allocindir_phase2(bp, ip, aip);
}
1564
1565/*
1566 * Called to finish the allocation of the "aip" allocated
1567 * by one of the two routines above.
1568 *
1569 * Parameters:
1570 * bp: in-memory copy of the indirect block
1571 * ip: inode for file being extended
1572 * aip: allocindir allocated by the above routines
1573 */
1574static void
1575setup_allocindir_phase2(struct buf *bp, struct inode *ip,
1576 struct allocindir *aip)
1577{
1578 struct worklist *wk;
1579 struct indirdep *indirdep, *newindirdep;
1580 struct bmsafemap *bmsafemap;
1581 struct allocindir *oldaip;
1582 struct freefrag *freefrag;
1583 struct newblk *newblk;
1584
1585 	if (bp->b_loffset >= 0)
1586 panic("setup_allocindir_phase2: not indir blk");
1587 for (indirdep = NULL, newindirdep = NULL; ; ) {
1588 ACQUIRE_LOCK(&lk);
1589 LIST_FOREACH(wk, &bp->b_dep, wk_list) {
1590 if (wk->wk_type != D_INDIRDEP)
1591 continue;
1592 indirdep = WK_INDIRDEP(wk);
1593 break;
1594 }
1595 if (indirdep == NULL && newindirdep) {
1596 indirdep = newindirdep;
1597 			WORKLIST_INSERT_BP(bp, &indirdep->ir_list);
1598 newindirdep = NULL;
1599 }
1600 FREE_LOCK(&lk);
1601 if (indirdep) {
1602 if (newblk_lookup(ip->i_fs, aip->ai_newblkno, 0,
1603 &newblk) == 0)
1604 panic("setup_allocindir: lost block");
1605 ACQUIRE_LOCK(&lk);
1606 if (newblk->nb_state == DEPCOMPLETE) {
1607 aip->ai_state |= DEPCOMPLETE;
1608 aip->ai_buf = NULL;
1609 } else {
1610 bmsafemap = newblk->nb_bmsafemap;
1611 aip->ai_buf = bmsafemap->sm_buf;
1612 LIST_REMOVE(newblk, nb_deps);
1613 LIST_INSERT_HEAD(&bmsafemap->sm_allocindirhd,
1614 aip, ai_deps);
1615 }
1616 LIST_REMOVE(newblk, nb_hash);
1617 			kfree(newblk, M_NEWBLK);
1618 aip->ai_indirdep = indirdep;
1619 /*
1620 * Check to see if there is an existing dependency
1621 * for this block. If there is, merge the old
1622 * dependency into the new one.
1623 */
1624 if (aip->ai_oldblkno == 0)
1625 oldaip = NULL;
1626 else
1627
1628 LIST_FOREACH(oldaip, &indirdep->ir_deplisthd, ai_next)
1629 if (oldaip->ai_offset == aip->ai_offset)
1630 break;
1631 if (oldaip != NULL) {
1632 if (oldaip->ai_newblkno != aip->ai_oldblkno) {
1633 FREE_LOCK(&lk);
1634 panic("setup_allocindir_phase2: blkno");
1635 }
1636 aip->ai_oldblkno = oldaip->ai_oldblkno;
1637 freefrag = oldaip->ai_freefrag;
1638 oldaip->ai_freefrag = aip->ai_freefrag;
1639 aip->ai_freefrag = freefrag;
1640 free_allocindir(oldaip, NULL);
1641 }
1642 LIST_INSERT_HEAD(&indirdep->ir_deplisthd, aip, ai_next);
1643 ((ufs_daddr_t *)indirdep->ir_savebp->b_data)
1644 [aip->ai_offset] = aip->ai_oldblkno;
1645 FREE_LOCK(&lk);
1646 }
1647 if (newindirdep) {
1648 			/*
1649 * Avoid any possibility of data corruption by
1650 * ensuring that our old version is thrown away.
1651 			 */
1652 newindirdep->ir_savebp->b_flags |= B_INVAL | B_NOCACHE;
1653 brelse(newindirdep->ir_savebp);
1654 WORKITEM_FREE((caddr_t)newindirdep, D_INDIRDEP);
1655 }
1656 if (indirdep)
1657 break;
1658 newindirdep = kmalloc(sizeof(struct indirdep), M_INDIRDEP,
1659 M_SOFTDEP_FLAGS);
1660 newindirdep->ir_list.wk_type = D_INDIRDEP;
1661 newindirdep->ir_state = ATTACHED;
1662 LIST_INIT(&newindirdep->ir_deplisthd);
1663 LIST_INIT(&newindirdep->ir_donehd);
1664 if (bp->b_bio2.bio_offset == NOOFFSET) {
1665 VOP_BMAP(bp->b_vp, bp->b_bio1.bio_offset,
1666 &bp->b_bio2.bio_offset, NULL, NULL,
1667 BUF_CMD_WRITE);
1668 		}
1669 KKASSERT(bp->b_bio2.bio_offset != NOOFFSET);
1670 newindirdep->ir_savebp = getblk(ip->i_devvp,
1671 bp->b_bio2.bio_offset,
1672 bp->b_bcount, 0, 0);
1673 BUF_KERNPROC(newindirdep->ir_savebp);
1674 bcopy(bp->b_data, newindirdep->ir_savebp->b_data, bp->b_bcount);
1675 }
1676}
1677
1678/*
1679 * Block de-allocation dependencies.
1680 *
1681 * When blocks are de-allocated, the on-disk pointers must be nullified before
1682 * the blocks are made available for use by other files. (The true
1683 * requirement is that old pointers must be nullified before new on-disk
1684 * pointers are set. We chose this slightly more stringent requirement to
1685 * reduce complexity.) Our implementation handles this dependency by updating
1686 * the inode (or indirect block) appropriately but delaying the actual block
1687 * de-allocation (i.e., freemap and free space count manipulation) until
1688 * after the updated versions reach stable storage. After the disk is
1689 * updated, the blocks can be safely de-allocated whenever it is convenient.
1690 * This implementation handles only the common case of reducing a file's
1691 * length to zero. Other cases are handled by the conventional synchronous
1692 * write approach.
1693 *
1694 * The ffs implementation with which we worked double-checks
1695 * the state of the block pointers and file size as it reduces
1696 * a file's length. Some of this code is replicated here in our
1697 * soft updates implementation. The freeblks->fb_chkcnt field is
1698 * used to transfer a part of this information to the procedure
1699 * that eventually de-allocates the blocks.
1700 *
1701 * This routine should be called from the routine that shortens
1702 * a file's length, before the inode's size or block pointers
1703 * are modified. It will save the block pointer information for
1704 * later release and zero the inode so that the calling routine
1705 * can release it.
1706 */
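/*
 * A minimal sketch of the ordering enforced here (the helper names are
 * hypothetical, not kernel interfaces):
 *
 *	save_block_pointers(freeblks, ip);	// stash i_db[]/i_ib[]
 *	zero_block_pointers(ip);		// nullify the in-core copies
 *	write_inode_block(ip);			// push the zero'ed dinode
 *	...wait for the write to reach stable storage...
 *	free_saved_blocks(freeblks);		// now update the freemap
 *
 * The real work is split between softdep_setup_freeblocks() below and
 * handle_workitem_freeblocks().
 */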
1707struct softdep_setup_freeblocks_info {
1708 struct fs *fs;
1709 struct inode *ip;
1710};
1711
1712static int softdep_setup_freeblocks_bp(struct buf *bp, void *data);
1713
1714/*
1715 * Parameters:
1716 * ip: The inode whose length is to be reduced
1717 * length: The new length for the file
1718 */
1719 void
1720 softdep_setup_freeblocks(struct inode *ip, off_t length)
1721 {
1722 	struct softdep_setup_freeblocks_info info;
1723 struct freeblks *freeblks;
1724 struct inodedep *inodedep;
1725 struct allocdirect *adp;
1726 struct vnode *vp;
1727 struct buf *bp;
1728 struct fs *fs;
1729 int i, error, delay;
1730 	int count;
1731
1732 fs = ip->i_fs;
1733 if (length != 0)
1734 		panic("softdep_setup_freeblocks: non-zero length");
1735 freeblks = kmalloc(sizeof(struct freeblks), M_FREEBLKS,
1736 M_SOFTDEP_FLAGS | M_ZERO);
1737 	freeblks->fb_list.wk_type = D_FREEBLKS;
1738 	freeblks->fb_state = ATTACHED;
1739 freeblks->fb_uid = ip->i_uid;
1740 freeblks->fb_previousinum = ip->i_number;
1741 freeblks->fb_devvp = ip->i_devvp;
1742 freeblks->fb_fs = fs;
1743 freeblks->fb_oldsize = ip->i_size;
1744 freeblks->fb_newsize = length;
1745 freeblks->fb_chkcnt = ip->i_blocks;
1746 for (i = 0; i < NDADDR; i++) {
1747 freeblks->fb_dblks[i] = ip->i_db[i];
1748 ip->i_db[i] = 0;
1749 }
1750 for (i = 0; i < NIADDR; i++) {
1751 freeblks->fb_iblks[i] = ip->i_ib[i];
1752 ip->i_ib[i] = 0;
1753 }
1754 ip->i_blocks = 0;
1755 ip->i_size = 0;
1756 /*
1757 	 * Push the zero'ed inode to its disk buffer so that we are free
1758 * to delete its dependencies below. Once the dependencies are gone
1759 * the buffer can be safely released.
1760 */
1761 if ((error = bread(ip->i_devvp,
1762 	    fsbtodoff(fs, ino_to_fsba(fs, ip->i_number)),
1763 	    (int)fs->fs_bsize, &bp)) != 0)
1764 		softdep_error("softdep_setup_freeblocks", error);
1765 	*((struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, ip->i_number)) =
1766 ip->i_din;
1767 /*
1768 * Find and eliminate any inode dependencies.
1769 */
1770 ACQUIRE_LOCK(&lk);
1771 (void) inodedep_lookup(fs, ip->i_number, DEPALLOC, &inodedep);
1772 if ((inodedep->id_state & IOSTARTED) != 0) {
1773 FREE_LOCK(&lk);
1774 panic("softdep_setup_freeblocks: inode busy");
1775 }
1776 /*
1777 * Add the freeblks structure to the list of operations that
1778 * must await the zero'ed inode being written to disk. If we
1779 * still have a bitmap dependency (delay == 0), then the inode
1780 * has never been written to disk, so we can process the
1781 * freeblks below once we have deleted the dependencies.
1782 */
1783 delay = (inodedep->id_state & DEPCOMPLETE);
1784 if (delay)
1785 WORKLIST_INSERT(&inodedep->id_bufwait, &freeblks->fb_list);
1786 /*
1787 * Because the file length has been truncated to zero, any
1788 * pending block allocation dependency structures associated
1789 * with this inode are obsolete and can simply be de-allocated.
1790 * We must first merge the two dependency lists to get rid of
1791 * any duplicate freefrag structures, then purge the merged list.
1792 */
1793 merge_inode_lists(inodedep);
1794 	while ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL)
1795 free_allocdirect(&inodedep->id_inoupdt, adp, 1);
1796 FREE_LOCK(&lk);
1797 bdwrite(bp);
1798 /*
1799 * We must wait for any I/O in progress to finish so that
1800 * all potential buffers on the dirty list will be visible.
1801 * Once they are all there, walk the list and get rid of
1802 * any dependencies.
1803 */
1804 vp = ITOV(ip);
1805 ACQUIRE_LOCK(&lk);
1806 drain_output(vp, 1);
1807
1808 info.fs = fs;
1809 info.ip = ip;
1810 	lwkt_gettoken(&vp->v_token);
1811 do {
1812 count = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL,
1813 softdep_setup_freeblocks_bp, &info);
1814 	} while (count != 0);
1815 	lwkt_reltoken(&vp->v_token);
1816 
1817 if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) != 0)
1818 (void)free_inodedep(inodedep);
1819
1820 if (delay) {
1821 freeblks->fb_state |= DEPCOMPLETE;
1822 /*
1823 * If the inode with zeroed block pointers is now on disk
1824 * we can start freeing blocks. Add freeblks to the worklist
1825 * instead of calling handle_workitem_freeblocks directly as
1826 * it is more likely that additional IO is needed to complete
1827 * the request here than in the !delay case.
1828 */
1829 if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE)
1830 add_to_worklist(&freeblks->fb_list);
1831 }
1832
1833 FREE_LOCK(&lk);
1834 /*
1835 * If the inode has never been written to disk (delay == 0),
1836 * then we can process the freeblks now that we have deleted
1837 * the dependencies.
1838 */
1839 if (!delay)
1840 handle_workitem_freeblocks(freeblks);
1841}
1842
1843static int
1844softdep_setup_freeblocks_bp(struct buf *bp, void *data)
1845{
1846 struct softdep_setup_freeblocks_info *info = data;
1847 struct inodedep *inodedep;
1848
1849 	if (getdirtybuf(&bp, MNT_WAIT) == 0) {
1850 		kprintf("softdep_setup_freeblocks_bp(1): caught bp %p going away\n", bp);
1851 		return(-1);
1852 }
1853 if (bp->b_vp != ITOV(info->ip) || (bp->b_flags & B_DELWRI) == 0) {
1854 		kprintf("softdep_setup_freeblocks_bp(2): caught bp %p going away\n", bp);
1855 BUF_UNLOCK(bp);
1856 return(-1);
1857 }
1858 (void) inodedep_lookup(info->fs, info->ip->i_number, 0, &inodedep);
1859 deallocate_dependencies(bp, inodedep);
1860 bp->b_flags |= B_INVAL | B_NOCACHE;
1861 FREE_LOCK(&lk);
1862 brelse(bp);
1863 ACQUIRE_LOCK(&lk);
1864 return(1);
1865}
1866
1867/*
1868 * Reclaim any dependency structures from a buffer that is about to
1869 * be reallocated to a new vnode. The buffer must be locked, thus,
1870 * no I/O completion operations can occur while we are manipulating
1871 * its associated dependencies. The mutex is held so that other I/O's
1872 * associated with related dependencies do not occur.
1873 */
1874static void
1875 deallocate_dependencies(struct buf *bp, struct inodedep *inodedep)
1876{
1877 struct worklist *wk;
1878 struct indirdep *indirdep;
1879 struct allocindir *aip;
1880 struct pagedep *pagedep;
1881 struct dirrem *dirrem;
1882 struct diradd *dap;
1883 int i;
1884
1885 while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) {
1886 switch (wk->wk_type) {
1887
1888 case D_INDIRDEP:
1889 indirdep = WK_INDIRDEP(wk);
1890 /*
1891 * None of the indirect pointers will ever be visible,
1892 * so they can simply be tossed. GOINGAWAY ensures
1893 * that allocated pointers will be saved in the buffer
1894 * cache until they are freed. Note that they will
1895 * only be able to be found by their physical address
1896 * since the inode mapping the logical address will
1897 * be gone. The save buffer used for the safe copy
1898 * was allocated in setup_allocindir_phase2 using
1899 * the physical address so it could be used for this
1900 * purpose. Hence we swap the safe copy with the real
1901 * copy, allowing the safe copy to be freed and holding
1902 * on to the real copy for later use in indir_trunc.
1903 *
1904 * NOTE: ir_savebp is relative to the block device
1905 * so b_bio1 contains the device block number.
1906 */
1907 if (indirdep->ir_state & GOINGAWAY) {
1908 FREE_LOCK(&lk);
1909 panic("deallocate_dependencies: already gone");
1910 }
1911 indirdep->ir_state |= GOINGAWAY;
1912 			while ((aip = LIST_FIRST(&indirdep->ir_deplisthd)) != NULL)
984263bc 1913 free_allocindir(aip, inodedep);
1914 if (bp->b_bio1.bio_offset >= 0 ||
1915 bp->b_bio2.bio_offset != indirdep->ir_savebp->b_bio1.bio_offset) {
1916 FREE_LOCK(&lk);
1917 panic("deallocate_dependencies: not indir");
1918 }
1919 bcopy(bp->b_data, indirdep->ir_savebp->b_data,
1920 bp->b_bcount);
1921 WORKLIST_REMOVE(wk);
1922 			WORKLIST_INSERT_BP(indirdep->ir_savebp, wk);
1923 continue;
1924
1925 case D_PAGEDEP:
1926 pagedep = WK_PAGEDEP(wk);
1927 /*
1928 * None of the directory additions will ever be
1929 * visible, so they can simply be tossed.
1930 */
1931 for (i = 0; i < DAHASHSZ; i++)
1932 while ((dap =
1933 LIST_FIRST(&pagedep->pd_diraddhd[i])))
1934 free_diradd(dap);
1935 			while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL)
1936 free_diradd(dap);
1937 /*
1938 * Copy any directory remove dependencies to the list
1939 * to be processed after the zero'ed inode is written.
1940 * If the inode has already been written, then they
1941 * can be dumped directly onto the work list.
1942 */
1943 LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next) {
1944 LIST_REMOVE(dirrem, dm_next);
1945 dirrem->dm_dirinum = pagedep->pd_ino;
1946 if (inodedep == NULL ||
1947 (inodedep->id_state & ALLCOMPLETE) ==
1948 ALLCOMPLETE)
1949 add_to_worklist(&dirrem->dm_list);
1950 else
1951 WORKLIST_INSERT(&inodedep->id_bufwait,
1952 &dirrem->dm_list);
1953 }
1954 WORKLIST_REMOVE(&pagedep->pd_list);
1955 LIST_REMOVE(pagedep, pd_hash);
1956 WORKITEM_FREE(pagedep, D_PAGEDEP);
1957 continue;
1958
1959 case D_ALLOCINDIR:
1960 free_allocindir(WK_ALLOCINDIR(wk), inodedep);
1961 continue;
1962
1963 case D_ALLOCDIRECT:
1964 case D_INODEDEP:
1965 FREE_LOCK(&lk);
1966 panic("deallocate_dependencies: Unexpected type %s",
1967 TYPENAME(wk->wk_type));
1968 /* NOTREACHED */
1969
1970 default:
1971 FREE_LOCK(&lk);
1972 panic("deallocate_dependencies: Unknown type %s",
1973 TYPENAME(wk->wk_type));
1974 /* NOTREACHED */
1975 }
1976 }
1977}
1978
1979/*
1980 * Free an allocdirect. Generate a new freefrag work request if appropriate.
1981 * This routine must be called with splbio interrupts blocked.
1982 */
1983static void
1984free_allocdirect(struct allocdirectlst *adphead,
1985 struct allocdirect *adp, int delay)
1986 {
1987 	KKASSERT(lock_held(&lk) > 0);
1988 
1989 if ((adp->ad_state & DEPCOMPLETE) == 0)
1990 LIST_REMOVE(adp, ad_deps);
1991 TAILQ_REMOVE(adphead, adp, ad_next);
1992 if ((adp->ad_state & COMPLETE) == 0)
1993 WORKLIST_REMOVE(&adp->ad_list);
1994 if (adp->ad_freefrag != NULL) {
1995 if (delay)
1996 WORKLIST_INSERT(&adp->ad_inodedep->id_bufwait,
1997 &adp->ad_freefrag->ff_list);
1998 else
1999 add_to_worklist(&adp->ad_freefrag->ff_list);
2000 }
2001 WORKITEM_FREE(adp, D_ALLOCDIRECT);
2002}
2003
2004/*
2005 * Prepare an inode to be freed. The actual free operation is not
2006 * done until the zero'ed inode has been written to disk.
2007 */
2008void
2009 softdep_freefile(struct vnode *pvp, ino_t ino, int mode)
2010{
2011 struct inode *ip = VTOI(pvp);
2012 struct inodedep *inodedep;
2013 struct freefile *freefile;
2014
2015 /*
2016 * This sets up the inode de-allocation dependency.
2017 */
2018 freefile = kmalloc(sizeof(struct freefile), M_FREEFILE,
2019 M_SOFTDEP_FLAGS);
2020 freefile->fx_list.wk_type = D_FREEFILE;
2021 freefile->fx_list.wk_state = 0;
2022 freefile->fx_mode = mode;
2023 freefile->fx_oldinum = ino;
2024 freefile->fx_devvp = ip->i_devvp;
2025 freefile->fx_fs = ip->i_fs;
2026
2027 /*
2028 * If the inodedep does not exist, then the zero'ed inode has
2029 * been written to disk. If the allocated inode has never been
2030 * written to disk, then the on-disk inode is zero'ed. In either
2031 * case we can free the file immediately.
2032 */
2033 ACQUIRE_LOCK(&lk);
2034 if (inodedep_lookup(ip->i_fs, ino, 0, &inodedep) == 0 ||
2035 check_inode_unwritten(inodedep)) {
2036 FREE_LOCK(&lk);
2037 handle_workitem_freefile(freefile);
2038 return;
2039 }
2040 WORKLIST_INSERT(&inodedep->id_inowait, &freefile->fx_list);
2041 FREE_LOCK(&lk);
2042}
2043
2044/*
2045 * Check to see if an inode has never been written to disk. If
2046 * so free the inodedep and return success, otherwise return failure.
2047 * This routine must be called with splbio interrupts blocked.
2048 *
2049 * If we still have a bitmap dependency, then the inode has never
2050 * been written to disk. Drop the dependency as it is no longer
2051 * necessary since the inode is being deallocated. We set the
2052 * ALLCOMPLETE flags since the bitmap now properly shows that the
2053 * inode is not allocated. Even if the inode is actively being
2054 * written, it has been rolled back to its zero'ed state, so we
2055 * are ensured that a zero inode is what is on the disk. For short
2056 * lived files, this change will usually result in removing all the
2057 * dependencies from the inode so that it can be freed immediately.
2058 */
2059static int
2060 check_inode_unwritten(struct inodedep *inodedep)
2061{
2062
2063 if ((inodedep->id_state & DEPCOMPLETE) != 0 ||
2064 LIST_FIRST(&inodedep->id_pendinghd) != NULL ||
2065 LIST_FIRST(&inodedep->id_bufwait) != NULL ||
2066 LIST_FIRST(&inodedep->id_inowait) != NULL ||
2067 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL ||
2068 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL ||
2069 inodedep->id_nlinkdelta != 0)
2070 return (0);
2071
2072 /*
2073 * Another process might be in initiate_write_inodeblock
2074 * trying to allocate memory without holding "Softdep Lock".
2075 */
2076 if ((inodedep->id_state & IOSTARTED) != 0 &&
2077 inodedep->id_savedino == NULL)
2078 return(0);
2079
2080 inodedep->id_state |= ALLCOMPLETE;
2081 LIST_REMOVE(inodedep, id_deps);
2082 inodedep->id_buf = NULL;
2083 if (inodedep->id_state & ONWORKLIST)
2084 WORKLIST_REMOVE(&inodedep->id_list);
2085 if (inodedep->id_savedino != NULL) {
2086 		kfree(inodedep->id_savedino, M_INODEDEP);
2087 inodedep->id_savedino = NULL;
2088 }
2089 if (free_inodedep(inodedep) == 0) {
2090 FREE_LOCK(&lk);
2091 panic("check_inode_unwritten: busy inode");
2092 }
2093 return (1);
2094}
2095
2096/*
2097 * Try to free an inodedep structure. Return 1 if it could be freed.
2098 */
2099static int
2100 free_inodedep(struct inodedep *inodedep)
2101{
2102
2103 if ((inodedep->id_state & ONWORKLIST) != 0 ||
2104 (inodedep->id_state & ALLCOMPLETE) != ALLCOMPLETE ||
2105 LIST_FIRST(&inodedep->id_pendinghd) != NULL ||
2106 LIST_FIRST(&inodedep->id_bufwait) != NULL ||
2107 LIST_FIRST(&inodedep->id_inowait) != NULL ||
2108 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL ||
2109 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL ||
2110 inodedep->id_nlinkdelta != 0 || inodedep->id_savedino != NULL)
2111 return (0);
2112 LIST_REMOVE(inodedep, id_hash);
2113 WORKITEM_FREE(inodedep, D_INODEDEP);
2114 num_inodedep -= 1;
2115 return (1);
2116}
2117
2118/*
2119 * This workitem routine performs the block de-allocation.
2120 * The workitem is added to the pending list after the updated
2121 * inode block has been written to disk. As mentioned above,
2122 * checks regarding the number of blocks de-allocated (compared
2123 * to the number of blocks allocated for the file) are also
2124 * performed in this function.
2125 */
2126static void
2127 handle_workitem_freeblocks(struct freeblks *freeblks)
2128{
2129 struct inode tip;
2130 ufs_daddr_t bn;
2131 struct fs *fs;
2132 int i, level, bsize;
2133 long nblocks, blocksreleased = 0;
2134 int error, allerror = 0;
2135 ufs_lbn_t baselbns[NIADDR], tmpval;
2136
2137 tip.i_number = freeblks->fb_previousinum;
2138 tip.i_devvp = freeblks->fb_devvp;
2139 tip.i_dev = freeblks->fb_devvp->v_rdev;
2140 tip.i_fs = freeblks->fb_fs;
2141 tip.i_size = freeblks->fb_oldsize;
2142 tip.i_uid = freeblks->fb_uid;
2143 fs = freeblks->fb_fs;
2144 tmpval = 1;
2145 baselbns[0] = NDADDR;
2146 for (i = 1; i < NIADDR; i++) {
2147 tmpval *= NINDIR(fs);
2148 baselbns[i] = baselbns[i - 1] + tmpval;
2149 }
2150 nblocks = btodb(fs->fs_bsize);
2151 blocksreleased = 0;
2152 /*
2153 * Indirect blocks first.
2154 */
2155 for (level = (NIADDR - 1); level >= 0; level--) {
2156 if ((bn = freeblks->fb_iblks[level]) == 0)
2157 continue;
2158 		if ((error = indir_trunc(&tip, fsbtodoff(fs, bn), level,
2159 		    baselbns[level], &blocksreleased)) != 0)
2160 			allerror = error;
2161 ffs_blkfree(&tip, bn, fs->fs_bsize);
2162 blocksreleased += nblocks;
2163 }
2164 /*
2165 * All direct blocks or frags.
2166 */
2167 for (i = (NDADDR - 1); i >= 0; i--) {
2168 if ((bn = freeblks->fb_dblks[i]) == 0)
2169 continue;
2170 bsize = blksize(fs, &tip, i);
2171 ffs_blkfree(&tip, bn, bsize);
2172 blocksreleased += btodb(bsize);
2173 }
2174
2175#ifdef DIAGNOSTIC
2176 if (freeblks->fb_chkcnt != blocksreleased)
2177 		kprintf("handle_workitem_freeblocks: block count\n");
2178 if (allerror)
2179 softdep_error("handle_workitem_freeblks", allerror);
2180#endif /* DIAGNOSTIC */
2181 WORKITEM_FREE(freeblks, D_FREEBLKS);
2182}
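/*
 * Worked example for the baselbns[] computation above (illustrative,
 * assuming a UFS1 filesystem with 8K blocks, so NDADDR == 12, NIADDR == 3
 * and NINDIR(fs) == 8192 / sizeof(ufs_daddr_t) == 2048):
 *
 *	baselbns[0] = 12			   (first single-indirect lbn)
 *	baselbns[1] = 12 + 2048 = 2060		   (first double-indirect lbn)
 *	baselbns[2] = 2060 + 2048*2048 = 4196364   (first triple-indirect lbn)
 */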
2183
2184/*
2185 * Release blocks associated with the inode ip and stored in the indirect
2186 * block at doffset. If level is greater than SINGLE, the block is an
2187 * indirect block and recursive calls to indir_trunc must be used to
2188 * cleanse other indirect blocks.
2189 */
2190static int
2191indir_trunc(struct inode *ip, off_t doffset, int level, ufs_lbn_t lbn,
2192 long *countp)
2193{
2194 struct buf *bp;
2195 ufs_daddr_t *bap;
2196 ufs_daddr_t nb;
2197 struct fs *fs;
2198 struct worklist *wk;
2199 struct indirdep *indirdep;
2200 int i, lbnadd, nblocks;
2201 int error, allerror = 0;
2202
2203 fs = ip->i_fs;
2204 lbnadd = 1;
2205 for (i = level; i > 0; i--)
2206 lbnadd *= NINDIR(fs);
2207 /*
2208 * Get buffer of block pointers to be freed. This routine is not
2209 * called until the zero'ed inode has been written, so it is safe
2210 * to free blocks as they are encountered. Because the inode has
2211 * been zero'ed, calls to bmap on these blocks will fail. So, we
2212 * have to use the on-disk address and the block device for the
2213 * filesystem to look them up. If the file was deleted before its
2214 * indirect blocks were all written to disk, the routine that set
2215 * us up (deallocate_dependencies) will have arranged to leave
2216 * a complete copy of the indirect block in memory for our use.
2217 * Otherwise we have to read the blocks in from the disk.
2218 */
2219 ACQUIRE_LOCK(&lk);
2220 	if ((bp = findblk(ip->i_devvp, doffset, FINDBLK_TEST)) != NULL &&
2221 	    (wk = LIST_FIRST(&bp->b_dep)) != NULL) {
2222 /*
2223 * bp must be ir_savebp, which is held locked for our use.
2224 */
2225 if (wk->wk_type != D_INDIRDEP ||
2226 (indirdep = WK_INDIRDEP(wk))->ir_savebp != bp ||
2227 (indirdep->ir_state & GOINGAWAY) == 0) {
2228 FREE_LOCK(&lk);
2229 panic("indir_trunc: lost indirdep");
2230 }
2231 WORKLIST_REMOVE(wk);
2232 WORKITEM_FREE(indirdep, D_INDIRDEP);
2233 if (LIST_FIRST(&bp->b_dep) != NULL) {
2234 FREE_LOCK(&lk);
2235 panic("indir_trunc: dangling dep");
2236 }
2237 FREE_LOCK(&lk);
2238 } else {
2239 FREE_LOCK(&lk);
2240 		error = bread(ip->i_devvp, doffset, (int)fs->fs_bsize, &bp);
2241 if (error)
2242 return (error);
2243 }
2244 /*
2245 * Recursively free indirect blocks.
2246 */
2247 bap = (ufs_daddr_t *)bp->b_data;
2248 nblocks = btodb(fs->fs_bsize);
2249 for (i = NINDIR(fs) - 1; i >= 0; i--) {
2250 if ((nb = bap[i]) == 0)
2251 continue;
2252 if (level != 0) {
2253 			if ((error = indir_trunc(ip, fsbtodoff(fs, nb),
2254 level - 1, lbn + (i * lbnadd), countp)) != 0)
2255 allerror = error;
2256 }
2257 ffs_blkfree(ip, nb, fs->fs_bsize);
2258 *countp += nblocks;
2259 }
2260 bp->b_flags |= B_INVAL | B_NOCACHE;
2261 brelse(bp);
2262 return (allerror);
2263}
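/*
 * Illustrative recursion trace (not from the original source): freeing a
 * double-indirect tree with NINDIR(fs) == 2048 expands roughly as
 *
 *	indir_trunc(ip, doff, 1, lbn, &cnt)
 *	    indir_trunc(ip, fsbtodoff(fs, bap[2047]), 0, lbn + 2047*2048, &cnt)
 *	    ...
 *	    indir_trunc(ip, fsbtodoff(fs, bap[0]), 0, lbn + 0*2048, &cnt)
 *
 * where each level frees its children's subtrees before ffs_blkfree()
 * releases the child block itself in the loop above.
 */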
2264
2265/*
2266 * Free an allocindir.
2267 * This routine must be called with splbio interrupts blocked.
2268 */
2269static void
2270 free_allocindir(struct allocindir *aip, struct inodedep *inodedep)
2271{
2272 struct freefrag *freefrag;
2273
2274 KKASSERT(lock_held(&lk) > 0);
2275
2276 if ((aip->ai_state & DEPCOMPLETE) == 0)
2277 LIST_REMOVE(aip, ai_deps);
2278 if (aip->ai_state & ONWORKLIST)
2279 WORKLIST_REMOVE(&aip->ai_list);
2280 LIST_REMOVE(aip, ai_next);
2281 if ((freefrag = aip->ai_freefrag) != NULL) {
2282 if (inodedep == NULL)
2283 add_to_worklist(&freefrag->ff_list);
2284 else
2285 WORKLIST_INSERT(&inodedep->id_bufwait,
2286 &freefrag->ff_list);
2287 }
2288 WORKITEM_FREE(aip, D_ALLOCINDIR);
2289}
2290
2291/*
2292 * Directory entry addition dependencies.
2293 *
2294 * When adding a new directory entry, the inode (with its incremented link
2295 * count) must be written to disk before the directory entry's pointer to it.
2296 * Also, if the inode is newly allocated, the corresponding freemap must be
2297 * updated (on disk) before the directory entry's pointer. These requirements
2298 * are met via undo/redo on the directory entry's pointer, which consists
2299 * simply of the inode number.
2300 *
2301 * As directory entries are added and deleted, the free space within a
2302 * directory block can become fragmented. The ufs filesystem will compact
2303 * a fragmented directory block to make space for a new entry. When this
2304 * occurs, the offsets of previously added entries change. Any "diradd"
2305 * dependency structures corresponding to these entries must be updated with
2306 * the new offsets.
2307 */
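/*
 * Undo/redo sketch (illustrative only) for a new entry whose dependencies
 * are not yet satisfied; see initiate_write_filepage() and
 * handle_written_filepage() for the real roll-back and roll-forward:
 *
 *	undo:	ep->d_ino = 0;			// entry was never on disk
 *	  or:	ep->d_ino = old_inum;		// DIRCHG: revert to old inode
 *	redo:	ep->d_ino = dap->da_newinum;	// once the inode is stable
 */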
2308
2309/*
2310 * This routine is called after the in-memory inode's link
2311 * count has been incremented, but before the directory entry's
2312 * pointer to the inode has been set.
2313 *
2314 * Parameters:
2315 * bp: buffer containing directory block
2316 * dp: inode for directory
2317 * diroffset: offset of new entry in directory
2318 * newinum: inode referenced by new directory entry
2319 * newdirbp: non-NULL => contents of new mkdir
2320 */
2321void
2322softdep_setup_directory_add(struct buf *bp, struct inode *dp, off_t diroffset,
2323 ino_t newinum, struct buf *newdirbp)
2324{
2325 int offset; /* offset of new entry within directory block */
2326 ufs_lbn_t lbn; /* block in directory containing new entry */
2327 struct fs *fs;
2328 struct diradd *dap;
2329 struct pagedep *pagedep;
2330 struct inodedep *inodedep;
2331 struct mkdir *mkdir1, *mkdir2;
2332
2333 /*
2334 * Whiteouts have no dependencies.
2335 */
2336 if (newinum == WINO) {
2337 if (newdirbp != NULL)
2338 bdwrite(newdirbp);
2339 return;
2340 }
2341
2342 fs = dp->i_fs;
2343 lbn = lblkno(fs, diroffset);
2344 offset = blkoff(fs, diroffset);
2345 dap = kmalloc(sizeof(struct diradd), M_DIRADD,
2346 M_SOFTDEP_FLAGS | M_ZERO);
2347 dap->da_list.wk_type = D_DIRADD;
2348 dap->da_offset = offset;
2349 dap->da_newinum = newinum;
2350 dap->da_state = ATTACHED;
2351 if (newdirbp == NULL) {
2352 dap->da_state |= DEPCOMPLETE;
2353 ACQUIRE_LOCK(&lk);
2354 } else {
2355 dap->da_state |= MKDIR_BODY | MKDIR_PARENT;
2356 mkdir1 = kmalloc(sizeof(struct mkdir), M_MKDIR,
2357 M_SOFTDEP_FLAGS);
2358 mkdir1->md_list.wk_type = D_MKDIR;
2359 mkdir1->md_state = MKDIR_BODY;
2360 mkdir1->md_diradd = dap;
884717e1
SW
2361 mkdir2 = kmalloc(sizeof(struct mkdir), M_MKDIR,
2362 M_SOFTDEP_FLAGS);
984263bc
MD
2363 mkdir2->md_list.wk_type = D_MKDIR;
2364 mkdir2->md_state = MKDIR_PARENT;
2365 mkdir2->md_diradd = dap;
2366 /*
2367 * Dependency on "." and ".." being written to disk.
2368 */
2369 mkdir1->md_buf = newdirbp;
2370 ACQUIRE_LOCK(&lk);
2371 LIST_INSERT_HEAD(&mkdirlisthd, mkdir1, md_mkdirs);
2372 		WORKLIST_INSERT_BP(newdirbp, &mkdir1->md_list);
2373 FREE_LOCK(&lk);
2374 bdwrite(newdirbp);
2375 /*
2376 * Dependency on link count increase for parent directory
2377 */
2378 ACQUIRE_LOCK(&lk);
2379 if (inodedep_lookup(dp->i_fs, dp->i_number, 0, &inodedep) == 0
2380 || (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) {
2381 dap->da_state &= ~MKDIR_PARENT;
2382 WORKITEM_FREE(mkdir2, D_MKDIR);
2383 } else {
2384 LIST_INSERT_HEAD(&mkdirlisthd, mkdir2, md_mkdirs);
2385 WORKLIST_INSERT(&inodedep->id_bufwait,&mkdir2->md_list);
2386 }
2387 }
2388 /*
2389 * Link into parent directory pagedep to await its being written.
2390 */
2391 if (pagedep_lookup(dp, lbn, DEPALLOC, &pagedep) == 0)
2392 		WORKLIST_INSERT_BP(bp, &pagedep->pd_list);
2393 dap->da_pagedep = pagedep;
2394 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], dap,
2395 da_pdlist);
2396 /*
2397 * Link into its inodedep. Put it on the id_bufwait list if the inode
2398 * is not yet written. If it is written, do the post-inode write
2399 * processing to put it on the id_pendinghd list.
2400 */
2401 (void) inodedep_lookup(fs, newinum, DEPALLOC, &inodedep);
2402 if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE)
2403 diradd_inode_written(dap, inodedep);
2404 else
2405 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list);
2406 FREE_LOCK(&lk);
2407}
2408
2409/*
2410 * This procedure is called to change the offset of a directory
2411 * entry when compacting a directory block which must be owned
2412 * exclusively by the caller. Note that the actual entry movement
2413 * must be done in this procedure to ensure that no I/O completions
2414 * occur while the move is in progress.
2415 *
2416 * Parameters:
2417 * dp: inode for directory
2418 * base: address of dp->i_offset
2419 * oldloc: address of old directory location
2420 * newloc: address of new directory location
2421 * entrysize: size of directory entry
2422 */
2423void
2424softdep_change_directoryentry_offset(struct inode *dp, caddr_t base,
2425 caddr_t oldloc, caddr_t newloc,
2426 int entrysize)
2427{
2428 int offset, oldoffset, newoffset;
2429 struct pagedep *pagedep;
2430 struct diradd *dap;
2431 ufs_lbn_t lbn;
2432
2433 ACQUIRE_LOCK(&lk);
2434 lbn = lblkno(dp->i_fs, dp->i_offset);
2435 offset = blkoff(dp->i_fs, dp->i_offset);
2436 if (pagedep_lookup(dp, lbn, 0, &pagedep) == 0)
2437 goto done;
2438 oldoffset = offset + (oldloc - base);
2439 newoffset = offset + (newloc - base);
2440
2441 LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(oldoffset)], da_pdlist) {
2442 if (dap->da_offset != oldoffset)
2443 continue;
2444 dap->da_offset = newoffset;
2445 if (DIRADDHASH(newoffset) == DIRADDHASH(oldoffset))
2446 break;
2447 LIST_REMOVE(dap, da_pdlist);
2448 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(newoffset)],
2449 dap, da_pdlist);
2450 break;
2451 }
2452 if (dap == NULL) {
2453
2454 LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist) {
2455 if (dap->da_offset == oldoffset) {
2456 dap->da_offset = newoffset;
2457 break;
2458 }
2459 }
2460 }
2461done:
2462 bcopy(oldloc, newloc, entrysize);
2463 FREE_LOCK(&lk);
2464}
2465
2466/*
2467 * Free a diradd dependency structure. This routine must be called
2468 * with splbio interrupts blocked.
2469 */
2470static void
2471 free_diradd(struct diradd *dap)
2472{
2473 struct dirrem *dirrem;
2474 struct pagedep *pagedep;
2475 struct inodedep *inodedep;
2476 struct mkdir *mkdir, *nextmd;
2477
2478 KKASSERT(lock_held(&lk) > 0);
2479
2480 WORKLIST_REMOVE(&dap->da_list);
2481 LIST_REMOVE(dap, da_pdlist);
2482 if ((dap->da_state & DIRCHG) == 0) {
2483 pagedep = dap->da_pagedep;
2484 } else {
2485 dirrem = dap->da_previous;
2486 pagedep = dirrem->dm_pagedep;
2487 dirrem->dm_dirinum = pagedep->pd_ino;
2488 add_to_worklist(&dirrem->dm_list);
2489 }
2490 if (inodedep_lookup(VFSTOUFS(pagedep->pd_mnt)->um_fs, dap->da_newinum,
2491 0, &inodedep) != 0)
2492 (void) free_inodedep(inodedep);
2493 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
2494 for (mkdir = LIST_FIRST(&mkdirlisthd); mkdir; mkdir = nextmd) {
2495 nextmd = LIST_NEXT(mkdir, md_mkdirs);
2496 if (mkdir->md_diradd != dap)
2497 continue;
2498 dap->da_state &= ~mkdir->md_state;
2499 WORKLIST_REMOVE(&mkdir->md_list);
2500 LIST_REMOVE(mkdir, md_mkdirs);
2501 WORKITEM_FREE(mkdir, D_MKDIR);
2502 }
2503 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
2504 FREE_LOCK(&lk);
2505 panic("free_diradd: unfound ref");
2506 }
2507 }
2508 WORKITEM_FREE(dap, D_DIRADD);
2509}
2510
2511/*
2512 * Directory entry removal dependencies.
2513 *
2514 * When removing a directory entry, the entry's inode pointer must be
2515 * zero'ed on disk before the corresponding inode's link count is decremented
2516 * (possibly freeing the inode for re-use). This dependency is handled by
2517 * updating the directory entry but delaying the inode count reduction until
2518 * after the directory block has been written to disk. After this point, the
2519 * inode count can be decremented whenever it is convenient.
2520 */
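/*
 * Ordering sketch (illustrative): the sequence the dirrem machinery below
 * arranges for a removed name:
 *
 *	ep->d_ino = 0;		// entry zero'ed in the directory block
 *	bdwrite(dir_bp);	// block heads for stable storage
 *	...directory block write completes...
 *	ip->i_nlink--;		// deferred decrement, handle_workitem_remove()
 */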
2521
2522/*
2523 * This routine should be called immediately after removing
2524 * a directory entry. The inode's link count should not be
2525 * decremented by the calling procedure -- the soft updates
2526 * code will do this task when it is safe.
3fcb1ab8
SW
2527 *
2528 * Parameters:
2529 * bp: buffer containing directory block
2530 * dp: inode for the directory being modified
2531 * ip: inode for directory entry being removed
2532 * isrmdir: indicates if doing RMDIR
2533 */
2534void
2535softdep_setup_remove(struct buf *bp, struct inode *dp, struct inode *ip,
2536 int isrmdir)
2537{
2538 struct dirrem *dirrem, *prevdirrem;
2539
2540 /*
2541 * Allocate a new dirrem if appropriate and ACQUIRE_LOCK.
2542 */
2543 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem);
2544
2545 /*
2546 * If the COMPLETE flag is clear, then there were no active
2547 * entries and we want to roll back to a zeroed entry until
2548 * the new inode is committed to disk. If the COMPLETE flag is
2549 * set then we have deleted an entry that never made it to
2550 * disk. If the entry we deleted resulted from a name change,
2551 * then the old name still resides on disk. We cannot delete
2552 * its inode (returned to us in prevdirrem) until the zeroed
2553 * directory entry gets to disk. The new inode has never been
2554 * referenced on the disk, so can be deleted immediately.
2555 */
2556 if ((dirrem->dm_state & COMPLETE) == 0) {
2557 LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, dirrem,
2558 dm_next);
2559 FREE_LOCK(&lk);
2560 } else {
2561 if (prevdirrem != NULL)
2562 LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd,
2563 prevdirrem, dm_next);
2564 dirrem->dm_dirinum = dirrem->dm_pagedep->pd_ino;
2565 FREE_LOCK(&lk);
2566 handle_workitem_remove(dirrem);
2567 }
2568}
2569
2570/*
2571 * Allocate a new dirrem if appropriate and return it along with
2572 * its associated pagedep. Called without a lock, returns with lock.
2573 */
2574static long num_dirrem; /* number of dirrem allocated */
2575
2576/*
2577 * Parameters:
2578 * bp: buffer containing directory block
2579 * dp: inode for the directory being modified
2580 * ip: inode for directory entry being removed
2581 * isrmdir: indicates if doing RMDIR
2582 * prevdirremp: previously referenced inode, if any
2583 */
2584 static struct dirrem *
2585newdirrem(struct buf *bp, struct inode *dp, struct inode *ip,
2586 int isrmdir, struct dirrem **prevdirremp)
2587{
2588 int offset;
2589 ufs_lbn_t lbn;
2590 struct diradd *dap;
2591 struct dirrem *dirrem;
2592 struct pagedep *pagedep;
2593
2594 /*
2595 * Whiteouts have no deletion dependencies.
2596 */
2597 if (ip == NULL)
2598 panic("newdirrem: whiteout");
2599 /*
2600 * If we are over our limit, try to improve the situation.
2601 * Limiting the number of dirrem structures will also limit
2602 * the number of freefile and freeblks structures.
2603 */
2604 if (num_dirrem > max_softdeps / 2 && speedup_syncer() == 0)
2605 (void) request_cleanup(FLUSH_REMOVE, 0);
2606 num_dirrem += 1;
2607 dirrem = kmalloc(sizeof(struct dirrem), M_DIRREM,
2608 M_SOFTDEP_FLAGS | M_ZERO);
2609 dirrem->dm_list.wk_type = D_DIRREM;
2610 dirrem->dm_state = isrmdir ? RMDIR : 0;
2611 dirrem->dm_mnt = ITOV(ip)->v_mount;
2612 dirrem->dm_oldinum = ip->i_number;
2613 *prevdirremp = NULL;
2614
2615 ACQUIRE_LOCK(&lk);
2616 lbn = lblkno(dp->i_fs, dp->i_offset);
2617 offset = blkoff(dp->i_fs, dp->i_offset);
2618 if (pagedep_lookup(dp, lbn, DEPALLOC, &pagedep) == 0)
2619 		WORKLIST_INSERT_BP(bp, &pagedep->pd_list);
2620 dirrem->dm_pagedep = pagedep;
2621 /*
2622 * Check for a diradd dependency for the same directory entry.
2623 * If present, then both dependencies become obsolete and can
2624 * be de-allocated. Check for an entry on both the pd_dirraddhd
2625 * list and the pd_pendinghd list.
2626 */
2627
2628 LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(offset)], da_pdlist)
2629 if (dap->da_offset == offset)
2630 break;
2631 if (dap == NULL) {
2632
2633 LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist)
2634 if (dap->da_offset == offset)
2635 break;
2636 if (dap == NULL)
2637 return (dirrem);
2638 }
2639 /*
2640 * Must be ATTACHED at this point.
2641 */
2642 if ((dap->da_state & ATTACHED) == 0) {
2643 FREE_LOCK(&lk);
2644 panic("newdirrem: not ATTACHED");
2645 }
2646 if (dap->da_newinum != ip->i_number) {
2647 FREE_LOCK(&lk);
2648 		panic("newdirrem: inum %"PRId64" should be %"PRId64,
2649 ip->i_number, dap->da_newinum);
2650 }
2651 /*
2652 * If we are deleting a changed name that never made it to disk,
2653 * then return the dirrem describing the previous inode (which
2654 * represents the inode currently referenced from this entry on disk).
2655 */
2656 if ((dap->da_state & DIRCHG) != 0) {
2657 *prevdirremp = dap->da_previous;
2658 dap->da_state &= ~DIRCHG;
2659 dap->da_pagedep = pagedep;
2660 }
2661 /*
2662 * We are deleting an entry that never made it to disk.
2663 * Mark it COMPLETE so we can delete its inode immediately.
2664 */
2665 dirrem->dm_state |= COMPLETE;
2666 free_diradd(dap);
2667 return (dirrem);
2668}
2669
2670/*
2671 * Directory entry change dependencies.
2672 *
2673 * Changing an existing directory entry requires that an add operation
2674 * be completed first followed by a deletion. The semantics for the addition
2675 * are identical to the description of adding a new entry above except
2676 * that the rollback is to the old inode number rather than zero. Once
2677 * the addition dependency is completed, the removal is done as described
2678 * in the removal routine above.
2679 */
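/*
 * Rollback sketch for a changed entry (illustrative): the same undo/redo
 * as an addition, except the entry reverts to the previous inode number
 * rather than to zero while the new inode is uncommitted:
 *
 *	undo:	ep->d_ino = dap->da_previous->dm_oldinum;
 *	redo:	ep->d_ino = dap->da_newinum;
 */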
2680
2681/*
2682 * This routine should be called immediately after changing
2683 * a directory entry. The inode's link count should not be
2684 * decremented by the calling procedure -- the soft updates
2685 * code will perform this task when it is safe.
2686 *
2687 * Parameters:
2688 * bp: buffer containing directory block
2689 * dp: inode for the directory being modified
2690 * ip: inode for directory entry being removed
2691 * newinum: new inode number for changed entry
2692 * isrmdir: indicates if doing RMDIR
984263bc
MD
2693 */
2694void
2695softdep_setup_directory_change(struct buf *bp, struct inode *dp,
2696 struct inode *ip, ino_t newinum,
2697 int isrmdir)
2698{
2699 int offset;
2700 struct diradd *dap = NULL;
2701 struct dirrem *dirrem, *prevdirrem;
2702 struct pagedep *pagedep;
2703 struct inodedep *inodedep;
2704
2705 offset = blkoff(dp->i_fs, dp->i_offset);
2706
2707 /*
2708 * Whiteouts do not need diradd dependencies.
2709 */
2710 if (newinum != WINO) {
2711 dap = kmalloc(sizeof(struct diradd), M_DIRADD,
2712 M_SOFTDEP_FLAGS | M_ZERO);
2713 dap->da_list.wk_type = D_DIRADD;
2714 dap->da_state = DIRCHG | ATTACHED | DEPCOMPLETE;
2715 dap->da_offset = offset;
2716 dap->da_newinum = newinum;
2717 }
2718
2719 /*
2720 * Allocate a new dirrem and ACQUIRE_LOCK.
2721 */
2722 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem);
2723 pagedep = dirrem->dm_pagedep;
2724 /*
2725 * The possible values for isrmdir:
2726 * 0 - non-directory file rename
2727 * 1 - directory rename within same directory
2728 * inum - directory rename to new directory of given inode number
2729 * When renaming to a new directory, we are both deleting and
2730 * creating a new directory entry, so the link count on the new
2731 * directory should not change. Thus we do not need the followup
2732 * dirrem which is usually done in handle_workitem_remove. We set
2733 * the DIRCHG flag to tell handle_workitem_remove to skip the
2734 * followup dirrem.
2735 */
2736 if (isrmdir > 1)
2737 dirrem->dm_state |= DIRCHG;
2738
2739 /*
2740 * Whiteouts have no additional dependencies,
2741 * so just put the dirrem on the correct list.
2742 */
2743 if (newinum == WINO) {
2744 if ((dirrem->dm_state & COMPLETE) == 0) {
2745 LIST_INSERT_HEAD(&pagedep->pd_dirremhd, dirrem,
2746 dm_next);
2747 } else {
2748 dirrem->dm_dirinum = pagedep->pd_ino;
2749 add_to_worklist(&dirrem->dm_list);
2750 }
2751 FREE_LOCK(&lk);
2752 return;
2753 }
2754
2755 /*
2756 * If the COMPLETE flag is clear, then there were no active
2757 * entries and we want to roll back to the previous inode until
2758 * the new inode is committed to disk. If the COMPLETE flag is
2759 * set, then we have deleted an entry that never made it to disk.
2760 * If the entry we deleted resulted from a name change, then the old
2761 * inode reference still resides on disk. Any rollback that we do
2762 * needs to be to that old inode (returned to us in prevdirrem). If
2763 * the entry we deleted resulted from a create, then there is
2764 * no entry on the disk, so we want to roll back to zero rather
2765 * than the uncommitted inode. In either of the COMPLETE cases we
2766 * want to immediately free the unwritten and unreferenced inode.
2767 */
2768 if ((dirrem->dm_state & COMPLETE) == 0) {
2769 dap->da_previous = dirrem;
2770 } else {
2771 if (prevdirrem != NULL) {
2772 dap->da_previous = prevdirrem;
2773 } else {
2774 dap->da_state &= ~DIRCHG;
2775 dap->da_pagedep = pagedep;
2776 }
2777 dirrem->dm_dirinum = pagedep->pd_ino;
2778 add_to_worklist(&dirrem->dm_list);
2779 }
2780 /*
2781 * Link into its inodedep. Put it on the id_bufwait list if the inode
2782 * is not yet written. If it is written, do the post-inode write
2783 * processing to put it on the id_pendinghd list.
2784 */
2785 if (inodedep_lookup(dp->i_fs, newinum, DEPALLOC, &inodedep) == 0 ||
2786 (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) {
2787 dap->da_state |= COMPLETE;
2788 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
2789 WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list);
2790 } else {
2791 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)],
2792 dap, da_pdlist);
2793 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list);
2794 }
2795 FREE_LOCK(&lk);
2796}
2797
2798/*
2799 * Called whenever the link count on an inode is changed.
2800 * It creates an inode dependency so that the new reference(s)
2801 * to the inode cannot be committed to disk until the updated
2802 * inode has been written.
2803 *
2804 * Parameters:
2805 * ip: the inode with the increased link count
2806 */
2807void
2808 softdep_change_linkcnt(struct inode *ip)
2809{
2810 struct inodedep *inodedep;
2811
2812 ACQUIRE_LOCK(&lk);
2813 (void) inodedep_lookup(ip->i_fs, ip->i_number, DEPALLOC, &inodedep);
2814 if (ip->i_nlink < ip->i_effnlink) {
2815 FREE_LOCK(&lk);
2816 panic("softdep_change_linkcnt: bad delta");
2817 }
2818 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
2819 FREE_LOCK(&lk);
2820}
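/*
 * Worked example (illustrative): unlinking one name of a file that has
 * two links. The caller drops i_effnlink immediately; i_nlink follows
 * only after the directory block is written:
 *
 *	i_effnlink: 2 -> 1	// in-core, effective count
 *	i_nlink:    2		// still the on-disk count
 *	id_nlinkdelta = 2 - 1 = 1
 *
 * The delta is folded back in when the updated inode block is written.
 */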
2821
2822/*
2823 * This workitem decrements the inode's link count.
2824 * If the link count reaches zero, the file is removed.
2825 */
2826static void
2827 handle_workitem_remove(struct dirrem *dirrem)
2828 {
2829 struct inodedep *inodedep;
2830 struct vnode *vp;
2831 struct inode *ip;
2832 ino_t oldinum;
2833 int error;
2834
2835 error = VFS_VGET(dirrem->dm_mnt, NULL, dirrem->dm_oldinum, &vp);
2836 if (error) {
2837 softdep_error("handle_workitem_remove: vget", error);
2838 return;
2839 }
2840 ip = VTOI(vp);
2841 ACQUIRE_LOCK(&lk);
2842 if ((inodedep_lookup(ip->i_fs, dirrem->dm_oldinum, 0, &inodedep)) == 0){
2843 FREE_LOCK(&lk);
2844 panic("handle_workitem_remove: lost inodedep");
2845 }
2846 /*
2847 * Normal file deletion.
2848 */
2849 if ((dirrem->dm_state & RMDIR) == 0) {
2850 ip->i_nlink--;
2851 ip->i_flag |= IN_CHANGE;
2852 if (ip->i_nlink < ip->i_effnlink) {
2853 FREE_LOCK(&lk);
2854 panic("handle_workitem_remove: bad file delta");
2855 }
2856 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
2857 FREE_LOCK(&lk);
2858 vput(vp);
2859 num_dirrem -= 1;
2860 WORKITEM_FREE(dirrem, D_DIRREM);
2861 return;
2862 }
2863 /*
2864 * Directory deletion. Decrement reference count for both the
2865 * just deleted parent directory entry and the reference for ".".
2866 * Next truncate the directory to length zero. When the
2867 * truncation completes, arrange to have the reference count on
2868 * the parent decremented to account for the loss of "..".
2869 */
2870 ip->i_nlink -= 2;
2871 ip->i_flag |= IN_CHANGE;
2872 if (ip->i_nlink < ip->i_effnlink) {
2873 FREE_LOCK(&lk);
2874 panic("handle_workitem_remove: bad dir delta");
2875 }
2876 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
2877 FREE_LOCK(&lk);
2878 	if ((error = ffs_truncate(vp, (off_t)0, 0, proc0.p_ucred)) != 0)
2879 softdep_error("handle_workitem_remove: truncate", error);
2880 /*
2881 * Rename a directory to a new parent. Since, we are both deleting
2882 * and creating a new directory entry, the link count on the new
2883 * directory should not change. Thus we skip the followup dirrem.
2884 */
2885 if (dirrem->dm_state & DIRCHG) {
2886 vput(vp);
2887 num_dirrem -= 1;
2888 WORKITEM_FREE(dirrem, D_DIRREM);
2889 return;
2890 }
2891 /*
2892 * If the inodedep does not exist, then the zero'ed inode has
2893 * been written to disk. If the allocated inode has never been
2894 * written to disk, then the on-disk inode is zero'ed. In either
2895 * case we can remove the file immediately.
2896 */
2897 ACQUIRE_LOCK(&lk);
2898 dirrem->dm_state = 0;
2899 oldinum = dirrem->dm_oldinum;
2900 dirrem->dm_oldinum = dirrem->dm_dirinum;
2901 if (inodedep_lookup(ip->i_fs, oldinum, 0, &inodedep) == 0 ||
2902 check_inode_unwritten(inodedep)) {
2903 FREE_LOCK(&lk);
2904 vput(vp);
2905 handle_workitem_remove(dirrem);
2906 return;
2907 }
2908 WORKLIST_INSERT(&inodedep->id_inowait, &dirrem->dm_list);
2909 FREE_LOCK(&lk);
2910 ip->i_flag |= IN_CHANGE;
2911 ffs_update(vp, 0);
2912 vput(vp);
2913}
2914
2915/*
2916 * Inode de-allocation dependencies.
2917 *
2918 * When an inode's link count is reduced to zero, it can be de-allocated. We
2919 * found it convenient to postpone de-allocation until after the inode is
2920 * written to disk with its new link count (zero). At this point, all of the
2921 * on-disk inode's block pointers are nullified and, with careful dependency
2922 * list ordering, all dependencies related to the inode will be satisfied and
2923 * the corresponding dependency structures de-allocated. So, if/when the
2924 * inode is reused, there will be no mixing of old dependencies with new
2925 * ones. This artificial dependency is set up by the block de-allocation
2926 * procedure above (softdep_setup_freeblocks) and completed by the
2927 * following procedure.
2928 */
2929static void
2930 handle_workitem_freefile(struct freefile *freefile)
2931{
2932 struct vnode vp;
2933 struct inode tip;
2934 struct inodedep *idp;
2935 int error;
2936
2937#ifdef DEBUG
2938 ACQUIRE_LOCK(&lk);
2939 error = inodedep_lookup(freefile->fx_fs, freefile->fx_oldinum, 0, &idp);
2940 FREE_LOCK(&lk);
2941 if (error)
2942 panic("handle_workitem_freefile: inodedep survived");
2943#endif
2944 tip.i_devvp = freefile->fx_devvp;
2945 tip.i_dev = freefile->fx_devvp->v_rdev;
2946 tip.i_fs = freefile->fx_fs;
2947 vp.v_data = &tip;
2948 if ((error = ffs_freefile(&vp, freefile->fx_oldinum, freefile->fx_mode)) != 0)
2949 softdep_error("handle_workitem_freefile", error);
2950 WORKITEM_FREE(freefile, D_FREEFILE);
2951}
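/*
 * Timeline sketch (illustrative) of the artificial dependency described
 * above for a freed inode:
 *
 *	softdep_freefile()		// freefile queued on id_inowait
 *	initiate_write_inodeblock()	// zero'ed dinode heads to disk
 *	softdep_disk_write_complete()	// inode block now stable
 *	handle_workitem_freefile()	// finally clear the inode bitmap
 */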
2952
2953/*
2954 * Helper function which unlinks marker element from work list and returns
2955 * the next element on the list.
2956 */
2957static __inline struct worklist *
2958markernext(struct worklist *marker)
2959{
2960 struct worklist *next;
2961
2962 next = LIST_NEXT(marker, wk_list);
2963 LIST_REMOVE(marker, wk_list);
2964 return next;
2965}
2966
2967/*
2968 * checkread, checkwrite
2969 *
2970 * bioops callback - hold io_token
2971 */
2972static int
2973softdep_checkread(struct buf *bp)
2974{
2975 	/* nothing to do, mp lock not needed */
2976 return(0);
2977}
2978
2979/*
2980 * bioops callback - hold io_token
2981 */
2982static int
2983softdep_checkwrite(struct buf *bp)
2984{
2985 	/* nothing to do, mp lock not needed */
2986 return(0);
2987}
2988
2989/*
2990 * Disk writes.
2991 *
2992 * The dependency structures constructed above are most actively used when file
2993 * system blocks are written to disk. No constraints are placed on when a
2994 * block can be written, but unsatisfied update dependencies are made safe by
2995 * modifying (or replacing) the source memory for the duration of the disk
2996 * write. When the disk write completes, the memory block is again brought
2997 * up-to-date.
2998 *
2999 * In-core inode structure reclamation.
3000 *
3001 * Because there are a finite number of "in-core" inode structures, they are
3002 * reused regularly. By transferring all inode-related dependencies to the
3003 * in-memory inode block and indexing them separately (via "inodedep"s), we
3004 * can allow "in-core" inode structures to be reused at any time and avoid
3005 * any increase in contention.
3006 *
3007 * Called just before entering the device driver to initiate a new disk I/O.
3008 * The buffer must be locked, thus, no I/O completion operations can occur
3009 * while we are manipulating its associated dependencies.
3010 *
3011 * bioops callback - hold io_token
3012 *
3013 * Parameters:
3014 * bp: structure describing disk write to occur
3015 */
3016static void
3fcb1ab8 3017softdep_disk_io_initiation(struct buf *bp)
3018 {
3019 struct worklist *wk;
3020 struct worklist marker;
3021 struct indirdep *indirdep;
3022
3023 /*
3024 * We only care about write operations. There should never
3025 * be dependencies for reads.
3026 */
3027 	if (bp->b_cmd == BUF_CMD_READ)
3028 		panic("softdep_disk_io_initiation: read");
3029 
3030 	get_mplock();
3031 	ACQUIRE_LOCK(&lk);
3032 marker.wk_type = D_LAST + 1; /* Not a normal workitem */
3033
3034 /*
3035 * Do any necessary pre-I/O processing.
3036 */
3037 for (wk = LIST_FIRST(&bp->b_dep); wk; wk = markernext(&marker)) {
3038 LIST_INSERT_AFTER(wk, &marker, wk_list);
3039
3040 		switch (wk->wk_type) {
3041 case D_PAGEDEP:
3042 initiate_write_filepage(WK_PAGEDEP(wk), bp);
3043 continue;
3044
3045 case D_INODEDEP:
3046 initiate_write_inodeblock(WK_INODEDEP(wk), bp);
3047 continue;
3048
3049 case D_INDIRDEP:
3050 indirdep = WK_INDIRDEP(wk);
3051 if (indirdep->ir_state & GOINGAWAY)
3052 panic("disk_io_initiation: indirdep gone");
3053 /*
3054 * If there are no remaining dependencies, this
3055 * will be writing the real pointers, so the
3056 * dependency can be freed.
3057 */
3058 if (LIST_FIRST(&indirdep->ir_deplisthd) == NULL) {
3059 indirdep->ir_savebp->b_flags |= B_INVAL | B_NOCACHE;
3060 brelse(indirdep->ir_savebp);
3061 /* inline expand WORKLIST_REMOVE(wk); */
3062 wk->wk_state &= ~ONWORKLIST;
3063 LIST_REMOVE(wk, wk_list);
3064 WORKITEM_FREE(indirdep, D_INDIRDEP);
3065 continue;
3066 }
3067 /*
3068 * Replace up-to-date version with safe version.
3069 */
3070 indirdep->ir_saveddata = kmalloc(bp->b_bcount,
3071 M_INDIRDEP,
3072 M_SOFTDEP_FLAGS);
3073 ACQUIRE_LOCK(&lk);
3074 indirdep->ir_state &= ~ATTACHED;
3075 indirdep->ir_state |= UNDONE;
3076 bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount);
3077 bcopy(indirdep->ir_savebp->b_data, bp->b_data,
3078 bp->b_bcount);
3079 FREE_LOCK(&lk);
3080 continue;
3081
3082 case D_MKDIR:
3083 case D_BMSAFEMAP:
3084 case D_ALLOCDIRECT:
3085 case D_ALLOCINDIR:
3086 continue;
3087
3088 default:
3089 panic("handle_disk_io_initiation: Unexpected type %s",
3090 TYPENAME(wk->wk_type));
3091 /* NOTREACHED */
3092 }
3093 }
3094 	FREE_LOCK(&lk);
3095 	rel_mplock();
3096}
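/*
 * Safe-copy swap sketch (illustrative) for the D_INDIRDEP case above:
 *
 *	saveddata = kmalloc(bp->b_bcount, ...);
 *	bcopy(bp->b_data, saveddata, bp->b_bcount);	    // stash real pointers
 *	bcopy(ir_savebp->b_data, bp->b_data, bp->b_bcount); // write safe copy
 *
 * The disk write then proceeds with the rolled-back contents; completion
 * processing restores bp->b_data from ir_saveddata and re-dirties the buffer.
 */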
3097
3098/*
3099 * Called from within the procedure above to deal with unsatisfied
3100 * allocation dependencies in a directory. The buffer must be locked,
3101 * thus, no I/O completion operations can occur while we are
3102 * manipulating its associated dependencies.
3103 */
3104static void
3105 initiate_write_filepage(struct pagedep *pagedep, struct buf *bp)
3106{
3107 struct diradd *dap;
3108 struct direct *ep;
3109 int i;
3110
3111 if (pagedep->pd_state & IOSTARTED) {
3112 /*
3113 * This can only happen if there is a driver that does not
3114 * understand chaining. Here biodone will reissue the call
3115 * to strategy for the incomplete buffers.
3116 */
3117 		kprintf("initiate_write_filepage: already started\n");
3118 return;
3119 }
3120 pagedep->pd_state |= IOSTARTED;
3121 ACQUIRE_LOCK(&lk);
3122 for (i = 0; i < DAHASHSZ; i++) {
3123 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) {
3124 ep = (struct direct *)
3125 ((char *)bp->b_data + dap->da_offset);
3126 if (ep->d_ino != dap->da_newinum) {
3127 FREE_LOCK(&lk);
3128 				panic("%s: dir inum %d != new %"PRId64,
3129 "initiate_write_filepage",
3130 ep->d_ino, dap->da_newinum);
3131 }
3132 if (dap->da_state & DIRCHG)
3133 ep->d_ino = dap->da_previous->dm_oldinum;
3134 else
3135 ep->d_ino = 0;
3136 dap->da_state &= ~ATTACHED;
3137 dap->da_state |= UNDONE;
3138 }
3139 }
3140 FREE_LOCK(&lk);
3141}
3142
3143/*
3144 * Called from within the procedure above to deal with unsatisfied
3145 * allocation dependencies in an inodeblock. The buffer must be
3146 * locked, thus, no I/O completion operations can occur while we
3147 * are manipulating its associated dependencies.
3148 *
3149 * Parameters:
3150 * bp: The inode block
984263bc
MD
3151 */
3152static void
3153 initiate_write_inodeblock(struct inodedep *inodedep, struct buf *bp)
3154{
3155 struct allocdirect *adp, *lastadp;
3156 struct ufs1_dinode *dp;
3157 struct ufs1_dinode *sip;
3158 struct fs *fs;
3159 ufs_lbn_t prevlbn = 0;
3160 int i, deplist;
3161
3162 if (inodedep->id_state & IOSTARTED)
3163 panic("initiate_write_inodeblock: already started");
3164 inodedep->id_state |= IOSTARTED;
3165 fs = inodedep->id_fs;
3166 	dp = (struct ufs1_dinode *)bp->b_data +
3167 ino_to_fsbo(fs, inodedep->id_ino);
3168 /*
3169 * If the bitmap is not yet written, then the allocated
3170 * inode cannot be written to disk.
3171 */
3172 if ((inodedep->id_state & DEPCOMPLETE) == 0) {
3173 if (inodedep->id_savedino != NULL)
3174 panic("initiate_write_inodeblock: already doing I/O");
3175 sip = kmalloc(sizeof(struct ufs1_dinode), M_INODEDEP,
3176 M_SOFTDEP_FLAGS);
83b29fff 3177 inodedep->id_savedino = sip;
984263bc 3178 *inodedep->id_savedino = *dp;
50e58362 3179 bzero((caddr_t)dp, sizeof(struct ufs1_dinode));
83b29fff 3180 dp->di_gen = inodedep->id_savedino->di_gen;
3181 return;
3182 }
3183 /*
3184 * If no dependencies, then there is nothing to roll back.
3185 */
3186 inodedep->id_savedsize = dp->di_size;
3187 if (TAILQ_FIRST(&inodedep->id_inoupdt) == NULL)
3188 return;
3189 /*
3190 * Set the dependencies to busy.
3191 */
3192 ACQUIRE_LOCK(&lk);
3193 for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
3194 adp = TAILQ_NEXT(adp, ad_next)) {
3195#ifdef DIAGNOSTIC
3196 if (deplist != 0 && prevlbn >= adp->ad_lbn) {
3197 FREE_LOCK(&lk);
3198 panic("softdep_write_inodeblock: lbn order");
3199 }
3200 prevlbn = adp->ad_lbn;
3201 if (adp->ad_lbn < NDADDR &&
3202 dp->di_db[adp->ad_lbn] != adp->ad_newblkno) {
3203 FREE_LOCK(&lk);
3204 panic("%s: direct pointer #%ld mismatch %d != %d",
3205 "softdep_write_inodeblock", adp->ad_lbn,
3206 dp->di_db[adp->ad_lbn], adp->ad_newblkno);
3207 }
3208 if (adp->ad_lbn >= NDADDR &&
3209 dp->di_ib[adp->ad_lbn - NDADDR] != adp->ad_newblkno) {
3210 FREE_LOCK(&lk);
3211 panic("%s: indirect pointer #%ld mismatch %d != %d",
3212 "softdep_write_inodeblock", adp->ad_lbn - NDADDR,
3213 dp->di_ib[adp->ad_lbn - NDADDR], adp->ad_newblkno);
3214 }
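			/*
			 * deplist records each rolled-back lbn as a bit:
			 * direct lbns in bits 0..NDADDR-1, indirect lbn i
			 * in bit NDADDR+i.  The "lost dep" checks below use
			 * it to verify that every cleared pointer had a
			 * matching dependency.
			 */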
3215 deplist |= 1 << adp->ad_lbn;
3216 if ((adp->ad_state & ATTACHED) == 0) {
3217 FREE_LOCK(&lk);
3218 panic("softdep_write_inodeblock: Unknown state 0x%x",
3219 adp->ad_state);
3220 }
3221#endif /* DIAGNOSTIC */
3222 adp->ad_state &= ~ATTACHED;
3223 adp->ad_state |= UNDONE;
3224 }
3225 /*
3226 * The on-disk inode cannot claim to be any larger than the last
3227 * fragment that has been written. Otherwise, the on-disk inode
3228 * might have fragments that were not the last block in the file,
3229 * which would corrupt the filesystem.
3230 */
3231 for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
3232 lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
3233 if (adp->ad_lbn >= NDADDR)
3234 break;
3235 dp->di_db[adp->ad_lbn] = adp->ad_oldblkno;
3236 /* keep going until hitting a rollback to a frag */
3237 if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
3238 continue;
3239 dp->di_size = fs->fs_bsize * adp->ad_lbn + adp->ad_oldsize;
3240 for (i = adp->ad_lbn + 1; i < NDADDR; i++) {
3241#ifdef DIAGNOSTIC
3242 if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0) {
3243 FREE_LOCK(&lk);
3244 panic("softdep_write_inodeblock: lost dep1");
3245 }
3246#endif /* DIAGNOSTIC */
3247 dp->di_db[i] = 0;
3248 }
3249 for (i = 0; i < NIADDR; i++) {
3250#ifdef DIAGNOSTIC
3251 if (dp->di_ib[i] != 0 &&
3252 (deplist & ((1 << NDADDR) << i)) == 0) {
3253 FREE_LOCK(&lk);
3254 panic("softdep_write_inodeblock: lost dep2");
3255 }
3256#endif /* DIAGNOSTIC */
3257 dp->di_ib[i] = 0;
3258 }
3259 FREE_LOCK(&lk);
3260 return;
3261 }
3262 /*
3263 * If we have zeroed out the last allocated block of the file,
3264 * roll back the size to the last currently allocated block.
3265 * We know that this last allocated block is full-sized, as
3266 * we already checked for fragments in the loop above.
3267 */
3268 if (lastadp != NULL &&
3269 dp->di_size <= (lastadp->ad_lbn + 1) * fs->fs_bsize) {
3270 for (i = lastadp->ad_lbn; i >= 0; i--)
3271 if (dp->di_db[i] != 0)
3272 break;
3273 dp->di_size = (i + 1) * fs->fs_bsize;
3274 }
3275 /*
3276 * The only dependencies are for indirect blocks.
3277 *
3278 * The file size for indirect block additions is not guaranteed.
3279 * Such a guarantee would be non-trivial to achieve. The conventional
3280 * synchronous write implementation also does not make this guarantee.
3281 * Fsck should catch and fix discrepancies. Arguably, the file size
3282 * can be over-estimated without destroying integrity when the file
3283 * moves into the indirect blocks (i.e., is large). If we want to
3284 * postpone fsck, we are stuck with this argument.
3285 */
3286 for (; adp; adp = TAILQ_NEXT(adp, ad_next))
3287 dp->di_ib[adp->ad_lbn - NDADDR] = 0;
3288 FREE_LOCK(&lk);
3289}
3290
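/*
 * A worked example of the size rollback above (illustrative numbers
 * only): with fs_bsize = 8192, rolling lbn 3 back to an old 2048-byte
 * fragment clamps the on-disk size to
 *
 *	di_size = fs_bsize * ad_lbn + ad_oldsize
 *	        = 8192 * 3 + 2048 = 26624
 *
 * i.e. three full blocks plus the fragment, so the inode written to
 * disk never claims space beyond its last safely-written fragment.
 */
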
3291/*
3292 * This routine is called during the completion interrupt
3293 * service routine for a disk write (from the procedure called
f719c866 3294 * by the device driver to inform the filesystem caches of
3295 * a request completion). It should be called early in this
3296 * procedure, before the block is made available to other
3297 * processes or other routines are called.
3fcb1ab8 3298 *
3299 * bioops callback - hold io_token
3300 *
3301 * Parameters:
3302 * bp: describes the completed disk write
3303 */
3304static void
3fcb1ab8 3305softdep_disk_write_complete(struct buf *bp)
3306{
3307 struct worklist *wk;
3308 struct workhead reattach;
3309 struct newblk *newblk;
3310 struct allocindir *aip;
3311 struct allocdirect *adp;
3312 struct indirdep *indirdep;
3313 struct inodedep *inodedep;
3314 struct bmsafemap *bmsafemap;
3315
3316 ACQUIRE_LOCK(&lk);
3317
3318 LIST_INIT(&reattach);
3319 while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) {
3320 WORKLIST_REMOVE(wk);
3321 switch (wk->wk_type) {
3322
3323 case D_PAGEDEP:
3324 if (handle_written_filepage(WK_PAGEDEP(wk), bp))
3325 WORKLIST_INSERT(&reattach, wk);
3326 continue;
3327
3328 case D_INODEDEP:
3329 if (handle_written_inodeblock(WK_INODEDEP(wk), bp))
3330 WORKLIST_INSERT(&reattach, wk);
3331 continue;
3332
3333 case D_BMSAFEMAP:
3334 bmsafemap = WK_BMSAFEMAP(wk);
3335 while ((newblk = LIST_FIRST(&bmsafemap->sm_newblkhd))) {
3336 newblk->nb_state |= DEPCOMPLETE;
3337 newblk->nb_bmsafemap = NULL;
3338 LIST_REMOVE(newblk, nb_deps);
3339 }
3340 while ((adp =
3341 LIST_FIRST(&bmsafemap->sm_allocdirecthd))) {
3342 adp->ad_state |= DEPCOMPLETE;
3343 adp->ad_buf = NULL;
3344 LIST_REMOVE(adp, ad_deps);
3345 handle_allocdirect_partdone(adp);
3346 }
3347 while ((aip =
3348 LIST_FIRST(&bmsafemap->sm_allocindirhd))) {
3349 aip->ai_state |= DEPCOMPLETE;
3350 aip->ai_buf = NULL;
3351 LIST_REMOVE(aip, ai_deps);
3352 handle_allocindir_partdone(aip);
3353 }
3354 while ((inodedep =
3355 LIST_FIRST(&bmsafemap->sm_inodedephd)) != NULL) {
3356 inodedep->id_state |= DEPCOMPLETE;
3357 LIST_REMOVE(inodedep, id_deps);
3358 inodedep->id_buf = NULL;
3359 }
3360 WORKITEM_FREE(bmsafemap, D_BMSAFEMAP);
3361 continue;
3362
3363 case D_MKDIR:
3364 handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY);
3365 continue;
3366
3367 case D_ALLOCDIRECT:
3368 adp = WK_ALLOCDIRECT(wk);
3369 adp->ad_state |= COMPLETE;
3370 handle_allocdirect_partdone(adp);
3371 continue;
3372
3373 case D_ALLOCINDIR:
3374 aip = WK_ALLOCINDIR(wk);
3375 aip->ai_state |= COMPLETE;
3376 handle_allocindir_partdone(aip);
3377 continue;
3378
3379 case D_INDIRDEP:
3380 indirdep = WK_INDIRDEP(wk);
3381 if (indirdep->ir_state & GOINGAWAY) {
3382 panic("disk_write_complete: indirdep gone");
3383 }
3384 bcopy(indirdep->ir_saveddata, bp->b_data, bp->b_bcount);
884717e1 3385 kfree(indirdep->ir_saveddata, M_INDIRDEP);
 3386			indirdep->ir_saveddata = NULL;
3387 indirdep->ir_state &= ~UNDONE;
3388 indirdep->ir_state |= ATTACHED;
4090d6ff 3389 while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != NULL) {
3390 handle_allocindir_partdone(aip);
3391 if (aip == LIST_FIRST(&indirdep->ir_donehd)) {
3392 panic("disk_write_complete: not gone");
3393 }
3394 }
3395 WORKLIST_INSERT(&reattach, wk);
3396 if ((bp->b_flags & B_DELWRI) == 0)
3397 stat_indir_blk_ptrs++;
3398 bdirty(bp);
3399 continue;
3400
3401 default:
3402 panic("handle_disk_write_complete: Unknown type %s",
3403 TYPENAME(wk->wk_type));
3404 /* NOTREACHED */
3405 }
3406 }
3407 /*
3408 * Reattach any requests that must be redone.
3409 */
3410 while ((wk = LIST_FIRST(&reattach)) != NULL) {
3411 WORKLIST_REMOVE(wk);
408357d8 3412 WORKLIST_INSERT_BP(bp, wk);
984263bc 3413 }
3414
3415 FREE_LOCK(&lk);
3416}
3417
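/*
 * A sketch of the reattach idiom used above, assuming only the worklist
 * macros used elsewhere in this file; must_redo() is a hypothetical
 * stand-in for the per-type handlers' return values.
 */
#if 0
	struct workhead reattach;
	struct worklist *wk;

	LIST_INIT(&reattach);
	while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) {
		WORKLIST_REMOVE(wk);
		if (must_redo(wk))
			WORKLIST_INSERT(&reattach, wk);
	}
	/* Requeue survivors so the next write of bp retries them. */
	while ((wk = LIST_FIRST(&reattach)) != NULL) {
		WORKLIST_REMOVE(wk);
		WORKLIST_INSERT_BP(bp, wk);
	}
#endif
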
3418/*
3419 * Called from within softdep_disk_write_complete above. Note that
3420 * this routine is always called from interrupt level with further
3421 * splbio interrupts blocked.
3422 *
3423 * Parameters:
3424 * adp: the completed allocdirect
3425 */
3426static void
3fcb1ab8 3427handle_allocdirect_partdone(struct allocdirect *adp)
3428{
3429 struct allocdirect *listadp;
3430 struct inodedep *inodedep;
3431 long bsize;
3432
3433 if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
3434 return;
f5be2504 3435 if (adp->ad_buf != NULL)
984263bc 3436 panic("handle_allocdirect_partdone: dangling dep");
f5be2504 3437
3438 /*
3439 * The on-disk inode cannot claim to be any larger than the last
3440 * fragment that has been written. Otherwise, the on-disk inode
3441 * might have fragments that were not the last block in the file
3442 * which would corrupt the filesystem. Thus, we cannot free any
3443 * allocdirects after one whose ad_oldblkno claims a fragment as
3444 * these blocks must be rolled back to zero before writing the inode.
3445 * We check the currently active set of allocdirects in id_inoupdt.
3446 */
3447 inodedep = adp->ad_inodedep;
3448 bsize = inodedep->id_fs->fs_bsize;
3449 TAILQ_FOREACH(listadp, &inodedep->id_inoupdt, ad_next) {
3450 /* found our block */
3451 if (listadp == adp)
3452 break;
3453 /* continue if ad_oldlbn is not a fragment */
3454 if (listadp->ad_oldsize == 0 ||
3455 listadp->ad_oldsize == bsize)
3456 continue;
3457 /* hit a fragment */
3458 return;
3459 }
3460 /*
3461 * If we have reached the end of the current list without
3462 * finding the just finished dependency, then it must be
3463 * on the future dependency list. Future dependencies cannot
3464 * be freed until they are moved to the current list.
3465 */
3466 if (listadp == NULL) {
3467#ifdef DEBUG
3468 TAILQ_FOREACH(listadp, &inodedep->id_newinoupdt, ad_next)
3469 /* found our block */
3470 if (listadp == adp)
3471 break;
f5be2504 3472 if (listadp == NULL)
984263bc 3473 panic("handle_allocdirect_partdone: lost dep");
3474#endif /* DEBUG */
3475 return;
3476 }
3477 /*
3478 * If we have found the just finished dependency, then free
3479 * it along with anything that follows it that is complete.
3480 */
3481 for (; adp; adp = listadp) {
3482 listadp = TAILQ_NEXT(adp, ad_next);
3483 if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
3484 return;
3485 free_allocdirect(&inodedep->id_inoupdt, adp, 1);
3486 }
3487}
3488
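/*
 * Ordering note for the scan above: if id_inoupdt holds allocdirects
 * for lbns 0..4 and the entry for lbn 2 rolled back to a fragment
 * (0 < ad_oldsize < bsize), a completed dependency for lbn 3 or 4 must
 * stay queued; the on-disk inode may still claim the fragment at lbn 2
 * as its last block, so nothing past it can be freed yet.
 */
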
3489/*
3490 * Called from within softdep_disk_write_complete above. Note that
3491 * this routine is always called from interrupt level with further
3492 * splbio interrupts blocked.
3493 *
3494 * Parameters:
3495 * aip: the completed allocindir
3496 */
3497static void
3fcb1ab8 3498handle_allocindir_partdone(struct allocindir *aip)
3499{
3500 struct indirdep *indirdep;
3501
3502 if ((aip->ai_state & ALLCOMPLETE) != ALLCOMPLETE)
3503 return;
f5be2504 3504 if (aip->ai_buf != NULL)
984263bc 3505 panic("handle_allocindir_partdone: dangling dependency");
f5be2504 3506
3507 indirdep = aip->ai_indirdep;
3508 if (indirdep->ir_state & UNDONE) {
3509 LIST_REMOVE(aip, ai_next);
3510 LIST_INSERT_HEAD(&indirdep->ir_donehd, aip, ai_next);
3511 return;
3512 }
3513 ((ufs_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] =
3514 aip->ai_newblkno;
3515 LIST_REMOVE(aip, ai_next);
3516 if (aip->ai_freefrag != NULL)
3517 add_to_worklist(&aip->ai_freefrag->ff_list);
3518 WORKITEM_FREE(aip, D_ALLOCINDIR);
3519}
3520
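/*
 * The two outcomes of the routine above, condensed: if the indirect
 * block is currently rolled back (UNDONE), the allocindir migrates to
 * ir_donehd and is finished later by the D_INDIRDEP case of
 * softdep_disk_write_complete(); otherwise its new block number is
 * patched directly into the safe copy:
 */
#if 0
	((ufs_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] =
	    aip->ai_newblkno;
#endif
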
3521/*
3522 * Called from within softdep_disk_write_complete above to restore
3523 * in-memory inode block contents to their most up-to-date state. Note
3524 * that this routine is always called from interrupt level with further
3525 * splbio interrupts blocked.
3526 *
3527 * Parameters:
3528 * bp: buffer containing the inode block
3529 */
3530static int
3fcb1ab8 3531handle_written_inodeblock(struct inodedep *inodedep, struct buf *bp)
3532{
3533 struct worklist *wk, *filefree;
3534 struct allocdirect *adp, *nextadp;
50e58362 3535 struct ufs1_dinode *dp;
3536 int hadchanges;
3537
f5be2504 3538 if ((inodedep->id_state & IOSTARTED) == 0)
984263bc 3539 panic("handle_written_inodeblock: not started");
f5be2504 3540
984263bc 3541 inodedep->id_state &= ~IOSTARTED;
50e58362 3542 dp = (struct ufs1_dinode *)bp->b_data +
3543 ino_to_fsbo(inodedep->id_fs, inodedep->id_ino);
3544 /*
3545 * If we had to rollback the inode allocation because of
3546 * bitmaps being incomplete, then simply restore it.
3547 * Keep the block dirty so that it will not be reclaimed until
3548 * all associated dependencies have been cleared and the
3549 * corresponding updates written to disk.
3550 */
3551 if (inodedep->id_savedino != NULL) {
3552 *dp = *inodedep->id_savedino;
884717e1 3553 kfree(inodedep->id_savedino, M_INODEDEP);
3554 inodedep->id_savedino = NULL;
3555 if ((bp->b_flags & B_DELWRI) == 0)
3556 stat_inode_bitmap++;
3557 bdirty(bp);
3558 return (1);
3559 }
c06b6dae 3560 inodedep->id_state |= COMPLETE;
3561 /*
3562 * Roll forward anything that had to be rolled back before
3563 * the inode could be updated.
3564 */
3565 hadchanges = 0;
3566 for (adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; adp = nextadp) {
3567 nextadp = TAILQ_NEXT(adp, ad_next);
f5be2504 3568 if (adp->ad_state & ATTACHED)
984263bc 3569 panic("handle_written_inodeblock: new entry");
f5be2504 3570
3571 if (adp->ad_lbn < NDADDR) {
3572 if (dp->di_db[adp->ad_lbn] != adp->ad_oldblkno) {
3573 panic("%s: %s #%ld mismatch %d != %d",
3574 "handle_written_inodeblock",
3575 "direct pointer", adp->ad_lbn,
3576 dp->di_db[adp->ad_lbn], adp->ad_oldblkno);
3577 }
3578 dp->di_db[adp->ad_lbn] = adp->ad_newblkno;
3579 } else {
3580 if (dp->di_ib[adp->ad_lbn - NDADDR] != 0) {
3581 panic("%s: %s #%ld allocated as %d",
3582 "handle_written_inodeblock",
3583 "indirect pointer", adp->ad_lbn - NDADDR,
3584 dp->di_ib[adp->ad_lbn - NDADDR]);
3585 }
3586 dp->di_ib[adp->ad_lbn - NDADDR] = adp->ad_newblkno;
3587 }
3588 adp->ad_state &= ~UNDONE;
3589 adp->ad_state |= ATTACHED;
3590 hadchanges = 1;
3591 }
3592 if (hadchanges && (bp->b_flags & B_DELWRI) == 0)
3593 stat_direct_blk_ptrs++;
3594 /*
3595 * Reset the file size to its most up-to-date value.