1/*
2 * Copyright 1998, 2000 Marshall Kirk McKusick. All Rights Reserved.
3 *
4 * The soft updates code is derived from the appendix of a University
5 * of Michigan technical report (Gregory R. Ganger and Yale N. Patt,
6 * "Soft Updates: A Solution to the Metadata Update Problem in File
7 * Systems", CSE-TR-254-95, August 1995).
8 *
9 * Further information about soft updates can be obtained from:
10 *
11 * Marshall Kirk McKusick http://www.mckusick.com/softdep/
12 * 1614 Oxford Street mckusick@mckusick.com
13 * Berkeley, CA 94709-1608 +1-510-843-9542
14 * USA
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 *
20 * 1. Redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer.
22 * 2. Redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution.
25 *
26 * THIS SOFTWARE IS PROVIDED BY MARSHALL KIRK MCKUSICK ``AS IS'' AND ANY
27 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
28 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
29 * DISCLAIMED. IN NO EVENT SHALL MARSHALL KIRK MCKUSICK BE LIABLE FOR
30 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * from: @(#)ffs_softdep.c 9.59 (McKusick) 6/21/00
39 * $FreeBSD: src/sys/ufs/ffs/ffs_softdep.c,v 1.57.2.11 2002/02/05 18:46:53 dillon Exp $
40 * $DragonFly: src/sys/vfs/ufs/ffs_softdep.c,v 1.3 2003/06/19 01:55:08 dillon Exp $
41 */
42
43/*
44 * For now we want the safety net that the DIAGNOSTIC and DEBUG flags provide.
45 */
46#ifndef DIAGNOSTIC
47#define DIAGNOSTIC
48#endif
49#ifndef DEBUG
50#define DEBUG
51#endif
52
53#include <sys/param.h>
54#include <sys/kernel.h>
55#include <sys/systm.h>
56#include <sys/buf.h>
57#include <sys/malloc.h>
58#include <sys/mount.h>
59#include <sys/proc.h>
60#include <sys/syslog.h>
61#include <sys/vnode.h>
62#include <sys/conf.h>
63#include <sys/buf2.h>
64#include <ufs/ufs/dir.h>
65#include <ufs/ufs/quota.h>
66#include <ufs/ufs/inode.h>
67#include <ufs/ufs/ufsmount.h>
68#include <ufs/ffs/fs.h>
69#include <ufs/ffs/softdep.h>
70#include <ufs/ffs/ffs_extern.h>
71#include <ufs/ufs/ufs_extern.h>
72
73/*
74 * These definitions need to be adapted to the system to which
75 * this file is being ported.
76 */
77/*
78 * malloc types defined for the softdep system.
79 */
80MALLOC_DEFINE(M_PAGEDEP, "pagedep","File page dependencies");
81MALLOC_DEFINE(M_INODEDEP, "inodedep","Inode dependencies");
82MALLOC_DEFINE(M_NEWBLK, "newblk","New block allocation");
83MALLOC_DEFINE(M_BMSAFEMAP, "bmsafemap","Block or frag allocated from cyl group map");
84MALLOC_DEFINE(M_ALLOCDIRECT, "allocdirect","Block or frag dependency for an inode");
85MALLOC_DEFINE(M_INDIRDEP, "indirdep","Indirect block dependencies");
86MALLOC_DEFINE(M_ALLOCINDIR, "allocindir","Block dependency for an indirect block");
87MALLOC_DEFINE(M_FREEFRAG, "freefrag","Previously used frag for an inode");
88MALLOC_DEFINE(M_FREEBLKS, "freeblks","Blocks freed from an inode");
89MALLOC_DEFINE(M_FREEFILE, "freefile","Inode deallocated");
90MALLOC_DEFINE(M_DIRADD, "diradd","New directory entry");
91MALLOC_DEFINE(M_MKDIR, "mkdir","New directory");
92MALLOC_DEFINE(M_DIRREM, "dirrem","Directory entry deleted");
93
94#define M_SOFTDEP_FLAGS (M_WAITOK | M_USE_RESERVE)
95
96#define D_PAGEDEP 0
97#define D_INODEDEP 1
98#define D_NEWBLK 2
99#define D_BMSAFEMAP 3
100#define D_ALLOCDIRECT 4
101#define D_INDIRDEP 5
102#define D_ALLOCINDIR 6
103#define D_FREEFRAG 7
104#define D_FREEBLKS 8
105#define D_FREEFILE 9
106#define D_DIRADD 10
107#define D_MKDIR 11
108#define D_DIRREM 12
109#define D_LAST D_DIRREM
110
111/*
112 * translate from workitem type to memory type
113 * MUST match the defines above, such that memtype[D_XXX] == M_XXX
114 */
115static struct malloc_type *memtype[] = {
116 M_PAGEDEP,
117 M_INODEDEP,
118 M_NEWBLK,
119 M_BMSAFEMAP,
120 M_ALLOCDIRECT,
121 M_INDIRDEP,
122 M_ALLOCINDIR,
123 M_FREEFRAG,
124 M_FREEBLKS,
125 M_FREEFILE,
126 M_DIRADD,
127 M_MKDIR,
128 M_DIRREM
129};
130
131#define DtoM(type) (memtype[type])
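/*
 * Editorial note (not part of the original source): the requirement that
 * memtype[D_XXX] == M_XXX can be spot-checked with an illustrative sketch
 * such as the following, e.g. from an initialization routine:
 *
 *	if (DtoM(D_PAGEDEP) != M_PAGEDEP || DtoM(D_DIRREM) != M_DIRREM)
 *		panic("softdep: memtype[] out of sync with D_* defines");
 */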
132
133/*
134 * Names of malloc types.
135 */
136#define TYPENAME(type) \
137 ((unsigned)(type) < D_LAST ? memtype[type]->ks_shortdesc : "???")
138#define CURPROC curproc
139/*
140 * End system adaptation definitions.
141 */
142
143/*
144 * Internal function prototypes.
145 */
146static void softdep_error __P((char *, int));
147static void drain_output __P((struct vnode *, int));
148static int getdirtybuf __P((struct buf **, int));
149static void clear_remove __P((struct proc *));
150static void clear_inodedeps __P((struct proc *));
151static int flush_pagedep_deps __P((struct vnode *, struct mount *,
152 struct diraddhd *));
153static int flush_inodedep_deps __P((struct fs *, ino_t));
154static int handle_written_filepage __P((struct pagedep *, struct buf *));
155static void diradd_inode_written __P((struct diradd *, struct inodedep *));
156static int handle_written_inodeblock __P((struct inodedep *, struct buf *));
157static void handle_allocdirect_partdone __P((struct allocdirect *));
158static void handle_allocindir_partdone __P((struct allocindir *));
159static void initiate_write_filepage __P((struct pagedep *, struct buf *));
160static void handle_written_mkdir __P((struct mkdir *, int));
161static void initiate_write_inodeblock __P((struct inodedep *, struct buf *));
162static void handle_workitem_freefile __P((struct freefile *));
163static void handle_workitem_remove __P((struct dirrem *));
164static struct dirrem *newdirrem __P((struct buf *, struct inode *,
165 struct inode *, int, struct dirrem **));
166static void free_diradd __P((struct diradd *));
167static void free_allocindir __P((struct allocindir *, struct inodedep *));
168static int indir_trunc __P((struct inode *, ufs_daddr_t, int, ufs_lbn_t,
169 long *));
170static void deallocate_dependencies __P((struct buf *, struct inodedep *));
171static void free_allocdirect __P((struct allocdirectlst *,
172 struct allocdirect *, int));
173static int check_inode_unwritten __P((struct inodedep *));
174static int free_inodedep __P((struct inodedep *));
175static void handle_workitem_freeblocks __P((struct freeblks *));
176static void merge_inode_lists __P((struct inodedep *));
177static void setup_allocindir_phase2 __P((struct buf *, struct inode *,
178 struct allocindir *));
179static struct allocindir *newallocindir __P((struct inode *, int, ufs_daddr_t,
180 ufs_daddr_t));
181static void handle_workitem_freefrag __P((struct freefrag *));
182static struct freefrag *newfreefrag __P((struct inode *, ufs_daddr_t, long));
183static void allocdirect_merge __P((struct allocdirectlst *,
184 struct allocdirect *, struct allocdirect *));
185static struct bmsafemap *bmsafemap_lookup __P((struct buf *));
186static int newblk_lookup __P((struct fs *, ufs_daddr_t, int,
187 struct newblk **));
188static int inodedep_lookup __P((struct fs *, ino_t, int, struct inodedep **));
189static int pagedep_lookup __P((struct inode *, ufs_lbn_t, int,
190 struct pagedep **));
191static void pause_timer __P((void *));
192static int request_cleanup __P((int, int));
193static int process_worklist_item __P((struct mount *, int));
194static void add_to_worklist __P((struct worklist *));
195
196/*
197 * Exported softdep operations.
198 */
199static void softdep_disk_io_initiation __P((struct buf *));
200static void softdep_disk_write_complete __P((struct buf *));
201static void softdep_deallocate_dependencies __P((struct buf *));
202static int softdep_fsync __P((struct vnode *));
203static int softdep_process_worklist __P((struct mount *));
204static void softdep_move_dependencies __P((struct buf *, struct buf *));
205static int softdep_count_dependencies __P((struct buf *bp, int));
206
207struct bio_ops bioops = {
208 softdep_disk_io_initiation, /* io_start */
209 softdep_disk_write_complete, /* io_complete */
210 softdep_deallocate_dependencies, /* io_deallocate */
211 softdep_fsync, /* io_fsync */
212 softdep_process_worklist, /* io_sync */
213 softdep_move_dependencies, /* io_movedeps */
214 softdep_count_dependencies, /* io_countdeps */
215};
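/*
 * Editorial sketch (not part of the original source): the buffer cache
 * reaches these operations indirectly through the bioops table, roughly
 * as in this hypothetical call site, so that kernels without soft updates
 * configured simply see NULL function pointers:
 *
 *	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_start != NULL)
 *		(*bioops.io_start)(bp);
 */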
216
217/*
218 * Locking primitives.
219 *
220 * For a uniprocessor, all we need to do is protect against disk
221 * interrupts. For a multiprocessor, this lock would have to be
222 * a mutex. A single mutex is used throughout this file, though
223 * finer grain locking could be used if contention warranted it.
224 *
225 * For a multiprocessor, the sleep call would accept a lock and
226 * release it after the sleep processing was complete. In a uniprocessor
227 * implementation there is no such interlock, so we simply mark
228 * the places where it needs to be done with the `interlocked' form
229 * of the lock calls. Since the uniprocessor sleep already interlocks
230 * the spl, there is nothing that really needs to be done.
231 */
232#ifndef /* NOT */ DEBUG
233static struct lockit {
234 int lkt_spl;
235} lk = { 0 };
236#define ACQUIRE_LOCK(lk) (lk)->lkt_spl = splbio()
237#define FREE_LOCK(lk) splx((lk)->lkt_spl)
238
239#else /* DEBUG */
240static struct lockit {
241 int lkt_spl;
242 pid_t lkt_held;
243} lk = { 0, -1 };
244static int lockcnt;
245
246static void acquire_lock __P((struct lockit *));
247static void free_lock __P((struct lockit *));
248void softdep_panic __P((char *));
249
250#define ACQUIRE_LOCK(lk) acquire_lock(lk)
251#define FREE_LOCK(lk) free_lock(lk)
252
253static void
254acquire_lock(lk)
255 struct lockit *lk;
256{
257 pid_t holder;
258
259 if (lk->lkt_held != -1) {
260 holder = lk->lkt_held;
261 FREE_LOCK(lk);
262 if (holder == CURPROC->p_pid)
263 panic("softdep_lock: locking against myself");
264 else
265 panic("softdep_lock: lock held by %d", holder);
266 }
267 lk->lkt_spl = splbio();
268 lk->lkt_held = CURPROC->p_pid;
269 lockcnt++;
270}
271
272static void
273free_lock(lk)
274 struct lockit *lk;
275{
276
277 if (lk->lkt_held == -1)
278 panic("softdep_unlock: lock not held");
279 lk->lkt_held = -1;
280 splx(lk->lkt_spl);
281}
282
283/*
284 * Function to release soft updates lock and panic.
285 */
286void
287softdep_panic(msg)
288 char *msg;
289{
290
291 if (lk.lkt_held != -1)
292 FREE_LOCK(&lk);
293 panic(msg);
294}
295#endif /* DEBUG */
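/*
 * Editorial usage sketch (not part of the original source): the lock is
 * taken around any manipulation of the dependency structures and released
 * before sleeping or panicking, as the routines below do:
 *
 *	ACQUIRE_LOCK(&lk);
 *	... examine or modify dependency lists ...
 *	FREE_LOCK(&lk);
 */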
296
297static int interlocked_sleep __P((struct lockit *, int, void *, int,
298 const char *, int));
299
300/*
301 * When going to sleep, we must save our SPL so that it does
302 * not get lost if some other process uses the lock while we
303 * are sleeping. We restore it after we have slept. This routine
304 * wraps the interlocking with functions that sleep. The list
305 * below enumerates the available set of operations.
306 */
307#define UNKNOWN 0
308#define SLEEP 1
309#define LOCKBUF 2
310
311static int
312interlocked_sleep(lk, op, ident, flags, wmesg, timo)
313 struct lockit *lk;
314 int op;
315 void *ident;
316 int flags;
317 const char *wmesg;
318 int timo;
319{
320 pid_t holder;
321 int s, retval;
322
323 s = lk->lkt_spl;
324# ifdef DEBUG
325 if (lk->lkt_held == -1)
326 panic("interlocked_sleep: lock not held");
327 lk->lkt_held = -1;
328# endif /* DEBUG */
329 switch (op) {
330 case SLEEP:
331 retval = tsleep(ident, flags, wmesg, timo);
332 break;
333 case LOCKBUF:
334 retval = BUF_LOCK((struct buf *)ident, flags);
335 break;
336 default:
337 panic("interlocked_sleep: unknown operation");
338 }
339# ifdef DEBUG
340 if (lk->lkt_held != -1) {
341 holder = lk->lkt_held;
342 FREE_LOCK(lk);
343 if (holder == CURPROC->p_pid)
344 panic("interlocked_sleep: locking against self");
345 else
346 panic("interlocked_sleep: lock held by %d", holder);
347 }
348 lk->lkt_held = CURPROC->p_pid;
349 lockcnt++;
350# endif /* DEBUG */
351 lk->lkt_spl = s;
352 return (retval);
353}
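/*
 * Editorial usage sketch (not part of the original source): a typical
 * caller holds the softdep lock and lets interlocked_sleep() drop and
 * reacquire the saved spl around the blocking operation, e.g.:
 *
 *	interlocked_sleep(&lk, SLEEP, (caddr_t)&proc_waiting, PPAUSE,
 *	    "softupdt", tickdelay);
 */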
354
355/*
356 * Place holder for real semaphores.
357 */
358struct sema {
359 int value;
360 pid_t holder;
361 char *name;
362 int prio;
363 int timo;
364};
365static void sema_init __P((struct sema *, char *, int, int));
366static int sema_get __P((struct sema *, struct lockit *));
367static void sema_release __P((struct sema *));
368
369static void
370sema_init(semap, name, prio, timo)
371 struct sema *semap;
372 char *name;
373 int prio, timo;
374{
375
376 semap->holder = -1;
377 semap->value = 0;
378 semap->name = name;
379 semap->prio = prio;
380 semap->timo = timo;
381}
382
383static int
384sema_get(semap, interlock)
385 struct sema *semap;
386 struct lockit *interlock;
387{
388
389 if (semap->value++ > 0) {
390 if (interlock != NULL) {
391 interlocked_sleep(interlock, SLEEP, (caddr_t)semap,
392 semap->prio, semap->name, semap->timo);
393 FREE_LOCK(interlock);
394 } else {
395 tsleep((caddr_t)semap, semap->prio, semap->name,
396 semap->timo);
397 }
398 return (0);
399 }
400 semap->holder = CURPROC->p_pid;
401 if (interlock != NULL)
402 FREE_LOCK(interlock);
403 return (1);
404}
405
406static void
407sema_release(semap)
408 struct sema *semap;
409{
410
411 if (semap->value <= 0 || semap->holder != CURPROC->p_pid) {
412 if (lk.lkt_held != -1)
413 FREE_LOCK(&lk);
414 panic("sema_release: not held");
415 }
416 if (--semap->value > 0) {
417 semap->value = 0;
418 wakeup(semap);
419 }
420 semap->holder = -1;
421}
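/*
 * Editorial usage sketch (not part of the original source): the lookup
 * routines below retry their search when sema_get() indicates another
 * process already holds the semaphore, e.g.:
 *
 *	if (sema_get(&pagedep_in_progress, &lk) == 0) {
 *		ACQUIRE_LOCK(&lk);
 *		goto top;
 *	}
 *	... allocate and hash the new structure ...
 *	sema_release(&pagedep_in_progress);
 */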
422
423/*
424 * Worklist queue management.
425 * These routines require that the lock be held.
426 */
427#ifndef /* NOT */ DEBUG
428#define WORKLIST_INSERT(head, item) do { \
429 (item)->wk_state |= ONWORKLIST; \
430 LIST_INSERT_HEAD(head, item, wk_list); \
431} while (0)
432#define WORKLIST_REMOVE(item) do { \
433 (item)->wk_state &= ~ONWORKLIST; \
434 LIST_REMOVE(item, wk_list); \
435} while (0)
436#define WORKITEM_FREE(item, type) FREE(item, DtoM(type))
437
438#else /* DEBUG */
439static void worklist_insert __P((struct workhead *, struct worklist *));
440static void worklist_remove __P((struct worklist *));
441static void workitem_free __P((struct worklist *, int));
442
443#define WORKLIST_INSERT(head, item) worklist_insert(head, item)
444#define WORKLIST_REMOVE(item) worklist_remove(item)
445#define WORKITEM_FREE(item, type) workitem_free((struct worklist *)item, type)
446
447static void
448worklist_insert(head, item)
449 struct workhead *head;
450 struct worklist *item;
451{
452
453 if (lk.lkt_held == -1)
454 panic("worklist_insert: lock not held");
455 if (item->wk_state & ONWORKLIST) {
456 FREE_LOCK(&lk);
457 panic("worklist_insert: already on list");
458 }
459 item->wk_state |= ONWORKLIST;
460 LIST_INSERT_HEAD(head, item, wk_list);
461}
462
463static void
464worklist_remove(item)
465 struct worklist *item;
466{
467
468 if (lk.lkt_held == -1)
469 panic("worklist_remove: lock not held");
470 if ((item->wk_state & ONWORKLIST) == 0) {
471 FREE_LOCK(&lk);
472 panic("worklist_remove: not on list");
473 }
474 item->wk_state &= ~ONWORKLIST;
475 LIST_REMOVE(item, wk_list);
476}
477
478static void
479workitem_free(item, type)
480 struct worklist *item;
481 int type;
482{
483
484 if (item->wk_state & ONWORKLIST) {
485 if (lk.lkt_held != -1)
486 FREE_LOCK(&lk);
487 panic("workitem_free: still on list");
488 }
489 if (item->wk_type != type) {
490 if (lk.lkt_held != -1)
491 FREE_LOCK(&lk);
492 panic("workitem_free: type mismatch");
493 }
494 FREE(item, DtoM(type));
495}
496#endif /* DEBUG */
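/*
 * Editorial usage sketch (not part of the original source): dependency
 * structures are attached to a buffer's b_dep list with these macros and
 * later released through WORKITEM_FREE(), e.g.:
 *
 *	WORKLIST_INSERT(&bp->b_dep, &bmsafemap->sm_list);
 *	...
 *	WORKLIST_REMOVE(&bmsafemap->sm_list);
 *	WORKITEM_FREE(bmsafemap, D_BMSAFEMAP);
 */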
497
498/*
499 * Workitem queue management
500 */
501static struct workhead softdep_workitem_pending;
502static int num_on_worklist; /* number of worklist items to be processed */
503static int softdep_worklist_busy; /* 1 => trying to do unmount */
504static int softdep_worklist_req; /* serialized waiters */
505static int max_softdeps; /* maximum number of structs before slowdown */
506static int tickdelay = 2; /* number of ticks to pause during slowdown */
507static int *stat_countp; /* statistic to count in proc_waiting timeout */
508static int proc_waiting; /* tracks whether we have a timeout posted */
509static struct callout_handle handle; /* handle on posted proc_waiting timeout */
510static struct proc *filesys_syncer; /* proc of filesystem syncer process */
511static int req_clear_inodedeps; /* syncer process flush some inodedeps */
512#define FLUSH_INODES 1
513static int req_clear_remove; /* syncer process flush some freeblks */
514#define FLUSH_REMOVE 2
515/*
516 * runtime statistics
517 */
518static int stat_worklist_push; /* number of worklist cleanups */
519static int stat_blk_limit_push; /* number of times block limit neared */
520static int stat_ino_limit_push; /* number of times inode limit neared */
521static int stat_blk_limit_hit; /* number of times block slowdown imposed */
522static int stat_ino_limit_hit; /* number of times inode slowdown imposed */
523static int stat_sync_limit_hit; /* number of synchronous slowdowns imposed */
524static int stat_indir_blk_ptrs; /* bufs redirtied as indir ptrs not written */
525static int stat_inode_bitmap; /* bufs redirtied as inode bitmap not written */
526static int stat_direct_blk_ptrs;/* bufs redirtied as direct ptrs not written */
527static int stat_dir_entry; /* bufs redirtied as dir entry cannot write */
528#ifdef DEBUG
529#include <vm/vm.h>
530#include <sys/sysctl.h>
531SYSCTL_INT(_debug, OID_AUTO, max_softdeps, CTLFLAG_RW, &max_softdeps, 0, "");
532SYSCTL_INT(_debug, OID_AUTO, tickdelay, CTLFLAG_RW, &tickdelay, 0, "");
533SYSCTL_INT(_debug, OID_AUTO, worklist_push, CTLFLAG_RW, &stat_worklist_push, 0,"");
534SYSCTL_INT(_debug, OID_AUTO, blk_limit_push, CTLFLAG_RW, &stat_blk_limit_push, 0,"");
535SYSCTL_INT(_debug, OID_AUTO, ino_limit_push, CTLFLAG_RW, &stat_ino_limit_push, 0,"");
536SYSCTL_INT(_debug, OID_AUTO, blk_limit_hit, CTLFLAG_RW, &stat_blk_limit_hit, 0, "");
537SYSCTL_INT(_debug, OID_AUTO, ino_limit_hit, CTLFLAG_RW, &stat_ino_limit_hit, 0, "");
538SYSCTL_INT(_debug, OID_AUTO, sync_limit_hit, CTLFLAG_RW, &stat_sync_limit_hit, 0, "");
539SYSCTL_INT(_debug, OID_AUTO, indir_blk_ptrs, CTLFLAG_RW, &stat_indir_blk_ptrs, 0, "");
540SYSCTL_INT(_debug, OID_AUTO, inode_bitmap, CTLFLAG_RW, &stat_inode_bitmap, 0, "");
541SYSCTL_INT(_debug, OID_AUTO, direct_blk_ptrs, CTLFLAG_RW, &stat_direct_blk_ptrs, 0, "");
542SYSCTL_INT(_debug, OID_AUTO, dir_entry, CTLFLAG_RW, &stat_dir_entry, 0, "");
543#endif /* DEBUG */
544
545/*
546 * Add an item to the end of the work queue.
547 * This routine requires that the lock be held.
548 * This is the only routine that adds items to the list.
549 * The following routine is the only one that removes items
550 * and does so in order from first to last.
551 */
552static void
553add_to_worklist(wk)
554 struct worklist *wk;
555{
556 static struct worklist *worklist_tail;
557
558 if (wk->wk_state & ONWORKLIST) {
559 if (lk.lkt_held != -1)
560 FREE_LOCK(&lk);
561 panic("add_to_worklist: already on list");
562 }
563 wk->wk_state |= ONWORKLIST;
564 if (LIST_FIRST(&softdep_workitem_pending) == NULL)
565 LIST_INSERT_HEAD(&softdep_workitem_pending, wk, wk_list);
566 else
567 LIST_INSERT_AFTER(worklist_tail, wk, wk_list);
568 worklist_tail = wk;
569 num_on_worklist += 1;
570}
571
572/*
573 * Process that runs once per second to handle items in the background queue.
574 *
575 * Note that we ensure that everything is done in the order in which it
576 * appears in the queue. The code below depends on this property to ensure
577 * that blocks of a file are freed before the inode itself is freed. This
578 * ordering ensures that no new <vfsid, inum, lbn> triples will be generated
579 * until all the old ones have been purged from the dependency lists.
580 */
581static int
582softdep_process_worklist(matchmnt)
583 struct mount *matchmnt;
584{
585 struct proc *p = CURPROC;
586 int matchcnt, loopcount;
587 long starttime;
588
589 /*
590 * Record the process identifier of our caller so that we can give
591 * this process preferential treatment in request_cleanup below.
592 */
593 filesys_syncer = p;
594 matchcnt = 0;
595
596 /*
597 * There is no danger of having multiple processes run this
598 * code, but we have to single-thread it when softdep_flushfiles()
599 * is in operation to get an accurate count of the number of items
600 * related to its mount point that are in the list.
601 */
602 if (matchmnt == NULL) {
603 if (softdep_worklist_busy < 0)
604 return(-1);
605 softdep_worklist_busy += 1;
606 }
607
608 /*
609 * If requested, try removing inode or removal dependencies.
610 */
611 if (req_clear_inodedeps) {
612 clear_inodedeps(p);
613 req_clear_inodedeps -= 1;
614 wakeup_one(&proc_waiting);
615 }
616 if (req_clear_remove) {
617 clear_remove(p);
618 req_clear_remove -= 1;
619 wakeup_one(&proc_waiting);
620 }
621 loopcount = 1;
622 starttime = time_second;
623 while (num_on_worklist > 0) {
624 matchcnt += process_worklist_item(matchmnt, 0);
625
626 /*
627 * If a umount operation wants to run the worklist
628 * accurately, abort.
629 */
630 if (softdep_worklist_req && matchmnt == NULL) {
631 matchcnt = -1;
632 break;
633 }
634
635 /*
636 * If requested, try removing inode or removal dependencies.
637 */
638 if (req_clear_inodedeps) {
639 clear_inodedeps(p);
640 req_clear_inodedeps -= 1;
641 wakeup_one(&proc_waiting);
642 }
643 if (req_clear_remove) {
644 clear_remove(p);
645 req_clear_remove -= 1;
646 wakeup_one(&proc_waiting);
647 }
648 /*
649 * We do not generally want to stop for buffer space, but if
650 * we are really being a buffer hog, we will stop and wait.
651 */
652 if (loopcount++ % 128 == 0)
653 bwillwrite();
654 /*
655 * Never allow processing to run for more than one
656 * second. Otherwise the other syncer tasks may get
657 * excessively backlogged.
658 */
659 if (starttime != time_second && matchmnt == NULL) {
660 matchcnt = -1;
661 break;
662 }
663 }
664 if (matchmnt == NULL) {
665 --softdep_worklist_busy;
666 if (softdep_worklist_req && softdep_worklist_busy == 0)
667 wakeup(&softdep_worklist_req);
668 }
669 return (matchcnt);
670}
671
672/*
673 * Process one item on the worklist.
674 */
675static int
676process_worklist_item(matchmnt, flags)
677 struct mount *matchmnt;
678 int flags;
679{
680 struct worklist *wk;
681 struct dirrem *dirrem;
682 struct fs *matchfs;
683 struct vnode *vp;
684 int matchcnt = 0;
685
686 matchfs = NULL;
687 if (matchmnt != NULL)
688 matchfs = VFSTOUFS(matchmnt)->um_fs;
689 ACQUIRE_LOCK(&lk);
690 /*
691 * Normally we just process each item on the worklist in order.
692 * However, if we are in a situation where we cannot lock any
693 * inodes, we have to skip over any dirrem requests whose
694 * vnodes are resident and locked.
695 */
696 LIST_FOREACH(wk, &softdep_workitem_pending, wk_list) {
697 if ((flags & LK_NOWAIT) == 0 || wk->wk_type != D_DIRREM)
698 break;
699 dirrem = WK_DIRREM(wk);
700 vp = ufs_ihashlookup(VFSTOUFS(dirrem->dm_mnt)->um_dev,
701 dirrem->dm_oldinum);
702 if (vp == NULL || !VOP_ISLOCKED(vp, CURPROC))
703 break;
704 }
705 if (wk == 0) {
706 FREE_LOCK(&lk);
707 return (0);
708 }
709 WORKLIST_REMOVE(wk);
710 num_on_worklist -= 1;
711 FREE_LOCK(&lk);
712 switch (wk->wk_type) {
713
714 case D_DIRREM:
715 /* removal of a directory entry */
716 if (WK_DIRREM(wk)->dm_mnt == matchmnt)
717 matchcnt += 1;
718 handle_workitem_remove(WK_DIRREM(wk));
719 break;
720
721 case D_FREEBLKS:
722 /* releasing blocks and/or fragments from a file */
723 if (WK_FREEBLKS(wk)->fb_fs == matchfs)
724 matchcnt += 1;
725 handle_workitem_freeblocks(WK_FREEBLKS(wk));
726 break;
727
728 case D_FREEFRAG:
729 /* releasing a fragment when replaced as a file grows */
730 if (WK_FREEFRAG(wk)->ff_fs == matchfs)
731 matchcnt += 1;
732 handle_workitem_freefrag(WK_FREEFRAG(wk));
733 break;
734
735 case D_FREEFILE:
736 /* releasing an inode when its link count drops to 0 */
737 if (WK_FREEFILE(wk)->fx_fs == matchfs)
738 matchcnt += 1;
739 handle_workitem_freefile(WK_FREEFILE(wk));
740 break;
741
742 default:
743 panic("%s_process_worklist: Unknown type %s",
744 "softdep", TYPENAME(wk->wk_type));
745 /* NOTREACHED */
746 }
747 return (matchcnt);
748}
749
750/*
751 * Move dependencies from one buffer to another.
752 */
753static void
754softdep_move_dependencies(oldbp, newbp)
755 struct buf *oldbp;
756 struct buf *newbp;
757{
758 struct worklist *wk, *wktail;
759
760 if (LIST_FIRST(&newbp->b_dep) != NULL)
761 panic("softdep_move_dependencies: need merge code");
762 wktail = 0;
763 ACQUIRE_LOCK(&lk);
764 while ((wk = LIST_FIRST(&oldbp->b_dep)) != NULL) {
765 LIST_REMOVE(wk, wk_list);
766 if (wktail == 0)
767 LIST_INSERT_HEAD(&newbp->b_dep, wk, wk_list);
768 else
769 LIST_INSERT_AFTER(wktail, wk, wk_list);
770 wktail = wk;
771 }
772 FREE_LOCK(&lk);
773}
774
775/*
776 * Purge the work list of all items associated with a particular mount point.
777 */
778int
779softdep_flushfiles(oldmnt, flags, p)
780 struct mount *oldmnt;
781 int flags;
782 struct proc *p;
783{
784 struct vnode *devvp;
785 int error, loopcnt;
786
787 /*
788 * Await our turn to clear out the queue, then serialize access.
789 */
790 while (softdep_worklist_busy != 0) {
791 softdep_worklist_req += 1;
792 tsleep(&softdep_worklist_req, PRIBIO, "softflush", 0);
793 softdep_worklist_req -= 1;
794 }
795 softdep_worklist_busy = -1;
796
797 if ((error = ffs_flushfiles(oldmnt, flags, p)) != 0) {
798 softdep_worklist_busy = 0;
799 if (softdep_worklist_req)
800 wakeup(&softdep_worklist_req);
801 return (error);
802 }
803 /*
804 * Alternately flush the block device associated with the mount
805 * point and process any dependencies that the flushing
806 * creates. In theory, this loop can happen at most twice,
807 * but we give it a few extra just to be sure.
808 */
809 devvp = VFSTOUFS(oldmnt)->um_devvp;
810 for (loopcnt = 10; loopcnt > 0; ) {
811 if (softdep_process_worklist(oldmnt) == 0) {
812 loopcnt--;
813 /*
814 * Do another flush in case any vnodes were brought in
815 * as part of the cleanup operations.
816 */
817 if ((error = ffs_flushfiles(oldmnt, flags, p)) != 0)
818 break;
819 /*
820 * If we still found nothing to do, we are really done.
821 */
822 if (softdep_process_worklist(oldmnt) == 0)
823 break;
824 }
825 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
826 error = VOP_FSYNC(devvp, p->p_ucred, MNT_WAIT, p);
827 VOP_UNLOCK(devvp, 0, p);
828 if (error)
829 break;
830 }
831 softdep_worklist_busy = 0;
832 if (softdep_worklist_req)
833 wakeup(&softdep_worklist_req);
834
835 /*
836 * If we are unmounting then it is an error to fail. If we
837 * are simply trying to downgrade to read-only, then filesystem
838 * activity can keep us busy forever, so we just fail with EBUSY.
839 */
840 if (loopcnt == 0) {
841 if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT)
842 panic("softdep_flushfiles: looping");
843 error = EBUSY;
844 }
845 return (error);
846}
847
848/*
849 * Structure hashing.
850 *
851 * There are three types of structures that can be looked up:
852 * 1) pagedep structures identified by mount point, inode number,
853 * and logical block.
854 * 2) inodedep structures identified by mount point and inode number.
855 * 3) newblk structures identified by mount point and
856 * physical block number.
857 *
858 * The "pagedep" and "inodedep" dependency structures are hashed
859 * separately from the file blocks and inodes to which they correspond.
860 * This separation helps when the in-memory copy of an inode or
861 * file block must be replaced. It also obviates the need to access
862 * an inode or file page when simply updating (or de-allocating)
863 * dependency structures. Lookup of newblk structures is needed to
864 * find newly allocated blocks when trying to associate them with
865 * their allocdirect or allocindir structure.
866 *
867 * The lookup routines optionally create and hash a new instance when
868 * an existing entry is not found.
869 */
870#define DEPALLOC 0x0001 /* allocate structure if lookup fails */
871#define NODELAY 0x0002 /* cannot do background work */
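/*
 * Editorial usage sketch (not part of the original source): a caller that
 * wants a structure created on demand passes DEPALLOC and attaches the new
 * entry when the lookup reports that it was freshly allocated, e.g.:
 *
 *	ACQUIRE_LOCK(&lk);
 *	if (pagedep_lookup(ip, lbn, DEPALLOC, &pagedep) == 0)
 *		WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list);
 *	FREE_LOCK(&lk);
 */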
872
873/*
874 * Structures and routines associated with pagedep caching.
875 */
876LIST_HEAD(pagedep_hashhead, pagedep) *pagedep_hashtbl;
877u_long pagedep_hash; /* size of hash table - 1 */
878#define PAGEDEP_HASH(mp, inum, lbn) \
879 (&pagedep_hashtbl[((((register_t)(mp)) >> 13) + (inum) + (lbn)) & \
880 pagedep_hash])
881static struct sema pagedep_in_progress;
882
883/*
884 * Look up a pagedep. Return 1 if found, 0 if not found.
885 * If not found, allocate if DEPALLOC flag is passed.
886 * Found or allocated entry is returned in pagedeppp.
887 * This routine must be called with splbio interrupts blocked.
888 */
889static int
890pagedep_lookup(ip, lbn, flags, pagedeppp)
891 struct inode *ip;
892 ufs_lbn_t lbn;
893 int flags;
894 struct pagedep **pagedeppp;
895{
896 struct pagedep *pagedep;
897 struct pagedep_hashhead *pagedephd;
898 struct mount *mp;
899 int i;
900
901#ifdef DEBUG
902 if (lk.lkt_held == -1)
903 panic("pagedep_lookup: lock not held");
904#endif
905 mp = ITOV(ip)->v_mount;
906 pagedephd = PAGEDEP_HASH(mp, ip->i_number, lbn);
907top:
908 LIST_FOREACH(pagedep, pagedephd, pd_hash)
909 if (ip->i_number == pagedep->pd_ino &&
910 lbn == pagedep->pd_lbn &&
911 mp == pagedep->pd_mnt)
912 break;
913 if (pagedep) {
914 *pagedeppp = pagedep;
915 return (1);
916 }
917 if ((flags & DEPALLOC) == 0) {
918 *pagedeppp = NULL;
919 return (0);
920 }
921 if (sema_get(&pagedep_in_progress, &lk) == 0) {
922 ACQUIRE_LOCK(&lk);
923 goto top;
924 }
925 MALLOC(pagedep, struct pagedep *, sizeof(struct pagedep), M_PAGEDEP,
926 M_SOFTDEP_FLAGS);
927 bzero(pagedep, sizeof(struct pagedep));
928 pagedep->pd_list.wk_type = D_PAGEDEP;
929 pagedep->pd_mnt = mp;
930 pagedep->pd_ino = ip->i_number;
931 pagedep->pd_lbn = lbn;
932 LIST_INIT(&pagedep->pd_dirremhd);
933 LIST_INIT(&pagedep->pd_pendinghd);
934 for (i = 0; i < DAHASHSZ; i++)
935 LIST_INIT(&pagedep->pd_diraddhd[i]);
936 ACQUIRE_LOCK(&lk);
937 LIST_INSERT_HEAD(pagedephd, pagedep, pd_hash);
938 sema_release(&pagedep_in_progress);
939 *pagedeppp = pagedep;
940 return (0);
941}
942
943/*
944 * Structures and routines associated with inodedep caching.
945 */
946LIST_HEAD(inodedep_hashhead, inodedep) *inodedep_hashtbl;
947static u_long inodedep_hash; /* size of hash table - 1 */
948static long num_inodedep; /* number of inodedep allocated */
949#define INODEDEP_HASH(fs, inum) \
950 (&inodedep_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & inodedep_hash])
951static struct sema inodedep_in_progress;
952
953/*
954 * Look up an inodedep. Return 1 if found, 0 if not found.
955 * If not found, allocate if DEPALLOC flag is passed.
956 * Found or allocated entry is returned in inodedeppp.
957 * This routine must be called with splbio interrupts blocked.
958 */
959static int
960inodedep_lookup(fs, inum, flags, inodedeppp)
961 struct fs *fs;
962 ino_t inum;
963 int flags;
964 struct inodedep **inodedeppp;
965{
966 struct inodedep *inodedep;
967 struct inodedep_hashhead *inodedephd;
968 int firsttry;
969
970#ifdef DEBUG
971 if (lk.lkt_held == -1)
972 panic("inodedep_lookup: lock not held");
973#endif
974 firsttry = 1;
975 inodedephd = INODEDEP_HASH(fs, inum);
976top:
977 LIST_FOREACH(inodedep, inodedephd, id_hash)
978 if (inum == inodedep->id_ino && fs == inodedep->id_fs)
979 break;
980 if (inodedep) {
981 *inodedeppp = inodedep;
982 return (1);
983 }
984 if ((flags & DEPALLOC) == 0) {
985 *inodedeppp = NULL;
986 return (0);
987 }
988 /*
989 * If we are over our limit, try to improve the situation.
990 */
991 if (num_inodedep > max_softdeps && firsttry &&
992 speedup_syncer() == 0 && (flags & NODELAY) == 0 &&
993 request_cleanup(FLUSH_INODES, 1)) {
994 firsttry = 0;
995 goto top;
996 }
997 if (sema_get(&inodedep_in_progress, &lk) == 0) {
998 ACQUIRE_LOCK(&lk);
999 goto top;
1000 }
1001 num_inodedep += 1;
1002 MALLOC(inodedep, struct inodedep *, sizeof(struct inodedep),
1003 M_INODEDEP, M_SOFTDEP_FLAGS);
1004 inodedep->id_list.wk_type = D_INODEDEP;
1005 inodedep->id_fs = fs;
1006 inodedep->id_ino = inum;
1007 inodedep->id_state = ALLCOMPLETE;
1008 inodedep->id_nlinkdelta = 0;
1009 inodedep->id_savedino = NULL;
1010 inodedep->id_savedsize = -1;
1011 inodedep->id_buf = NULL;
1012 LIST_INIT(&inodedep->id_pendinghd);
1013 LIST_INIT(&inodedep->id_inowait);
1014 LIST_INIT(&inodedep->id_bufwait);
1015 TAILQ_INIT(&inodedep->id_inoupdt);
1016 TAILQ_INIT(&inodedep->id_newinoupdt);
1017 ACQUIRE_LOCK(&lk);
1018 LIST_INSERT_HEAD(inodedephd, inodedep, id_hash);
1019 sema_release(&inodedep_in_progress);
1020 *inodedeppp = inodedep;
1021 return (0);
1022}
1023
1024/*
1025 * Structures and routines associated with newblk caching.
1026 */
1027LIST_HEAD(newblk_hashhead, newblk) *newblk_hashtbl;
1028u_long newblk_hash; /* size of hash table - 1 */
1029#define NEWBLK_HASH(fs, inum) \
1030 (&newblk_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & newblk_hash])
1031static struct sema newblk_in_progress;
1032
1033/*
1034 * Look up a newblk. Return 1 if found, 0 if not found.
1035 * If not found, allocate if DEPALLOC flag is passed.
1036 * Found or allocated entry is returned in newblkpp.
1037 */
1038static int
1039newblk_lookup(fs, newblkno, flags, newblkpp)
1040 struct fs *fs;
1041 ufs_daddr_t newblkno;
1042 int flags;
1043 struct newblk **newblkpp;
1044{
1045 struct newblk *newblk;
1046 struct newblk_hashhead *newblkhd;
1047
1048 newblkhd = NEWBLK_HASH(fs, newblkno);
1049top:
1050 LIST_FOREACH(newblk, newblkhd, nb_hash)
1051 if (newblkno == newblk->nb_newblkno && fs == newblk->nb_fs)
1052 break;
1053 if (newblk) {
1054 *newblkpp = newblk;
1055 return (1);
1056 }
1057 if ((flags & DEPALLOC) == 0) {
1058 *newblkpp = NULL;
1059 return (0);
1060 }
1061 if (sema_get(&newblk_in_progress, 0) == 0)
1062 goto top;
1063 MALLOC(newblk, struct newblk *, sizeof(struct newblk),
1064 M_NEWBLK, M_SOFTDEP_FLAGS);
1065 newblk->nb_state = 0;
1066 newblk->nb_fs = fs;
1067 newblk->nb_newblkno = newblkno;
1068 LIST_INSERT_HEAD(newblkhd, newblk, nb_hash);
1069 sema_release(&newblk_in_progress);
1070 *newblkpp = newblk;
1071 return (0);
1072}
1073
1074/*
1075 * Executed during filesystem initialization before
1076 * mounting any file systems.
1077 */
1078void
1079softdep_initialize()
1080{
1081
1082 LIST_INIT(&mkdirlisthd);
1083 LIST_INIT(&softdep_workitem_pending);
1084 max_softdeps = min(desiredvnodes * 8,
1085 M_INODEDEP->ks_limit / (2 * sizeof(struct inodedep)));
1086 pagedep_hashtbl = hashinit(desiredvnodes / 5, M_PAGEDEP,
1087 &pagedep_hash);
1088 sema_init(&pagedep_in_progress, "pagedep", PRIBIO, 0);
1089 inodedep_hashtbl = hashinit(desiredvnodes, M_INODEDEP, &inodedep_hash);
1090 sema_init(&inodedep_in_progress, "inodedep", PRIBIO, 0);
1091 newblk_hashtbl = hashinit(64, M_NEWBLK, &newblk_hash);
1092 sema_init(&newblk_in_progress, "newblk", PRIBIO, 0);
1093}
1094
1095/*
1096 * Called at mount time to notify the dependency code that a
1097 * filesystem wishes to use it.
1098 */
1099int
1100softdep_mount(devvp, mp, fs, cred)
1101 struct vnode *devvp;
1102 struct mount *mp;
1103 struct fs *fs;
1104 struct ucred *cred;
1105{
1106 struct csum cstotal;
1107 struct cg *cgp;
1108 struct buf *bp;
1109 int error, cyl;
1110
1111 mp->mnt_flag &= ~MNT_ASYNC;
1112 mp->mnt_flag |= MNT_SOFTDEP;
1113 /*
1114 * When doing soft updates, the counters in the
1115 * superblock may have gotten out of sync, so we have
1116 * to scan the cylinder groups and recalculate them.
1117 */
1118 if (fs->fs_clean != 0)
1119 return (0);
1120 bzero(&cstotal, sizeof cstotal);
1121 for (cyl = 0; cyl < fs->fs_ncg; cyl++) {
1122 if ((error = bread(devvp, fsbtodb(fs, cgtod(fs, cyl)),
1123 fs->fs_cgsize, cred, &bp)) != 0) {
1124 brelse(bp);
1125 return (error);
1126 }
1127 cgp = (struct cg *)bp->b_data;
1128 cstotal.cs_nffree += cgp->cg_cs.cs_nffree;
1129 cstotal.cs_nbfree += cgp->cg_cs.cs_nbfree;
1130 cstotal.cs_nifree += cgp->cg_cs.cs_nifree;
1131 cstotal.cs_ndir += cgp->cg_cs.cs_ndir;
1132 fs->fs_cs(fs, cyl) = cgp->cg_cs;
1133 brelse(bp);
1134 }
1135#ifdef DEBUG
1136 if (bcmp(&cstotal, &fs->fs_cstotal, sizeof cstotal))
1137 printf("ffs_mountfs: superblock updated for soft updates\n");
1138#endif
1139 bcopy(&cstotal, &fs->fs_cstotal, sizeof cstotal);
1140 return (0);
1141}
1142
1143/*
1144 * Protecting the freemaps (or bitmaps).
1145 *
1146 * To eliminate the need to execute fsck before mounting a file system
1147 * after a power failure, one must (conservatively) guarantee that the
1148 * on-disk copy of the bitmaps never indicate that a live inode or block is
1149 * free. So, when a block or inode is allocated, the bitmap should be
1150 * updated (on disk) before any new pointers. When a block or inode is
1151 * freed, the bitmap should not be updated until all pointers have been
1152 * reset. The latter dependency is handled by the delayed de-allocation
1153 * approach described below for block and inode de-allocation. The former
1154 * dependency is handled by calling the following procedure when a block or
1155 * inode is allocated. When an inode is allocated an "inodedep" is created
1156 * with its DEPCOMPLETE flag cleared until its bitmap is written to disk.
1157 * Each "inodedep" is also inserted into the hash indexing structure so
1158 * that any additional link additions can be made dependent on the inode
1159 * allocation.
1160 *
1161 * The ufs file system maintains a number of free block counts (e.g., per
1162 * cylinder group, per cylinder and per <cylinder, rotational position> pair)
1163 * in addition to the bitmaps. These counts are used to improve efficiency
1164 * during allocation and therefore must be consistent with the bitmaps.
1165 * There is no convenient way to guarantee post-crash consistency of these
1166 * counts with simple update ordering, for two main reasons: (1) The counts
1167 * and bitmaps for a single cylinder group block are not in the same disk
1168 * sector. If a disk write is interrupted (e.g., by power failure), one may
1169 * be written and the other not. (2) Some of the counts are located in the
1170 * superblock rather than the cylinder group block. So, we focus our soft
1171 * updates implementation on protecting the bitmaps. When mounting a
1172 * filesystem, we recompute the auxiliary counts from the bitmaps.
1173 */
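/*
 * Editorial call-ordering sketch (not part of the original source,
 * assuming the ffs_nodealloccg() call site): the cylinder group buffer is
 * marked before the dependency is recorded, and the buffer is then
 * delayed-written rather than written synchronously:
 *
 *	... set the bit for the new inode in the cg inode map ...
 *	if (DOINGSOFTDEP(ITOV(ip)))
 *		softdep_setup_inomapdep(bp, ip, newinum);
 *	bdwrite(bp);
 */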
1174
1175/*
1176 * Called just after updating the cylinder group block to allocate an inode.
1177 */
1178void
1179softdep_setup_inomapdep(bp, ip, newinum)
1180 struct buf *bp; /* buffer for cylgroup block with inode map */
1181 struct inode *ip; /* inode related to allocation */
1182 ino_t newinum; /* new inode number being allocated */
1183{
1184 struct inodedep *inodedep;
1185 struct bmsafemap *bmsafemap;
1186
1187 /*
1188 * Create a dependency for the newly allocated inode.
1189 * Panic if it already exists as something is seriously wrong.
1190 * Otherwise add it to the dependency list for the buffer holding
1191 * the cylinder group map from which it was allocated.
1192 */
1193 ACQUIRE_LOCK(&lk);
1194 if ((inodedep_lookup(ip->i_fs, newinum, DEPALLOC|NODELAY, &inodedep))) {
1195 FREE_LOCK(&lk);
1196 panic("softdep_setup_inomapdep: found inode");
1197 }
1198 inodedep->id_buf = bp;
1199 inodedep->id_state &= ~DEPCOMPLETE;
1200 bmsafemap = bmsafemap_lookup(bp);
1201 LIST_INSERT_HEAD(&bmsafemap->sm_inodedephd, inodedep, id_deps);
1202 FREE_LOCK(&lk);
1203}
1204
1205/*
1206 * Called just after updating the cylinder group block to
1207 * allocate block or fragment.
1208 */
1209void
1210softdep_setup_blkmapdep(bp, fs, newblkno)
1211 struct buf *bp; /* buffer for cylgroup block with block map */
1212 struct fs *fs; /* filesystem doing allocation */
1213 ufs_daddr_t newblkno; /* number of newly allocated block */
1214{
1215 struct newblk *newblk;
1216 struct bmsafemap *bmsafemap;
1217
1218 /*
1219 * Create a dependency for the newly allocated block.
1220 * Add it to the dependency list for the buffer holding
1221 * the cylinder group map from which it was allocated.
1222 */
1223 if (newblk_lookup(fs, newblkno, DEPALLOC, &newblk) != 0)
1224 panic("softdep_setup_blkmapdep: found block");
1225 ACQUIRE_LOCK(&lk);
1226 newblk->nb_bmsafemap = bmsafemap = bmsafemap_lookup(bp);
1227 LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, nb_deps);
1228 FREE_LOCK(&lk);
1229}
1230
1231/*
1232 * Find the bmsafemap associated with a cylinder group buffer.
1233 * If none exists, create one. The buffer must be locked when
1234 * this routine is called and this routine must be called with
1235 * splbio interrupts blocked.
1236 */
1237static struct bmsafemap *
1238bmsafemap_lookup(bp)
1239 struct buf *bp;
1240{
1241 struct bmsafemap *bmsafemap;
1242 struct worklist *wk;
1243
1244#ifdef DEBUG
1245 if (lk.lkt_held == -1)
1246 panic("bmsafemap_lookup: lock not held");
1247#endif
1248 LIST_FOREACH(wk, &bp->b_dep, wk_list)
1249 if (wk->wk_type == D_BMSAFEMAP)
1250 return (WK_BMSAFEMAP(wk));
1251 FREE_LOCK(&lk);
1252 MALLOC(bmsafemap, struct bmsafemap *, sizeof(struct bmsafemap),
1253 M_BMSAFEMAP, M_SOFTDEP_FLAGS);
1254 bmsafemap->sm_list.wk_type = D_BMSAFEMAP;
1255 bmsafemap->sm_list.wk_state = 0;
1256 bmsafemap->sm_buf = bp;
1257 LIST_INIT(&bmsafemap->sm_allocdirecthd);
1258 LIST_INIT(&bmsafemap->sm_allocindirhd);
1259 LIST_INIT(&bmsafemap->sm_inodedephd);
1260 LIST_INIT(&bmsafemap->sm_newblkhd);
1261 ACQUIRE_LOCK(&lk);
1262 WORKLIST_INSERT(&bp->b_dep, &bmsafemap->sm_list);
1263 return (bmsafemap);
1264}
1265
1266/*
1267 * Direct block allocation dependencies.
1268 *
1269 * When a new block is allocated, the corresponding disk locations must be
1270 * initialized (with zeros or new data) before the on-disk inode points to
1271 * them. Also, the freemap from which the block was allocated must be
1272 * updated (on disk) before the inode's pointer. These two dependencies are
1273 * independent of each other and are needed for all file blocks and indirect
1274 * blocks that are pointed to directly by the inode. Just before the
1275 * "in-core" version of the inode is updated with a newly allocated block
1276 * number, a procedure (below) is called to setup allocation dependency
1277 * structures. These structures are removed when the corresponding
1278 * dependencies are satisfied or when the block allocation becomes obsolete
1279 * (i.e., the file is deleted, the block is de-allocated, or the block is a
1280 * fragment that gets upgraded). All of these cases are handled in
1281 * procedures described later.
1282 *
1283 * When a file extension causes a fragment to be upgraded, either to a larger
1284 * fragment or to a full block, the on-disk location may change (if the
1285 * previous fragment could not simply be extended). In this case, the old
1286 * fragment must be de-allocated, but not until after the inode's pointer has
1287 * been updated. In most cases, this is handled by later procedures, which
1288 * will construct a "freefrag" structure to be added to the workitem queue
1289 * when the inode update is complete (or obsolete). The main exception to
1290 * this is when an allocation occurs while a pending allocation dependency
1291 * (for the same block pointer) remains. This case is handled in the main
1292 * allocation dependency setup procedure by immediately freeing the
1293 * unreferenced fragments.
1294 */
1295void
1296softdep_setup_allocdirect(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp)
1297 struct inode *ip; /* inode to which block is being added */
1298 ufs_lbn_t lbn; /* block pointer within inode */
1299 ufs_daddr_t newblkno; /* disk block number being added */
1300 ufs_daddr_t oldblkno; /* previous block number, 0 unless frag */
1301 long newsize; /* size of new block */
1302 long oldsize; /* size of old block */
1303 struct buf *bp; /* bp for allocated block */
1304{
1305 struct allocdirect *adp, *oldadp;
1306 struct allocdirectlst *adphead;
1307 struct bmsafemap *bmsafemap;
1308 struct inodedep *inodedep;
1309 struct pagedep *pagedep;
1310 struct newblk *newblk;
1311
1312 MALLOC(adp, struct allocdirect *, sizeof(struct allocdirect),
1313 M_ALLOCDIRECT, M_SOFTDEP_FLAGS);
1314 bzero(adp, sizeof(struct allocdirect));
1315 adp->ad_list.wk_type = D_ALLOCDIRECT;
1316 adp->ad_lbn = lbn;
1317 adp->ad_newblkno = newblkno;
1318 adp->ad_oldblkno = oldblkno;
1319 adp->ad_newsize = newsize;
1320 adp->ad_oldsize = oldsize;
1321 adp->ad_state = ATTACHED;
1322 if (newblkno == oldblkno)
1323 adp->ad_freefrag = NULL;
1324 else
1325 adp->ad_freefrag = newfreefrag(ip, oldblkno, oldsize);
1326
1327 if (newblk_lookup(ip->i_fs, newblkno, 0, &newblk) == 0)
1328 panic("softdep_setup_allocdirect: lost block");
1329
1330 ACQUIRE_LOCK(&lk);
1331 inodedep_lookup(ip->i_fs, ip->i_number, DEPALLOC | NODELAY, &inodedep);
1332 adp->ad_inodedep = inodedep;
1333
1334 if (newblk->nb_state == DEPCOMPLETE) {
1335 adp->ad_state |= DEPCOMPLETE;
1336 adp->ad_buf = NULL;
1337 } else {
1338 bmsafemap = newblk->nb_bmsafemap;
1339 adp->ad_buf = bmsafemap->sm_buf;
1340 LIST_REMOVE(newblk, nb_deps);
1341 LIST_INSERT_HEAD(&bmsafemap->sm_allocdirecthd, adp, ad_deps);
1342 }
1343 LIST_REMOVE(newblk, nb_hash);
1344 FREE(newblk, M_NEWBLK);
1345
1346 WORKLIST_INSERT(&bp->b_dep, &adp->ad_list);
1347 if (lbn >= NDADDR) {
1348 /* allocating an indirect block */
1349 if (oldblkno != 0) {
1350 FREE_LOCK(&lk);
1351 panic("softdep_setup_allocdirect: non-zero indir");
1352 }
1353 } else {
1354 /*
1355 * Allocating a direct block.
1356 *
1357 * If we are allocating a directory block, then we must
1358 * allocate an associated pagedep to track additions and
1359 * deletions.
1360 */
1361 if ((ip->i_mode & IFMT) == IFDIR &&
1362 pagedep_lookup(ip, lbn, DEPALLOC, &pagedep) == 0)
1363 WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list);
1364 }
1365 /*
1366 * The list of allocdirects must be kept in sorted and ascending
1367 * order so that the rollback routines can quickly determine the
1368 * first uncommitted block (the size of the file stored on disk
1369 * ends at the end of the lowest committed fragment, or if there
1370 * are no fragments, at the end of the highest committed block).
1371 * Since files generally grow, the typical case is that the new
1372 * block is to be added at the end of the list. We speed this
1373 * special case by checking against the last allocdirect in the
1374 * list before laboriously traversing the list looking for the
1375 * insertion point.
1376 */
1377 adphead = &inodedep->id_newinoupdt;
1378 oldadp = TAILQ_LAST(adphead, allocdirectlst);
1379 if (oldadp == NULL || oldadp->ad_lbn <= lbn) {
1380 /* insert at end of list */
1381 TAILQ_INSERT_TAIL(adphead, adp, ad_next);
1382 if (oldadp != NULL && oldadp->ad_lbn == lbn)
1383 allocdirect_merge(adphead, adp, oldadp);
1384 FREE_LOCK(&lk);
1385 return;
1386 }
1387 TAILQ_FOREACH(oldadp, adphead, ad_next) {
1388 if (oldadp->ad_lbn >= lbn)
1389 break;
1390 }
1391 if (oldadp == NULL) {
1392 FREE_LOCK(&lk);
1393 panic("softdep_setup_allocdirect: lost entry");
1394 }
1395 /* insert in middle of list */
1396 TAILQ_INSERT_BEFORE(oldadp, adp, ad_next);
1397 if (oldadp->ad_lbn == lbn)
1398 allocdirect_merge(adphead, adp, oldadp);
1399 FREE_LOCK(&lk);
1400}
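/*
 * Editorial call-site sketch (not part of the original source, assuming
 * the ffs_balloc() caller; local names newb, nb, nsize, osize and vp are
 * illustrative): the dependency is recorded just before the newly
 * allocated block number is stored in the in-core inode:
 *
 *	if (DOINGSOFTDEP(vp))
 *		softdep_setup_allocdirect(ip, lbn, newb, nb,
 *		    nsize, osize, bp);
 *	ip->i_db[lbn] = newb;
 */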
1401
1402/*
1403 * Replace an old allocdirect dependency with a newer one.
1404 * This routine must be called with splbio interrupts blocked.
1405 */
1406static void
1407allocdirect_merge(adphead, newadp, oldadp)
1408 struct allocdirectlst *adphead; /* head of list holding allocdirects */
1409 struct allocdirect *newadp; /* allocdirect being added */
1410 struct allocdirect *oldadp; /* existing allocdirect being checked */
1411{
1412 struct freefrag *freefrag;
1413
1414#ifdef DEBUG
1415 if (lk.lkt_held == -1)
1416 panic("allocdirect_merge: lock not held");
1417#endif
1418 if (newadp->ad_oldblkno != oldadp->ad_newblkno ||
1419 newadp->ad_oldsize != oldadp->ad_newsize ||
1420 newadp->ad_lbn >= NDADDR) {
1421 FREE_LOCK(&lk);
1422 panic("allocdirect_check: old %d != new %d || lbn %ld >= %d",
1423 newadp->ad_oldblkno, oldadp->ad_newblkno, newadp->ad_lbn,
1424 NDADDR);
1425 }
1426 newadp->ad_oldblkno = oldadp->ad_oldblkno;
1427 newadp->ad_oldsize = oldadp->ad_oldsize;
1428 /*
1429 * If the old dependency had a fragment to free or had never
1430 * previously had a block allocated, then the new dependency
1431 * can immediately post its freefrag and adopt the old freefrag.
1432 * This action is done by swapping the freefrag dependencies.
1433 * The new dependency gains the old one's freefrag, and the
1434 * old one gets the new one and then immediately puts it on
1435 * the worklist when it is freed by free_allocdirect. It is
1436 * not possible to do this swap when the old dependency had a
1437 * non-zero size but no previous fragment to free. This condition
1438 * arises when the new block is an extension of the old block.
1439 * Here, the first part of the fragment allocated to the new
1440 * dependency is part of the block currently claimed on disk by
1441 * the old dependency, so cannot legitimately be freed until the
1442 * conditions for the new dependency are fulfilled.
1443 */
1444 if (oldadp->ad_freefrag != NULL || oldadp->ad_oldblkno == 0) {
1445 freefrag = newadp->ad_freefrag;
1446 newadp->ad_freefrag = oldadp->ad_freefrag;
1447 oldadp->ad_freefrag = freefrag;
1448 }
1449 free_allocdirect(adphead, oldadp, 0);
1450}
1451
1452/*
1453 * Allocate a new freefrag structure if needed.
1454 */
1455static struct freefrag *
1456newfreefrag(ip, blkno, size)
1457 struct inode *ip;
1458 ufs_daddr_t blkno;
1459 long size;
1460{
1461 struct freefrag *freefrag;
1462 struct fs *fs;
1463
1464 if (blkno == 0)
1465 return (NULL);
1466 fs = ip->i_fs;
1467 if (fragnum(fs, blkno) + numfrags(fs, size) > fs->fs_frag)
1468 panic("newfreefrag: frag size");
1469 MALLOC(freefrag, struct freefrag *, sizeof(struct freefrag),
1470 M_FREEFRAG, M_SOFTDEP_FLAGS);
1471 freefrag->ff_list.wk_type = D_FREEFRAG;
1472 freefrag->ff_state = ip->i_uid & ~ONWORKLIST; /* XXX - used below */
1473 freefrag->ff_inum = ip->i_number;
1474 freefrag->ff_fs = fs;
1475 freefrag->ff_devvp = ip->i_devvp;
1476 freefrag->ff_blkno = blkno;
1477 freefrag->ff_fragsize = size;
1478 return (freefrag);
1479}
1480
1481/*
1482 * This workitem de-allocates fragments that were replaced during
1483 * file block allocation.
1484 */
1485static void
1486handle_workitem_freefrag(freefrag)
1487 struct freefrag *freefrag;
1488{
1489 struct inode tip;
1490
1491 tip.i_fs = freefrag->ff_fs;
1492 tip.i_devvp = freefrag->ff_devvp;
1493 tip.i_dev = freefrag->ff_devvp->v_rdev;
1494 tip.i_number = freefrag->ff_inum;
1495 tip.i_uid = freefrag->ff_state & ~ONWORKLIST; /* XXX - set above */
1496 ffs_blkfree(&tip, freefrag->ff_blkno, freefrag->ff_fragsize);
1497 FREE(freefrag, M_FREEFRAG);
1498}
1499
1500/*
1501 * Indirect block allocation dependencies.
1502 *
1503 * The same dependencies that exist for a direct block also exist when
1504 * a new block is allocated and pointed to by an entry in a block of
1505 * indirect pointers. The undo/redo states described above are also
1506 * used here. Because an indirect block contains many pointers that
1507 * may have dependencies, a second copy of the entire in-memory indirect
1508 * block is kept. The buffer cache copy is always completely up-to-date.
1509 * The second copy, which is used only as a source for disk writes,
1510 * contains only the safe pointers (i.e., those that have no remaining
1511 * update dependencies). The second copy is freed when all pointers
1512 * are safe. The cache is not allowed to replace indirect blocks with
1513 * pending update dependencies. If a buffer containing an indirect
1514 * block with dependencies is written, these routines will mark it
1515 * dirty again. It can only be successfully written once all the
1516 * dependencies are removed. The ffs_fsync routine in conjunction with
1517 * softdep_sync_metadata work together to get all the dependencies
1518 * removed so that a file can be successfully written to disk. Three
1519 * procedures are used when setting up indirect block pointer
1520 * dependencies. The division is necessary because of the organization
1521 * of the "balloc" routine and because of the distinction between file
1522 * pages and file metadata blocks.
1523 */
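/*
 * Editorial call-site sketch (not part of the original source, assuming
 * the ffs_balloc() indirect-block path; local names are illustrative):
 * a new file page reached through an indirect block registers its
 * dependency against both the indirect block buffer and the page buffer:
 *
 *	if (DOINGSOFTDEP(vp))
 *		softdep_setup_allocindir_page(ip, lbn, bp,
 *		    indiroff, newb, 0, nbp);
 */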
1524
1525/*
1526 * Allocate a new allocindir structure.
1527 */
1528static struct allocindir *
1529newallocindir(ip, ptrno, newblkno, oldblkno)
1530 struct inode *ip; /* inode for file being extended */
1531 int ptrno; /* offset of pointer in indirect block */
1532 ufs_daddr_t newblkno; /* disk block number being added */
1533 ufs_daddr_t oldblkno; /* previous block number, 0 if none */
1534{
1535 struct allocindir *aip;
1536
1537 MALLOC(aip, struct allocindir *, sizeof(struct allocindir),
1538 M_ALLOCINDIR, M_SOFTDEP_FLAGS);
1539 bzero(aip, sizeof(struct allocindir));
1540 aip->ai_list.wk_type = D_ALLOCINDIR;
1541 aip->ai_state = ATTACHED;
1542 aip->ai_offset = ptrno;
1543 aip->ai_newblkno = newblkno;
1544 aip->ai_oldblkno = oldblkno;
1545 aip->ai_freefrag = newfreefrag(ip, oldblkno, ip->i_fs->fs_bsize);
1546 return (aip);
1547}
1548
1549/*
1550 * Called just before setting an indirect block pointer
1551 * to a newly allocated file page.
1552 */
1553void
1554softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp)
1555 struct inode *ip; /* inode for file being extended */
1556 ufs_lbn_t lbn; /* allocated block number within file */
1557 struct buf *bp; /* buffer with indirect blk referencing page */
1558 int ptrno; /* offset of pointer in indirect block */
1559 ufs_daddr_t newblkno; /* disk block number being added */
1560 ufs_daddr_t oldblkno; /* previous block number, 0 if none */
1561 struct buf *nbp; /* buffer holding allocated page */
1562{
1563 struct allocindir *aip;
1564 struct pagedep *pagedep;
1565
1566 aip = newallocindir(ip, ptrno, newblkno, oldblkno);
1567 ACQUIRE_LOCK(&lk);
1568 /*
1569 * If we are allocating a directory page, then we must
1570 * allocate an associated pagedep to track additions and
1571 * deletions.
1572 */
1573 if ((ip->i_mode & IFMT) == IFDIR &&
1574 pagedep_lookup(ip, lbn, DEPALLOC, &pagedep) == 0)
1575 WORKLIST_INSERT(&nbp->b_dep, &pagedep->pd_list);
1576 WORKLIST_INSERT(&nbp->b_dep, &aip->ai_list);
1577 FREE_LOCK(&lk);
1578 setup_allocindir_phase2(bp, ip, aip);
1579}
1580
1581/*
1582 * Called just before setting an indirect block pointer to a
1583 * newly allocated indirect block.
1584 */
1585void
1586softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno)
1587 struct buf *nbp; /* newly allocated indirect block */
1588 struct inode *ip; /* inode for file being extended */
1589 struct buf *bp; /* indirect block referencing allocated block */
1590 int ptrno; /* offset of pointer in indirect block */
1591 ufs_daddr_t newblkno; /* disk block number being added */
1592{
1593 struct allocindir *aip;
1594
1595 aip = newallocindir(ip, ptrno, newblkno, 0);
1596 ACQUIRE_LOCK(&lk);
1597 WORKLIST_INSERT(&nbp->b_dep, &aip->ai_list);
1598 FREE_LOCK(&lk);
1599 setup_allocindir_phase2(bp, ip, aip);
1600}
1601
1602/*
1603 * Called to finish the allocation of the "aip" allocated
1604 * by one of the two routines above.
1605 */
1606static void
1607setup_allocindir_phase2(bp, ip, aip)
1608 struct buf *bp; /* in-memory copy of the indirect block */
1609 struct inode *ip; /* inode for file being extended */
1610 struct allocindir *aip; /* allocindir allocated by the above routines */
1611{
1612 struct worklist *wk;
1613 struct indirdep *indirdep, *newindirdep;
1614 struct bmsafemap *bmsafemap;
1615 struct allocindir *oldaip;
1616 struct freefrag *freefrag;
1617 struct newblk *newblk;
1618
1619 if (bp->b_lblkno >= 0)
1620 panic("setup_allocindir_phase2: not indir blk");
1621 for (indirdep = NULL, newindirdep = NULL; ; ) {
1622 ACQUIRE_LOCK(&lk);
1623 LIST_FOREACH(wk, &bp->b_dep, wk_list) {
1624 if (wk->wk_type != D_INDIRDEP)
1625 continue;
1626 indirdep = WK_INDIRDEP(wk);
1627 break;
1628 }
1629 if (indirdep == NULL && newindirdep) {
1630 indirdep = newindirdep;
1631 WORKLIST_INSERT(&bp->b_dep, &indirdep->ir_list);
1632 newindirdep = NULL;
1633 }
1634 FREE_LOCK(&lk);
1635 if (indirdep) {
1636 if (newblk_lookup(ip->i_fs, aip->ai_newblkno, 0,
1637 &newblk) == 0)
1638 panic("setup_allocindir: lost block");
1639 ACQUIRE_LOCK(&lk);
1640 if (newblk->nb_state == DEPCOMPLETE) {
1641 aip->ai_state |= DEPCOMPLETE;
1642 aip->ai_buf = NULL;
1643 } else {
1644 bmsafemap = newblk->nb_bmsafemap;
1645 aip->ai_buf = bmsafemap->sm_buf;
1646 LIST_REMOVE(newblk, nb_deps);
1647 LIST_INSERT_HEAD(&bmsafemap->sm_allocindirhd,
1648 aip, ai_deps);
1649 }
1650 LIST_REMOVE(newblk, nb_hash);
1651 FREE(newblk, M_NEWBLK);
1652 aip->ai_indirdep = indirdep;
1653 /*
1654 * Check to see if there is an existing dependency
1655 * for this block. If there is, merge the old
1656 * dependency into the new one.
1657 */
1658 if (aip->ai_oldblkno == 0)
1659 oldaip = NULL;
1660 			else
1662 				LIST_FOREACH(oldaip, &indirdep->ir_deplisthd, ai_next)
1663 					if (oldaip->ai_offset == aip->ai_offset)
1664 						break;
1665 if (oldaip != NULL) {
1666 if (oldaip->ai_newblkno != aip->ai_oldblkno) {
1667 FREE_LOCK(&lk);
1668 panic("setup_allocindir_phase2: blkno");
1669 }
1670 aip->ai_oldblkno = oldaip->ai_oldblkno;
1671 freefrag = oldaip->ai_freefrag;
1672 oldaip->ai_freefrag = aip->ai_freefrag;
1673 aip->ai_freefrag = freefrag;
1674 free_allocindir(oldaip, NULL);
1675 }
1676 LIST_INSERT_HEAD(&indirdep->ir_deplisthd, aip, ai_next);
1677 ((ufs_daddr_t *)indirdep->ir_savebp->b_data)
1678 [aip->ai_offset] = aip->ai_oldblkno;
1679 FREE_LOCK(&lk);
1680 }
1681 if (newindirdep) {
1682 if (indirdep->ir_savebp != NULL)
1683 brelse(newindirdep->ir_savebp);
1684 WORKITEM_FREE((caddr_t)newindirdep, D_INDIRDEP);
1685 }
1686 if (indirdep)
1687 break;
1688 MALLOC(newindirdep, struct indirdep *, sizeof(struct indirdep),
1689 M_INDIRDEP, M_SOFTDEP_FLAGS);
1690 newindirdep->ir_list.wk_type = D_INDIRDEP;
1691 newindirdep->ir_state = ATTACHED;
1692 LIST_INIT(&newindirdep->ir_deplisthd);
1693 LIST_INIT(&newindirdep->ir_donehd);
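		/*
		 * Allocate the save buffer using the physical block number
		 * (mapping the logical block with VOP_BMAP first if needed)
		 * and seed it with a copy of the indirect block's contents.
		 * This is the safe copy later used by deallocate_dependencies
		 * and indir_trunc.
		 */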
1694 if (bp->b_blkno == bp->b_lblkno) {
1695 VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno,
1696 NULL, NULL);
1697 }
1698 newindirdep->ir_savebp =
1699 getblk(ip->i_devvp, bp->b_blkno, bp->b_bcount, 0, 0);
1700 BUF_KERNPROC(newindirdep->ir_savebp);
1701 bcopy(bp->b_data, newindirdep->ir_savebp->b_data, bp->b_bcount);
1702 }
1703}
1704
1705/*
1706 * Block de-allocation dependencies.
1707 *
1708 * When blocks are de-allocated, the on-disk pointers must be nullified before
1709 * the blocks are made available for use by other files. (The true
1710 * requirement is that old pointers must be nullified before new on-disk
1711 * pointers are set. We chose this slightly more stringent requirement to
1712 * reduce complexity.) Our implementation handles this dependency by updating
1713 * the inode (or indirect block) appropriately but delaying the actual block
1714 * de-allocation (i.e., freemap and free space count manipulation) until
1715 * after the updated versions reach stable storage. After the disk is
1716 * updated, the blocks can be safely de-allocated whenever it is convenient.
1717 * This implementation handles only the common case of reducing a file's
1718 * length to zero. Other cases are handled by the conventional synchronous
1719 * write approach.
1720 *
1721 * The ffs implementation with which we worked double-checks
1722 * the state of the block pointers and file size as it reduces
1723 * a file's length. Some of this code is replicated here in our
1724 * soft updates implementation. The freeblks->fb_chkcnt field is
1725 * used to transfer a part of this information to the procedure
1726 * that eventually de-allocates the blocks.
1727 *
1728 * This routine should be called from the routine that shortens
1729 * a file's length, before the inode's size or block pointers
1730 * are modified. It will save the block pointer information for
1731 * later release and zero the inode so that the calling routine
1732 * can release it.
1733 */
1734void
1735softdep_setup_freeblocks(ip, length)
1736 struct inode *ip; /* The inode whose length is to be reduced */
1737 off_t length; /* The new length for the file */
1738{
1739 struct freeblks *freeblks;
1740 struct inodedep *inodedep;
1741 struct allocdirect *adp;
1742 struct vnode *vp;
1743 struct buf *bp;
1744 struct fs *fs;
1745 int i, error, delay;
1746
1747 fs = ip->i_fs;
1748 if (length != 0)
1749 		panic("softdep_setup_freeblocks: non-zero length");
1750 MALLOC(freeblks, struct freeblks *, sizeof(struct freeblks),
1751 M_FREEBLKS, M_SOFTDEP_FLAGS);
1752 bzero(freeblks, sizeof(struct freeblks));
1753 freeblks->fb_list.wk_type = D_FREEBLKS;
1754 freeblks->fb_uid = ip->i_uid;
1755 freeblks->fb_previousinum = ip->i_number;
1756 freeblks->fb_devvp = ip->i_devvp;
1757 freeblks->fb_fs = fs;
1758 freeblks->fb_oldsize = ip->i_size;
1759 freeblks->fb_newsize = length;
1760 freeblks->fb_chkcnt = ip->i_blocks;
1761 for (i = 0; i < NDADDR; i++) {
1762 freeblks->fb_dblks[i] = ip->i_db[i];
1763 ip->i_db[i] = 0;
1764 }
1765 for (i = 0; i < NIADDR; i++) {
1766 freeblks->fb_iblks[i] = ip->i_ib[i];
1767 ip->i_ib[i] = 0;
1768 }
1769 ip->i_blocks = 0;
1770 ip->i_size = 0;
1771 /*
1772 	 * Push the zero'ed inode to its disk buffer so that we are free
1773 * to delete its dependencies below. Once the dependencies are gone
1774 * the buffer can be safely released.
1775 */
1776 if ((error = bread(ip->i_devvp,
1777 fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
1778 (int)fs->fs_bsize, NOCRED, &bp)) != 0)
1779 softdep_error("softdep_setup_freeblocks", error);
1780 *((struct dinode *)bp->b_data + ino_to_fsbo(fs, ip->i_number)) =
1781 ip->i_din;
1782 /*
1783 * Find and eliminate any inode dependencies.
1784 */
1785 ACQUIRE_LOCK(&lk);
1786 (void) inodedep_lookup(fs, ip->i_number, DEPALLOC, &inodedep);
1787 if ((inodedep->id_state & IOSTARTED) != 0) {
1788 FREE_LOCK(&lk);
1789 panic("softdep_setup_freeblocks: inode busy");
1790 }
1791 /*
1792 * Add the freeblks structure to the list of operations that
1793 * must await the zero'ed inode being written to disk. If we
1794 * still have a bitmap dependency (delay == 0), then the inode
1795 * has never been written to disk, so we can process the
1796 * freeblks below once we have deleted the dependencies.
1797 */
1798 delay = (inodedep->id_state & DEPCOMPLETE);
1799 if (delay)
1800 WORKLIST_INSERT(&inodedep->id_bufwait, &freeblks->fb_list);
1801 /*
1802 * Because the file length has been truncated to zero, any
1803 * pending block allocation dependency structures associated
1804 * with this inode are obsolete and can simply be de-allocated.
1805 * We must first merge the two dependency lists to get rid of
1806 * any duplicate freefrag structures, then purge the merged list.
1807 */
1808 merge_inode_lists(inodedep);
1809 while ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != 0)
1810 free_allocdirect(&inodedep->id_inoupdt, adp, 1);
1811 FREE_LOCK(&lk);
1812 bdwrite(bp);
1813 /*
1814 * We must wait for any I/O in progress to finish so that
1815 * all potential buffers on the dirty list will be visible.
1816 * Once they are all there, walk the list and get rid of
1817 * any dependencies.
1818 */
1819 vp = ITOV(ip);
1820 ACQUIRE_LOCK(&lk);
1821 drain_output(vp, 1);
1822 while (getdirtybuf(&TAILQ_FIRST(&vp->v_dirtyblkhd), MNT_WAIT)) {
1823 bp = TAILQ_FIRST(&vp->v_dirtyblkhd);
1824 (void) inodedep_lookup(fs, ip->i_number, 0, &inodedep);
1825 deallocate_dependencies(bp, inodedep);
1826 bp->b_flags |= B_INVAL | B_NOCACHE;
1827 FREE_LOCK(&lk);
1828 brelse(bp);
1829 ACQUIRE_LOCK(&lk);
1830 }
1831 if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) != 0)
1832 (void)free_inodedep(inodedep);
1833 FREE_LOCK(&lk);
1834 /*
1835 * If the inode has never been written to disk (delay == 0),
1836 * then we can process the freeblks now that we have deleted
1837 * the dependencies.
1838 */
1839 if (!delay)
1840 handle_workitem_freeblocks(freeblks);
1841}
1842
1843/*
1844 * Reclaim any dependency structures from a buffer that is about to
1845 * be reallocated to a new vnode. The buffer must be locked, thus,
1846 * no I/O completion operations can occur while we are manipulating
1847 * its associated dependencies. The mutex is held so that other I/O's
1848 * associated with related dependencies do not occur.
1849 */
1850static void
1851deallocate_dependencies(bp, inodedep)
1852 struct buf *bp;
1853 struct inodedep *inodedep;
1854{
1855 struct worklist *wk;
1856 struct indirdep *indirdep;
1857 struct allocindir *aip;
1858 struct pagedep *pagedep;
1859 struct dirrem *dirrem;
1860 struct diradd *dap;
1861 int i;
1862
1863 while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) {
1864 switch (wk->wk_type) {
1865
1866 case D_INDIRDEP:
1867 indirdep = WK_INDIRDEP(wk);
1868 /*
1869 * None of the indirect pointers will ever be visible,
1870 * so they can simply be tossed. GOINGAWAY ensures
1871 * that allocated pointers will be saved in the buffer
1872 * cache until they are freed. Note that they will
1873 * only be able to be found by their physical address
1874 * since the inode mapping the logical address will
1875 * be gone. The save buffer used for the safe copy
1876 * was allocated in setup_allocindir_phase2 using
1877 * the physical address so it could be used for this
1878 * purpose. Hence we swap the safe copy with the real
1879 * copy, allowing the safe copy to be freed and holding
1880 * on to the real copy for later use in indir_trunc.
1881 */
1882 if (indirdep->ir_state & GOINGAWAY) {
1883 FREE_LOCK(&lk);
1884 panic("deallocate_dependencies: already gone");
1885 }
1886 indirdep->ir_state |= GOINGAWAY;
1887 while ((aip = LIST_FIRST(&indirdep->ir_deplisthd)) != 0)
1888 free_allocindir(aip, inodedep);
1889 if (bp->b_lblkno >= 0 ||
1890 bp->b_blkno != indirdep->ir_savebp->b_lblkno) {
1891 FREE_LOCK(&lk);
1892 panic("deallocate_dependencies: not indir");
1893 }
1894 bcopy(bp->b_data, indirdep->ir_savebp->b_data,
1895 bp->b_bcount);
1896 WORKLIST_REMOVE(wk);
1897 WORKLIST_INSERT(&indirdep->ir_savebp->b_dep, wk);
1898 continue;
1899
1900 case D_PAGEDEP:
1901 pagedep = WK_PAGEDEP(wk);
1902 /*
1903 * None of the directory additions will ever be
1904 * visible, so they can simply be tossed.
1905 */
1906 for (i = 0; i < DAHASHSZ; i++)
1907 while ((dap =
1908 LIST_FIRST(&pagedep->pd_diraddhd[i])))
1909 free_diradd(dap);
1910 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != 0)
1911 free_diradd(dap);
1912 /*
1913 * Copy any directory remove dependencies to the list
1914 * to be processed after the zero'ed inode is written.
1915 * If the inode has already been written, then they
1916 * can be dumped directly onto the work list.
1917 */
1918 LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next) {
1919 LIST_REMOVE(dirrem, dm_next);
1920 dirrem->dm_dirinum = pagedep->pd_ino;
1921 if (inodedep == NULL ||
1922 (inodedep->id_state & ALLCOMPLETE) ==
1923 ALLCOMPLETE)
1924 add_to_worklist(&dirrem->dm_list);
1925 else
1926 WORKLIST_INSERT(&inodedep->id_bufwait,
1927 &dirrem->dm_list);
1928 }
1929 WORKLIST_REMOVE(&pagedep->pd_list);
1930 LIST_REMOVE(pagedep, pd_hash);
1931 WORKITEM_FREE(pagedep, D_PAGEDEP);
1932 continue;
1933
1934 case D_ALLOCINDIR:
1935 free_allocindir(WK_ALLOCINDIR(wk), inodedep);
1936 continue;
1937
1938 case D_ALLOCDIRECT:
1939 case D_INODEDEP:
1940 FREE_LOCK(&lk);
1941 panic("deallocate_dependencies: Unexpected type %s",
1942 TYPENAME(wk->wk_type));
1943 /* NOTREACHED */
1944
1945 default:
1946 FREE_LOCK(&lk);
1947 panic("deallocate_dependencies: Unknown type %s",
1948 TYPENAME(wk->wk_type));
1949 /* NOTREACHED */
1950 }
1951 }
1952}
1953
1954/*
1955 * Free an allocdirect. Generate a new freefrag work request if appropriate.
1956 * This routine must be called with splbio interrupts blocked.
1957 */
1958static void
1959free_allocdirect(adphead, adp, delay)
1960 struct allocdirectlst *adphead;
1961 struct allocdirect *adp;
1962 int delay;
1963{
1964
1965#ifdef DEBUG
1966 if (lk.lkt_held == -1)
1967 panic("free_allocdirect: lock not held");
1968#endif
1969 if ((adp->ad_state & DEPCOMPLETE) == 0)
1970 LIST_REMOVE(adp, ad_deps);
1971 TAILQ_REMOVE(adphead, adp, ad_next);
1972 if ((adp->ad_state & COMPLETE) == 0)
1973 WORKLIST_REMOVE(&adp->ad_list);
1974 if (adp->ad_freefrag != NULL) {
1975 if (delay)
1976 WORKLIST_INSERT(&adp->ad_inodedep->id_bufwait,
1977 &adp->ad_freefrag->ff_list);
1978 else
1979 add_to_worklist(&adp->ad_freefrag->ff_list);
1980 }
1981 WORKITEM_FREE(adp, D_ALLOCDIRECT);
1982}
1983
1984/*
1985 * Prepare an inode to be freed. The actual free operation is not
1986 * done until the zero'ed inode has been written to disk.
1987 */
1988void
1989softdep_freefile(pvp, ino, mode)
1990 struct vnode *pvp;
1991 ino_t ino;
1992 int mode;
1993{
1994 struct inode *ip = VTOI(pvp);
1995 struct inodedep *inodedep;
1996 struct freefile *freefile;
1997
1998 /*
1999 * This sets up the inode de-allocation dependency.
2000 */
2001 MALLOC(freefile, struct freefile *, sizeof(struct freefile),
2002 M_FREEFILE, M_SOFTDEP_FLAGS);
2003 freefile->fx_list.wk_type = D_FREEFILE;
2004 freefile->fx_list.wk_state = 0;
2005 freefile->fx_mode = mode;
2006 freefile->fx_oldinum = ino;
2007 freefile->fx_devvp = ip->i_devvp;
2008 freefile->fx_fs = ip->i_fs;
2009
2010 /*
2011 * If the inodedep does not exist, then the zero'ed inode has
2012 * been written to disk. If the allocated inode has never been
2013 * written to disk, then the on-disk inode is zero'ed. In either
2014 * case we can free the file immediately.
2015 */
2016 ACQUIRE_LOCK(&lk);
2017 if (inodedep_lookup(ip->i_fs, ino, 0, &inodedep) == 0 ||
2018 check_inode_unwritten(inodedep)) {
2019 FREE_LOCK(&lk);
2020 handle_workitem_freefile(freefile);
2021 return;
2022 }
2023 WORKLIST_INSERT(&inodedep->id_inowait, &freefile->fx_list);
2024 FREE_LOCK(&lk);
2025}
2026
2027/*
2028 * Check to see if an inode has never been written to disk. If
2029 * so free the inodedep and return success, otherwise return failure.
2030 * This routine must be called with splbio interrupts blocked.
2031 *
2032 * If we still have a bitmap dependency, then the inode has never
2033 * been written to disk. Drop the dependency as it is no longer
2034 * necessary since the inode is being deallocated. We set the
2035 * ALLCOMPLETE flags since the bitmap now properly shows that the
2036 * inode is not allocated. Even if the inode is actively being
2037 * written, it has been rolled back to its zero'ed state, so we
2038 * are ensured that a zero inode is what is on the disk. For short
2039 * lived files, this change will usually result in removing all the
2040 * dependencies from the inode so that it can be freed immediately.
2041 */
2042static int
2043check_inode_unwritten(inodedep)
2044 struct inodedep *inodedep;
2045{
2046
2047 if ((inodedep->id_state & DEPCOMPLETE) != 0 ||
2048 LIST_FIRST(&inodedep->id_pendinghd) != NULL ||
2049 LIST_FIRST(&inodedep->id_bufwait) != NULL ||
2050 LIST_FIRST(&inodedep->id_inowait) != NULL ||
2051 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL ||
2052 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL ||
2053 inodedep->id_nlinkdelta != 0)
2054 return (0);
2055 inodedep->id_state |= ALLCOMPLETE;
2056 LIST_REMOVE(inodedep, id_deps);
2057 inodedep->id_buf = NULL;
2058 if (inodedep->id_state & ONWORKLIST)
2059 WORKLIST_REMOVE(&inodedep->id_list);
2060 if (inodedep->id_savedino != NULL) {
2061 FREE(inodedep->id_savedino, M_INODEDEP);
2062 inodedep->id_savedino = NULL;
2063 }
2064 if (free_inodedep(inodedep) == 0) {
2065 FREE_LOCK(&lk);
2066 panic("check_inode_unwritten: busy inode");
2067 }
2068 return (1);
2069}
2070
2071/*
2072 * Try to free an inodedep structure. Return 1 if it could be freed.
2073 */
2074static int
2075free_inodedep(inodedep)
2076 struct inodedep *inodedep;
2077{
2078
2079 if ((inodedep->id_state & ONWORKLIST) != 0 ||
2080 (inodedep->id_state & ALLCOMPLETE) != ALLCOMPLETE ||
2081 LIST_FIRST(&inodedep->id_pendinghd) != NULL ||
2082 LIST_FIRST(&inodedep->id_bufwait) != NULL ||
2083 LIST_FIRST(&inodedep->id_inowait) != NULL ||
2084 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL ||
2085 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL ||
2086 inodedep->id_nlinkdelta != 0 || inodedep->id_savedino != NULL)
2087 return (0);
2088 LIST_REMOVE(inodedep, id_hash);
2089 WORKITEM_FREE(inodedep, D_INODEDEP);
2090 num_inodedep -= 1;
2091 return (1);
2092}
2093
2094/*
2095 * This workitem routine performs the block de-allocation.
2096 * The workitem is added to the pending list after the updated
2097 * inode block has been written to disk. As mentioned above,
2098 * checks regarding the number of blocks de-allocated (compared
2099 * to the number of blocks allocated for the file) are also
2100 * performed in this function.
2101 */
2102static void
2103handle_workitem_freeblocks(freeblks)
2104 struct freeblks *freeblks;
2105{
2106 struct inode tip;
2107 ufs_daddr_t bn;
2108 struct fs *fs;
2109 int i, level, bsize;
2110 long nblocks, blocksreleased = 0;
2111 int error, allerror = 0;
2112 ufs_lbn_t baselbns[NIADDR], tmpval;
2113
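	/*
	 * Construct a throw-away inode holding just enough state for
	 * ffs_blkfree() and indir_trunc() to do their work.
	 */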
2114 tip.i_number = freeblks->fb_previousinum;
2115 tip.i_devvp = freeblks->fb_devvp;
2116 tip.i_dev = freeblks->fb_devvp->v_rdev;
2117 tip.i_fs = freeblks->fb_fs;
2118 tip.i_size = freeblks->fb_oldsize;
2119 tip.i_uid = freeblks->fb_uid;
2120 fs = freeblks->fb_fs;
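	/*
	 * Compute the first logical block number mapped by each level
	 * of indirection.
	 */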
2121 tmpval = 1;
2122 baselbns[0] = NDADDR;
2123 for (i = 1; i < NIADDR; i++) {
2124 tmpval *= NINDIR(fs);
2125 baselbns[i] = baselbns[i - 1] + tmpval;
2126 }
2127 nblocks = btodb(fs->fs_bsize);
2128 blocksreleased = 0;
2129 /*
2130 * Indirect blocks first.
2131 */
2132 for (level = (NIADDR - 1); level >= 0; level--) {
2133 if ((bn = freeblks->fb_iblks[level]) == 0)
2134 continue;
2135 if ((error = indir_trunc(&tip, fsbtodb(fs, bn), level,
2136 		    baselbns[level], &blocksreleased)) != 0)
2137 allerror = error;
2138 ffs_blkfree(&tip, bn, fs->fs_bsize);
2139 blocksreleased += nblocks;
2140 }
2141 /*
2142 * All direct blocks or frags.
2143 */
2144 for (i = (NDADDR - 1); i >= 0; i--) {
2145 if ((bn = freeblks->fb_dblks[i]) == 0)
2146 continue;
2147 bsize = blksize(fs, &tip, i);
2148 ffs_blkfree(&tip, bn, bsize);
2149 blocksreleased += btodb(bsize);
2150 }
2151
2152#ifdef DIAGNOSTIC
2153 if (freeblks->fb_chkcnt != blocksreleased)
2154 printf("handle_workitem_freeblocks: block count\n");
2155 if (allerror)
2156 softdep_error("handle_workitem_freeblks", allerror);
2157#endif /* DIAGNOSTIC */
2158 WORKITEM_FREE(freeblks, D_FREEBLKS);
2159}
2160
2161/*
2162 * Release blocks associated with the inode ip and stored in the indirect
2163 * block dbn. If level is greater than SINGLE, the block is an indirect block
2164 * and recursive calls to indirtrunc must be used to cleanse other indirect
2165 * blocks.
2166 */
2167static int
2168indir_trunc(ip, dbn, level, lbn, countp)
2169 struct inode *ip;
2170 ufs_daddr_t dbn;
2171 int level;
2172 ufs_lbn_t lbn;
2173 long *countp;
2174{
2175 struct buf *bp;
2176 ufs_daddr_t *bap;
2177 ufs_daddr_t nb;
2178 struct fs *fs;
2179 struct worklist *wk;
2180 struct indirdep *indirdep;
2181 int i, lbnadd, nblocks;
2182 int error, allerror = 0;
2183
2184 fs = ip->i_fs;
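	/*
	 * lbnadd is the number of data blocks mapped by each pointer
	 * in an indirect block at this level.
	 */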
2185 lbnadd = 1;
2186 for (i = level; i > 0; i--)
2187 lbnadd *= NINDIR(fs);
2188 /*
2189 * Get buffer of block pointers to be freed. This routine is not
2190 * called until the zero'ed inode has been written, so it is safe
2191 * to free blocks as they are encountered. Because the inode has
2192 * been zero'ed, calls to bmap on these blocks will fail. So, we
2193 * have to use the on-disk address and the block device for the
2194 * filesystem to look them up. If the file was deleted before its
2195 * indirect blocks were all written to disk, the routine that set
2196 * us up (deallocate_dependencies) will have arranged to leave
2197 * a complete copy of the indirect block in memory for our use.
2198 * Otherwise we have to read the blocks in from the disk.
2199 */
2200 ACQUIRE_LOCK(&lk);
2201 if ((bp = incore(ip->i_devvp, dbn)) != NULL &&
2202 (wk = LIST_FIRST(&bp->b_dep)) != NULL) {
2203 if (wk->wk_type != D_INDIRDEP ||
2204 (indirdep = WK_INDIRDEP(wk))->ir_savebp != bp ||
2205 (indirdep->ir_state & GOINGAWAY) == 0) {
2206 FREE_LOCK(&lk);
2207 panic("indir_trunc: lost indirdep");
2208 }
2209 WORKLIST_REMOVE(wk);
2210 WORKITEM_FREE(indirdep, D_INDIRDEP);
2211 if (LIST_FIRST(&bp->b_dep) != NULL) {
2212 FREE_LOCK(&lk);
2213 panic("indir_trunc: dangling dep");
2214 }
2215 FREE_LOCK(&lk);
2216 } else {
2217 FREE_LOCK(&lk);
2218 error = bread(ip->i_devvp, dbn, (int)fs->fs_bsize, NOCRED, &bp);
2219 if (error)
2220 return (error);
2221 }
2222 /*
2223 * Recursively free indirect blocks.
2224 */
2225 bap = (ufs_daddr_t *)bp->b_data;
2226 nblocks = btodb(fs->fs_bsize);
2227 for (i = NINDIR(fs) - 1; i >= 0; i--) {
2228 if ((nb = bap[i]) == 0)
2229 continue;
2230 if (level != 0) {
2231 if ((error = indir_trunc(ip, fsbtodb(fs, nb),
2232 level - 1, lbn + (i * lbnadd), countp)) != 0)
2233 allerror = error;
2234 }
2235 ffs_blkfree(ip, nb, fs->fs_bsize);
2236 *countp += nblocks;
2237 }
2238 bp->b_flags |= B_INVAL | B_NOCACHE;
2239 brelse(bp);
2240 return (allerror);
2241}
2242
2243/*
2244 * Free an allocindir.
2245 * This routine must be called with splbio interrupts blocked.
2246 */
2247static void
2248free_allocindir(aip, inodedep)
2249 struct allocindir *aip;
2250 struct inodedep *inodedep;
2251{
2252 struct freefrag *freefrag;
2253
2254#ifdef DEBUG
2255 if (lk.lkt_held == -1)
2256 panic("free_allocindir: lock not held");
2257#endif
2258 if ((aip->ai_state & DEPCOMPLETE) == 0)
2259 LIST_REMOVE(aip, ai_deps);
2260 if (aip->ai_state & ONWORKLIST)
2261 WORKLIST_REMOVE(&aip->ai_list);
2262 LIST_REMOVE(aip, ai_next);
2263 if ((freefrag = aip->ai_freefrag) != NULL) {
2264 if (inodedep == NULL)
2265 add_to_worklist(&freefrag->ff_list);
2266 else
2267 WORKLIST_INSERT(&inodedep->id_bufwait,
2268 &freefrag->ff_list);
2269 }
2270 WORKITEM_FREE(aip, D_ALLOCINDIR);
2271}
2272
2273/*
2274 * Directory entry addition dependencies.
2275 *
2276 * When adding a new directory entry, the inode (with its incremented link
2277 * count) must be written to disk before the directory entry's pointer to it.
2278 * Also, if the inode is newly allocated, the corresponding freemap must be
2279 * updated (on disk) before the directory entry's pointer. These requirements
2280 * are met via undo/redo on the directory entry's pointer, which consists
2281 * simply of the inode number.
2282 *
2283 * As directory entries are added and deleted, the free space within a
2284 * directory block can become fragmented. The ufs file system will compact
2285 * a fragmented directory block to make space for a new entry. When this
2286 * occurs, the offsets of previously added entries change. Any "diradd"
2287 * dependency structures corresponding to these entries must be updated with
2288 * the new offsets.
2289 */
2290
2291/*
2292 * This routine is called after the in-memory inode's link
2293 * count has been incremented, but before the directory entry's
2294 * pointer to the inode has been set.
2295 */
2296void
2297softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp)
2298 struct buf *bp; /* buffer containing directory block */
2299 struct inode *dp; /* inode for directory */
2300 off_t diroffset; /* offset of new entry in directory */
2301 long newinum; /* inode referenced by new directory entry */
2302 struct buf *newdirbp; /* non-NULL => contents of new mkdir */
2303{
2304 int offset; /* offset of new entry within directory block */
2305 ufs_lbn_t lbn; /* block in directory containing new entry */
2306 struct fs *fs;
2307 struct diradd *dap;
2308 struct pagedep *pagedep;
2309 struct inodedep *inodedep;
2310 struct mkdir *mkdir1, *mkdir2;
2311
2312 /*
2313 * Whiteouts have no dependencies.
2314 */
2315 if (newinum == WINO) {
2316 if (newdirbp != NULL)
2317 bdwrite(newdirbp);
2318 return;
2319 }
2320
2321 fs = dp->i_fs;
2322 lbn = lblkno(fs, diroffset);
2323 offset = blkoff(fs, diroffset);
2324 MALLOC(dap, struct diradd *, sizeof(struct diradd), M_DIRADD,
2325 M_SOFTDEP_FLAGS);
2326 bzero(dap, sizeof(struct diradd));
2327 dap->da_list.wk_type = D_DIRADD;
2328 dap->da_offset = offset;
2329 dap->da_newinum = newinum;
2330 dap->da_state = ATTACHED;
2331 if (newdirbp == NULL) {
2332 dap->da_state |= DEPCOMPLETE;
2333 ACQUIRE_LOCK(&lk);
2334 } else {
2335 dap->da_state |= MKDIR_BODY | MKDIR_PARENT;
2336 MALLOC(mkdir1, struct mkdir *, sizeof(struct mkdir), M_MKDIR,
2337 M_SOFTDEP_FLAGS);
2338 mkdir1->md_list.wk_type = D_MKDIR;
2339 mkdir1->md_state = MKDIR_BODY;
2340 mkdir1->md_diradd = dap;
2341 MALLOC(mkdir2, struct mkdir *, sizeof(struct mkdir), M_MKDIR,
2342 M_SOFTDEP_FLAGS);
2343 mkdir2->md_list.wk_type = D_MKDIR;
2344 mkdir2->md_state = MKDIR_PARENT;
2345 mkdir2->md_diradd = dap;
2346 /*
2347 * Dependency on "." and ".." being written to disk.
2348 */
2349 mkdir1->md_buf = newdirbp;
2350 ACQUIRE_LOCK(&lk);
2351 LIST_INSERT_HEAD(&mkdirlisthd, mkdir1, md_mkdirs);
2352 WORKLIST_INSERT(&newdirbp->b_dep, &mkdir1->md_list);
2353 FREE_LOCK(&lk);
2354 bdwrite(newdirbp);
2355 /*
2356 * Dependency on link count increase for parent directory
2357 */
2358 ACQUIRE_LOCK(&lk);
2359 if (inodedep_lookup(dp->i_fs, dp->i_number, 0, &inodedep) == 0
2360 || (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) {
2361 dap->da_state &= ~MKDIR_PARENT;
2362 WORKITEM_FREE(mkdir2, D_MKDIR);
2363 } else {
2364 LIST_INSERT_HEAD(&mkdirlisthd, mkdir2, md_mkdirs);
2365 WORKLIST_INSERT(&inodedep->id_bufwait,&mkdir2->md_list);
2366 }
2367 }
2368 /*
2369 * Link into parent directory pagedep to await its being written.
2370 */
2371 if (pagedep_lookup(dp, lbn, DEPALLOC, &pagedep) == 0)
2372 WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list);
2373 dap->da_pagedep = pagedep;
2374 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], dap,
2375 da_pdlist);
2376 /*
2377 * Link into its inodedep. Put it on the id_bufwait list if the inode
2378 * is not yet written. If it is written, do the post-inode write
2379 * processing to put it on the id_pendinghd list.
2380 */
2381 (void) inodedep_lookup(fs, newinum, DEPALLOC, &inodedep);
2382 if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE)
2383 diradd_inode_written(dap, inodedep);
2384 else
2385 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list);
2386 FREE_LOCK(&lk);
2387}
2388
2389/*
2390 * This procedure is called to change the offset of a directory
2391 * entry when compacting a directory block which must be owned
2392 * exclusively by the caller. Note that the actual entry movement
2393 * must be done in this procedure to ensure that no I/O completions
2394 * occur while the move is in progress.
2395 */
2396void
2397softdep_change_directoryentry_offset(dp, base, oldloc, newloc, entrysize)
2398 struct inode *dp; /* inode for directory */
2399 caddr_t base; /* address of dp->i_offset */
2400 caddr_t oldloc; /* address of old directory location */
2401 caddr_t newloc; /* address of new directory location */
2402 int entrysize; /* size of directory entry */
2403{
2404 int offset, oldoffset, newoffset;
2405 struct pagedep *pagedep;
2406 struct diradd *dap;
2407 ufs_lbn_t lbn;
2408
2409 ACQUIRE_LOCK(&lk);
2410 lbn = lblkno(dp->i_fs, dp->i_offset);
2411 offset = blkoff(dp->i_fs, dp->i_offset);
2412 if (pagedep_lookup(dp, lbn, 0, &pagedep) == 0)
2413 goto done;
2414 oldoffset = offset + (oldloc - base);
2415 newoffset = offset + (newloc - base);
2416
2417 LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(oldoffset)], da_pdlist) {
2418 if (dap->da_offset != oldoffset)
2419 continue;
2420 dap->da_offset = newoffset;
2421 if (DIRADDHASH(newoffset) == DIRADDHASH(oldoffset))
2422 break;
2423 LIST_REMOVE(dap, da_pdlist);
2424 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(newoffset)],
2425 dap, da_pdlist);
2426 break;
2427 }
2428 	if (dap == NULL) {
2430 		LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist) {
2431 if (dap->da_offset == oldoffset) {
2432 dap->da_offset = newoffset;
2433 break;
2434 }
2435 }
2436 }
2437done:
2438 bcopy(oldloc, newloc, entrysize);
2439 FREE_LOCK(&lk);
2440}
2441
2442/*
2443 * Free a diradd dependency structure. This routine must be called
2444 * with splbio interrupts blocked.
2445 */
2446static void
2447free_diradd(dap)
2448 struct diradd *dap;
2449{
2450 struct dirrem *dirrem;
2451 struct pagedep *pagedep;
2452 struct inodedep *inodedep;
2453 struct mkdir *mkdir, *nextmd;
2454
2455#ifdef DEBUG
2456 if (lk.lkt_held == -1)
2457 panic("free_diradd: lock not held");
2458#endif
2459 WORKLIST_REMOVE(&dap->da_list);
2460 LIST_REMOVE(dap, da_pdlist);
2461 if ((dap->da_state & DIRCHG) == 0) {
2462 pagedep = dap->da_pagedep;
2463 } else {
2464 dirrem = dap->da_previous;
2465 pagedep = dirrem->dm_pagedep;
2466 dirrem->dm_dirinum = pagedep->pd_ino;
2467 add_to_worklist(&dirrem->dm_list);
2468 }
2469 if (inodedep_lookup(VFSTOUFS(pagedep->pd_mnt)->um_fs, dap->da_newinum,
2470 0, &inodedep) != 0)
2471 (void) free_inodedep(inodedep);
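	/*
	 * If this diradd was created by mkdir, dispose of any mkdir
	 * work items still tracking the writing of the new directory's
	 * "." and ".." entries and the parent's link count increase.
	 */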
2472 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
2473 for (mkdir = LIST_FIRST(&mkdirlisthd); mkdir; mkdir = nextmd) {
2474 nextmd = LIST_NEXT(mkdir, md_mkdirs);
2475 if (mkdir->md_diradd != dap)
2476 continue;
2477 dap->da_state &= ~mkdir->md_state;
2478 WORKLIST_REMOVE(&mkdir->md_list);
2479 LIST_REMOVE(mkdir, md_mkdirs);
2480 WORKITEM_FREE(mkdir, D_MKDIR);
2481 }
2482 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
2483 FREE_LOCK(&lk);
2484 panic("free_diradd: unfound ref");
2485 }
2486 }
2487 WORKITEM_FREE(dap, D_DIRADD);
2488}
2489
2490/*
2491 * Directory entry removal dependencies.
2492 *
2493 * When removing a directory entry, the entry's inode pointer must be
2494 * zero'ed on disk before the corresponding inode's link count is decremented
2495 * (possibly freeing the inode for re-use). This dependency is handled by
2496 * updating the directory entry but delaying the inode count reduction until
2497 * after the directory block has been written to disk. After this point, the
2498 * inode count can be decremented whenever it is convenient.
2499 */
2500
2501/*
2502 * This routine should be called immediately after removing
2503 * a directory entry. The inode's link count should not be
2504 * decremented by the calling procedure -- the soft updates
2505 * code will do this task when it is safe.
2506 */
2507void
2508softdep_setup_remove(bp, dp, ip, isrmdir)
2509 struct buf *bp; /* buffer containing directory block */
2510 struct inode *dp; /* inode for the directory being modified */
2511 struct inode *ip; /* inode for directory entry being removed */
2512 int isrmdir; /* indicates if doing RMDIR */
2513{
2514 struct dirrem *dirrem, *prevdirrem;
2515
2516 /*
2517 * Allocate a new dirrem if appropriate and ACQUIRE_LOCK.
2518 */
2519 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem);
2520
2521 /*
2522 * If the COMPLETE flag is clear, then there were no active
2523 * entries and we want to roll back to a zeroed entry until
2524 * the new inode is committed to disk. If the COMPLETE flag is
2525 * set then we have deleted an entry that never made it to
2526 * disk. If the entry we deleted resulted from a name change,
2527 * then the old name still resides on disk. We cannot delete
2528 * its inode (returned to us in prevdirrem) until the zeroed
2529 * directory entry gets to disk. The new inode has never been
2530 	 * referenced on the disk, so it can be deleted immediately.
2531 */
2532 if ((dirrem->dm_state & COMPLETE) == 0) {
2533 LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, dirrem,
2534 dm_next);
2535 FREE_LOCK(&lk);
2536 } else {
2537 if (prevdirrem != NULL)
2538 LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd,
2539 prevdirrem, dm_next);
2540 dirrem->dm_dirinum = dirrem->dm_pagedep->pd_ino;
2541 FREE_LOCK(&lk);
2542 handle_workitem_remove(dirrem);
2543 }
2544}
2545
2546/*
2547 * Allocate a new dirrem if appropriate and return it along with
2548 * its associated pagedep. Called without a lock, returns with lock.
2549 */
2550static long num_dirrem; /* number of dirrem allocated */
2551static struct dirrem *
2552newdirrem(bp, dp, ip, isrmdir, prevdirremp)
2553 struct buf *bp; /* buffer containing directory block */
2554 struct inode *dp; /* inode for the directory being modified */
2555 struct inode *ip; /* inode for directory entry being removed */
2556 int isrmdir; /* indicates if doing RMDIR */
2557 struct dirrem **prevdirremp; /* previously referenced inode, if any */
2558{
2559 int offset;
2560 ufs_lbn_t lbn;
2561 struct diradd *dap;
2562 struct dirrem *dirrem;
2563 struct pagedep *pagedep;
2564
2565 /*
2566 * Whiteouts have no deletion dependencies.
2567 */
2568 if (ip == NULL)
2569 panic("newdirrem: whiteout");
2570 /*
2571 * If we are over our limit, try to improve the situation.
2572 * Limiting the number of dirrem structures will also limit
2573 * the number of freefile and freeblks structures.
2574 */
2575 if (num_dirrem > max_softdeps / 2 && speedup_syncer() == 0)
2576 (void) request_cleanup(FLUSH_REMOVE, 0);
2577 num_dirrem += 1;
2578 MALLOC(dirrem, struct dirrem *, sizeof(struct dirrem),
2579 M_DIRREM, M_SOFTDEP_FLAGS);
2580 bzero(dirrem, sizeof(struct dirrem));
2581 dirrem->dm_list.wk_type = D_DIRREM;
2582 dirrem->dm_state = isrmdir ? RMDIR : 0;
2583 dirrem->dm_mnt = ITOV(ip)->v_mount;
2584 dirrem->dm_oldinum = ip->i_number;
2585 *prevdirremp = NULL;
2586
2587 ACQUIRE_LOCK(&lk);
2588 lbn = lblkno(dp->i_fs, dp->i_offset);
2589 offset = blkoff(dp->i_fs, dp->i_offset);
2590 if (pagedep_lookup(dp, lbn, DEPALLOC, &pagedep) == 0)
2591 WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list);
2592 dirrem->dm_pagedep = pagedep;
2593 /*
2594 * Check for a diradd dependency for the same directory entry.
2595 * If present, then both dependencies become obsolete and can
2596 	 * be de-allocated. Check for an entry on both the pd_diraddhd
2597 * list and the pd_pendinghd list.
2598 */
2599
2600 LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(offset)], da_pdlist)
2601 if (dap->da_offset == offset)
2602 break;
2603 	if (dap == NULL) {
2605 		LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist)
2606 if (dap->da_offset == offset)
2607 break;
2608 if (dap == NULL)
2609 return (dirrem);
2610 }
2611 /*
2612 * Must be ATTACHED at this point.
2613 */
2614 if ((dap->da_state & ATTACHED) == 0) {
2615 FREE_LOCK(&lk);
2616 panic("newdirrem: not ATTACHED");
2617 }
2618 if (dap->da_newinum != ip->i_number) {
2619 FREE_LOCK(&lk);
2620 panic("newdirrem: inum %d should be %d",
2621 ip->i_number, dap->da_newinum);
2622 }
2623 /*
2624 * If we are deleting a changed name that never made it to disk,
2625 * then return the dirrem describing the previous inode (which
2626 * represents the inode currently referenced from this entry on disk).
2627 */
2628 if ((dap->da_state & DIRCHG) != 0) {
2629 *prevdirremp = dap->da_previous;
2630 dap->da_state &= ~DIRCHG;
2631 dap->da_pagedep = pagedep;
2632 }
2633 /*
2634 * We are deleting an entry that never made it to disk.
2635 * Mark it COMPLETE so we can delete its inode immediately.
2636 */
2637 dirrem->dm_state |= COMPLETE;
2638 free_diradd(dap);
2639 return (dirrem);
2640}
2641
2642/*
2643 * Directory entry change dependencies.
2644 *
2645 * Changing an existing directory entry requires that an add operation
2646 * be completed first followed by a deletion. The semantics for the addition
2647 * are identical to the description of adding a new entry above except
2648 * that the rollback is to the old inode number rather than zero. Once
2649 * the addition dependency is completed, the removal is done as described
2650 * in the removal routine above.
2651 */
2652
2653/*
2654 * This routine should be called immediately after changing
2655 * a directory entry. The inode's link count should not be
2656 * decremented by the calling procedure -- the soft updates
2657 * code will perform this task when it is safe.
2658 */
2659void
2660softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir)
2661 struct buf *bp; /* buffer containing directory block */
2662 struct inode *dp; /* inode for the directory being modified */
2663 struct inode *ip; /* inode for directory entry being removed */
2664 long newinum; /* new inode number for changed entry */
2665 int isrmdir; /* indicates if doing RMDIR */
2666{
2667 int offset;
2668 struct diradd *dap = NULL;
2669 struct dirrem *dirrem, *prevdirrem;
2670 struct pagedep *pagedep;
2671 struct inodedep *inodedep;
2672
2673 offset = blkoff(dp->i_fs, dp->i_offset);
2674
2675 /*
2676 * Whiteouts do not need diradd dependencies.
2677 */
2678 if (newinum != WINO) {
2679 MALLOC(dap, struct diradd *, sizeof(struct diradd),
2680 M_DIRADD, M_SOFTDEP_FLAGS);
2681 bzero(dap, sizeof(struct diradd));
2682 dap->da_list.wk_type = D_DIRADD;
2683 dap->da_state = DIRCHG | ATTACHED | DEPCOMPLETE;
2684 dap->da_offset = offset;
2685 dap->da_newinum = newinum;
2686 }
2687
2688 /*
2689 * Allocate a new dirrem and ACQUIRE_LOCK.
2690 */
2691 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem);
2692 pagedep = dirrem->dm_pagedep;
2693 /*
2694 * The possible values for isrmdir:
2695 * 0 - non-directory file rename
2696 * 1 - directory rename within same directory
2697 * inum - directory rename to new directory of given inode number
2698 * When renaming to a new directory, we are both deleting and
2699 * creating a new directory entry, so the link count on the new
2700 * directory should not change. Thus we do not need the followup
2701 * dirrem which is usually done in handle_workitem_remove. We set
2702 * the DIRCHG flag to tell handle_workitem_remove to skip the
2703 * followup dirrem.
2704 */
2705 if (isrmdir > 1)
2706 dirrem->dm_state |= DIRCHG;
2707
2708 /*
2709 * Whiteouts have no additional dependencies,
2710 * so just put the dirrem on the correct list.
2711 */
2712 if (newinum == WINO) {
2713 if ((dirrem->dm_state & COMPLETE) == 0) {
2714 LIST_INSERT_HEAD(&pagedep->pd_dirremhd, dirrem,
2715 dm_next);
2716 } else {
2717 dirrem->dm_dirinum = pagedep->pd_ino;
2718 add_to_worklist(&dirrem->dm_list);
2719 }
2720 FREE_LOCK(&lk);
2721 return;
2722 }
2723
2724 /*
2725 * If the COMPLETE flag is clear, then there were no active
2726 * entries and we want to roll back to the previous inode until
2727 * the new inode is committed to disk. If the COMPLETE flag is
2728 * set, then we have deleted an entry that never made it to disk.
2729 * If the entry we deleted resulted from a name change, then the old
2730 * inode reference still resides on disk. Any rollback that we do
2731 * needs to be to that old inode (returned to us in prevdirrem). If
2732 * the entry we deleted resulted from a create, then there is
2733 * no entry on the disk, so we want to roll back to zero rather
2734 * than the uncommitted inode. In either of the COMPLETE cases we
2735 * want to immediately free the unwritten and unreferenced inode.
2736 */
2737 if ((dirrem->dm_state & COMPLETE) == 0) {
2738 dap->da_previous = dirrem;
2739 } else {
2740 if (prevdirrem != NULL) {
2741 dap->da_previous = prevdirrem;
2742 } else {
2743 dap->da_state &= ~DIRCHG;
2744 dap->da_pagedep = pagedep;
2745 }
2746 dirrem->dm_dirinum = pagedep->pd_ino;
2747 add_to_worklist(&dirrem->dm_list);
2748 }
2749 /*
2750 * Link into its inodedep. Put it on the id_bufwait list if the inode
2751 * is not yet written. If it is written, do the post-inode write
2752 * processing to put it on the id_pendinghd list.
2753 */
2754 if (inodedep_lookup(dp->i_fs, newinum, DEPALLOC, &inodedep) == 0 ||
2755 (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) {
2756 dap->da_state |= COMPLETE;
2757 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
2758 WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list);
2759 } else {
2760 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)],
2761 dap, da_pdlist);
2762 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list);
2763 }
2764 FREE_LOCK(&lk);
2765}
2766
2767/*
2768 * Called whenever the link count on an inode is changed.
2769 * It creates an inode dependency so that the new reference(s)
2770 * to the inode cannot be committed to disk until the updated
2771 * inode has been written.
2772 */
2773void
2774softdep_change_linkcnt(ip)
2775 struct inode *ip; /* the inode with the increased link count */
2776{
2777 struct inodedep *inodedep;
2778
2779 ACQUIRE_LOCK(&lk);
2780 (void) inodedep_lookup(ip->i_fs, ip->i_number, DEPALLOC, &inodedep);
2781 if (ip->i_nlink < ip->i_effnlink) {
2782 FREE_LOCK(&lk);
2783 panic("softdep_change_linkcnt: bad delta");
2784 }
2785 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
2786 FREE_LOCK(&lk);
2787}
2788
2789/*
2790 * This workitem decrements the inode's link count.
2791 * If the link count reaches zero, the file is removed.
2792 */
2793static void
2794handle_workitem_remove(dirrem)
2795 struct dirrem *dirrem;
2796{
2797 struct proc *p = CURPROC; /* XXX */
2798 struct inodedep *inodedep;
2799 struct vnode *vp;
2800 struct inode *ip;
2801 ino_t oldinum;
2802 int error;
2803
2804 if ((error = VFS_VGET(dirrem->dm_mnt, dirrem->dm_oldinum, &vp)) != 0) {
2805 softdep_error("handle_workitem_remove: vget", error);
2806 return;
2807 }
2808 ip = VTOI(vp);
2809 ACQUIRE_LOCK(&lk);
2810 if ((inodedep_lookup(ip->i_fs, dirrem->dm_oldinum, 0, &inodedep)) == 0){
2811 FREE_LOCK(&lk);
2812 panic("handle_workitem_remove: lost inodedep");
2813 }
2814 /*
2815 * Normal file deletion.
2816 */
2817 if ((dirrem->dm_state & RMDIR) == 0) {
2818 ip->i_nlink--;
2819 ip->i_flag |= IN_CHANGE;
2820 if (ip->i_nlink < ip->i_effnlink) {
2821 FREE_LOCK(&lk);
2822 panic("handle_workitem_remove: bad file delta");
2823 }
2824 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
2825 FREE_LOCK(&lk);
2826 vput(vp);
2827 num_dirrem -= 1;
2828 WORKITEM_FREE(dirrem, D_DIRREM);
2829 return;
2830 }
2831 /*
2832 * Directory deletion. Decrement reference count for both the
2833 * just deleted parent directory entry and the reference for ".".
2834 * Next truncate the directory to length zero. When the
2835 * truncation completes, arrange to have the reference count on
2836 * the parent decremented to account for the loss of "..".
2837 */
2838 ip->i_nlink -= 2;
2839 ip->i_flag |= IN_CHANGE;
2840 if (ip->i_nlink < ip->i_effnlink) {
2841 FREE_LOCK(&lk);
2842 panic("handle_workitem_remove: bad dir delta");
2843 }
2844 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
2845 FREE_LOCK(&lk);
2846 if ((error = UFS_TRUNCATE(vp, (off_t)0, 0, p->p_ucred, p)) != 0)
2847 softdep_error("handle_workitem_remove: truncate", error);
2848 /*
2849 	 * Rename a directory to a new parent. Since we are both deleting
2850 * and creating a new directory entry, the link count on the new
2851 * directory should not change. Thus we skip the followup dirrem.
2852 */
2853 if (dirrem->dm_state & DIRCHG) {
2854 vput(vp);
2855 num_dirrem -= 1;
2856 WORKITEM_FREE(dirrem, D_DIRREM);
2857 return;
2858 }
2859 /*
2860 * If the inodedep does not exist, then the zero'ed inode has
2861 * been written to disk. If the allocated inode has never been
2862 * written to disk, then the on-disk inode is zero'ed. In either
2863 * case we can remove the file immediately.
2864 */
2865 ACQUIRE_LOCK(&lk);
2866 dirrem->dm_state = 0;
2867 oldinum = dirrem->dm_oldinum;
2868 dirrem->dm_oldinum = dirrem->dm_dirinum;
2869 if (inodedep_lookup(ip->i_fs, oldinum, 0, &inodedep) == 0 ||
2870 check_inode_unwritten(inodedep)) {
2871 FREE_LOCK(&lk);
2872 vput(vp);
2873 handle_workitem_remove(dirrem);
2874 return;
2875 }
2876 WORKLIST_INSERT(&inodedep->id_inowait, &dirrem->dm_list);
2877 FREE_LOCK(&lk);
2878 vput(vp);
2879}
2880
2881/*
2882 * Inode de-allocation dependencies.
2883 *
2884 * When an inode's link count is reduced to zero, it can be de-allocated. We
2885 * found it convenient to postpone de-allocation until after the inode is
2886 * written to disk with its new link count (zero). At this point, all of the
2887 * on-disk inode's block pointers are nullified and, with careful dependency
2888 * list ordering, all dependencies related to the inode will be satisfied and
2889 * the corresponding dependency structures de-allocated. So, if/when the
2890 * inode is reused, there will be no mixing of old dependencies with new
2891 * ones. This artificial dependency is set up by the block de-allocation
2892 * procedure above (softdep_setup_freeblocks) and completed by the
2893 * following procedure.
2894 */
2895static void
2896handle_workitem_freefile(freefile)
2897 struct freefile *freefile;
2898{
2899 struct vnode vp;
2900 struct inode tip;
2901 struct inodedep *idp;
2902 int error;
2903
2904#ifdef DEBUG
2905 ACQUIRE_LOCK(&lk);
2906 error = inodedep_lookup(freefile->fx_fs, freefile->fx_oldinum, 0, &idp);
2907 FREE_LOCK(&lk);
2908 if (error)
2909 panic("handle_workitem_freefile: inodedep survived");
2910#endif
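	/*
	 * Fake up just enough of a vnode and inode for ffs_freefile()
	 * to locate the device and filesystem.
	 */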
2911 tip.i_devvp = freefile->fx_devvp;
2912 tip.i_dev = freefile->fx_devvp->v_rdev;
2913 tip.i_fs = freefile->fx_fs;
2914 vp.v_data = &tip;
2915 if ((error = ffs_freefile(&vp, freefile->fx_oldinum, freefile->fx_mode)) != 0)
2916 softdep_error("handle_workitem_freefile", error);
2917 WORKITEM_FREE(freefile, D_FREEFILE);
2918}
2919
2920/*
2921 * Disk writes.
2922 *
2923 * The dependency structures constructed above are most actively used when file
2924 * system blocks are written to disk. No constraints are placed on when a
2925 * block can be written, but unsatisfied update dependencies are made safe by
2926 * modifying (or replacing) the source memory for the duration of the disk
2927 * write. When the disk write completes, the memory block is again brought
2928 * up-to-date.
2929 *
2930 * In-core inode structure reclamation.
2931 *
2932 * Because there are a finite number of "in-core" inode structures, they are
2933 * reused regularly. By transferring all inode-related dependencies to the
2934 * in-memory inode block and indexing them separately (via "inodedep"s), we
2935 * can allow "in-core" inode structures to be reused at any time and avoid
2936 * any increase in contention.
2937 *
2938 * Called just before entering the device driver to initiate a new disk I/O.
2939 * The buffer must be locked, thus, no I/O completion operations can occur
2940 * while we are manipulating its associated dependencies.
2941 */
2942static void
2943softdep_disk_io_initiation(bp)
2944 struct buf *bp; /* structure describing disk write to occur */
2945{
2946 struct worklist *wk, *nextwk;
2947 struct indirdep *indirdep;
2948
2949 /*
2950 * We only care about write operations. There should never
2951 * be dependencies for reads.
2952 */
2953 if (bp->b_flags & B_READ)
2954 panic("softdep_disk_io_initiation: read");
2955 /*
2956 * Do any necessary pre-I/O processing.
2957 */
2958 for (wk = LIST_FIRST(&bp->b_dep); wk; wk = nextwk) {
2959 nextwk = LIST_NEXT(wk, wk_list);
2960 switch (wk->wk_type) {
2961
2962 case D_PAGEDEP:
2963 initiate_write_filepage(WK_PAGEDEP(wk), bp);
2964 continue;
2965
2966 case D_INODEDEP:
2967 initiate_write_inodeblock(WK_INODEDEP(wk), bp);
2968 continue;
2969
2970 case D_INDIRDEP:
2971 indirdep = WK_INDIRDEP(wk);
2972 if (indirdep->ir_state & GOINGAWAY)
2973 panic("disk_io_initiation: indirdep gone");
2974 /*
2975 * If there are no remaining dependencies, this
2976 * will be writing the real pointers, so the
2977 * dependency can be freed.
2978 */
2979 if (LIST_FIRST(&indirdep->ir_deplisthd) == NULL) {
2980 indirdep->ir_savebp->b_flags |= B_INVAL | B_NOCACHE;
2981 brelse(indirdep->ir_savebp);
2982 /* inline expand WORKLIST_REMOVE(wk); */
2983 wk->wk_state &= ~ONWORKLIST;
2984 LIST_REMOVE(wk, wk_list);
2985 WORKITEM_FREE(indirdep, D_INDIRDEP);
2986 continue;
2987 }
2988 /*
2989 * Replace up-to-date version with safe version.
2990 */
2991 MALLOC(indirdep->ir_saveddata, caddr_t, bp->b_bcount,
2992 M_INDIRDEP, M_SOFTDEP_FLAGS);
2993 ACQUIRE_LOCK(&lk);
2994 indirdep->ir_state &= ~ATTACHED;
2995 indirdep->ir_state |= UNDONE;
2996 bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount);
2997 bcopy(indirdep->ir_savebp->b_data, bp->b_data,
2998 bp->b_bcount);
2999 FREE_LOCK(&lk);
3000 continue;
3001
3002 case D_MKDIR:
3003 case D_BMSAFEMAP:
3004 case D_ALLOCDIRECT:
3005 case D_ALLOCINDIR:
3006 continue;
3007
3008 default:
3009 panic("handle_disk_io_initiation: Unexpected type %s",
3010 TYPENAME(wk->wk_type));
3011 /* NOTREACHED */
3012 }
3013 }
3014}
3015
3016/*
3017 * Called from within the procedure above to deal with unsatisfied
3018 * allocation dependencies in a directory. The buffer must be locked,
3019 * thus, no I/O completion operations can occur while we are
3020 * manipulating its associated dependencies.
3021 */
3022static void
3023initiate_write_filepage(pagedep, bp)
3024 struct pagedep *pagedep;
3025 struct buf *bp;
3026{
3027 struct diradd *dap;
3028 struct direct *ep;
3029 int i;
3030
3031 if (pagedep->pd_state & IOSTARTED) {
3032 /*
3033 * This can only happen if there is a driver that does not
3034 * understand chaining. Here biodone will reissue the call
3035 * to strategy for the incomplete buffers.
3036 */
3037 printf("initiate_write_filepage: already started\n");
3038 return;
3039 }
3040 pagedep->pd_state |= IOSTARTED;
3041 ACQUIRE_LOCK(&lk);
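	/*
	 * Roll back each uncommitted directory entry: restore the
	 * previous on-disk inode number for a changed entry, or zero
	 * the entry for a new addition, until the referenced inode
	 * is safely on disk.
	 */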
3042 for (i = 0; i < DAHASHSZ; i++) {
3043 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) {
3044 ep = (struct direct *)
3045 ((char *)bp->b_data + dap->da_offset);
3046 if (ep->d_ino != dap->da_newinum) {
3047 FREE_LOCK(&lk);
3048 panic("%s: dir inum %d != new %d",
3049 "initiate_write_filepage",
3050 ep->d_ino, dap->da_newinum);
3051 }
3052 if (dap->da_state & DIRCHG)
3053 ep->d_ino = dap->da_previous->dm_oldinum;
3054 else
3055 ep->d_ino = 0;
3056 dap->da_state &= ~ATTACHED;
3057 dap->da_state |= UNDONE;
3058 }
3059 }
3060 FREE_LOCK(&lk);
3061}
3062
3063/*
3064 * Called from within the procedure above to deal with unsatisfied
3065 * allocation dependencies in an inodeblock. The buffer must be
3066 * locked, thus, no I/O completion operations can occur while we
3067 * are manipulating its associated dependencies.
3068 */
3069static void
3070initiate_write_inodeblock(inodedep, bp)
3071 struct inodedep *inodedep;
3072 struct buf *bp; /* The inode block */
3073{
3074 struct allocdirect *adp, *lastadp;
3075 struct dinode *dp;
3076 struct fs *fs;
3077 ufs_lbn_t prevlbn = 0;
3078 int i, deplist;
3079
3080 if (inodedep->id_state & IOSTARTED)
3081 panic("initiate_write_inodeblock: already started");
3082 inodedep->id_state |= IOSTARTED;
3083 fs = inodedep->id_fs;
3084 dp = (struct dinode *)bp->b_data +
3085 ino_to_fsbo(fs, inodedep->id_ino);
3086 /*
3087 * If the bitmap is not yet written, then the allocated
3088 * inode cannot be written to disk.
3089 */
3090 if ((inodedep->id_state & DEPCOMPLETE) == 0) {
3091 if (inodedep->id_savedino != NULL)
3092 panic("initiate_write_inodeblock: already doing I/O");
3093 MALLOC(inodedep->id_savedino, struct dinode *,
3094 sizeof(struct dinode), M_INODEDEP, M_SOFTDEP_FLAGS);
3095 *inodedep->id_savedino = *dp;
3096 bzero((caddr_t)dp, sizeof(struct dinode));
3097 return;
3098 }
3099 /*
3100 * If no dependencies, then there is nothing to roll back.
3101 */
3102 inodedep->id_savedsize = dp->di_size;
3103 if (TAILQ_FIRST(&inodedep->id_inoupdt) == NULL)
3104 return;
3105 /*
3106 * Set the dependencies to busy.
3107 */
3108 ACQUIRE_LOCK(&lk);
3109 for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
3110 adp = TAILQ_NEXT(adp, ad_next)) {
3111#ifdef DIAGNOSTIC
3112 if (deplist != 0 && prevlbn >= adp->ad_lbn) {
3113 FREE_LOCK(&lk);
3114 panic("softdep_write_inodeblock: lbn order");
3115 }
3116 prevlbn = adp->ad_lbn;
3117 if (adp->ad_lbn < NDADDR &&
3118 dp->di_db[adp->ad_lbn] != adp->ad_newblkno) {
3119 FREE_LOCK(&lk);
3120 panic("%s: direct pointer #%ld mismatch %d != %d",
3121 "softdep_write_inodeblock", adp->ad_lbn,
3122 dp->di_db[adp->ad_lbn], adp->ad_newblkno);
3123 }
3124 if (adp->ad_lbn >= NDADDR &&
3125 dp->di_ib[adp->ad_lbn - NDADDR] != adp->ad_newblkno) {
3126 FREE_LOCK(&lk);
3127 panic("%s: indirect pointer #%ld mismatch %d != %d",
3128 "softdep_write_inodeblock", adp->ad_lbn - NDADDR,
3129 dp->di_ib[adp->ad_lbn - NDADDR], adp->ad_newblkno);
3130 }
3131 deplist |= 1 << adp->ad_lbn;
3132 if ((adp->ad_state & ATTACHED) == 0) {
3133 FREE_LOCK(&lk);
3134 panic("softdep_write_inodeblock: Unknown state 0x%x",
3135 adp->ad_state);
3136 }
3137#endif /* DIAGNOSTIC */
3138 adp->ad_state &= ~ATTACHED;
3139 adp->ad_state |= UNDONE;
3140 }
3141 /*
3142 * The on-disk inode cannot claim to be any larger than the last
3143 * fragment that has been written. Otherwise, the on-disk inode
3144 * might have fragments that were not the last block in the file
3145 * which would corrupt the filesystem.
3146 */
3147 for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
3148 lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
3149 if (adp->ad_lbn >= NDADDR)
3150 break;
3151 dp->di_db[adp->ad_lbn] = adp->ad_oldblkno;
3152 /* keep going until hitting a rollback to a frag */
3153 if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
3154 continue;
3155 dp->di_size = fs->fs_bsize * adp->ad_lbn + adp->ad_oldsize;
3156 for (i = adp->ad_lbn + 1; i < NDADDR; i++) {
3157#ifdef DIAGNOSTIC
3158 if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0) {
3159 FREE_LOCK(&lk);
3160 panic("softdep_write_inodeblock: lost dep1");
3161 }
3162#endif /* DIAGNOSTIC */
3163 dp->di_db[i] = 0;
3164 }
3165 for (i = 0; i < NIADDR; i++) {
3166#ifdef DIAGNOSTIC
3167 if (dp->di_ib[i] != 0 &&
3168 (deplist & ((1 << NDADDR) << i)) == 0) {
3169 FREE_LOCK(&lk);
3170 panic("softdep_write_inodeblock: lost dep2");
3171 }
3172#endif /* DIAGNOSTIC */
3173 dp->di_ib[i] = 0;
3174 }
3175 FREE_LOCK(&lk);
3176 return;
3177 }
3178 /*
3179 * If we have zero'ed out the last allocated block of the file,
3180 * roll back the size to the last currently allocated block.
3181 	 * We know that this last allocated block is full-sized, as
3182 * we already checked for fragments in the loop above.
3183 */
3184 if (lastadp != NULL &&
3185 dp->di_size <= (lastadp->ad_lbn + 1) * fs->fs_bsize) {
3186 for (i = lastadp->ad_lbn; i >= 0; i--)
3187 if (dp->di_db[i] != 0)
3188 break;
3189 dp->di_size = (i + 1) * fs->fs_bsize;
3190 }
3191 /*
3192 * The only dependencies are for indirect blocks.
3193 *
3194 * The file size for indirect block additions is not guaranteed.
3195 * Such a guarantee would be non-trivial to achieve. The conventional
3196 * synchronous write implementation also does not make this guarantee.
3197 * Fsck should catch and fix discrepancies. Arguably, the file size
3198 * can be over-estimated without destroying integrity when the file
3199 * moves into the indirect blocks (i.e., is large). If we want to
3200 * postpone fsck, we are stuck with this argument.
3201 */
3202 for (; adp; adp = TAILQ_NEXT(adp, ad_next))
3203 dp->di_ib[adp->ad_lbn - NDADDR] = 0;
3204 FREE_LOCK(&lk);
3205}
3206
3207/*
3208 * This routine is called during the completion interrupt
3209 * service routine for a disk write (from the procedure called
3210 * by the device driver to inform the file system caches of
3211 * a request completion). It should be called early in this
3212 * procedure, before the block is made available to other
3213 * processes or other routines are called.
3214 */
3215static void
3216softdep_disk_write_complete(bp)
3217 struct buf *bp; /* describes the completed disk write */
3218{
3219 struct worklist *wk;
3220 struct workhead reattach;
3221 struct newblk *newblk;
3222 struct allocindir *aip;
3223 struct allocdirect *adp;
3224 struct indirdep *indirdep;
3225 struct inodedep *inodedep;
3226 struct bmsafemap *bmsafemap;
3227
3228#ifdef DEBUG
3229 if (lk.lkt_held != -1)
3230 panic("softdep_disk_write_complete: lock is held");
3231 lk.lkt_held = -2;
3232#endif
3233 LIST_INIT(&reattach);
3234 while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) {
3235 WORKLIST_REMOVE(wk);
3236 switch (wk->wk_type) {
3237
3238 case D_PAGEDEP:
3239 if (handle_written_filepage(WK_PAGEDEP(wk), bp))
3240 WORKLIST_INSERT(&reattach, wk);
3241 continue;
3242
3243 case D_INODEDEP:
3244 if (handle_written_inodeblock(WK_INODEDEP(wk), bp))
3245 WORKLIST_INSERT(&reattach, wk);
3246 continue;
3247
3248 case D_BMSAFEMAP:
3249 bmsafemap = WK_BMSAFEMAP(wk);
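			/*
			 * The cylinder group map has been written, so every
			 * block and inode allocation that was waiting on it
			 * is now DEPCOMPLETE.
			 */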
3250 while ((newblk = LIST_FIRST(&bmsafemap->sm_newblkhd))) {
3251 newblk->nb_state |= DEPCOMPLETE;
3252 newblk->nb_bmsafemap = NULL;
3253 LIST_REMOVE(newblk, nb_deps);
3254 }
3255 while ((adp =
3256 LIST_FIRST(&bmsafemap->sm_allocdirecthd))) {
3257 adp->ad_state |= DEPCOMPLETE;
3258 adp->ad_buf = NULL;
3259 LIST_REMOVE(adp, ad_deps);
3260 handle_allocdirect_partdone(adp);
3261 }
3262 while ((aip =
3263 LIST_FIRST(&bmsafemap->sm_allocindirhd))) {
3264 aip->ai_state |= DEPCOMPLETE;
3265 aip->ai_buf = NULL;
3266 LIST_REMOVE(aip, ai_deps);
3267 handle_allocindir_partdone(aip);
3268 }
3269 while ((inodedep =
3270 LIST_FIRST(&bmsafemap->sm_inodedephd)) != NULL) {
3271 inodedep->id_state |= DEPCOMPLETE;
3272 LIST_REMOVE(inodedep, id_deps);
3273 inodedep->id_buf = NULL;
3274 }
3275 WORKITEM_FREE(bmsafemap, D_BMSAFEMAP);
3276 continue;
3277
3278 case D_MKDIR:
3279 handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY);
3280 continue;
3281
3282 case D_ALLOCDIRECT:
3283 adp = WK_ALLOCDIRECT(wk);
3284 adp->ad_state |= COMPLETE;
3285 handle_allocdirect_partdone(adp);
3286 continue;
3287
3288 case D_ALLOCINDIR:
3289 aip = WK_ALLOCINDIR(wk);
3290 aip->ai_state |= COMPLETE;
3291 handle_allocindir_partdone(aip);
3292 continue;
3293
3294 case D_INDIRDEP:
3295 indirdep = WK_INDIRDEP(wk);
3296 if (indirdep->ir_state & GOINGAWAY) {
3297 lk.lkt_held = -1;
3298 panic("disk_write_complete: indirdep gone");
3299 }
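			/*
			 * Restore the up-to-date pointers saved aside in
			 * softdep_disk_io_initiation and finish any
			 * allocindirs that completed while the write was in
			 * progress. The buffer is redirtied below so the
			 * real pointers eventually reach the disk.
			 */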
3300 bcopy(indirdep->ir_saveddata, bp->b_data, bp->b_bcount);
3301 FREE(indirdep->ir_saveddata, M_INDIRDEP);
3302 indirdep->ir_saveddata = 0;
3303 indirdep->ir_state &= ~UNDONE;
3304 indirdep->ir_state |= ATTACHED;
3305 while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != 0) {
3306 handle_allocindir_partdone(aip);
3307 if (aip == LIST_FIRST(&indirdep->ir_donehd)) {
3308 lk.lkt_held = -1;
3309 panic("disk_write_complete: not gone");
3310 }
3311 }
3312 WORKLIST_INSERT(&reattach, wk);
3313 if ((bp->b_flags & B_DELWRI) == 0)
3314 stat_indir_blk_ptrs++;
3315 bdirty(bp);
3316 continue;
3317
3318 default:
3319 lk.lkt_held = -1;
3320 panic("handle_disk_write_complete: Unknown type %s",
3321 TYPENAME(wk->wk_type));
3322 /* NOTREACHED */
3323 }
3324 }
3325 /*
3326 * Reattach any requests that must be redone.
3327 */
3328 while ((wk = LIST_FIRST(&reattach)) != NULL) {
3329 WORKLIST_REMOVE(wk);
3330 WORKLIST_INSERT(&bp->b_dep, wk);
3331 }
3332#ifdef DEBUG
3333 if (lk.lkt_held != -2)
3334 panic("softdep_disk_write_complete: lock lost");
3335 lk.lkt_held = -1;
3336#endif
3337}
3338
3339/*
3340 * Called from within softdep_disk_write_complete above. Note that
3341 * this routine is always called from interrupt level with further
3342 * splbio interrupts blocked.
3343 */
3344static void
3345handle_allocdirect_partdone(adp)
3346 struct allocdirect *adp; /* the completed allocdirect */
3347{
3348 struct allocdirect *listadp;
3349 struct inodedep *inodedep;
3350 long bsize;
3351
3352 if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
3353 return;
3354 if (adp->ad_buf != NULL) {
3355 lk.lkt_held = -1;
3356 panic("handle_allocdirect_partdone: dangling dep");
3357 }
3358 /*
3359 * The on-disk inode cannot claim to be any larger than the last
3360 * fragment that has been written. Otherwise, the on-disk inode
3361	 * might claim fragments that are not the last block in the file,
3362 * which would corrupt the filesystem. Thus, we cannot free any
3363 * allocdirects after one whose ad_oldblkno claims a fragment as
3364 * these blocks must be rolled back to zero before writing the inode.
3365 * We check the currently active set of allocdirects in id_inoupdt.
3366 */
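	/*
	 * For example, if an earlier allocdirect on id_inoupdt recorded
	 * an old fragment (say 2K, so ad_oldsize is neither 0 nor
	 * fs_bsize), the scan below returns at that entry and this
	 * dependency is left in place until the fragment rollback has
	 * been resolved.
	 */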
3367 inodedep = adp->ad_inodedep;
3368 bsize = inodedep->id_fs->fs_bsize;
3369 TAILQ_FOREACH(listadp, &inodedep->id_inoupdt, ad_next) {
3370 /* found our block */
3371 if (listadp == adp)
3372 break;
3373		/* continue if the old block is not a fragment */
3374 if (listadp->ad_oldsize == 0 ||
3375 listadp->ad_oldsize == bsize)
3376 continue;
3377 /* hit a fragment */
3378 return;
3379 }
3380 /*
3381 * If we have reached the end of the current list without
3382 * finding the just finished dependency, then it must be
3383 * on the future dependency list. Future dependencies cannot
3384 * be freed until they are moved to the current list.
3385 */
3386 if (listadp == NULL) {
3387#ifdef DEBUG
3388 TAILQ_FOREACH(listadp, &inodedep->id_newinoupdt, ad_next)
3389 /* found our block */
3390 if (listadp == adp)
3391 break;
3392 if (listadp == NULL) {
3393 lk.lkt_held = -1;
3394 panic("handle_allocdirect_partdone: lost dep");
3395 }
3396#endif /* DEBUG */
3397 return;
3398 }
3399 /*
3400 * If we have found the just finished dependency, then free
3401 * it along with anything that follows it that is complete.
3402 */
3403 for (; adp; adp = listadp) {
3404 listadp = TAILQ_NEXT(adp, ad_next);
3405 if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
3406 return;
3407 free_allocdirect(&inodedep->id_inoupdt, adp, 1);
3408 }
3409}
3410
3411/*
3412 * Called from within softdep_disk_write_complete above. Note that
3413 * this routine is always called from interrupt level with further
3414 * splbio interrupts blocked.
3415 */
3416static void
3417handle_allocindir_partdone(aip)
3418 struct allocindir *aip; /* the completed allocindir */
3419{
3420 struct indirdep *indirdep;
3421
3422 if ((aip->ai_state & ALLCOMPLETE) != ALLCOMPLETE)
3423 return;
3424 if (aip->ai_buf != NULL) {
3425 lk.lkt_held = -1;
3426 panic("handle_allocindir_partdone: dangling dependency");
3427 }
3428 indirdep = aip->ai_indirdep;
3429 if (indirdep->ir_state & UNDONE) {
3430 LIST_REMOVE(aip, ai_next);
3431 LIST_INSERT_HEAD(&indirdep->ir_donehd, aip, ai_next);
3432 return;
3433 }
3434 ((ufs_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] =
3435 aip->ai_newblkno;
3436 LIST_REMOVE(aip, ai_next);
3437 if (aip->ai_freefrag != NULL)
3438 add_to_worklist(&aip->ai_freefrag->ff_list);
3439 WORKITEM_FREE(aip, D_ALLOCINDIR);
3440}
3441
3442/*
3443 * Called from within softdep_disk_write_complete above to restore
3444 * in-memory inode block contents to their most up-to-date state. Note
3445 * that this routine is always called from interrupt level with further
3446 * splbio interrupts blocked.
3447 */
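/*
 * A nonzero return value tells the caller to leave this inodedep
 * attached to the buffer: roll-backs or roll-forwards left the
 * in-memory inode block different from what just reached the disk,
 * so the block must be written again before the dependency can be
 * discarded.
 */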
3448static int
3449handle_written_inodeblock(inodedep, bp)
3450 struct inodedep *inodedep;
3451 struct buf *bp; /* buffer containing the inode block */
3452{
3453 struct worklist *wk, *filefree;
3454 struct allocdirect *adp, *nextadp;
3455 struct dinode *dp;
3456 int hadchanges;
3457
3458 if ((inodedep->id_state & IOSTARTED) == 0) {
3459 lk.lkt_held = -1;
3460 panic("handle_written_inodeblock: not started");
3461 }
3462 inodedep->id_state &= ~IOSTARTED;
3463 inodedep->id_state |= COMPLETE;
3464 dp = (struct dinode *)bp->b_data +
3465 ino_to_fsbo(inodedep->id_fs, inodedep->id_ino);
3466 /*
3467 * If we had to rollback the inode allocation because of
3468 * bitmaps being incomplete, then simply restore it.
3469 * Keep the block dirty so that it will not be reclaimed until
3470 * all associated dependencies have been cleared and the
3471 * corresponding updates written to disk.
3472 */
3473 if (inodedep->id_savedino != NULL) {
3474 *dp = *inodedep->id_savedino;
3475 FREE(inodedep->id_savedino, M_INODEDEP);
3476 inodedep->id_savedino = NULL;
3477 if ((bp->b_flags & B_DELWRI) == 0)
3478 stat_inode_bitmap++;
3479 bdirty(bp);
3480 return (1);
3481 }
3482 /*
3483 * Roll forward anything that had to be rolled back before
3484 * the inode could be updated.
3485 */
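	/*
	 * Each allocdirect on id_inoupdt was written with its rolled-back
	 * value (the old block number for a direct pointer, zero for an
	 * indirect pointer). Now that the write has completed, install
	 * the new block number in the in-memory dinode and move the
	 * entry from UNDONE to ATTACHED.
	 */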
3486 hadchanges = 0;
3487 for (adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; adp = nextadp) {
3488 nextadp = TAILQ_NEXT(adp, ad_next);
3489 if (adp->ad_state & ATTACHED) {
3490 lk.lkt_held = -1;
3491 panic("handle_written_inodeblock: new entry");
3492 }
3493 if (adp->ad_lbn < NDADDR) {
3494 if (dp->di_db[adp->ad_lbn] != adp->ad_oldblkno) {
3495 lk.lkt_held = -1;
3496 panic("%s: %s #%ld mismatch %d != %d",
3497 "handle_written_inodeblock",
3498 "direct pointer", adp->ad_lbn,
3499 dp->di_db[adp->ad_lbn], adp->ad_oldblkno);
3500 }
3501 dp->di_db[adp->ad_lbn] = adp->ad_newblkno;
3502 } else {
3503 if (dp->di_ib[adp->ad_lbn - NDADDR] != 0) {
3504 lk.lkt_held = -1;
3505 panic("%s: %s #%ld allocated as %d",
3506 "handle_written_inodeblock",
3507 "indirect pointer", adp->ad_lbn - NDADDR,
3508 dp->di_ib[adp->ad_lbn - NDADDR]);
3509 }
3510 dp->di_ib[adp->ad_lbn - NDADDR] = adp->ad_newblkno;
3511 }
3512 adp->ad_state &= ~UNDONE;
3513 adp->ad_state |= ATTACHED;
3514 hadchanges = 1;
3515 }
3516 if (hadchanges && (bp->b_flags & B_DELWRI) == 0)
3517 stat_direct_blk_ptrs++;
3518 /*
3519 * Reset the file size to its most up-to-date value.
3520 */
3521 if (inodedep->id_savedsize == -1) {
3522 lk.lkt_held = -1;
3523 panic("handle_written_inodeblock: bad size");
3524 }
3525 if (dp->di_size != inodedep->id_savedsize) {
3526 dp->di_size = inodedep->id_savedsize;
3527 hadchanges = 1;
3528 }
3529 inodedep->id_savedsize = -1;
3530 /*
3531 * If there were any rollbacks in the inode block, then it must be
3532	 * marked dirty so that it will eventually get written back in
3533 * its correct form.
3534 */
3535 if (hadchanges)
3536 bdirty(bp);
3537 /*
3538 * Process any allocdirects that completed during the update.
3539 */
3540 if ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL)
3541 handle_allocdirect_partdone(adp);
3542 /*
3543 * Process deallocations that were held pending until the
3544 * inode had been written to disk. Freeing of the inode
3545 * is delayed until after all blocks have been freed to
3546 * avoid creation of new <vfsid, inum, lbn> triples
3547 * before the old ones have been deleted.
3548 */
3549 filefree = NULL;
3550 while ((wk = LIST_FIRST(&inodedep->id_bufwait)) != NULL) {
3551 WORKLIST_REMOVE(wk);
3552 switch (wk->wk_type) {
3553
3554 case D_FREEFILE:
3555 /*
3556 * We defer adding filefree to the worklist until
3557 * all other additions have been made to ensure
3558 * that it will be done after all the old blocks
3559 * have been freed.
3560 */
3561 if (filefree != NULL) {
3562 lk.lkt_held = -1;
3563 panic("handle_written_inodeblock: filefree");
3564 }
3565 filefree = wk;
3566 continue;
3567
3568 case D_MKDIR:
3569 handle_written_mkdir(WK_MKDIR(wk), MKDIR_PARENT);
3570 continue;
3571
3572 case D_DIRADD:
3573 diradd_inode_written(WK_DIRADD(wk), inodedep);
3574 continue;
3575
3576 case D_FREEBLKS:
3577 case D_FREEFRAG:
3578 case D_DIRREM:
3579 add_to_worklist(wk);
3580 continue;
3581
3582 default:
3583 lk.lkt_held = -1;
3584 panic("handle_written_inodeblock: Unknown type %s",
3585 TYPENAME(wk->wk_type));
3586 /* NOTREACHED */
3587 }
3588 }
3589 if (filefree != NULL) {
3590 if (free_inodedep(inodedep) == 0) {
3591 lk.lkt_held = -1;
3592 panic("handle_written_inodeblock: live inodedep");
3593 }
3594 add_to_worklist(filefree);
3595 return (0);
3596 }
3597
3598 /*
3599 * If no outstanding dependencies, free it.
3600 */
3601 if (free_inodedep(inodedep) || TAILQ_FIRST(&inodedep->id_inoupdt) == 0)
3602 return (0);
3603 return (hadchanges);
3604}
3605
3606/*
3607 * Process a diradd entry after its dependent inode has been written.
3608 * This routine must be called with splbio interrupts blocked.
3609 */
3610static void
3611diradd_inode_written(dap, inodedep)
3612 struct diradd *dap;
3613 struct inodedep *inodedep;
3614{
3615 struct pagedep *pagedep;
3616
3617 dap->da_state |= COMPLETE;
3618 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
3619 if (dap->da_state & DIRCHG)
3620 pagedep = dap->da_previous->dm_pagedep;
3621 else
3622 pagedep = dap->da_pagedep;
3623 LIST_REMOVE(dap, da_pdlist);
3624 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
3625 }
3626 WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list);
3627}
3628
3629/*
3630 * Handle the completion of a mkdir dependency.
3631 */
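/*
 * A mkdir has two halves: MKDIR_BODY (the new directory's first block,
 * holding the "." and ".." entries) and MKDIR_PARENT (the parent
 * directory's inode with its bumped link count). The associated diradd
 * becomes DEPCOMPLETE only once both halves have been written.
 */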
3632static void
3633handle_written_mkdir(mkdir, type)
3634 struct mkdir *mkdir;
3635 int type;
3636{
3637 struct diradd *dap;
3638 struct pagedep *pagedep;
3639
3640 if (mkdir->md_state != type) {
3641 lk.lkt_held = -1;
3642 panic("handle_written_mkdir: bad type");
3643 }
3644 dap = mkdir->md_diradd;
3645 dap->da_state &= ~type;
3646 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0)
3647 dap->da_state |= DEPCOMPLETE;
3648 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
3649 if (dap->da_state & DIRCHG)
3650 pagedep = dap->da_previous->dm_pagedep;
3651 else
3652 pagedep = dap->da_pagedep;
3653 LIST_REMOVE(dap, da_pdlist);
3654 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
3655 }
3656 LIST_REMOVE(mkdir, md_mkdirs);
3657 WORKITEM_FREE(mkdir, D_MKDIR);
3658}
3659
3660/*
3661 * Called from within softdep_disk_write_complete above.
3662 * A write operation was just completed. Removed inodes can
3663 * now be freed and associated block pointers may be committed.
3664 * Note that this routine is always called from interrupt level
3665 * with further splbio interrupts blocked.
3666 */
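/*
 * Processing happens in three passes: directory removals committed by
 * this write are queued to the worklist, committed additions are
 * freed, and any additions that had to be rolled back have their inode
 * numbers restored in the page, which is then re-dirtied.
 */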
3667static int
3668handle_written_filepage(pagedep, bp)
3669 struct pagedep *pagedep;
3670 struct buf *bp; /* buffer containing the written page */
3671{
3672 struct dirrem *dirrem;
3673 struct diradd *dap, *nextdap;
3674 struct direct *ep;
3675 int i, chgs;
3676
3677 if ((pagedep->pd_state & IOSTARTED) == 0) {
3678 lk.lkt_held = -1;
3679 panic("handle_written_filepage: not started");
3680 }
3681 pagedep->pd_state &= ~IOSTARTED;
3682 /*
3683 * Process any directory removals that have been committed.
3684 */
3685 while ((dirrem = LIST_FIRST(&pagedep->pd_dirremhd)) != NULL) {
3686 LIST_REMOVE(dirrem, dm_next);
3687 dirrem->dm_dirinum = pagedep->pd_ino;
3688 add_to_worklist(&dirrem->dm_list);
3689 }
3690 /*
3691 * Free any directory additions that have been committed.
3692 */
3693 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL)
3694 free_diradd(dap);
3695 /*
3696 * Uncommitted directory entries must be restored.
3697 */
3698 for (chgs = 0, i = 0; i < DAHASHSZ; i++) {
3699 for (dap = LIST_FIRST(&pagedep->pd_diraddhd[i]); dap;
3700 dap = nextdap) {
3701 nextdap = LIST_NEXT(dap, da_pdlist);
3702 if (dap->da_state & ATTACHED) {
3703 lk.lkt_held = -1;
3704 panic("handle_written_filepage: attached");
3705 }
3706 ep = (struct direct *)
3707 ((char *)bp->b_data + dap->da_offset);
3708 ep->d_ino = dap->da_newinum;
3709 dap->da_state &= ~UNDONE;
3710 dap->da_state |= ATTACHED;
3711 chgs = 1;
3712 /*
3713 * If the inode referenced by the directory has
3714 * been written out, then the dependency can be
3715 * moved to the pending list.
3716 */
3717 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
3718 LIST_REMOVE(dap, da_pdlist);
3719 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap,
3720 da_pdlist);
3721 }
3722 }
3723 }
3724 /*
3725 * If there were any rollbacks in the directory, then it must be
3726	 * marked dirty so that it will eventually get written back in
3727 * its correct form.
3728 */
3729 if (chgs) {
3730 if ((bp->b_flags & B_DELWRI) == 0)
3731 stat_dir_entry++;
3732 bdirty(bp);
3733 }
3734 /*
3735 * If no dependencies remain, the pagedep will be freed.
3736 * Otherwise it will remain to update the page before it
3737 * is written back to disk.
3738 */
3739 if (LIST_FIRST(&pagedep->pd_pendinghd) == 0) {
3740 for (i = 0; i < DAHASHSZ; i++)
3741 if (LIST_FIRST(&pagedep->pd_diraddhd[i]) != NULL)
3742 break;
3743 if (i == DAHASHSZ) {
3744 LIST_REMOVE(pagedep, pd_hash);
3745 WORKITEM_FREE(pagedep, D_PAGEDEP);
3746 return (0);
3747 }
3748 }
3749 return (1);
3750}
3751
3752/*
3753 * Writing back in-core inode structures.
3754 *
3755 * The file system only accesses an inode's contents when it occupies an
3756 * "in-core" inode structure. These "in-core" structures are separate from
3757 * the page frames used to cache inode blocks. Only the latter are
3758 * transferred to/from the disk. So, when the updated contents of the
3759 * "in-core" inode structure are copied to the corresponding in-memory inode
3760 * block, the dependencies are also transferred. The following procedure is
3761 * called when copying a dirty "in-core" inode to a cached inode block.
3762 */
3763
3764/*
3765 * Called when an inode is loaded from disk. If the effective link count
3766 * differed from the actual link count when it was last flushed, then we
3767 * need to ensure that the correct effective link count is put back.
3768 */
3769void
3770softdep_load_inodeblock(ip)
3771 struct inode *ip; /* the "in_core" copy of the inode */
3772{
3773 struct inodedep *inodedep;
3774
3775 /*
3776 * Check for alternate nlink count.
3777 */
3778 ip->i_effnlink = ip->i_nlink;
3779 ACQUIRE_LOCK(&lk);
3780 if (inodedep_lookup(ip->i_fs, ip->i_number, 0, &inodedep) == 0) {
3781 FREE_LOCK(&lk);
3782 return;
3783 }
3784 ip->i_effnlink -= inodedep->id_nlinkdelta;
3785 FREE_LOCK(&lk);
3786}
3787
3788/*
3789 * This routine is called just before the "in-core" inode
3790 * information is to be copied to the in-memory inode block.
3791 * Recall that an inode block contains several inodes. If
3792 * the force flag is set, then the dependencies will be
3793 * cleared so that the update can always be made. Note that
3794 * the buffer is locked when this routine is called, so we
3795 * will never be in the middle of writing the inode block
3796 * to disk.
3797 */
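/*
 * When waitfor is nonzero and the cylinder group bitmap covering this
 * inode has not yet been written (DEPCOMPLETE still clear), the bitmap
 * buffer is pushed out synchronously below so that a following fsync
 * can make progress.
 */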
3798void
3799softdep_update_inodeblock(ip, bp, waitfor)
3800 struct inode *ip; /* the "in_core" copy of the inode */
3801 struct buf *bp; /* the buffer containing the inode block */
3802 int waitfor; /* nonzero => update must be allowed */
3803{
3804 struct inodedep *inodedep;
3805 struct worklist *wk;
3806 int error, gotit;
3807
3808 /*
3809 * If the effective link count is not equal to the actual link
3810 * count, then we must track the difference in an inodedep while
3811 * the inode is (potentially) tossed out of the cache. Otherwise,
3812 * if there is no existing inodedep, then there are no dependencies
3813 * to track.
3814 */
3815 ACQUIRE_LOCK(&lk);
3816 if (inodedep_lookup(ip->i_fs, ip->i_number, 0, &inodedep) == 0) {
3817 FREE_LOCK(&lk);
3818 if (ip->i_effnlink != ip->i_nlink)
3819 panic("softdep_update_inodeblock: bad link count");
3820 return;
3821 }
3822 if (inodedep->id_nlinkdelta != ip->i_nlink - ip->i_effnlink) {
3823 FREE_LOCK(&lk);
3824 panic("softdep_update_inodeblock: bad delta");
3825 }
3826 /*
3827 * Changes have been initiated. Anything depending on these
3828 * changes cannot occur until this inode has been written.
3829 */
3830 inodedep->id_state &= ~COMPLETE;
3831 if ((inodedep->id_state & ONWORKLIST) == 0)
3832 WORKLIST_INSERT(&bp->b_dep, &inodedep->id_list);
3833 /*
3834 * Any new dependencies associated with the incore inode must
3835 * now be moved to the list associated with the buffer holding
3836	 * the in-memory copy of the inode. Once merged, process any
3837 * allocdirects that are completed by the merger.
3838 */
3839 merge_inode_lists(inodedep);
3840 if (TAILQ_FIRST(&inodedep->id_inoupdt) != NULL)
3841 handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_inoupdt));
3842 /*
3843 * Now that the inode has been pushed into the buffer, the
3844 * operations dependent on the inode being written to disk
3845 * can be moved to the id_bufwait so that they will be
3846 * processed when the buffer I/O completes.
3847 */
3848 while ((wk = LIST_FIRST(&inodedep->id_inowait)) != NULL) {
3849 WORKLIST_REMOVE(wk);
3850 WORKLIST_INSERT(&inodedep->id_bufwait, wk);
3851 }
3852 /*
3853 * Newly allocated inodes cannot be written until the bitmap
3854	 * that allocates them has been written (indicated by
3855 * DEPCOMPLETE being set in id_state). If we are doing a
3856 * forced sync (e.g., an fsync on a file), we force the bitmap
3857 * to be written so that the update can be done.
3858 */
3859 if ((inodedep->id_state & DEPCOMPLETE) != 0 || waitfor == 0) {
3860 FREE_LOCK(&lk);
3861 return;
3862 }
3863 gotit = getdirtybuf(&inodedep->id_buf, MNT_WAIT);
3864 FREE_LOCK(&lk);
3865 if (gotit &&
3866 (error = VOP_BWRITE(inodedep->id_buf->b_vp, inodedep->id_buf)) != 0)
3867 softdep_error("softdep_update_inodeblock: bwrite", error);
3868 if ((inodedep->id_state & DEPCOMPLETE) == 0)
3869 panic("softdep_update_inodeblock: update failed");
3870}
3871
3872/*
3873 * Merge the new inode dependency list (id_newinoupdt) into the old
3874 * inode dependency list (id_inoupdt). This routine must be called
3875 * with splbio interrupts blocked.
3876 */
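/*
 * Both lists are kept sorted by ad_lbn. Entries from id_newinoupdt
 * are spliced in ahead of the first id_inoupdt entry with an equal or
 * greater lbn; when the lbns match, allocdirect_merge() collapses the
 * two dependencies into one. Any new entries left over are appended
 * at the tail.
 */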
3877static void
3878merge_inode_lists(inodedep)
3879 struct inodedep *inodedep;
3880{
3881 struct allocdirect *listadp, *newadp;
3882
3883 newadp = TAILQ_FIRST(&inodedep->id_newinoupdt);
3884 for (listadp = TAILQ_FIRST(&inodedep->id_inoupdt); listadp && newadp;) {
3885 if (listadp->ad_lbn < newadp->ad_lbn) {
3886 listadp = TAILQ_NEXT(listadp, ad_next);
3887 continue;
3888 }
3889 TAILQ_REMOVE(&inodedep->id_newinoupdt, newadp, ad_next);
3890 TAILQ_INSERT_BEFORE(listadp, newadp, ad_next);
3891 if (listadp->ad_lbn == newadp->ad_lbn) {
3892 allocdirect_merge(&inodedep->id_inoupdt, newadp,
3893 listadp);
3894 listadp = newadp;
3895 }
3896 newadp = TAILQ_FIRST(&inodedep->id_newinoupdt);
3897 }
3898 while ((newadp = TAILQ_FIRST(&inodedep->id_newinoupdt)) != NULL) {
3899 TAILQ_REMOVE(&inodedep->id_newinoupdt, newadp, ad_next);
3900 TAILQ_INSERT_TAIL(&inodedep->id_inoupdt, newadp, ad_next);
3901 }
3902}
3903
3904/*
3905 * If we are doing an fsync, then we must ensure that any directory
3906 * entries for the inode have been written after the inode gets to disk.
3907 */
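/*
 * Each pending diradd on id_pendinghd names a directory page (and, in
 * the MKDIR_PARENT case, a parent inode) that must reach the disk
 * first. To preserve the root-to-leaf locking order we unlock this
 * vnode before VFS_VGET'ing the parent, then update the parent inode
 * if required and flush the directory page holding our name.
 */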
3908static int
3909softdep_fsync(vp)
3910 struct vnode *vp; /* the "in_core" copy of the inode */
3911{
3912 struct inodedep *inodedep;
3913 struct pagedep *pagedep;
3914 struct worklist *wk;
3915 struct diradd *dap;
3916 struct mount *mnt;
3917 struct vnode *pvp;
3918 struct inode *ip;
3919 struct buf *bp;
3920 struct fs *fs;
3921 struct proc *p = CURPROC; /* XXX */
3922 int error, flushparent;
3923 ino_t parentino;
3924 ufs_lbn_t lbn;
3925
3926 ip = VTOI(vp);
3927 fs = ip->i_fs;
3928 ACQUIRE_LOCK(&lk);
3929 if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) == 0) {
3930 FREE_LOCK(&lk);
3931 return (0);
3932 }
3933 if (LIST_FIRST(&inodedep->id_inowait) != NULL ||
3934 LIST_FIRST(&inodedep->id_bufwait) != NULL ||
3935 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL ||
3936 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL) {
3937 FREE_LOCK(&lk);
3938 panic("softdep_fsync: pending ops");
3939 }
3940 for (error = 0, flushparent = 0; ; ) {
3941 if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) == NULL)
3942 break;
3943 if (wk->wk_type != D_DIRADD) {
3944 FREE_LOCK(&lk);
3945 panic("softdep_fsync: Unexpected type %s",
3946 TYPENAME(wk->wk_type));
3947 }
3948 dap = WK_DIRADD(wk);
3949 /*
3950 * Flush our parent if this directory entry
3951 * has a MKDIR_PARENT dependency.
3952 */
3953 if (dap->da_state & DIRCHG)
3954 pagedep = dap->da_previous->dm_pagedep;
3955 else
3956 pagedep = dap->da_pagedep;
3957 mnt = pagedep->pd_mnt;
3958 parentino = pagedep->pd_ino;
3959 lbn = pagedep->pd_lbn;
3960 if ((dap->da_state & (MKDIR_BODY | COMPLETE)) != COMPLETE) {
3961 FREE_LOCK(&lk);
3962 panic("softdep_fsync: dirty");
3963 }
3964 flushparent = dap->da_state & MKDIR_PARENT;
3965 /*
3966 * If we are being fsync'ed as part of vgone'ing this vnode,
3967 * then we will not be able to release and recover the
3968 * vnode below, so we just have to give up on writing its
3969 * directory entry out. It will eventually be written, just
3970 * not now, but then the user was not asking to have it
3971 * written, so we are not breaking any promises.
3972 */
3973 if (vp->v_flag & VXLOCK)
3974 break;
3975 /*
3976 * We prevent deadlock by always fetching inodes from the
3977 * root, moving down the directory tree. Thus, when fetching
3978 * our parent directory, we must unlock ourselves before
3979 * requesting the lock on our parent. See the comment in
3980 * ufs_lookup for details on possible races.
3981 */
3982 FREE_LOCK(&lk);
3983 VOP_UNLOCK(vp, 0, p);
3984 error = VFS_VGET(mnt, parentino, &pvp);
3985 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
3986 if (error != 0)
3987 return (error);
3988 if (flushparent) {
3989 if ((error = UFS_UPDATE(pvp, 1)) != 0) {
3990 vput(pvp);
3991 return (error);
3992 }
3993 }
3994 /*
3995 * Flush directory page containing the inode's name.
3996 */
3997 error = bread(pvp, lbn,