/*
 * Copyright 1998, 2000 Marshall Kirk McKusick. All Rights Reserved.
 *
 * The soft updates code is derived from the appendix of a University
 * of Michigan technical report (Gregory R. Ganger and Yale N. Patt,
 * "Soft Updates: A Solution to the Metadata Update Problem in File
 * Systems", CSE-TR-254-95, August 1995).
 *
 * Further information about soft updates can be obtained from:
 *
 *	Marshall Kirk McKusick		http://www.mckusick.com/softdep/
 *	1614 Oxford Street		mckusick@mckusick.com
 *	Berkeley, CA 94709-1608		+1-510-843-9542
 *	USA
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY MARSHALL KIRK MCKUSICK ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL MARSHALL KIRK MCKUSICK BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)ffs_softdep.c	9.59 (McKusick) 6/21/00
 * $FreeBSD: src/sys/ufs/ffs/ffs_softdep.c,v 1.57.2.11 2002/02/05 18:46:53 dillon Exp $
 * $DragonFly: src/sys/vfs/ufs/ffs_softdep.c,v 1.29 2005/10/14 21:04:13 dillon Exp $
 */

/*
 * For now we want the safety net that the DIAGNOSTIC and DEBUG flags provide.
 */
#ifndef DIAGNOSTIC
#define DIAGNOSTIC
#endif
#ifndef DEBUG
#define DEBUG
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/syslog.h>
#include <sys/vnode.h>
#include <sys/conf.h>
#include <sys/buf2.h>
#include <machine/inttypes.h>

#include "dir.h"
#include "quota.h"
#include "inode.h"
#include "ufsmount.h"
#include "fs.h"
#include "softdep.h"
#include "ffs_extern.h"
#include "ufs_extern.h"

#include <sys/thread2.h>

/*
 * These definitions need to be adapted to the system to which
 * this file is being ported.
 */
/*
 * malloc types defined for the softdep system.
 */
MALLOC_DEFINE(M_PAGEDEP, "pagedep","File page dependencies");
MALLOC_DEFINE(M_INODEDEP, "inodedep","Inode dependencies");
MALLOC_DEFINE(M_NEWBLK, "newblk","New block allocation");
MALLOC_DEFINE(M_BMSAFEMAP, "bmsafemap","Block or frag allocated from cyl group map");
MALLOC_DEFINE(M_ALLOCDIRECT, "allocdirect","Block or frag dependency for an inode");
MALLOC_DEFINE(M_INDIRDEP, "indirdep","Indirect block dependencies");
MALLOC_DEFINE(M_ALLOCINDIR, "allocindir","Block dependency for an indirect block");
MALLOC_DEFINE(M_FREEFRAG, "freefrag","Previously used frag for an inode");
MALLOC_DEFINE(M_FREEBLKS, "freeblks","Blocks freed from an inode");
MALLOC_DEFINE(M_FREEFILE, "freefile","Inode deallocated");
MALLOC_DEFINE(M_DIRADD, "diradd","New directory entry");
MALLOC_DEFINE(M_MKDIR, "mkdir","New directory");
MALLOC_DEFINE(M_DIRREM, "dirrem","Directory entry deleted");

#define M_SOFTDEP_FLAGS		(M_WAITOK | M_USE_RESERVE)

#define	D_PAGEDEP	0
#define	D_INODEDEP	1
#define	D_NEWBLK	2
#define	D_BMSAFEMAP	3
#define	D_ALLOCDIRECT	4
#define	D_INDIRDEP	5
#define	D_ALLOCINDIR	6
#define	D_FREEFRAG	7
#define	D_FREEBLKS	8
#define	D_FREEFILE	9
#define	D_DIRADD	10
#define	D_MKDIR		11
#define	D_DIRREM	12
#define	D_LAST		D_DIRREM

/*
 * translate from workitem type to memory type
 * MUST match the defines above, such that memtype[D_XXX] == M_XXX
 */
static struct malloc_type *memtype[] = {
	M_PAGEDEP,
	M_INODEDEP,
	M_NEWBLK,
	M_BMSAFEMAP,
	M_ALLOCDIRECT,
	M_INDIRDEP,
	M_ALLOCINDIR,
	M_FREEFRAG,
	M_FREEBLKS,
	M_FREEFILE,
	M_DIRADD,
	M_MKDIR,
	M_DIRREM
};

#define DtoM(type) (memtype[type])

/*
 * Names of malloc types.
 */
#define TYPENAME(type)	\
	((unsigned)(type) < D_LAST ? memtype[type]->ks_shortdesc : "???")
/*
 * End system adaptation definitions.
 */

/*
 * Internal function prototypes.
 */
static void softdep_error(char *, int);
static void drain_output(struct vnode *, int);
static int getdirtybuf(struct buf **, int);
static void clear_remove(struct thread *);
static void clear_inodedeps(struct thread *);
static int flush_pagedep_deps(struct vnode *, struct mount *,
		struct diraddhd *);
static int flush_inodedep_deps(struct fs *, ino_t);
static int handle_written_filepage(struct pagedep *, struct buf *);
static void diradd_inode_written(struct diradd *, struct inodedep *);
static int handle_written_inodeblock(struct inodedep *, struct buf *);
static void handle_allocdirect_partdone(struct allocdirect *);
static void handle_allocindir_partdone(struct allocindir *);
static void initiate_write_filepage(struct pagedep *, struct buf *);
static void handle_written_mkdir(struct mkdir *, int);
static void initiate_write_inodeblock(struct inodedep *, struct buf *);
static void handle_workitem_freefile(struct freefile *);
static void handle_workitem_remove(struct dirrem *);
static struct dirrem *newdirrem(struct buf *, struct inode *,
		struct inode *, int, struct dirrem **);
static void free_diradd(struct diradd *);
static void free_allocindir(struct allocindir *, struct inodedep *);
static int indir_trunc(struct inode *, ufs_daddr_t, int, ufs_lbn_t,
		long *);
static void deallocate_dependencies(struct buf *, struct inodedep *);
static void free_allocdirect(struct allocdirectlst *,
		struct allocdirect *, int);
static int check_inode_unwritten(struct inodedep *);
static int free_inodedep(struct inodedep *);
static void handle_workitem_freeblocks(struct freeblks *);
static void merge_inode_lists(struct inodedep *);
static void setup_allocindir_phase2(struct buf *, struct inode *,
		struct allocindir *);
static struct allocindir *newallocindir(struct inode *, int, ufs_daddr_t,
		ufs_daddr_t);
static void handle_workitem_freefrag(struct freefrag *);
static struct freefrag *newfreefrag(struct inode *, ufs_daddr_t, long);
static void allocdirect_merge(struct allocdirectlst *,
		struct allocdirect *, struct allocdirect *);
static struct bmsafemap *bmsafemap_lookup(struct buf *);
static int newblk_lookup(struct fs *, ufs_daddr_t, int,
		struct newblk **);
static int inodedep_lookup(struct fs *, ino_t, int, struct inodedep **);
static int pagedep_lookup(struct inode *, ufs_lbn_t, int,
		struct pagedep **);
static void pause_timer(void *);
static int request_cleanup(int, int);
static int process_worklist_item(struct mount *, int);
static void add_to_worklist(struct worklist *);

/*
 * Exported softdep operations.
 */
static void softdep_disk_io_initiation(struct buf *);
static void softdep_disk_write_complete(struct buf *);
static void softdep_deallocate_dependencies(struct buf *);
static int softdep_fsync(struct vnode *);
static int softdep_process_worklist(struct mount *);
static void softdep_move_dependencies(struct buf *, struct buf *);
static int softdep_count_dependencies(struct buf *bp, int);

static struct bio_ops softdep_bioops = {
	softdep_disk_io_initiation,		/* io_start */
	softdep_disk_write_complete,		/* io_complete */
	softdep_deallocate_dependencies,	/* io_deallocate */
	softdep_fsync,				/* io_fsync */
	softdep_process_worklist,		/* io_sync */
	softdep_move_dependencies,		/* io_movedeps */
	softdep_count_dependencies,		/* io_countdeps */
};

/*
 * Locking primitives.
 *
 * For a uniprocessor, all we need to do is protect against disk
 * interrupts. For a multiprocessor, this lock would have to be
 * a mutex. A single mutex is used throughout this file, though
 * finer grain locking could be used if contention warranted it.
 *
 * For a multiprocessor, the sleep call would accept a lock and
 * release it after the sleep processing was complete. In a uniprocessor
 * implementation there is no such interlock, so we simply mark
 * the places where it needs to be done with the `interlocked' form
 * of the lock calls. Since the uniprocessor sleep already interlocks
 * the spl, there is nothing that really needs to be done.
 */
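/*
 * Illustrative usage (a sketch, not original code): dependency-list
 * manipulation throughout this file is bracketed by the macros defined
 * below, e.g.
 *
 *	ACQUIRE_LOCK(&lk);
 *	... manipulate dependency structures ...
 *	FREE_LOCK(&lk);
 */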
#ifndef /* NOT */ DEBUG
static struct lockit {
} lk = { 0 };
#define ACQUIRE_LOCK(lk)	crit_enter_id("softupdates");
#define FREE_LOCK(lk)		crit_exit_id("softupdates");

#else /* DEBUG */
#define NOHOLDER	((struct thread *)-1)
#define SPECIAL_FLAG	((struct thread *)-2)
static struct lockit {
	int	lkt_spl;
	struct thread *lkt_held;
} lk = { 0, NOHOLDER };
static int lockcnt;

static void acquire_lock(struct lockit *);
static void free_lock(struct lockit *);
void softdep_panic(char *);

#define ACQUIRE_LOCK(lk)	acquire_lock(lk)
#define FREE_LOCK(lk)		free_lock(lk)

static void
acquire_lock(lk)
	struct lockit *lk;
{
	thread_t holder;

	if (lk->lkt_held != NOHOLDER) {
		holder = lk->lkt_held;
		FREE_LOCK(lk);
		if (holder == curthread)
			panic("softdep_lock: locking against myself");
		else
			panic("softdep_lock: lock held by %p", holder);
	}
	crit_enter_id("softupdates");
	lk->lkt_held = curthread;
	lockcnt++;
}

static void
free_lock(lk)
	struct lockit *lk;
{

	if (lk->lkt_held == NOHOLDER)
		panic("softdep_unlock: lock not held");
	lk->lkt_held = NOHOLDER;
	crit_exit_id("softupdates");
}

/*
 * Function to release soft updates lock and panic.
 */
void
softdep_panic(msg)
	char *msg;
{

	if (lk.lkt_held != NOHOLDER)
		FREE_LOCK(&lk);
	panic(msg);
}
#endif /* DEBUG */

static int interlocked_sleep(struct lockit *, int, void *, int,
		const char *, int);

/*
 * When going to sleep, we must save our SPL so that it does
 * not get lost if some other process uses the lock while we
 * are sleeping. We restore it after we have slept. This routine
 * wraps the interlocking with functions that sleep. The list
 * below enumerates the available set of operations.
 */
#define UNKNOWN		0
#define SLEEP		1
#define LOCKBUF		2
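/*
 * Example (sketch): sema_get() below sleeps on the semaphore address
 * while keeping the lock's interlock state consistent:
 *
 *	interlocked_sleep(&lk, SLEEP, (caddr_t)semap,
 *	    semap->prio, semap->name, semap->timo);
 */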

static int
interlocked_sleep(lk, op, ident, flags, wmesg, timo)
	struct lockit *lk;
	int op;
	void *ident;
	int flags;
	const char *wmesg;
	int timo;
{
	thread_t holder;
	int s, retval;

	s = lk->lkt_spl;
# ifdef DEBUG
	if (lk->lkt_held == NOHOLDER)
		panic("interlocked_sleep: lock not held");
	lk->lkt_held = NOHOLDER;
# endif /* DEBUG */
	switch (op) {
	case SLEEP:
		retval = tsleep(ident, flags, wmesg, timo);
		break;
	case LOCKBUF:
		retval = BUF_LOCK((struct buf *)ident, flags);
		break;
	default:
		panic("interlocked_sleep: unknown operation");
	}
# ifdef DEBUG
	if (lk->lkt_held != NOHOLDER) {
		holder = lk->lkt_held;
		FREE_LOCK(lk);
		if (holder == curthread)
			panic("interlocked_sleep: locking against self");
		else
			panic("interlocked_sleep: lock held by %p", holder);
	}
	lk->lkt_held = curthread;
	lockcnt++;
# endif /* DEBUG */
	lk->lkt_spl = s;
	return (retval);
}

/*
 * Place holder for real semaphores.
 */
struct sema {
	int	value;
	thread_t holder;
	char	*name;
	int	prio;
	int	timo;
};
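
/*
 * sema_get() returns 1 when the semaphore was free and is now held.
 * It returns 0 after having slept waiting for the holder, in which
 * case the caller is expected to redo its lookup and retry.
 */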
static void sema_init(struct sema *, char *, int, int);
static int sema_get(struct sema *, struct lockit *);
static void sema_release(struct sema *);

static void
sema_init(semap, name, prio, timo)
	struct sema *semap;
	char *name;
	int prio, timo;
{

	semap->holder = NOHOLDER;
	semap->value = 0;
	semap->name = name;
	semap->prio = prio;
	semap->timo = timo;
}

static int
sema_get(semap, interlock)
	struct sema *semap;
	struct lockit *interlock;
{

	if (semap->value++ > 0) {
		if (interlock != NULL) {
			interlocked_sleep(interlock, SLEEP, (caddr_t)semap,
			    semap->prio, semap->name, semap->timo);
			FREE_LOCK(interlock);
		} else {
			tsleep((caddr_t)semap, semap->prio, semap->name,
			    semap->timo);
		}
		return (0);
	}
	semap->holder = curthread;
	if (interlock != NULL)
		FREE_LOCK(interlock);
	return (1);
}

static void
sema_release(semap)
	struct sema *semap;
{

	if (semap->value <= 0 || semap->holder != curthread) {
		if (lk.lkt_held != NOHOLDER)
			FREE_LOCK(&lk);
		panic("sema_release: not held");
	}
	if (--semap->value > 0) {
		semap->value = 0;
		wakeup(semap);
	}
	semap->holder = NOHOLDER;
}

/*
 * Worklist queue management.
 * These routines require that the lock be held.
 */
#ifndef /* NOT */ DEBUG
#define WORKLIST_INSERT(head, item) do {	\
	(item)->wk_state |= ONWORKLIST;		\
	LIST_INSERT_HEAD(head, item, wk_list);	\
} while (0)
#define WORKLIST_REMOVE(item) do {		\
	(item)->wk_state &= ~ONWORKLIST;	\
	LIST_REMOVE(item, wk_list);		\
} while (0)
#define WORKITEM_FREE(item, type) FREE(item, DtoM(type))

#else /* DEBUG */
static void worklist_insert(struct workhead *, struct worklist *);
static void worklist_remove(struct worklist *);
static void workitem_free(struct worklist *, int);

#define WORKLIST_INSERT(head, item) worklist_insert(head, item)
#define WORKLIST_REMOVE(item) worklist_remove(item)
#define WORKITEM_FREE(item, type) workitem_free((struct worklist *)item, type)

static void
worklist_insert(head, item)
	struct workhead *head;
	struct worklist *item;
{

	if (lk.lkt_held == NOHOLDER)
		panic("worklist_insert: lock not held");
	if (item->wk_state & ONWORKLIST) {
		FREE_LOCK(&lk);
		panic("worklist_insert: already on list");
	}
	item->wk_state |= ONWORKLIST;
	LIST_INSERT_HEAD(head, item, wk_list);
}

static void
worklist_remove(item)
	struct worklist *item;
{

	if (lk.lkt_held == NOHOLDER)
		panic("worklist_remove: lock not held");
	if ((item->wk_state & ONWORKLIST) == 0) {
		FREE_LOCK(&lk);
		panic("worklist_remove: not on list");
	}
	item->wk_state &= ~ONWORKLIST;
	LIST_REMOVE(item, wk_list);
}

static void
workitem_free(item, type)
	struct worklist *item;
	int type;
{

	if (item->wk_state & ONWORKLIST) {
		if (lk.lkt_held != NOHOLDER)
			FREE_LOCK(&lk);
		panic("workitem_free: still on list");
	}
	if (item->wk_type != type) {
		if (lk.lkt_held != NOHOLDER)
			FREE_LOCK(&lk);
		panic("workitem_free: type mismatch");
	}
	FREE(item, DtoM(type));
}
#endif /* DEBUG */
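
/*
 * Typical usage (sketch): a work item is attached, under the lock, to
 * the dependency list of the buffer that must be written before the
 * item can proceed, e.g.
 *
 *	ACQUIRE_LOCK(&lk);
 *	WORKLIST_INSERT(&bp->b_dep, &adp->ad_list);
 *	FREE_LOCK(&lk);
 */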

/*
 * Workitem queue management
 */
static struct workhead softdep_workitem_pending;
static int num_on_worklist;	/* number of worklist items to be processed */
static int softdep_worklist_busy; /* 1 => trying to do unmount */
static int softdep_worklist_req; /* serialized waiters */
static int max_softdeps;	/* maximum number of structs before slowdown */
static int tickdelay = 2;	/* number of ticks to pause during slowdown */
static int *stat_countp;	/* statistic to count in proc_waiting timeout */
static int proc_waiting;	/* tracks whether we have a timeout posted */
static struct callout handle;	/* handle on posted proc_waiting timeout */
static struct thread *filesys_syncer; /* proc of filesystem syncer process */
static int req_clear_inodedeps;	/* syncer process flush some inodedeps */
#define FLUSH_INODES	1
static int req_clear_remove;	/* syncer process flush some freeblks */
#define FLUSH_REMOVE	2
/*
 * runtime statistics
 */
static int stat_worklist_push;	/* number of worklist cleanups */
static int stat_blk_limit_push;	/* number of times block limit neared */
static int stat_ino_limit_push;	/* number of times inode limit neared */
static int stat_blk_limit_hit;	/* number of times block slowdown imposed */
static int stat_ino_limit_hit;	/* number of times inode slowdown imposed */
static int stat_sync_limit_hit;	/* number of synchronous slowdowns imposed */
static int stat_indir_blk_ptrs;	/* bufs redirtied as indir ptrs not written */
static int stat_inode_bitmap;	/* bufs redirtied as inode bitmap not written */
static int stat_direct_blk_ptrs;/* bufs redirtied as direct ptrs not written */
static int stat_dir_entry;	/* bufs redirtied as dir entry cannot write */
#ifdef DEBUG
#include <vm/vm.h>
#include <sys/sysctl.h>
SYSCTL_INT(_debug, OID_AUTO, max_softdeps, CTLFLAG_RW, &max_softdeps, 0, "");
SYSCTL_INT(_debug, OID_AUTO, tickdelay, CTLFLAG_RW, &tickdelay, 0, "");
SYSCTL_INT(_debug, OID_AUTO, worklist_push, CTLFLAG_RW, &stat_worklist_push, 0,"");
SYSCTL_INT(_debug, OID_AUTO, blk_limit_push, CTLFLAG_RW, &stat_blk_limit_push, 0,"");
SYSCTL_INT(_debug, OID_AUTO, ino_limit_push, CTLFLAG_RW, &stat_ino_limit_push, 0,"");
SYSCTL_INT(_debug, OID_AUTO, blk_limit_hit, CTLFLAG_RW, &stat_blk_limit_hit, 0, "");
SYSCTL_INT(_debug, OID_AUTO, ino_limit_hit, CTLFLAG_RW, &stat_ino_limit_hit, 0, "");
SYSCTL_INT(_debug, OID_AUTO, sync_limit_hit, CTLFLAG_RW, &stat_sync_limit_hit, 0, "");
SYSCTL_INT(_debug, OID_AUTO, indir_blk_ptrs, CTLFLAG_RW, &stat_indir_blk_ptrs, 0, "");
SYSCTL_INT(_debug, OID_AUTO, inode_bitmap, CTLFLAG_RW, &stat_inode_bitmap, 0, "");
SYSCTL_INT(_debug, OID_AUTO, direct_blk_ptrs, CTLFLAG_RW, &stat_direct_blk_ptrs, 0, "");
SYSCTL_INT(_debug, OID_AUTO, dir_entry, CTLFLAG_RW, &stat_dir_entry, 0, "");
#endif /* DEBUG */

/*
 * Add an item to the end of the work queue.
 * This routine requires that the lock be held.
 * This is the only routine that adds items to the list.
 * The following routine is the only one that removes items
 * and does so in order from first to last.
 */
static void
add_to_worklist(wk)
	struct worklist *wk;
{
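	/*
	 * worklist_tail caches the most recently appended item so that
	 * the common append-at-end case avoids walking the whole list.
	 */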
	static struct worklist *worklist_tail;

	if (wk->wk_state & ONWORKLIST) {
		if (lk.lkt_held != NOHOLDER)
			FREE_LOCK(&lk);
		panic("add_to_worklist: already on list");
	}
	wk->wk_state |= ONWORKLIST;
	if (LIST_FIRST(&softdep_workitem_pending) == NULL)
		LIST_INSERT_HEAD(&softdep_workitem_pending, wk, wk_list);
	else
		LIST_INSERT_AFTER(worklist_tail, wk, wk_list);
	worklist_tail = wk;
	num_on_worklist += 1;
}

/*
 * Process that runs once per second to handle items in the background queue.
 *
 * Note that we ensure that items are processed in the order in which they
 * appear in the queue. The code below depends on this property to ensure
 * that blocks of a file are freed before the inode itself is freed. This
 * ordering ensures that no new <vfsid, inum, lbn> triples will be generated
 * until all the old ones have been purged from the dependency lists.
 */
static int
softdep_process_worklist(matchmnt)
	struct mount *matchmnt;
{
	thread_t td = curthread;
	int matchcnt, loopcount;
	long starttime;

	/*
	 * Record the process identifier of our caller so that we can give
	 * this process preferential treatment in request_cleanup below.
	 */
	filesys_syncer = td;
	matchcnt = 0;

	/*
	 * There is no danger of having multiple processes run this
	 * code, but we have to single-thread it when softdep_flushfiles()
	 * is in operation to get an accurate count of the number of items
	 * related to its mount point that are in the list.
	 */
	if (matchmnt == NULL) {
		if (softdep_worklist_busy < 0)
			return(-1);
		softdep_worklist_busy += 1;
	}

	/*
	 * If requested, try removing inode or removal dependencies.
	 */
	if (req_clear_inodedeps) {
		clear_inodedeps(td);
		req_clear_inodedeps -= 1;
		wakeup_one(&proc_waiting);
	}
	if (req_clear_remove) {
		clear_remove(td);
		req_clear_remove -= 1;
		wakeup_one(&proc_waiting);
	}
	loopcount = 1;
	starttime = time_second;
	while (num_on_worklist > 0) {
		matchcnt += process_worklist_item(matchmnt, 0);

		/*
		 * If a umount operation wants to run the worklist
		 * accurately, abort.
		 */
		if (softdep_worklist_req && matchmnt == NULL) {
			matchcnt = -1;
			break;
		}

		/*
		 * If requested, try removing inode or removal dependencies.
		 */
		if (req_clear_inodedeps) {
			clear_inodedeps(td);
			req_clear_inodedeps -= 1;
			wakeup_one(&proc_waiting);
		}
		if (req_clear_remove) {
			clear_remove(td);
			req_clear_remove -= 1;
			wakeup_one(&proc_waiting);
		}
		/*
		 * We do not generally want to stop for buffer space, but if
		 * we are really being a buffer hog, we will stop and wait.
		 */
		if (loopcount++ % 128 == 0)
			bwillwrite();
		/*
		 * Never allow processing to run for more than one
		 * second. Otherwise the other syncer tasks may get
		 * excessively backlogged.
		 */
		if (starttime != time_second && matchmnt == NULL) {
			matchcnt = -1;
			break;
		}
	}
	if (matchmnt == NULL) {
		--softdep_worklist_busy;
		if (softdep_worklist_req && softdep_worklist_busy == 0)
			wakeup(&softdep_worklist_req);
	}
	return (matchcnt);
}

/*
 * Process one item on the worklist.
 */
static int
process_worklist_item(matchmnt, flags)
	struct mount *matchmnt;
	int flags;
{
	struct worklist *wk;
	struct dirrem *dirrem;
	struct fs *matchfs;
	struct vnode *vp;
	int matchcnt = 0;

	matchfs = NULL;
	if (matchmnt != NULL)
		matchfs = VFSTOUFS(matchmnt)->um_fs;
	ACQUIRE_LOCK(&lk);
	/*
	 * Normally we just process each item on the worklist in order.
	 * However, if we are in a situation where we cannot lock any
	 * inodes, we have to skip over any dirrem requests whose
	 * vnodes are resident and locked.
	 */
	LIST_FOREACH(wk, &softdep_workitem_pending, wk_list) {
		if ((flags & LK_NOWAIT) == 0 || wk->wk_type != D_DIRREM)
			break;
		dirrem = WK_DIRREM(wk);
		vp = ufs_ihashlookup(VFSTOUFS(dirrem->dm_mnt)->um_dev,
		    dirrem->dm_oldinum);
		if (vp == NULL || !VOP_ISLOCKED(vp, curthread))
			break;
	}
	if (wk == 0) {
		FREE_LOCK(&lk);
		return (0);
	}
	WORKLIST_REMOVE(wk);
	num_on_worklist -= 1;
	FREE_LOCK(&lk);
	switch (wk->wk_type) {

	case D_DIRREM:
		/* removal of a directory entry */
		if (WK_DIRREM(wk)->dm_mnt == matchmnt)
			matchcnt += 1;
		handle_workitem_remove(WK_DIRREM(wk));
		break;

	case D_FREEBLKS:
		/* releasing blocks and/or fragments from a file */
		if (WK_FREEBLKS(wk)->fb_fs == matchfs)
			matchcnt += 1;
		handle_workitem_freeblocks(WK_FREEBLKS(wk));
		break;

	case D_FREEFRAG:
		/* releasing a fragment when replaced as a file grows */
		if (WK_FREEFRAG(wk)->ff_fs == matchfs)
			matchcnt += 1;
		handle_workitem_freefrag(WK_FREEFRAG(wk));
		break;

	case D_FREEFILE:
		/* releasing an inode when its link count drops to 0 */
		if (WK_FREEFILE(wk)->fx_fs == matchfs)
			matchcnt += 1;
		handle_workitem_freefile(WK_FREEFILE(wk));
		break;

	default:
		panic("%s_process_worklist: Unknown type %s",
		    "softdep", TYPENAME(wk->wk_type));
		/* NOTREACHED */
	}
	return (matchcnt);
}

/*
 * Move dependencies from one buffer to another.
 */
static void
softdep_move_dependencies(oldbp, newbp)
	struct buf *oldbp;
	struct buf *newbp;
{
	struct worklist *wk, *wktail;

	if (LIST_FIRST(&newbp->b_dep) != NULL)
		panic("softdep_move_dependencies: need merge code");
	wktail = 0;
	ACQUIRE_LOCK(&lk);
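	/*
	 * Transfer the items one at a time, appending each after the
	 * previous one so their original ordering is preserved.
	 */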
	while ((wk = LIST_FIRST(&oldbp->b_dep)) != NULL) {
		LIST_REMOVE(wk, wk_list);
		if (wktail == 0)
			LIST_INSERT_HEAD(&newbp->b_dep, wk, wk_list);
		else
			LIST_INSERT_AFTER(wktail, wk, wk_list);
		wktail = wk;
	}
	FREE_LOCK(&lk);
}

/*
 * Purge the work list of all items associated with a particular mount point.
 */
int
softdep_flushfiles(struct mount *oldmnt, int flags, struct thread *td)
{
	struct vnode *devvp;
	int error, loopcnt;

	/*
	 * Await our turn to clear out the queue, then serialize access.
	 */
	while (softdep_worklist_busy != 0) {
		softdep_worklist_req += 1;
		tsleep(&softdep_worklist_req, 0, "softflush", 0);
		softdep_worklist_req -= 1;
	}
	softdep_worklist_busy = -1;

	if ((error = ffs_flushfiles(oldmnt, flags, td)) != 0) {
		softdep_worklist_busy = 0;
		if (softdep_worklist_req)
			wakeup(&softdep_worklist_req);
		return (error);
	}
	/*
	 * Alternately flush the block device associated with the mount
	 * point and process any dependencies that the flushing
	 * creates. In theory, this loop can happen at most twice,
	 * but we give it a few extra just to be sure.
	 */
	devvp = VFSTOUFS(oldmnt)->um_devvp;
	for (loopcnt = 10; loopcnt > 0; ) {
		if (softdep_process_worklist(oldmnt) == 0) {
			loopcnt--;
			/*
			 * Do another flush in case any vnodes were brought in
			 * as part of the cleanup operations.
			 */
			if ((error = ffs_flushfiles(oldmnt, flags, td)) != 0)
				break;
			/*
			 * If we still found nothing to do, we are really done.
			 */
			if (softdep_process_worklist(oldmnt) == 0)
				break;
		}
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
		error = VOP_FSYNC(devvp, MNT_WAIT, td);
		VOP_UNLOCK(devvp, 0, td);
		if (error)
			break;
	}
	softdep_worklist_busy = 0;
	if (softdep_worklist_req)
		wakeup(&softdep_worklist_req);

	/*
	 * If we are unmounting then it is an error to fail. If we
	 * are simply trying to downgrade to read-only, then filesystem
	 * activity can keep us busy forever, so we just fail with EBUSY.
	 */
	if (loopcnt == 0) {
		if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT)
			panic("softdep_flushfiles: looping");
		error = EBUSY;
	}
	return (error);
}

/*
 * Structure hashing.
 *
 * There are three types of structures that can be looked up:
 *	1) pagedep structures identified by mount point, inode number,
 *	   and logical block.
 *	2) inodedep structures identified by mount point and inode number.
 *	3) newblk structures identified by mount point and
 *	   physical block number.
 *
 * The "pagedep" and "inodedep" dependency structures are hashed
 * separately from the file blocks and inodes to which they correspond.
 * This separation helps when the in-memory copy of an inode or
 * file block must be replaced. It also obviates the need to access
 * an inode or file page when simply updating (or de-allocating)
 * dependency structures. Lookup of newblk structures is needed to
 * find newly allocated blocks when trying to associate them with
 * their allocdirect or allocindir structure.
 *
 * The lookup routines optionally create and hash a new instance when
 * an existing entry is not found.
 */
#define DEPALLOC	0x0001	/* allocate structure if lookup fails */
#define NODELAY		0x0002	/* cannot do background work */
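
/*
 * Typical lookup pattern (sketch): callers hold the softdep lock and
 * pass DEPALLOC when the entry should be created on a miss, e.g.
 *
 *	ACQUIRE_LOCK(&lk);
 *	(void) inodedep_lookup(ip->i_fs, ip->i_number, DEPALLOC, &inodedep);
 *	...
 *	FREE_LOCK(&lk);
 */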

/*
 * Structures and routines associated with pagedep caching.
 */
LIST_HEAD(pagedep_hashhead, pagedep) *pagedep_hashtbl;
u_long	pagedep_hash;		/* size of hash table - 1 */
#define	PAGEDEP_HASH(mp, inum, lbn) \
	(&pagedep_hashtbl[((((register_t)(mp)) >> 13) + (inum) + (lbn)) & \
	    pagedep_hash])
static struct sema pagedep_in_progress;

/*
 * Look up a pagedep. Return 1 if found, 0 if not found.
 * If not found, allocate if DEPALLOC flag is passed.
 * Found or allocated entry is returned in pagedeppp.
 * This routine must be called with splbio interrupts blocked.
 */
static int
pagedep_lookup(ip, lbn, flags, pagedeppp)
	struct inode *ip;
	ufs_lbn_t lbn;
	int flags;
	struct pagedep **pagedeppp;
{
	struct pagedep *pagedep;
	struct pagedep_hashhead *pagedephd;
	struct mount *mp;
	int i;

#ifdef DEBUG
	if (lk.lkt_held == NOHOLDER)
		panic("pagedep_lookup: lock not held");
#endif
	mp = ITOV(ip)->v_mount;
	pagedephd = PAGEDEP_HASH(mp, ip->i_number, lbn);
top:
	LIST_FOREACH(pagedep, pagedephd, pd_hash)
		if (ip->i_number == pagedep->pd_ino &&
		    lbn == pagedep->pd_lbn &&
		    mp == pagedep->pd_mnt)
			break;
	if (pagedep) {
		*pagedeppp = pagedep;
		return (1);
	}
	if ((flags & DEPALLOC) == 0) {
		*pagedeppp = NULL;
		return (0);
	}
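	/*
	 * Allocation must be done without the lock held; the semaphore
	 * single-threads allocators of this hash. If sema_get() returns
	 * 0 we slept, so another thread may have installed the entry in
	 * the meantime: retake the lock and rescan.
	 */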
	if (sema_get(&pagedep_in_progress, &lk) == 0) {
		ACQUIRE_LOCK(&lk);
		goto top;
	}
	MALLOC(pagedep, struct pagedep *, sizeof(struct pagedep), M_PAGEDEP,
	    M_SOFTDEP_FLAGS);
	bzero(pagedep, sizeof(struct pagedep));
	pagedep->pd_list.wk_type = D_PAGEDEP;
	pagedep->pd_mnt = mp;
	pagedep->pd_ino = ip->i_number;
	pagedep->pd_lbn = lbn;
	LIST_INIT(&pagedep->pd_dirremhd);
	LIST_INIT(&pagedep->pd_pendinghd);
	for (i = 0; i < DAHASHSZ; i++)
		LIST_INIT(&pagedep->pd_diraddhd[i]);
	ACQUIRE_LOCK(&lk);
	LIST_INSERT_HEAD(pagedephd, pagedep, pd_hash);
	sema_release(&pagedep_in_progress);
	*pagedeppp = pagedep;
	return (0);
}

/*
 * Structures and routines associated with inodedep caching.
 */
LIST_HEAD(inodedep_hashhead, inodedep) *inodedep_hashtbl;
static u_long	inodedep_hash;	/* size of hash table - 1 */
static long	num_inodedep;	/* number of inodedep allocated */
#define	INODEDEP_HASH(fs, inum) \
	(&inodedep_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & inodedep_hash])
static struct sema inodedep_in_progress;

/*
 * Look up an inodedep. Return 1 if found, 0 if not found.
 * If not found, allocate if DEPALLOC flag is passed.
 * Found or allocated entry is returned in inodedeppp.
 * This routine must be called with splbio interrupts blocked.
 */
static int
inodedep_lookup(fs, inum, flags, inodedeppp)
	struct fs *fs;
	ino_t inum;
	int flags;
	struct inodedep **inodedeppp;
{
	struct inodedep *inodedep;
	struct inodedep_hashhead *inodedephd;
	int firsttry;

#ifdef DEBUG
	if (lk.lkt_held == NOHOLDER)
		panic("inodedep_lookup: lock not held");
#endif
	firsttry = 1;
	inodedephd = INODEDEP_HASH(fs, inum);
top:
	LIST_FOREACH(inodedep, inodedephd, id_hash)
		if (inum == inodedep->id_ino && fs == inodedep->id_fs)
			break;
	if (inodedep) {
		*inodedeppp = inodedep;
		return (1);
	}
	if ((flags & DEPALLOC) == 0) {
		*inodedeppp = NULL;
		return (0);
	}
	/*
	 * If we are over our limit, try to improve the situation.
	 */
	if (num_inodedep > max_softdeps && firsttry &&
	    speedup_syncer() == 0 && (flags & NODELAY) == 0 &&
	    request_cleanup(FLUSH_INODES, 1)) {
		firsttry = 0;
		goto top;
	}
	if (sema_get(&inodedep_in_progress, &lk) == 0) {
		ACQUIRE_LOCK(&lk);
		goto top;
	}
	num_inodedep += 1;
	MALLOC(inodedep, struct inodedep *, sizeof(struct inodedep),
	    M_INODEDEP, M_SOFTDEP_FLAGS);
	inodedep->id_list.wk_type = D_INODEDEP;
	inodedep->id_fs = fs;
	inodedep->id_ino = inum;
	inodedep->id_state = ALLCOMPLETE;
	inodedep->id_nlinkdelta = 0;
	inodedep->id_savedino = NULL;
	inodedep->id_savedsize = -1;
	inodedep->id_buf = NULL;
	LIST_INIT(&inodedep->id_pendinghd);
	LIST_INIT(&inodedep->id_inowait);
	LIST_INIT(&inodedep->id_bufwait);
	TAILQ_INIT(&inodedep->id_inoupdt);
	TAILQ_INIT(&inodedep->id_newinoupdt);
	ACQUIRE_LOCK(&lk);
	LIST_INSERT_HEAD(inodedephd, inodedep, id_hash);
	sema_release(&inodedep_in_progress);
	*inodedeppp = inodedep;
	return (0);
}

/*
 * Structures and routines associated with newblk caching.
 */
LIST_HEAD(newblk_hashhead, newblk) *newblk_hashtbl;
u_long	newblk_hash;		/* size of hash table - 1 */
#define	NEWBLK_HASH(fs, inum) \
	(&newblk_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & newblk_hash])
static struct sema newblk_in_progress;

/*
 * Look up a newblk. Return 1 if found, 0 if not found.
 * If not found, allocate if DEPALLOC flag is passed.
 * Found or allocated entry is returned in newblkpp.
 */
static int
newblk_lookup(fs, newblkno, flags, newblkpp)
	struct fs *fs;
	ufs_daddr_t newblkno;
	int flags;
	struct newblk **newblkpp;
{
	struct newblk *newblk;
	struct newblk_hashhead *newblkhd;

	newblkhd = NEWBLK_HASH(fs, newblkno);
top:
	LIST_FOREACH(newblk, newblkhd, nb_hash)
		if (newblkno == newblk->nb_newblkno && fs == newblk->nb_fs)
			break;
	if (newblk) {
		*newblkpp = newblk;
		return (1);
	}
	if ((flags & DEPALLOC) == 0) {
		*newblkpp = NULL;
		return (0);
	}
	if (sema_get(&newblk_in_progress, 0) == 0)
		goto top;
	MALLOC(newblk, struct newblk *, sizeof(struct newblk),
	    M_NEWBLK, M_SOFTDEP_FLAGS);
	newblk->nb_state = 0;
	newblk->nb_fs = fs;
	newblk->nb_newblkno = newblkno;
	LIST_INSERT_HEAD(newblkhd, newblk, nb_hash);
	sema_release(&newblk_in_progress);
	*newblkpp = newblk;
	return (0);
}

/*
 * Executed during filesystem initialization, before mounting any
 * filesystems.
 */
void
softdep_initialize()
{
	callout_init(&handle);
	bioops = softdep_bioops;	/* XXX hack */

	LIST_INIT(&mkdirlisthd);
	LIST_INIT(&softdep_workitem_pending);
	max_softdeps = min(desiredvnodes * 8,
	    M_INODEDEP->ks_limit / (2 * sizeof(struct inodedep)));
	pagedep_hashtbl = hashinit(desiredvnodes / 5, M_PAGEDEP,
	    &pagedep_hash);
	sema_init(&pagedep_in_progress, "pagedep", 0, 0);
	inodedep_hashtbl = hashinit(desiredvnodes, M_INODEDEP, &inodedep_hash);
	sema_init(&inodedep_in_progress, "inodedep", 0, 0);
	newblk_hashtbl = hashinit(64, M_NEWBLK, &newblk_hash);
	sema_init(&newblk_in_progress, "newblk", 0, 0);
}

/*
 * Called at mount time to notify the dependency code that a
 * filesystem wishes to use it.
 */
int
softdep_mount(devvp, mp, fs)
	struct vnode *devvp;
	struct mount *mp;
	struct fs *fs;
{
	struct csum cstotal;
	struct cg *cgp;
	struct buf *bp;
	int error, cyl;

	mp->mnt_flag &= ~MNT_ASYNC;
	mp->mnt_flag |= MNT_SOFTDEP;
	/*
	 * When doing soft updates, the counters in the
	 * superblock may have gotten out of sync, so we have
	 * to scan the cylinder groups and recalculate them.
	 */
	if (fs->fs_clean != 0)
		return (0);
	bzero(&cstotal, sizeof cstotal);
	for (cyl = 0; cyl < fs->fs_ncg; cyl++) {
		if ((error = bread(devvp, fsbtodb(fs, cgtod(fs, cyl)),
		    fs->fs_cgsize, &bp)) != 0) {
			brelse(bp);
			return (error);
		}
		cgp = (struct cg *)bp->b_data;
		cstotal.cs_nffree += cgp->cg_cs.cs_nffree;
		cstotal.cs_nbfree += cgp->cg_cs.cs_nbfree;
		cstotal.cs_nifree += cgp->cg_cs.cs_nifree;
		cstotal.cs_ndir += cgp->cg_cs.cs_ndir;
		fs->fs_cs(fs, cyl) = cgp->cg_cs;
		brelse(bp);
	}
#ifdef DEBUG
	if (bcmp(&cstotal, &fs->fs_cstotal, sizeof cstotal))
		printf("ffs_mountfs: superblock updated for soft updates\n");
#endif
	bcopy(&cstotal, &fs->fs_cstotal, sizeof cstotal);
	return (0);
}

/*
 * Protecting the freemaps (or bitmaps).
 *
 * To eliminate the need to execute fsck before mounting a filesystem
 * after a power failure, one must (conservatively) guarantee that the
 * on-disk copy of the bitmaps never indicates that a live inode or block is
 * free. So, when a block or inode is allocated, the bitmap should be
 * updated (on disk) before any new pointers. When a block or inode is
 * freed, the bitmap should not be updated until all pointers have been
 * reset. The latter dependency is handled by the delayed de-allocation
 * approach described below for block and inode de-allocation. The former
 * dependency is handled by calling the following procedure when a block or
 * inode is allocated. When an inode is allocated an "inodedep" is created
 * with its DEPCOMPLETE flag cleared until its bitmap is written to disk.
 * Each "inodedep" is also inserted into the hash indexing structure so
 * that any additional link additions can be made dependent on the inode
 * allocation.
 *
 * The ufs filesystem maintains a number of free block counts (e.g., per
 * cylinder group, per cylinder and per <cylinder, rotational position> pair)
 * in addition to the bitmaps. These counts are used to improve efficiency
 * during allocation and therefore must be consistent with the bitmaps.
 * There is no convenient way to guarantee post-crash consistency of these
 * counts with simple update ordering, for two main reasons: (1) The counts
 * and bitmaps for a single cylinder group block are not in the same disk
 * sector. If a disk write is interrupted (e.g., by power failure), one may
 * be written and the other not. (2) Some of the counts are located in the
 * superblock rather than the cylinder group block. So, we focus our soft
 * updates implementation on protecting the bitmaps. When mounting a
 * filesystem, we recompute the auxiliary counts from the bitmaps.
 */

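/*
 * Ordering sketch (illustrative): for an allocation, the cylinder
 * group bitmap buffer must reach the disk before any inode or
 * indirect block that points at the newly allocated item. The
 * DEPCOMPLETE flag, cleared below until that bitmap write completes,
 * is what tracks this ordering.
 */
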
/*
 * Called just after updating the cylinder group block to allocate an inode.
 */
void
softdep_setup_inomapdep(bp, ip, newinum)
	struct buf *bp;		/* buffer for cylgroup block with inode map */
	struct inode *ip;	/* inode related to allocation */
	ino_t newinum;		/* new inode number being allocated */
{
	struct inodedep *inodedep;
	struct bmsafemap *bmsafemap;

	/*
	 * Create a dependency for the newly allocated inode.
	 * Panic if it already exists, as something is seriously wrong.
	 * Otherwise add it to the dependency list for the buffer holding
	 * the cylinder group map from which it was allocated.
	 */
	ACQUIRE_LOCK(&lk);
	if ((inodedep_lookup(ip->i_fs, newinum, DEPALLOC|NODELAY, &inodedep))) {
		FREE_LOCK(&lk);
		panic("softdep_setup_inomapdep: found inode");
	}
	inodedep->id_buf = bp;
	inodedep->id_state &= ~DEPCOMPLETE;
	bmsafemap = bmsafemap_lookup(bp);
	LIST_INSERT_HEAD(&bmsafemap->sm_inodedephd, inodedep, id_deps);
	FREE_LOCK(&lk);
}

/*
 * Called just after updating the cylinder group block to
 * allocate a block or fragment.
 */
void
softdep_setup_blkmapdep(bp, fs, newblkno)
	struct buf *bp;		/* buffer for cylgroup block with block map */
	struct fs *fs;		/* filesystem doing allocation */
	ufs_daddr_t newblkno;	/* number of newly allocated block */
{
	struct newblk *newblk;
	struct bmsafemap *bmsafemap;

	/*
	 * Create a dependency for the newly allocated block.
	 * Add it to the dependency list for the buffer holding
	 * the cylinder group map from which it was allocated.
	 */
	if (newblk_lookup(fs, newblkno, DEPALLOC, &newblk) != 0)
		panic("softdep_setup_blkmapdep: found block");
	ACQUIRE_LOCK(&lk);
	newblk->nb_bmsafemap = bmsafemap = bmsafemap_lookup(bp);
	LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, nb_deps);
	FREE_LOCK(&lk);
}

/*
 * Find the bmsafemap associated with a cylinder group buffer.
 * If none exists, create one. The buffer must be locked when
 * this routine is called and this routine must be called with
 * splbio interrupts blocked.
 */
static struct bmsafemap *
bmsafemap_lookup(bp)
	struct buf *bp;
{
	struct bmsafemap *bmsafemap;
	struct worklist *wk;

#ifdef DEBUG
	if (lk.lkt_held == NOHOLDER)
		panic("bmsafemap_lookup: lock not held");
#endif
	LIST_FOREACH(wk, &bp->b_dep, wk_list)
		if (wk->wk_type == D_BMSAFEMAP)
			return (WK_BMSAFEMAP(wk));
	FREE_LOCK(&lk);
	MALLOC(bmsafemap, struct bmsafemap *, sizeof(struct bmsafemap),
	    M_BMSAFEMAP, M_SOFTDEP_FLAGS);
	bmsafemap->sm_list.wk_type = D_BMSAFEMAP;
	bmsafemap->sm_list.wk_state = 0;
	bmsafemap->sm_buf = bp;
	LIST_INIT(&bmsafemap->sm_allocdirecthd);
	LIST_INIT(&bmsafemap->sm_allocindirhd);
	LIST_INIT(&bmsafemap->sm_inodedephd);
	LIST_INIT(&bmsafemap->sm_newblkhd);
	ACQUIRE_LOCK(&lk);
	WORKLIST_INSERT(&bp->b_dep, &bmsafemap->sm_list);
	return (bmsafemap);
}

/*
 * Direct block allocation dependencies.
 *
 * When a new block is allocated, the corresponding disk locations must be
 * initialized (with zeros or new data) before the on-disk inode points to
 * them. Also, the freemap from which the block was allocated must be
 * updated (on disk) before the inode's pointer. These two dependencies are
 * independent of each other and are needed for all file blocks and indirect
 * blocks that are pointed to directly by the inode. Just before the
 * "in-core" version of the inode is updated with a newly allocated block
 * number, a procedure (below) is called to setup allocation dependency
 * structures. These structures are removed when the corresponding
 * dependencies are satisfied or when the block allocation becomes obsolete
 * (i.e., the file is deleted, the block is de-allocated, or the block is a
 * fragment that gets upgraded). All of these cases are handled in
 * procedures described later.
 *
 * When a file extension causes a fragment to be upgraded, either to a larger
 * fragment or to a full block, the on-disk location may change (if the
 * previous fragment could not simply be extended). In this case, the old
 * fragment must be de-allocated, but not until after the inode's pointer has
 * been updated. In most cases, this is handled by later procedures, which
 * will construct a "freefrag" structure to be added to the workitem queue
 * when the inode update is complete (or obsolete). The main exception to
 * this is when an allocation occurs while a pending allocation dependency
 * (for the same block pointer) remains. This case is handled in the main
 * allocation dependency setup procedure by immediately freeing the
 * unreferenced fragments.
 */
void
softdep_setup_allocdirect(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp)
	struct inode *ip;	/* inode to which block is being added */
	ufs_lbn_t lbn;		/* block pointer within inode */
	ufs_daddr_t newblkno;	/* disk block number being added */
	ufs_daddr_t oldblkno;	/* previous block number, 0 unless frag */
	long newsize;		/* size of new block */
	long oldsize;		/* size of old block */
	struct buf *bp;		/* bp for allocated block */
{
	struct allocdirect *adp, *oldadp;
	struct allocdirectlst *adphead;
	struct bmsafemap *bmsafemap;
	struct inodedep *inodedep;
	struct pagedep *pagedep;
	struct newblk *newblk;

	MALLOC(adp, struct allocdirect *, sizeof(struct allocdirect),
	    M_ALLOCDIRECT, M_SOFTDEP_FLAGS);
	bzero(adp, sizeof(struct allocdirect));
	adp->ad_list.wk_type = D_ALLOCDIRECT;
	adp->ad_lbn = lbn;
	adp->ad_newblkno = newblkno;
	adp->ad_oldblkno = oldblkno;
	adp->ad_newsize = newsize;
	adp->ad_oldsize = oldsize;
	adp->ad_state = ATTACHED;
	if (newblkno == oldblkno)
		adp->ad_freefrag = NULL;
	else
		adp->ad_freefrag = newfreefrag(ip, oldblkno, oldsize);

	if (newblk_lookup(ip->i_fs, newblkno, 0, &newblk) == 0)
		panic("softdep_setup_allocdirect: lost block");

	ACQUIRE_LOCK(&lk);
	inodedep_lookup(ip->i_fs, ip->i_number, DEPALLOC | NODELAY, &inodedep);
	adp->ad_inodedep = inodedep;

	if (newblk->nb_state == DEPCOMPLETE) {
		adp->ad_state |= DEPCOMPLETE;
		adp->ad_buf = NULL;
	} else {
		bmsafemap = newblk->nb_bmsafemap;
		adp->ad_buf = bmsafemap->sm_buf;
		LIST_REMOVE(newblk, nb_deps);
		LIST_INSERT_HEAD(&bmsafemap->sm_allocdirecthd, adp, ad_deps);
	}
	LIST_REMOVE(newblk, nb_hash);
	FREE(newblk, M_NEWBLK);

	WORKLIST_INSERT(&bp->b_dep, &adp->ad_list);
	if (lbn >= NDADDR) {
		/* allocating an indirect block */
		if (oldblkno != 0) {
			FREE_LOCK(&lk);
			panic("softdep_setup_allocdirect: non-zero indir");
		}
	} else {
		/*
		 * Allocating a direct block.
		 *
		 * If we are allocating a directory block, then we must
		 * allocate an associated pagedep to track additions and
		 * deletions.
		 */
		if ((ip->i_mode & IFMT) == IFDIR &&
		    pagedep_lookup(ip, lbn, DEPALLOC, &pagedep) == 0)
			WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list);
	}
	/*
	 * The list of allocdirects must be kept in sorted and ascending
	 * order so that the rollback routines can quickly determine the
	 * first uncommitted block (the size of the file stored on disk
	 * ends at the end of the lowest committed fragment, or if there
	 * are no fragments, at the end of the highest committed block).
	 * Since files generally grow, the typical case is that the new
	 * block is to be added at the end of the list. We speed this
	 * special case by checking against the last allocdirect in the
	 * list before laboriously traversing the list looking for the
	 * insertion point.
	 */
	adphead = &inodedep->id_newinoupdt;
	oldadp = TAILQ_LAST(adphead, allocdirectlst);
	if (oldadp == NULL || oldadp->ad_lbn <= lbn) {
		/* insert at end of list */
		TAILQ_INSERT_TAIL(adphead, adp, ad_next);
		if (oldadp != NULL && oldadp->ad_lbn == lbn)
			allocdirect_merge(adphead, adp, oldadp);
		FREE_LOCK(&lk);
		return;
	}
	TAILQ_FOREACH(oldadp, adphead, ad_next) {
		if (oldadp->ad_lbn >= lbn)
			break;
	}
	if (oldadp == NULL) {
		FREE_LOCK(&lk);
		panic("softdep_setup_allocdirect: lost entry");
	}
	/* insert in middle of list */
	TAILQ_INSERT_BEFORE(oldadp, adp, ad_next);
	if (oldadp->ad_lbn == lbn)
		allocdirect_merge(adphead, adp, oldadp);
	FREE_LOCK(&lk);
}

/*
 * Replace an old allocdirect dependency with a newer one.
 * This routine must be called with splbio interrupts blocked.
 */
static void
allocdirect_merge(adphead, newadp, oldadp)
	struct allocdirectlst *adphead;	/* head of list holding allocdirects */
	struct allocdirect *newadp;	/* allocdirect being added */
	struct allocdirect *oldadp;	/* existing allocdirect being checked */
{
	struct freefrag *freefrag;

#ifdef DEBUG
	if (lk.lkt_held == NOHOLDER)
		panic("allocdirect_merge: lock not held");
#endif
	if (newadp->ad_oldblkno != oldadp->ad_newblkno ||
	    newadp->ad_oldsize != oldadp->ad_newsize ||
	    newadp->ad_lbn >= NDADDR) {
		FREE_LOCK(&lk);
		panic("allocdirect_check: old %d != new %d || lbn %ld >= %d",
		    newadp->ad_oldblkno, oldadp->ad_newblkno, newadp->ad_lbn,
		    NDADDR);
	}
	newadp->ad_oldblkno = oldadp->ad_oldblkno;
	newadp->ad_oldsize = oldadp->ad_oldsize;
	/*
	 * If the old dependency had a fragment to free or had never
	 * previously had a block allocated, then the new dependency
	 * can immediately post its freefrag and adopt the old freefrag.
	 * This action is done by swapping the freefrag dependencies.
	 * The new dependency gains the old one's freefrag, and the
	 * old one gets the new one and then immediately puts it on
	 * the worklist when it is freed by free_allocdirect. It is
	 * not possible to do this swap when the old dependency had a
	 * non-zero size but no previous fragment to free. This condition
	 * arises when the new block is an extension of the old block.
	 * Here, the first part of the fragment allocated to the new
	 * dependency is part of the block currently claimed on disk by
	 * the old dependency, so cannot legitimately be freed until the
	 * conditions for the new dependency are fulfilled.
	 */
	if (oldadp->ad_freefrag != NULL || oldadp->ad_oldblkno == 0) {
		freefrag = newadp->ad_freefrag;
		newadp->ad_freefrag = oldadp->ad_freefrag;
		oldadp->ad_freefrag = freefrag;
	}
	free_allocdirect(adphead, oldadp, 0);
}

/*
 * Allocate a new freefrag structure if needed.
 */
static struct freefrag *
newfreefrag(ip, blkno, size)
	struct inode *ip;
	ufs_daddr_t blkno;
	long size;
{
	struct freefrag *freefrag;
	struct fs *fs;

	if (blkno == 0)
		return (NULL);
	fs = ip->i_fs;
	if (fragnum(fs, blkno) + numfrags(fs, size) > fs->fs_frag)
		panic("newfreefrag: frag size");
	MALLOC(freefrag, struct freefrag *, sizeof(struct freefrag),
	    M_FREEFRAG, M_SOFTDEP_FLAGS);
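	/*
	 * The inode's uid is smuggled through ff_state (minus the
	 * ONWORKLIST bit) and recovered by handle_workitem_freefrag()
	 * below when the fragment is finally released.
	 */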
	freefrag->ff_list.wk_type = D_FREEFRAG;
	freefrag->ff_state = ip->i_uid & ~ONWORKLIST;	/* XXX - used below */
	freefrag->ff_inum = ip->i_number;
	freefrag->ff_fs = fs;
	freefrag->ff_devvp = ip->i_devvp;
	freefrag->ff_blkno = blkno;
	freefrag->ff_fragsize = size;
	return (freefrag);
}

/*
 * This workitem de-allocates fragments that were replaced during
 * file block allocation.
 */
static void
handle_workitem_freefrag(freefrag)
	struct freefrag *freefrag;
{
	struct inode tip;

	tip.i_fs = freefrag->ff_fs;
	tip.i_devvp = freefrag->ff_devvp;
	tip.i_dev = freefrag->ff_devvp->v_rdev;
	tip.i_number = freefrag->ff_inum;
	tip.i_uid = freefrag->ff_state & ~ONWORKLIST;	/* XXX - set above */
	ffs_blkfree(&tip, freefrag->ff_blkno, freefrag->ff_fragsize);
	FREE(freefrag, M_FREEFRAG);
}

/*
 * Indirect block allocation dependencies.
 *
 * The same dependencies that exist for a direct block also exist when
 * a new block is allocated and pointed to by an entry in a block of
 * indirect pointers. The undo/redo states described above are also
 * used here. Because an indirect block contains many pointers that
 * may have dependencies, a second copy of the entire in-memory indirect
 * block is kept. The buffer cache copy is always completely up-to-date.
 * The second copy, which is used only as a source for disk writes,
 * contains only the safe pointers (i.e., those that have no remaining
 * update dependencies). The second copy is freed when all pointers
 * are safe. The cache is not allowed to replace indirect blocks with
 * pending update dependencies. If a buffer containing an indirect
 * block with dependencies is written, these routines will mark it
 * dirty again. It can only be successfully written once all the
 * dependencies are removed. The ffs_fsync routine and
 * softdep_sync_metadata work together to get all the dependencies
 * removed so that a file can be successfully written to disk. Three
 * procedures are used when setting up indirect block pointer
 * dependencies. The division is necessary because of the organization
 * of the "balloc" routine and because of the distinction between file
 * pages and file metadata blocks.
 */

/*
 * Allocate a new allocindir structure.
 */
static struct allocindir *
newallocindir(ip, ptrno, newblkno, oldblkno)
	struct inode *ip;	/* inode for file being extended */
	int ptrno;		/* offset of pointer in indirect block */
	ufs_daddr_t newblkno;	/* disk block number being added */
	ufs_daddr_t oldblkno;	/* previous block number, 0 if none */
{
	struct allocindir *aip;

	MALLOC(aip, struct allocindir *, sizeof(struct allocindir),
	    M_ALLOCINDIR, M_SOFTDEP_FLAGS);
	bzero(aip, sizeof(struct allocindir));
	aip->ai_list.wk_type = D_ALLOCINDIR;
	aip->ai_state = ATTACHED;
	aip->ai_offset = ptrno;
	aip->ai_newblkno = newblkno;
	aip->ai_oldblkno = oldblkno;
	aip->ai_freefrag = newfreefrag(ip, oldblkno, ip->i_fs->fs_bsize);
	return (aip);
}

/*
 * Called just before setting an indirect block pointer
 * to a newly allocated file page.
 */
void
softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp)
	struct inode *ip;	/* inode for file being extended */
	ufs_lbn_t lbn;		/* allocated block number within file */
	struct buf *bp;		/* buffer with indirect blk referencing page */
	int ptrno;		/* offset of pointer in indirect block */
	ufs_daddr_t newblkno;	/* disk block number being added */
	ufs_daddr_t oldblkno;	/* previous block number, 0 if none */
	struct buf *nbp;	/* buffer holding allocated page */
{
	struct allocindir *aip;
	struct pagedep *pagedep;

	aip = newallocindir(ip, ptrno, newblkno, oldblkno);
	ACQUIRE_LOCK(&lk);
	/*
	 * If we are allocating a directory page, then we must
	 * allocate an associated pagedep to track additions and
	 * deletions.
	 */
	if ((ip->i_mode & IFMT) == IFDIR &&
	    pagedep_lookup(ip, lbn, DEPALLOC, &pagedep) == 0)
		WORKLIST_INSERT(&nbp->b_dep, &pagedep->pd_list);
	WORKLIST_INSERT(&nbp->b_dep, &aip->ai_list);
	FREE_LOCK(&lk);
	setup_allocindir_phase2(bp, ip, aip);
}

/*
 * Called just before setting an indirect block pointer to a
 * newly allocated indirect block.
 */
void
softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno)
	struct buf *nbp;	/* newly allocated indirect block */
	struct inode *ip;	/* inode for file being extended */
	struct buf *bp;		/* indirect block referencing allocated block */
	int ptrno;		/* offset of pointer in indirect block */
	ufs_daddr_t newblkno;	/* disk block number being added */
{
	struct allocindir *aip;

	aip = newallocindir(ip, ptrno, newblkno, 0);
	ACQUIRE_LOCK(&lk);
	WORKLIST_INSERT(&nbp->b_dep, &aip->ai_list);
	FREE_LOCK(&lk);
	setup_allocindir_phase2(bp, ip, aip);
}
1602
1603/*
1604 * Called to finish the allocation of the "aip" allocated
1605 * by one of the two routines above.
1606 */
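/*
 * The loop below makes at most two passes.  The first pass looks for an
 * existing indirdep attached to the indirect block's buffer; if one is
 * found (or a newindirdep allocated on the previous pass can be
 * attached), the allocindir is linked into it, its newblk dependency is
 * transferred, and the pointer in the saved copy of the indirect block
 * is rolled back to the old block number.  If no indirdep exists, a new
 * one is allocated, together with a copy of the indirect block keyed by
 * its physical address (ir_savebp), and the loop is retried.
 */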
1607static void
1608setup_allocindir_phase2(bp, ip, aip)
1609 struct buf *bp; /* in-memory copy of the indirect block */
1610 struct inode *ip; /* inode for file being extended */
1611 struct allocindir *aip; /* allocindir allocated by the above routines */
1612{
1613 struct worklist *wk;
1614 struct indirdep *indirdep, *newindirdep;
1615 struct bmsafemap *bmsafemap;
1616 struct allocindir *oldaip;
1617 struct freefrag *freefrag;
1618 struct newblk *newblk;
1619
1620 if (bp->b_lblkno >= 0)
1621 panic("setup_allocindir_phase2: not indir blk");
1622 for (indirdep = NULL, newindirdep = NULL; ; ) {
1623 ACQUIRE_LOCK(&lk);
1624 LIST_FOREACH(wk, &bp->b_dep, wk_list) {
1625 if (wk->wk_type != D_INDIRDEP)
1626 continue;
1627 indirdep = WK_INDIRDEP(wk);
1628 break;
1629 }
1630 if (indirdep == NULL && newindirdep) {
1631 indirdep = newindirdep;
1632 WORKLIST_INSERT(&bp->b_dep, &indirdep->ir_list);
1633 newindirdep = NULL;
1634 }
1635 FREE_LOCK(&lk);
1636 if (indirdep) {
1637 if (newblk_lookup(ip->i_fs, aip->ai_newblkno, 0,
1638 &newblk) == 0)
1639 panic("setup_allocindir: lost block");
1640 ACQUIRE_LOCK(&lk);
1641 if (newblk->nb_state == DEPCOMPLETE) {
1642 aip->ai_state |= DEPCOMPLETE;
1643 aip->ai_buf = NULL;
1644 } else {
1645 bmsafemap = newblk->nb_bmsafemap;
1646 aip->ai_buf = bmsafemap->sm_buf;
1647 LIST_REMOVE(newblk, nb_deps);
1648 LIST_INSERT_HEAD(&bmsafemap->sm_allocindirhd,
1649 aip, ai_deps);
1650 }
1651 LIST_REMOVE(newblk, nb_hash);
1652 FREE(newblk, M_NEWBLK);
1653 aip->ai_indirdep = indirdep;
1654 /*
1655 * Check to see if there is an existing dependency
1656 * for this block. If there is, merge the old
1657 * dependency into the new one.
1658 */
1659 if (aip->ai_oldblkno == 0)
1660 oldaip = NULL;
1661 else
1663 LIST_FOREACH(oldaip, &indirdep->ir_deplisthd, ai_next)
1664 if (oldaip->ai_offset == aip->ai_offset)
1665 break;
1666 if (oldaip != NULL) {
1667 if (oldaip->ai_newblkno != aip->ai_oldblkno) {
1668 FREE_LOCK(&lk);
1669 panic("setup_allocindir_phase2: blkno");
1670 }
1671 aip->ai_oldblkno = oldaip->ai_oldblkno;
1672 freefrag = oldaip->ai_freefrag;
1673 oldaip->ai_freefrag = aip->ai_freefrag;
1674 aip->ai_freefrag = freefrag;
1675 free_allocindir(oldaip, NULL);
1676 }
1677 LIST_INSERT_HEAD(&indirdep->ir_deplisthd, aip, ai_next);
1678 ((ufs_daddr_t *)indirdep->ir_savebp->b_data)
1679 [aip->ai_offset] = aip->ai_oldblkno;
1680 FREE_LOCK(&lk);
1681 }
1682 if (newindirdep) {
1683 			/*
1684 			 * Avoid any possibility of data corruption by
1685 			 * ensuring that our old version is thrown away.
1686 			 */
1687 			newindirdep->ir_savebp->b_flags |= B_INVAL | B_NOCACHE;
1688 			brelse(newindirdep->ir_savebp);
1689 			WORKITEM_FREE((caddr_t)newindirdep, D_INDIRDEP);
1690 }
1691 if (indirdep)
1692 break;
1693 MALLOC(newindirdep, struct indirdep *, sizeof(struct indirdep),
1694 M_INDIRDEP, M_SOFTDEP_FLAGS);
1695 newindirdep->ir_list.wk_type = D_INDIRDEP;
1696 newindirdep->ir_state = ATTACHED;
1697 LIST_INIT(&newindirdep->ir_deplisthd);
1698 LIST_INIT(&newindirdep->ir_donehd);
1699 if (bp->b_blkno == bp->b_lblkno) {
1700 VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno,
1701 NULL, NULL);
1702 }
1703 newindirdep->ir_savebp =
1704 getblk(ip->i_devvp, bp->b_blkno, bp->b_bcount, 0, 0);
1705 BUF_KERNPROC(newindirdep->ir_savebp);
1706 bcopy(bp->b_data, newindirdep->ir_savebp->b_data, bp->b_bcount);
1707 }
1708}
1709
1710/*
1711 * Block de-allocation dependencies.
1712 *
1713 * When blocks are de-allocated, the on-disk pointers must be nullified before
1714 * the blocks are made available for use by other files. (The true
1715 * requirement is that old pointers must be nullified before new on-disk
1716 * pointers are set. We chose this slightly more stringent requirement to
1717 * reduce complexity.) Our implementation handles this dependency by updating
1718 * the inode (or indirect block) appropriately but delaying the actual block
1719 * de-allocation (i.e., freemap and free space count manipulation) until
1720 * after the updated versions reach stable storage. After the disk is
1721 * updated, the blocks can be safely de-allocated whenever it is convenient.
1722 * This implementation handles only the common case of reducing a file's
1723 * length to zero. Other cases are handled by the conventional synchronous
1724 * write approach.
1725 *
1726 * The ffs implementation with which we worked double-checks
1727 * the state of the block pointers and file size as it reduces
1728 * a file's length. Some of this code is replicated here in our
1729 * soft updates implementation. The freeblks->fb_chkcnt field is
1730 * used to transfer a part of this information to the procedure
1731 * that eventually de-allocates the blocks.
1732 *
1733 * This routine should be called from the routine that shortens
1734 * a file's length, before the inode's size or block pointers
1735 * are modified. It will save the block pointer information for
1736 * later release and zero the inode so that the calling routine
1737 * can release it.
1738 */
1739struct softdep_setup_freeblocks_info {
1740 struct fs *fs;
1741 struct inode *ip;
1742};
1743
1744static int softdep_setup_freeblocks_bp(struct buf *bp, void *data);
1745
1746void
1747softdep_setup_freeblocks(ip, length)
1748 struct inode *ip; /* The inode whose length is to be reduced */
1749 off_t length; /* The new length for the file */
1750{
1920 	struct softdep_setup_freeblocks_info info;
1752 struct freeblks *freeblks;
1753 struct inodedep *inodedep;
1754 struct allocdirect *adp;
1755 struct vnode *vp;
1756 struct buf *bp;
1757 struct fs *fs;
1758 int i, error, delay;
1759 	int count;
1760
1761 fs = ip->i_fs;
1762 if (length != 0)
1763 		panic("softdep_setup_freeblocks: non-zero length");
1764 MALLOC(freeblks, struct freeblks *, sizeof(struct freeblks),
1765 M_FREEBLKS, M_SOFTDEP_FLAGS);
1766 bzero(freeblks, sizeof(struct freeblks));
1767 freeblks->fb_list.wk_type = D_FREEBLKS;
1768 	freeblks->fb_state = ATTACHED;
1769 freeblks->fb_uid = ip->i_uid;
1770 freeblks->fb_previousinum = ip->i_number;
1771 freeblks->fb_devvp = ip->i_devvp;
1772 freeblks->fb_fs = fs;
1773 freeblks->fb_oldsize = ip->i_size;
1774 freeblks->fb_newsize = length;
1775 freeblks->fb_chkcnt = ip->i_blocks;
1776 for (i = 0; i < NDADDR; i++) {
1777 freeblks->fb_dblks[i] = ip->i_db[i];
1778 ip->i_db[i] = 0;
1779 }
1780 for (i = 0; i < NIADDR; i++) {
1781 freeblks->fb_iblks[i] = ip->i_ib[i];
1782 ip->i_ib[i] = 0;
1783 }
1784 ip->i_blocks = 0;
1785 ip->i_size = 0;
1786 /*
1787 	 * Push the zero'ed inode to its disk buffer so that we are free
1788 * to delete its dependencies below. Once the dependencies are gone
1789 * the buffer can be safely released.
1790 */
1791 if ((error = bread(ip->i_devvp,
1792 fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
1793 	    (int)fs->fs_bsize, &bp)) != 0)
1794 softdep_error("softdep_setup_freeblocks", error);
1795 *((struct dinode *)bp->b_data + ino_to_fsbo(fs, ip->i_number)) =
1796 ip->i_din;
1797 /*
1798 * Find and eliminate any inode dependencies.
1799 */
1800 ACQUIRE_LOCK(&lk);
1801 (void) inodedep_lookup(fs, ip->i_number, DEPALLOC, &inodedep);
1802 if ((inodedep->id_state & IOSTARTED) != 0) {
1803 FREE_LOCK(&lk);
1804 panic("softdep_setup_freeblocks: inode busy");
1805 }
1806 /*
1807 * Add the freeblks structure to the list of operations that
1808 * must await the zero'ed inode being written to disk. If we
1809 * still have a bitmap dependency (delay == 0), then the inode
1810 * has never been written to disk, so we can process the
1811 * freeblks below once we have deleted the dependencies.
1812 */
1813 delay = (inodedep->id_state & DEPCOMPLETE);
1814 if (delay)
1815 WORKLIST_INSERT(&inodedep->id_bufwait, &freeblks->fb_list);
1816 /*
1817 * Because the file length has been truncated to zero, any
1818 * pending block allocation dependency structures associated
1819 * with this inode are obsolete and can simply be de-allocated.
1820 * We must first merge the two dependency lists to get rid of
1821 * any duplicate freefrag structures, then purge the merged list.
1822 */
1823 merge_inode_lists(inodedep);
1824 while ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != 0)
1825 free_allocdirect(&inodedep->id_inoupdt, adp, 1);
1826 FREE_LOCK(&lk);
1827 bdwrite(bp);
1828 /*
1829 * We must wait for any I/O in progress to finish so that
1830 * all potential buffers on the dirty list will be visible.
1831 * Once they are all there, walk the list and get rid of
1832 * any dependencies.
1833 */
1834 vp = ITOV(ip);
1835 ACQUIRE_LOCK(&lk);
1836 drain_output(vp, 1);
1837
1838 info.fs = fs;
1839 info.ip = ip;
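	/*
	 * Each pass over the dirty-buffer tree invalidates buffers via
	 * softdep_setup_freeblocks_bp(), which transiently releases the
	 * lock; the scan is restarted until a pass reports no further
	 * progress, since the tree can change while the lock is dropped.
	 */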
1840 do {
1841 count = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL,
1842 softdep_setup_freeblocks_bp, &info);
1843 } while (count > 0);
1844 if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) != 0)
1845 (void)free_inodedep(inodedep);
1846
1847 if (delay) {
1848 freeblks->fb_state |= DEPCOMPLETE;
1849 /*
1850 * If the inode with zeroed block pointers is now on disk
1851 * we can start freeing blocks. Add freeblks to the worklist
1852 * instead of calling handle_workitem_freeblocks directly as
1853 * it is more likely that additional IO is needed to complete
1854 * the request here than in the !delay case.
1855 */
1856 if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE)
1857 add_to_worklist(&freeblks->fb_list);
1858 }
1859
1860 FREE_LOCK(&lk);
1861 /*
1862 * If the inode has never been written to disk (delay == 0),
1863 * then we can process the freeblks now that we have deleted
1864 * the dependencies.
1865 */
1866 if (!delay)
1867 handle_workitem_freeblocks(freeblks);
1868}
1869
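/*
 * RB_SCAN callback for softdep_setup_freeblocks().  Strips the
 * dependencies from one dirty buffer and invalidates it.  Returns -1
 * if getdirtybuf() fails and 1 once the buffer has been processed.
 */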
1870static int
1871softdep_setup_freeblocks_bp(struct buf *bp, void *data)
1872{
1873 struct softdep_setup_freeblocks_info *info = data;
1874 struct inodedep *inodedep;
1875
1876 if (getdirtybuf(&bp, MNT_WAIT) == 0)
1877 return(-1);
1878 (void) inodedep_lookup(info->fs, info->ip->i_number, 0, &inodedep);
1879 deallocate_dependencies(bp, inodedep);
1880 bp->b_flags |= B_INVAL | B_NOCACHE;
1881 FREE_LOCK(&lk);
1882 brelse(bp);
1883 ACQUIRE_LOCK(&lk);
1884 return(1);
1885}
1886
1887/*
1888 * Reclaim any dependency structures from a buffer that is about to
1889 * be reallocated to a new vnode. The buffer must be locked, thus,
1890 * no I/O completion operations can occur while we are manipulating
1891 * its associated dependencies. The mutex is held so that other I/O's
1892 * associated with related dependencies do not occur.
1893 */
1894static void
1895deallocate_dependencies(bp, inodedep)
1896 struct buf *bp;
1897 struct inodedep *inodedep;
1898{
1899 struct worklist *wk;
1900 struct indirdep *indirdep;
1901 struct allocindir *aip;
1902 struct pagedep *pagedep;
1903 struct dirrem *dirrem;
1904 struct diradd *dap;
1905 int i;
1906
1907 while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) {
1908 switch (wk->wk_type) {
1909
1910 case D_INDIRDEP:
1911 indirdep = WK_INDIRDEP(wk);
1912 /*
1913 * None of the indirect pointers will ever be visible,
1914 * so they can simply be tossed. GOINGAWAY ensures
1915 * that allocated pointers will be saved in the buffer
1916 * cache until they are freed. Note that they will
1917 * only be able to be found by their physical address
1918 * since the inode mapping the logical address will
1919 * be gone. The save buffer used for the safe copy
1920 * was allocated in setup_allocindir_phase2 using
1921 * the physical address so it could be used for this
1922 * purpose. Hence we swap the safe copy with the real
1923 * copy, allowing the safe copy to be freed and holding
1924 * on to the real copy for later use in indir_trunc.
1925 */
1926 if (indirdep->ir_state & GOINGAWAY) {
1927 FREE_LOCK(&lk);
1928 panic("deallocate_dependencies: already gone");
1929 }
1930 indirdep->ir_state |= GOINGAWAY;
1931 while ((aip = LIST_FIRST(&indirdep->ir_deplisthd)) != 0)
1932 free_allocindir(aip, inodedep);
1933 if (bp->b_lblkno >= 0 ||
1934 bp->b_blkno != indirdep->ir_savebp->b_lblkno) {
1935 FREE_LOCK(&lk);
1936 panic("deallocate_dependencies: not indir");
1937 }
1938 bcopy(bp->b_data, indirdep->ir_savebp->b_data,
1939 bp->b_bcount);
1940 WORKLIST_REMOVE(wk);
1941 WORKLIST_INSERT(&indirdep->ir_savebp->b_dep, wk);
1942 continue;
1943
1944 case D_PAGEDEP:
1945 pagedep = WK_PAGEDEP(wk);
1946 /*
1947 * None of the directory additions will ever be
1948 * visible, so they can simply be tossed.
1949 */
1950 for (i = 0; i < DAHASHSZ; i++)
1951 while ((dap =
1952 LIST_FIRST(&pagedep->pd_diraddhd[i])))
1953 free_diradd(dap);
1954 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != 0)
1955 free_diradd(dap);
1956 /*
1957 * Copy any directory remove dependencies to the list
1958 * to be processed after the zero'ed inode is written.
1959 * If the inode has already been written, then they
1960 * can be dumped directly onto the work list.
1961 */
1962 LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next) {
1963 LIST_REMOVE(dirrem, dm_next);
1964 dirrem->dm_dirinum = pagedep->pd_ino;
1965 if (inodedep == NULL ||
1966 (inodedep->id_state & ALLCOMPLETE) ==
1967 ALLCOMPLETE)
1968 add_to_worklist(&dirrem->dm_list);
1969 else
1970 WORKLIST_INSERT(&inodedep->id_bufwait,
1971 &dirrem->dm_list);
1972 }
1973 WORKLIST_REMOVE(&pagedep->pd_list);
1974 LIST_REMOVE(pagedep, pd_hash);
1975 WORKITEM_FREE(pagedep, D_PAGEDEP);
1976 continue;
1977
1978 case D_ALLOCINDIR:
1979 free_allocindir(WK_ALLOCINDIR(wk), inodedep);
1980 continue;
1981
1982 case D_ALLOCDIRECT:
1983 case D_INODEDEP:
1984 FREE_LOCK(&lk);
1985 panic("deallocate_dependencies: Unexpected type %s",
1986 TYPENAME(wk->wk_type));
1987 /* NOTREACHED */
1988
1989 default:
1990 FREE_LOCK(&lk);
1991 panic("deallocate_dependencies: Unknown type %s",
1992 TYPENAME(wk->wk_type));
1993 /* NOTREACHED */
1994 }
1995 }
1996}
1997
1998/*
1999 * Free an allocdirect. Generate a new freefrag work request if appropriate.
2000 * This routine must be called with splbio interrupts blocked.
2001 */
2002static void
2003free_allocdirect(adphead, adp, delay)
2004 struct allocdirectlst *adphead;
2005 struct allocdirect *adp;
2006 int delay;
2007{
2008
2009#ifdef DEBUG
2010 	if (lk.lkt_held == NOHOLDER)
2011 panic("free_allocdirect: lock not held");
2012#endif
2013 if ((adp->ad_state & DEPCOMPLETE) == 0)
2014 LIST_REMOVE(adp, ad_deps);
2015 TAILQ_REMOVE(adphead, adp, ad_next);
2016 if ((adp->ad_state & COMPLETE) == 0)
2017 WORKLIST_REMOVE(&adp->ad_list);
2018 if (adp->ad_freefrag != NULL) {
2019 if (delay)
2020 WORKLIST_INSERT(&adp->ad_inodedep->id_bufwait,
2021 &adp->ad_freefrag->ff_list);
2022 else
2023 add_to_worklist(&adp->ad_freefrag->ff_list);
2024 }
2025 WORKITEM_FREE(adp, D_ALLOCDIRECT);
2026}
2027
2028/*
2029 * Prepare an inode to be freed. The actual free operation is not
2030 * done until the zero'ed inode has been written to disk.
2031 */
2032void
2033softdep_freefile(pvp, ino, mode)
2034 struct vnode *pvp;
2035 ino_t ino;
2036 int mode;
2037{
2038 struct inode *ip = VTOI(pvp);
2039 struct inodedep *inodedep;
2040 struct freefile *freefile;
2041
2042 /*
2043 * This sets up the inode de-allocation dependency.
2044 */
2045 MALLOC(freefile, struct freefile *, sizeof(struct freefile),
2046 M_FREEFILE, M_SOFTDEP_FLAGS);
2047 freefile->fx_list.wk_type = D_FREEFILE;
2048 freefile->fx_list.wk_state = 0;
2049 freefile->fx_mode = mode;
2050 freefile->fx_oldinum = ino;
2051 freefile->fx_devvp = ip->i_devvp;
2052 freefile->fx_fs = ip->i_fs;
2053
2054 /*
2055 * If the inodedep does not exist, then the zero'ed inode has
2056 * been written to disk. If the allocated inode has never been
2057 * written to disk, then the on-disk inode is zero'ed. In either
2058 * case we can free the file immediately.
2059 */
2060 ACQUIRE_LOCK(&lk);
2061 if (inodedep_lookup(ip->i_fs, ino, 0, &inodedep) == 0 ||
2062 check_inode_unwritten(inodedep)) {
2063 FREE_LOCK(&lk);
2064 handle_workitem_freefile(freefile);
2065 return;
2066 }
2067 WORKLIST_INSERT(&inodedep->id_inowait, &freefile->fx_list);
2068 FREE_LOCK(&lk);
2069}
2070
2071/*
2072 * Check to see if an inode has never been written to disk. If
2073 * so free the inodedep and return success, otherwise return failure.
2074 * This routine must be called with splbio interrupts blocked.
2075 *
2076 * If we still have a bitmap dependency, then the inode has never
2077 * been written to disk. Drop the dependency as it is no longer
2078 * necessary since the inode is being deallocated. We set the
2079 * ALLCOMPLETE flags since the bitmap now properly shows that the
2080 * inode is not allocated. Even if the inode is actively being
2081 * written, it has been rolled back to its zero'ed state, so we
2082 * are ensured that a zero inode is what is on the disk. For short
2083 * lived files, this change will usually result in removing all the
2084 * dependencies from the inode so that it can be freed immediately.
2085 */
2086static int
2087check_inode_unwritten(inodedep)
2088 struct inodedep *inodedep;
2089{
2090
2091 if ((inodedep->id_state & DEPCOMPLETE) != 0 ||
2092 LIST_FIRST(&inodedep->id_pendinghd) != NULL ||
2093 LIST_FIRST(&inodedep->id_bufwait) != NULL ||
2094 LIST_FIRST(&inodedep->id_inowait) != NULL ||
2095 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL ||
2096 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL ||
2097 inodedep->id_nlinkdelta != 0)
2098 return (0);
2099 inodedep->id_state |= ALLCOMPLETE;
2100 LIST_REMOVE(inodedep, id_deps);
2101 inodedep->id_buf = NULL;
2102 if (inodedep->id_state & ONWORKLIST)
2103 WORKLIST_REMOVE(&inodedep->id_list);
2104 if (inodedep->id_savedino != NULL) {
2105 FREE(inodedep->id_savedino, M_INODEDEP);
2106 inodedep->id_savedino = NULL;
2107 }
2108 if (free_inodedep(inodedep) == 0) {
2109 FREE_LOCK(&lk);
2110 panic("check_inode_unwritten: busy inode");
2111 }
2112 return (1);
2113}
2114
2115/*
2116 * Try to free an inodedep structure. Return 1 if it could be freed.
2117 */
2118static int
2119free_inodedep(inodedep)
2120 struct inodedep *inodedep;
2121{
2122
2123 if ((inodedep->id_state & ONWORKLIST) != 0 ||
2124 (inodedep->id_state & ALLCOMPLETE) != ALLCOMPLETE ||
2125 LIST_FIRST(&inodedep->id_pendinghd) != NULL ||
2126 LIST_FIRST(&inodedep->id_bufwait) != NULL ||
2127 LIST_FIRST(&inodedep->id_inowait) != NULL ||
2128 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL ||
2129 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL ||
2130 inodedep->id_nlinkdelta != 0 || inodedep->id_savedino != NULL)
2131 return (0);
2132 LIST_REMOVE(inodedep, id_hash);
2133 WORKITEM_FREE(inodedep, D_INODEDEP);
2134 num_inodedep -= 1;
2135 return (1);
2136}
2137
2138/*
2139 * This workitem routine performs the block de-allocation.
2140 * The workitem is added to the pending list after the updated
2141 * inode block has been written to disk. As mentioned above,
2142 * checks regarding the number of blocks de-allocated (compared
2143 * to the number of blocks allocated for the file) are also
2144 * performed in this function.
2145 */
2146static void
2147handle_workitem_freeblocks(freeblks)
2148 struct freeblks *freeblks;
2149{
2150 struct inode tip;
2151 ufs_daddr_t bn;
2152 struct fs *fs;
2153 int i, level, bsize;
2154 long nblocks, blocksreleased = 0;
2155 int error, allerror = 0;
2156 ufs_lbn_t baselbns[NIADDR], tmpval;
2157
2158 tip.i_number = freeblks->fb_previousinum;
2159 tip.i_devvp = freeblks->fb_devvp;
2160 tip.i_dev = freeblks->fb_devvp->v_rdev;
2161 tip.i_fs = freeblks->fb_fs;
2162 tip.i_size = freeblks->fb_oldsize;
2163 tip.i_uid = freeblks->fb_uid;
2164 fs = freeblks->fb_fs;
2165 tmpval = 1;
2166 baselbns[0] = NDADDR;
2167 for (i = 1; i < NIADDR; i++) {
2168 tmpval *= NINDIR(fs);
2169 baselbns[i] = baselbns[i - 1] + tmpval;
2170 }
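	/*
	 * baselbns[i] is the first logical block mapped through the
	 * level-i indirect block: NDADDR, NDADDR + NINDIR(fs),
	 * NDADDR + NINDIR(fs) + NINDIR(fs)^2, ...  For example, with
	 * 8K blocks (NINDIR == 2048) and NDADDR == 12 this gives
	 * 12, 2060, and 4196364.
	 */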
2171 nblocks = btodb(fs->fs_bsize);
2172 blocksreleased = 0;
2173 /*
2174 * Indirect blocks first.
2175 */
2176 for (level = (NIADDR - 1); level >= 0; level--) {
2177 if ((bn = freeblks->fb_iblks[level]) == 0)
2178 continue;
2179 		if ((error = indir_trunc(&tip, fsbtodb(fs, bn), level,
2180 		    baselbns[level], &blocksreleased)) != 0)
2181 			allerror = error;
2182 ffs_blkfree(&tip, bn, fs->fs_bsize);
2183 blocksreleased += nblocks;
2184 }
2185 /*
2186 * All direct blocks or frags.
2187 */
2188 for (i = (NDADDR - 1); i >= 0; i--) {
2189 if ((bn = freeblks->fb_dblks[i]) == 0)
2190 continue;
2191 bsize = blksize(fs, &tip, i);
2192 ffs_blkfree(&tip, bn, bsize);
2193 blocksreleased += btodb(bsize);
2194 }
2195
2196#ifdef DIAGNOSTIC
2197 if (freeblks->fb_chkcnt != blocksreleased)
2198 printf("handle_workitem_freeblocks: block count\n");
2199 if (allerror)
2200 		softdep_error("handle_workitem_freeblocks", allerror);
2201#endif /* DIAGNOSTIC */
2202 WORKITEM_FREE(freeblks, D_FREEBLKS);
2203}
2204
2205/*
2206 * Release blocks associated with the inode ip and stored in the indirect
2207 * block dbn. If level is greater than SINGLE, the block is an indirect block
2208  * and recursive calls to indir_trunc must be used to cleanse other indirect
2209 * blocks.
2210 */
2211static int
2212indir_trunc(ip, dbn, level, lbn, countp)
2213 struct inode *ip;
2214 ufs_daddr_t dbn;
2215 int level;
2216 ufs_lbn_t lbn;
2217 long *countp;
2218{
2219 struct buf *bp;
2220 ufs_daddr_t *bap;
2221 ufs_daddr_t nb;
2222 struct fs *fs;
2223 struct worklist *wk;
2224 struct indirdep *indirdep;
2225 int i, lbnadd, nblocks;
2226 int error, allerror = 0;
2227
2228 fs = ip->i_fs;
2229 lbnadd = 1;
2230 for (i = level; i > 0; i--)
2231 lbnadd *= NINDIR(fs);
2232 /*
2233 * Get buffer of block pointers to be freed. This routine is not
2234 * called until the zero'ed inode has been written, so it is safe
2235 * to free blocks as they are encountered. Because the inode has
2236 * been zero'ed, calls to bmap on these blocks will fail. So, we
2237 * have to use the on-disk address and the block device for the
2238 * filesystem to look them up. If the file was deleted before its
2239 * indirect blocks were all written to disk, the routine that set
2240 * us up (deallocate_dependencies) will have arranged to leave
2241 * a complete copy of the indirect block in memory for our use.
2242 * Otherwise we have to read the blocks in from the disk.
2243 */
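	/*
	 * Two cases below: if the saved copy left behind by
	 * deallocate_dependencies() is still in core (tagged with a
	 * GOINGAWAY indirdep), use it and discard the dependency;
	 * otherwise read the indirect block from disk.
	 */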
2244 ACQUIRE_LOCK(&lk);
2245 if ((bp = incore(ip->i_devvp, dbn)) != NULL &&
2246 (wk = LIST_FIRST(&bp->b_dep)) != NULL) {
2247 if (wk->wk_type != D_INDIRDEP ||
2248 (indirdep = WK_INDIRDEP(wk))->ir_savebp != bp ||
2249 (indirdep->ir_state & GOINGAWAY) == 0) {
2250 FREE_LOCK(&lk);
2251 panic("indir_trunc: lost indirdep");
2252 }
2253 WORKLIST_REMOVE(wk);
2254 WORKITEM_FREE(indirdep, D_INDIRDEP);
2255 if (LIST_FIRST(&bp->b_dep) != NULL) {
2256 FREE_LOCK(&lk);
2257 panic("indir_trunc: dangling dep");
2258 }
2259 FREE_LOCK(&lk);
2260 } else {
2261 FREE_LOCK(&lk);
2262 		error = bread(ip->i_devvp, dbn, (int)fs->fs_bsize, &bp);
2263 if (error)
2264 return (error);
2265 }
2266 /*
2267 * Recursively free indirect blocks.
2268 */
2269 bap = (ufs_daddr_t *)bp->b_data;
2270 nblocks = btodb(fs->fs_bsize);
2271 for (i = NINDIR(fs) - 1; i >= 0; i--) {
2272 if ((nb = bap[i]) == 0)
2273 continue;
2274 if (level != 0) {
2275 if ((error = indir_trunc(ip, fsbtodb(fs, nb),
2276 level - 1, lbn + (i * lbnadd), countp)) != 0)
2277 allerror = error;
2278 }
2279 ffs_blkfree(ip, nb, fs->fs_bsize);
2280 *countp += nblocks;
2281 }
2282 bp->b_flags |= B_INVAL | B_NOCACHE;
2283 brelse(bp);
2284 return (allerror);
2285}
2286
2287/*
2288 * Free an allocindir.
2289 * This routine must be called with splbio interrupts blocked.
2290 */
2291static void
2292free_allocindir(aip, inodedep)
2293 struct allocindir *aip;
2294 struct inodedep *inodedep;
2295{
2296 struct freefrag *freefrag;
2297
2298#ifdef DEBUG
2299 	if (lk.lkt_held == NOHOLDER)
2300 panic("free_allocindir: lock not held");
2301#endif
2302 if ((aip->ai_state & DEPCOMPLETE) == 0)
2303 LIST_REMOVE(aip, ai_deps);
2304 if (aip->ai_state & ONWORKLIST)
2305 WORKLIST_REMOVE(&aip->ai_list);
2306 LIST_REMOVE(aip, ai_next);
2307 if ((freefrag = aip->ai_freefrag) != NULL) {
2308 if (inodedep == NULL)
2309 add_to_worklist(&freefrag->ff_list);
2310 else
2311 WORKLIST_INSERT(&inodedep->id_bufwait,
2312 &freefrag->ff_list);
2313 }
2314 WORKITEM_FREE(aip, D_ALLOCINDIR);
2315}
2316
2317/*
2318 * Directory entry addition dependencies.
2319 *
2320 * When adding a new directory entry, the inode (with its incremented link
2321 * count) must be written to disk before the directory entry's pointer to it.
2322 * Also, if the inode is newly allocated, the corresponding freemap must be
2323 * updated (on disk) before the directory entry's pointer. These requirements
2324 * are met via undo/redo on the directory entry's pointer, which consists
2325 * simply of the inode number.
2326 *
2327 * As directory entries are added and deleted, the free space within a
2328  * directory block can become fragmented. The ufs filesystem will compact
2329 * a fragmented directory block to make space for a new entry. When this
2330 * occurs, the offsets of previously added entries change. Any "diradd"
2331 * dependency structures corresponding to these entries must be updated with
2332 * the new offsets.
2333 */
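/*
 * Concretely, the undo/redo is visible in initiate_write_filepage() and
 * handle_written_filepage() below: while the new inode is not yet safe
 * on disk, the entry's d_ino is written as zero (or as the old inode
 * number for a changed entry) and restored when the write completes.
 */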
2334
2335/*
2336 * This routine is called after the in-memory inode's link
2337 * count has been incremented, but before the directory entry's
2338 * pointer to the inode has been set.
2339 */
2340void
2341softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp)
2342 struct buf *bp; /* buffer containing directory block */
2343 struct inode *dp; /* inode for directory */
2344 off_t diroffset; /* offset of new entry in directory */
2345 	ino_t newinum;		/* inode referenced by new directory entry */
2346 struct buf *newdirbp; /* non-NULL => contents of new mkdir */
2347{
2348 int offset; /* offset of new entry within directory block */
2349 ufs_lbn_t lbn; /* block in directory containing new entry */
2350 struct fs *fs;
2351 struct diradd *dap;
2352 struct pagedep *pagedep;
2353 struct inodedep *inodedep;
2354 struct mkdir *mkdir1, *mkdir2;
2355
2356 /*
2357 * Whiteouts have no dependencies.
2358 */
2359 if (newinum == WINO) {
2360 if (newdirbp != NULL)
2361 bdwrite(newdirbp);
2362 return;
2363 }
2364
2365 fs = dp->i_fs;
2366 lbn = lblkno(fs, diroffset);
2367 offset = blkoff(fs, diroffset);
2368 MALLOC(dap, struct diradd *, sizeof(struct diradd), M_DIRADD,
2369 M_SOFTDEP_FLAGS);
2370 bzero(dap, sizeof(struct diradd));
2371 dap->da_list.wk_type = D_DIRADD;
2372 dap->da_offset = offset;
2373 dap->da_newinum = newinum;
2374 dap->da_state = ATTACHED;
2375 if (newdirbp == NULL) {
2376 dap->da_state |= DEPCOMPLETE;
2377 ACQUIRE_LOCK(&lk);
2378 } else {
2379 dap->da_state |= MKDIR_BODY | MKDIR_PARENT;
2380 MALLOC(mkdir1, struct mkdir *, sizeof(struct mkdir), M_MKDIR,
2381 M_SOFTDEP_FLAGS);
2382 mkdir1->md_list.wk_type = D_MKDIR;
2383 mkdir1->md_state = MKDIR_BODY;
2384 mkdir1->md_diradd = dap;
2385 MALLOC(mkdir2, struct mkdir *, sizeof(struct mkdir), M_MKDIR,
2386 M_SOFTDEP_FLAGS);
2387 mkdir2->md_list.wk_type = D_MKDIR;
2388 mkdir2->md_state = MKDIR_PARENT;
2389 mkdir2->md_diradd = dap;
2390 /*
2391 * Dependency on "." and ".." being written to disk.
2392 */
2393 mkdir1->md_buf = newdirbp;
2394 ACQUIRE_LOCK(&lk);
2395 LIST_INSERT_HEAD(&mkdirlisthd, mkdir1, md_mkdirs);
2396 WORKLIST_INSERT(&newdirbp->b_dep, &mkdir1->md_list);
2397 FREE_LOCK(&lk);
2398 bdwrite(newdirbp);
2399 /*
2400 * Dependency on link count increase for parent directory
2401 */
2402 ACQUIRE_LOCK(&lk);
2403 if (inodedep_lookup(dp->i_fs, dp->i_number, 0, &inodedep) == 0
2404 || (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) {
2405 dap->da_state &= ~MKDIR_PARENT;
2406 WORKITEM_FREE(mkdir2, D_MKDIR);
2407 } else {
2408 LIST_INSERT_HEAD(&mkdirlisthd, mkdir2, md_mkdirs);
2409 WORKLIST_INSERT(&inodedep->id_bufwait,&mkdir2->md_list);
2410 }
2411 }
2412 /*
2413 * Link into parent directory pagedep to await its being written.
2414 */
2415 if (pagedep_lookup(dp, lbn, DEPALLOC, &pagedep) == 0)
2416 WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list);
2417 dap->da_pagedep = pagedep;
2418 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], dap,
2419 da_pdlist);
2420 /*
2421 * Link into its inodedep. Put it on the id_bufwait list if the inode
2422 * is not yet written. If it is written, do the post-inode write
2423 * processing to put it on the id_pendinghd list.
2424 */
2425 (void) inodedep_lookup(fs, newinum, DEPALLOC, &inodedep);
2426 if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE)
2427 diradd_inode_written(dap, inodedep);
2428 else
2429 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list);
2430 FREE_LOCK(&lk);
2431}
2432
2433/*
2434 * This procedure is called to change the offset of a directory
2435 * entry when compacting a directory block which must be owned
2436 * exclusively by the caller. Note that the actual entry movement
2437 * must be done in this procedure to ensure that no I/O completions
2438 * occur while the move is in progress.
2439 */
2440void
2441softdep_change_directoryentry_offset(dp, base, oldloc, newloc, entrysize)
2442 struct inode *dp; /* inode for directory */
2443 caddr_t base; /* address of dp->i_offset */
2444 caddr_t oldloc; /* address of old directory location */
2445 caddr_t newloc; /* address of new directory location */
2446 int entrysize; /* size of directory entry */
2447{
2448 int offset, oldoffset, newoffset;
2449 struct pagedep *pagedep;
2450 struct diradd *dap;
2451 ufs_lbn_t lbn;
2452
2453 ACQUIRE_LOCK(&lk);
2454 lbn = lblkno(dp->i_fs, dp->i_offset);
2455 offset = blkoff(dp->i_fs, dp->i_offset);
2456 if (pagedep_lookup(dp, lbn, 0, &pagedep) == 0)
2457 goto done;
2458 oldoffset = offset + (oldloc - base);
2459 newoffset = offset + (newloc - base);
2460
2461 LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(oldoffset)], da_pdlist) {
2462 if (dap->da_offset != oldoffset)
2463 continue;
2464 dap->da_offset = newoffset;
2465 if (DIRADDHASH(newoffset) == DIRADDHASH(oldoffset))
2466 break;
2467 LIST_REMOVE(dap, da_pdlist);
2468 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(newoffset)],
2469 dap, da_pdlist);
2470 break;
2471 }
2472 if (dap == NULL) {
2473
2474 LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist) {
2475 if (dap->da_offset == oldoffset) {
2476 dap->da_offset = newoffset;
2477 break;
2478 }
2479 }
2480 }
2481done:
2482 bcopy(oldloc, newloc, entrysize);
2483 FREE_LOCK(&lk);
2484}
2485
2486/*
2487 * Free a diradd dependency structure. This routine must be called
2488 * with splbio interrupts blocked.
2489 */
2490static void
2491free_diradd(dap)
2492 struct diradd *dap;
2493{
2494 struct dirrem *dirrem;
2495 struct pagedep *pagedep;
2496 struct inodedep *inodedep;
2497 struct mkdir *mkdir, *nextmd;
2498
2499#ifdef DEBUG
2500 	if (lk.lkt_held == NOHOLDER)
2501 panic("free_diradd: lock not held");
2502#endif
2503 WORKLIST_REMOVE(&dap->da_list);
2504 LIST_REMOVE(dap, da_pdlist);
2505 if ((dap->da_state & DIRCHG) == 0) {
2506 pagedep = dap->da_pagedep;
2507 } else {
2508 dirrem = dap->da_previous;
2509 pagedep = dirrem->dm_pagedep;
2510 dirrem->dm_dirinum = pagedep->pd_ino;
2511 add_to_worklist(&dirrem->dm_list);
2512 }
2513 if (inodedep_lookup(VFSTOUFS(pagedep->pd_mnt)->um_fs, dap->da_newinum,
2514 0, &inodedep) != 0)
2515 (void) free_inodedep(inodedep);
2516 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
2517 for (mkdir = LIST_FIRST(&mkdirlisthd); mkdir; mkdir = nextmd) {
2518 nextmd = LIST_NEXT(mkdir, md_mkdirs);
2519 if (mkdir->md_diradd != dap)
2520 continue;
2521 dap->da_state &= ~mkdir->md_state;
2522 WORKLIST_REMOVE(&mkdir->md_list);
2523 LIST_REMOVE(mkdir, md_mkdirs);
2524 WORKITEM_FREE(mkdir, D_MKDIR);
2525 }
2526 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
2527 FREE_LOCK(&lk);
2528 panic("free_diradd: unfound ref");
2529 }
2530 }
2531 WORKITEM_FREE(dap, D_DIRADD);
2532}
2533
2534/*
2535 * Directory entry removal dependencies.
2536 *
2537 * When removing a directory entry, the entry's inode pointer must be
2538 * zero'ed on disk before the corresponding inode's link count is decremented
2539 * (possibly freeing the inode for re-use). This dependency is handled by
2540 * updating the directory entry but delaying the inode count reduction until
2541 * after the directory block has been written to disk. After this point, the
2542 * inode count can be decremented whenever it is convenient.
2543 */
2544
2545/*
2546 * This routine should be called immediately after removing
2547 * a directory entry. The inode's link count should not be
2548 * decremented by the calling procedure -- the soft updates
2549 * code will do this task when it is safe.
2550 */
2551void
2552softdep_setup_remove(bp, dp, ip, isrmdir)
2553 struct buf *bp; /* buffer containing directory block */
2554 struct inode *dp; /* inode for the directory being modified */
2555 struct inode *ip; /* inode for directory entry being removed */
2556 int isrmdir; /* indicates if doing RMDIR */
2557{
2558 struct dirrem *dirrem, *prevdirrem;
2559
2560 /*
2561 * Allocate a new dirrem if appropriate and ACQUIRE_LOCK.
2562 */
2563 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem);
2564
2565 /*
2566 * If the COMPLETE flag is clear, then there were no active
2567 * entries and we want to roll back to a zeroed entry until
2568 * the new inode is committed to disk. If the COMPLETE flag is
2569 * set then we have deleted an entry that never made it to
2570 * disk. If the entry we deleted resulted from a name change,
2571 * then the old name still resides on disk. We cannot delete
2572 * its inode (returned to us in prevdirrem) until the zeroed
2573 * directory entry gets to disk. The new inode has never been
2574 * referenced on the disk, so can be deleted immediately.
2575 */
2576 if ((dirrem->dm_state & COMPLETE) == 0) {
2577 LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, dirrem,
2578 dm_next);
2579 FREE_LOCK(&lk);
2580 } else {
2581 if (prevdirrem != NULL)
2582 LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd,
2583 prevdirrem, dm_next);
2584 dirrem->dm_dirinum = dirrem->dm_pagedep->pd_ino;
2585 FREE_LOCK(&lk);
2586 handle_workitem_remove(dirrem);
2587 }
2588}
2589
2590/*
2591 * Allocate a new dirrem if appropriate and return it along with
2592 * its associated pagedep. Called without a lock, returns with lock.
2593 */
2594static long num_dirrem; /* number of dirrem allocated */
2595static struct dirrem *
2596newdirrem(bp, dp, ip, isrmdir, prevdirremp)
2597 struct buf *bp; /* buffer containing directory block */
2598 struct inode *dp; /* inode for the directory being modified */
2599 struct inode *ip; /* inode for directory entry being removed */
2600 int isrmdir; /* indicates if doing RMDIR */
2601 struct dirrem **prevdirremp; /* previously referenced inode, if any */
2602{
2603 int offset;
2604 ufs_lbn_t lbn;
2605 struct diradd *dap;
2606 struct dirrem *dirrem;
2607 struct pagedep *pagedep;
2608
2609 /*
2610 * Whiteouts have no deletion dependencies.
2611 */
2612 if (ip == NULL)
2613 panic("newdirrem: whiteout");
2614 /*
2615 * If we are over our limit, try to improve the situation.
2616 * Limiting the number of dirrem structures will also limit
2617 * the number of freefile and freeblks structures.
2618 */
2619 if (num_dirrem > max_softdeps / 2 && speedup_syncer() == 0)
2620 (void) request_cleanup(FLUSH_REMOVE, 0);
2621 num_dirrem += 1;
2622 MALLOC(dirrem, struct dirrem *, sizeof(struct dirrem),
2623 M_DIRREM, M_SOFTDEP_FLAGS);
2624 bzero(dirrem, sizeof(struct dirrem));
2625 dirrem->dm_list.wk_type = D_DIRREM;
2626 dirrem->dm_state = isrmdir ? RMDIR : 0;
2627 dirrem->dm_mnt = ITOV(ip)->v_mount;
2628 dirrem->dm_oldinum = ip->i_number;
2629 *prevdirremp = NULL;
2630
2631 ACQUIRE_LOCK(&lk);
2632 lbn = lblkno(dp->i_fs, dp->i_offset);
2633 offset = blkoff(dp->i_fs, dp->i_offset);
2634 if (pagedep_lookup(dp, lbn, DEPALLOC, &pagedep) == 0)
2635 WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list);
2636 dirrem->dm_pagedep = pagedep;
2637 /*
2638 * Check for a diradd dependency for the same directory entry.
2639 * If present, then both dependencies become obsolete and can
2640 	 * be de-allocated. Check for an entry on both the pd_diraddhd
2641 * list and the pd_pendinghd list.
2642 */
2643
2644 LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(offset)], da_pdlist)
2645 if (dap->da_offset == offset)
2646 break;
2647 if (dap == NULL) {
2648
2649 LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist)
2650 if (dap->da_offset == offset)
2651 break;
2652 if (dap == NULL)
2653 return (dirrem);
2654 }
2655 /*
2656 * Must be ATTACHED at this point.
2657 */
2658 if ((dap->da_state & ATTACHED) == 0) {
2659 FREE_LOCK(&lk);
2660 panic("newdirrem: not ATTACHED");
2661 }
2662 if (dap->da_newinum != ip->i_number) {
2663 FREE_LOCK(&lk);
2664 		panic("newdirrem: inum %"PRId64" should be %"PRId64,
2665 ip->i_number, dap->da_newinum);
2666 }
2667 /*
2668 * If we are deleting a changed name that never made it to disk,
2669 * then return the dirrem describing the previous inode (which
2670 * represents the inode currently referenced from this entry on disk).
2671 */
2672 if ((dap->da_state & DIRCHG) != 0) {
2673 *prevdirremp = dap->da_previous;
2674 dap->da_state &= ~DIRCHG;
2675 dap->da_pagedep = pagedep;
2676 }
2677 /*
2678 * We are deleting an entry that never made it to disk.
2679 * Mark it COMPLETE so we can delete its inode immediately.
2680 */
2681 dirrem->dm_state |= COMPLETE;
2682 free_diradd(dap);
2683 return (dirrem);
2684}
2685
2686/*
2687 * Directory entry change dependencies.
2688 *
2689 * Changing an existing directory entry requires that an add operation
2690 * be completed first followed by a deletion. The semantics for the addition
2691 * are identical to the description of adding a new entry above except
2692 * that the rollback is to the old inode number rather than zero. Once
2693 * the addition dependency is completed, the removal is done as described
2694 * in the removal routine above.
2695 */
2696
2697/*
2698 * This routine should be called immediately after changing
2699 * a directory entry. The inode's link count should not be
2700 * decremented by the calling procedure -- the soft updates
2701 * code will perform this task when it is safe.
2702 */
2703void
2704softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir)
2705 struct buf *bp; /* buffer containing directory block */
2706 struct inode *dp; /* inode for the directory being modified */
2707 struct inode *ip; /* inode for directory entry being removed */
2708 	ino_t newinum;		/* new inode number for changed entry */
2709 int isrmdir; /* indicates if doing RMDIR */
2710{
2711 int offset;
2712 struct diradd *dap = NULL;
2713 struct dirrem *dirrem, *prevdirrem;
2714 struct pagedep *pagedep;
2715 struct inodedep *inodedep;
2716
2717 offset = blkoff(dp->i_fs, dp->i_offset);
2718
2719 /*
2720 * Whiteouts do not need diradd dependencies.
2721 */
2722 if (newinum != WINO) {
2723 MALLOC(dap, struct diradd *, sizeof(struct diradd),
2724 M_DIRADD, M_SOFTDEP_FLAGS);
2725 bzero(dap, sizeof(struct diradd));
2726 dap->da_list.wk_type = D_DIRADD;
2727 dap->da_state = DIRCHG | ATTACHED | DEPCOMPLETE;
2728 dap->da_offset = offset;
2729 dap->da_newinum = newinum;
2730 }
2731
2732 /*
2733 * Allocate a new dirrem and ACQUIRE_LOCK.
2734 */
2735 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem);
2736 pagedep = dirrem->dm_pagedep;
2737 /*
2738 * The possible values for isrmdir:
2739 * 0 - non-directory file rename
2740 * 1 - directory rename within same directory
2741 * inum - directory rename to new directory of given inode number
2742 * When renaming to a new directory, we are both deleting and
2743 * creating a new directory entry, so the link count on the new
2744 * directory should not change. Thus we do not need the followup
2745 * dirrem which is usually done in handle_workitem_remove. We set
2746 * the DIRCHG flag to tell handle_workitem_remove to skip the
2747 * followup dirrem.
2748 */
2749 if (isrmdir > 1)
2750 dirrem->dm_state |= DIRCHG;
2751
2752 /*
2753 * Whiteouts have no additional dependencies,
2754 * so just put the dirrem on the correct list.
2755 */
2756 if (newinum == WINO) {
2757 if ((dirrem->dm_state & COMPLETE) == 0) {
2758 LIST_INSERT_HEAD(&pagedep->pd_dirremhd, dirrem,
2759 dm_next);
2760 } else {
2761 dirrem->dm_dirinum = pagedep->pd_ino;
2762 add_to_worklist(&dirrem->dm_list);
2763 }
2764 FREE_LOCK(&lk);
2765 return;
2766 }
2767
2768 /*
2769 * If the COMPLETE flag is clear, then there were no active
2770 * entries and we want to roll back to the previous inode until
2771 * the new inode is committed to disk. If the COMPLETE flag is
2772 * set, then we have deleted an entry that never made it to disk.
2773 * If the entry we deleted resulted from a name change, then the old
2774 * inode reference still resides on disk. Any rollback that we do
2775 * needs to be to that old inode (returned to us in prevdirrem). If
2776 * the entry we deleted resulted from a create, then there is
2777 * no entry on the disk, so we want to roll back to zero rather
2778 * than the uncommitted inode. In either of the COMPLETE cases we
2779 * want to immediately free the unwritten and unreferenced inode.
2780 */
2781 if ((dirrem->dm_state & COMPLETE) == 0) {
2782 dap->da_previous = dirrem;
2783 } else {
2784 if (prevdirrem != NULL) {
2785 dap->da_previous = prevdirrem;
2786 } else {
2787 dap->da_state &= ~DIRCHG;
2788 dap->da_pagedep = pagedep;
2789 }
2790 dirrem->dm_dirinum = pagedep->pd_ino;
2791 add_to_worklist(&dirrem->dm_list);
2792 }
2793 /*
2794 * Link into its inodedep. Put it on the id_bufwait list if the inode
2795 * is not yet written. If it is written, do the post-inode write
2796 * processing to put it on the id_pendinghd list.
2797 */
2798 if (inodedep_lookup(dp->i_fs, newinum, DEPALLOC, &inodedep) == 0 ||
2799 (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) {
2800 dap->da_state |= COMPLETE;
2801 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
2802 WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list);
2803 } else {
2804 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)],
2805 dap, da_pdlist);
2806 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list);
2807 }
2808 FREE_LOCK(&lk);
2809}
2810
2811/*
2812 * Called whenever the link count on an inode is changed.
2813 * It creates an inode dependency so that the new reference(s)
2814 * to the inode cannot be committed to disk until the updated
2815 * inode has been written.
2816 */
2817void
2818softdep_change_linkcnt(ip)
2819 struct inode *ip; /* the inode with the increased link count */
2820{
2821 struct inodedep *inodedep;
2822
2823 ACQUIRE_LOCK(&lk);
2824 (void) inodedep_lookup(ip->i_fs, ip->i_number, DEPALLOC, &inodedep);
2825 if (ip->i_nlink < ip->i_effnlink) {
2826 FREE_LOCK(&lk);
2827 panic("softdep_change_linkcnt: bad delta");
2828 }
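	/*
	 * i_effnlink is the link count the inode will have once all
	 * pending operations complete; i_nlink still includes them, so
	 * the delta recorded here is the number of link-count
	 * decrements being held up by dependencies.
	 */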
2829 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
2830 FREE_LOCK(&lk);
2831}
2832
2833/*
2834 * This workitem decrements the inode's link count.
2835 * If the link count reaches zero, the file is removed.
2836 */
2837static void
2838handle_workitem_remove(dirrem)
2839 struct dirrem *dirrem;
2840{
2841 	struct thread *td = curthread;	/* XXX */
2842 struct inodedep *inodedep;
2843 struct vnode *vp;
2844 struct inode *ip;
2845 ino_t oldinum;
2846 int error;
2847
2848 if ((error = VFS_VGET(dirrem->dm_mnt, dirrem->dm_oldinum, &vp)) != 0) {
2849 softdep_error("handle_workitem_remove: vget", error);
2850 return;
2851 }
2852 ip = VTOI(vp);
2853 ACQUIRE_LOCK(&lk);
2854 if ((inodedep_lookup(ip->i_fs, dirrem->dm_oldinum, 0, &inodedep)) == 0){
2855 FREE_LOCK(&lk);
2856 panic("handle_workitem_remove: lost inodedep");
2857 }
2858 /*
2859 * Normal file deletion.
2860 */
2861 if ((dirrem->dm_state & RMDIR) == 0) {
2862 ip->i_nlink--;
2863 ip->i_flag |= IN_CHANGE;
2864 if (ip->i_nlink < ip->i_effnlink) {
2865 FREE_LOCK(&lk);
2866 panic("handle_workitem_remove: bad file delta");
2867 }
2868 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
2869 FREE_LOCK(&lk);
2870 vput(vp);
2871 num_dirrem -= 1;
2872 WORKITEM_FREE(dirrem, D_DIRREM);
2873 return;
2874 }
2875 /*
2876 * Directory deletion. Decrement reference count for both the
2877 * just deleted parent directory entry and the reference for ".".
2878 * Next truncate the directory to length zero. When the
2879 * truncation completes, arrange to have the reference count on
2880 * the parent decremented to account for the loss of "..".
2881 */
2882 ip->i_nlink -= 2;
2883 ip->i_flag |= IN_CHANGE;
2884 if (ip->i_nlink < ip->i_effnlink) {
2885 FREE_LOCK(&lk);
2886 panic("handle_workitem_remove: bad dir delta");
2887 }
2888 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
2889 FREE_LOCK(&lk);
2890 	if ((error = UFS_TRUNCATE(vp, (off_t)0, 0, proc0.p_ucred, td)) != 0)
2891 softdep_error("handle_workitem_remove: truncate", error);
2892 /*
2893 	 * Rename a directory to a new parent. Since we are both deleting
2894 * and creating a new directory entry, the link count on the new
2895 * directory should not change. Thus we skip the followup dirrem.
2896 */
2897 if (dirrem->dm_state & DIRCHG) {
2898 vput(vp);
2899 num_dirrem -= 1;
2900 WORKITEM_FREE(dirrem, D_DIRREM);
2901 return;
2902 }
2903 /*
2904 * If the inodedep does not exist, then the zero'ed inode has
2905 * been written to disk. If the allocated inode has never been
2906 * written to disk, then the on-disk inode is zero'ed. In either
2907 * case we can remove the file immediately.
2908 */
2909 ACQUIRE_LOCK(&lk);
2910 dirrem->dm_state = 0;
2911 oldinum = dirrem->dm_oldinum;
2912 dirrem->dm_oldinum = dirrem->dm_dirinum;
2913 if (inodedep_lookup(ip->i_fs, oldinum, 0, &inodedep) == 0 ||
2914 check_inode_unwritten(inodedep)) {
2915 FREE_LOCK(&lk);
2916 vput(vp);
2917 handle_workitem_remove(dirrem);
2918 return;
2919 }
2920 WORKLIST_INSERT(&inodedep->id_inowait, &dirrem->dm_list);
2921 FREE_LOCK(&lk);
2922 ip->i_flag |= IN_CHANGE;
2923 ffs_update(vp, 0);
2924 vput(vp);
2925}
2926
2927/*
2928 * Inode de-allocation dependencies.
2929 *
2930 * When an inode's link count is reduced to zero, it can be de-allocated. We
2931 * found it convenient to postpone de-allocation until after the inode is
2932 * written to disk with its new link count (zero). At this point, all of the
2933 * on-disk inode's block pointers are nullified and, with careful dependency
2934 * list ordering, all dependencies related to the inode will be satisfied and
2935 * the corresponding dependency structures de-allocated. So, if/when the
2936 * inode is reused, there will be no mixing of old dependencies with new
2937 * ones. This artificial dependency is set up by the block de-allocation
2938 * procedure above (softdep_setup_freeblocks) and completed by the
2939 * following procedure.
2940 */
2941static void
2942handle_workitem_freefile(freefile)
2943 struct freefile *freefile;
2944{
2945 struct vnode vp;
2946 struct inode tip;
2947 struct inodedep *idp;
2948 int error;
2949
2950#ifdef DEBUG
2951 ACQUIRE_LOCK(&lk);
2952 error = inodedep_lookup(freefile->fx_fs, freefile->fx_oldinum, 0, &idp);
2953 FREE_LOCK(&lk);
2954 if (error)
2955 panic("handle_workitem_freefile: inodedep survived");
2956#endif
2957 tip.i_devvp = freefile->fx_devvp;
2958 tip.i_dev = freefile->fx_devvp->v_rdev;
2959 tip.i_fs = freefile->fx_fs;
2960 vp.v_data = &tip;
2961 if ((error = ffs_freefile(&vp, freefile->fx_oldinum, freefile->fx_mode)) != 0)
2962 softdep_error("handle_workitem_freefile", error);
2963 WORKITEM_FREE(freefile, D_FREEFILE);
2964}
2965
2966/*
2967 * Disk writes.
2968 *
2969 * The dependency structures constructed above are most actively used when file
2970 * system blocks are written to disk. No constraints are placed on when a
2971 * block can be written, but unsatisfied update dependencies are made safe by
2972 * modifying (or replacing) the source memory for the duration of the disk
2973 * write. When the disk write completes, the memory block is again brought
2974 * up-to-date.
2975 *
2976 * In-core inode structure reclamation.
2977 *
2978 * Because there are a finite number of "in-core" inode structures, they are
2979 * reused regularly. By transferring all inode-related dependencies to the
2980 * in-memory inode block and indexing them separately (via "inodedep"s), we
2981 * can allow "in-core" inode structures to be reused at any time and avoid
2982 * any increase in contention.
2983 *
2984 * Called just before entering the device driver to initiate a new disk I/O.
2985 * The buffer must be locked, thus, no I/O completion operations can occur
2986 * while we are manipulating its associated dependencies.
2987 */
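/*
 * The undo/redo cycle pairs this routine with
 * softdep_disk_write_complete() below: initiation rolls dependent
 * fields back to their safe on-disk values and marks the affected items
 * UNDONE; completion restores the up-to-date values, marks the items
 * ATTACHED again, and processes whatever has become satisfiable.
 */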
2988static void
2989softdep_disk_io_initiation(bp)
2990 struct buf *bp; /* structure describing disk write to occur */
2991{
2992 struct worklist *wk, *nextwk;
2993 struct indirdep *indirdep;
2994
2995 /*
2996 * We only care about write operations. There should never
2997 * be dependencies for reads.
2998 */
2999 if (bp->b_flags & B_READ)
3000 panic("softdep_disk_io_initiation: read");
3001 /*
3002 * Do any necessary pre-I/O processing.
3003 */
3004 for (wk = LIST_FIRST(&bp->b_dep); wk; wk = nextwk) {
3005 nextwk = LIST_NEXT(wk, wk_list);
3006 switch (wk->wk_type) {
3007
3008 case D_PAGEDEP:
3009 initiate_write_filepage(WK_PAGEDEP(wk), bp);
3010 continue;
3011
3012 case D_INODEDEP:
3013 initiate_write_inodeblock(WK_INODEDEP(wk), bp);
3014 continue;
3015
3016 case D_INDIRDEP:
3017 indirdep = WK_INDIRDEP(wk);
3018 if (indirdep->ir_state & GOINGAWAY)
3019 panic("disk_io_initiation: indirdep gone");
3020 /*
3021 * If there are no remaining dependencies, this
3022 * will be writing the real pointers, so the
3023 * dependency can be freed.
3024 */
3025 if (LIST_FIRST(&indirdep->ir_deplisthd) == NULL) {
3026 indirdep->ir_savebp->b_flags |= B_INVAL | B_NOCACHE;
3027 brelse(indirdep->ir_savebp);
3028 /* inline expand WORKLIST_REMOVE(wk); */
3029 wk->wk_state &= ~ONWORKLIST;
3030 LIST_REMOVE(wk, wk_list);
3031 WORKITEM_FREE(indirdep, D_INDIRDEP);
3032 continue;
3033 }
3034 /*
3035 * Replace up-to-date version with safe version.
3036 */
3037 MALLOC(indirdep->ir_saveddata, caddr_t, bp->b_bcount,
3038 M_INDIRDEP, M_SOFTDEP_FLAGS);
3039 ACQUIRE_LOCK(&lk);
3040 indirdep->ir_state &= ~ATTACHED;
3041 indirdep->ir_state |= UNDONE;
3042 bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount);
3043 bcopy(indirdep->ir_savebp->b_data, bp->b_data,
3044 bp->b_bcount);
3045 FREE_LOCK(&lk);
3046 continue;
3047
3048 case D_MKDIR:
3049 case D_BMSAFEMAP:
3050 case D_ALLOCDIRECT:
3051 case D_ALLOCINDIR:
3052 continue;
3053
3054 default:
3055 			panic("softdep_disk_io_initiation: Unexpected type %s",
3056 TYPENAME(wk->wk_type));
3057 /* NOTREACHED */
3058 }
3059 }
3060}
3061
3062/*
3063 * Called from within the procedure above to deal with unsatisfied
3064 * allocation dependencies in a directory. The buffer must be locked,
3065 * thus, no I/O completion operations can occur while we are
3066 * manipulating its associated dependencies.
3067 */
3068static void
3069initiate_write_filepage(pagedep, bp)
3070 struct pagedep *pagedep;
3071 struct buf *bp;
3072{
3073 struct diradd *dap;
3074 struct direct *ep;
3075 int i;
3076
3077 if (pagedep->pd_state & IOSTARTED) {
3078 /*
3079 * This can only happen if there is a driver that does not
3080 * understand chaining. Here biodone will reissue the call
3081 * to strategy for the incomplete buffers.
3082 */
3083 printf("initiate_write_filepage: already started\n");
3084 return;
3085 }
3086 pagedep->pd_state |= IOSTARTED;
3087 ACQUIRE_LOCK(&lk);
3088 for (i = 0; i < DAHASHSZ; i++) {
3089 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) {
3090 ep = (struct direct *)
3091 ((char *)bp->b_data + dap->da_offset);
3092 if (ep->d_ino != dap->da_newinum) {
3093 FREE_LOCK(&lk);
3094 				panic("%s: dir inum %d != new %"PRId64,
3095 "initiate_write_filepage",
3096 ep->d_ino, dap->da_newinum);
3097 }
3098 if (dap->da_state & DIRCHG)
3099 ep->d_ino = dap->da_previous->dm_oldinum;
3100 else
3101 ep->d_ino = 0;
3102 dap->da_state &= ~ATTACHED;
3103 dap->da_state |= UNDONE;
3104 }
3105 }
3106 FREE_LOCK(&lk);
3107}
3108
3109/*
3110 * Called from within the procedure above to deal with unsatisfied
3111 * allocation dependencies in an inodeblock. The buffer must be
3112 * locked, thus, no I/O completion operations can occur while we
3113 * are manipulating its associated dependencies.
3114 */
3115static void
3116initiate_write_inodeblock(inodedep, bp)
3117 struct inodedep *inodedep;
3118 struct buf *bp; /* The inode block */
3119{
3120 struct allocdirect *adp, *lastadp;
3121 struct dinode *dp;
3122 struct fs *fs;
3123 ufs_lbn_t prevlbn = 0;
3124 int i, deplist;
3125
3126 if (inodedep->id_state & IOSTARTED)
3127 panic("initiate_write_inodeblock: already started");
3128 inodedep->id_state |= IOSTARTED;
3129 fs = inodedep->id_fs;
3130 dp = (struct dinode *)bp->b_data +
3131 ino_to_fsbo(fs, inodedep->id_ino);
3132 /*
3133 * If the bitmap is not yet written, then the allocated
3134 * inode cannot be written to disk.
3135 */
3136 if ((inodedep->id_state & DEPCOMPLETE) == 0) {
3137 if (inodedep->id_savedino != NULL)
3138 panic("initiate_write_inodeblock: already doing I/O");
3139 MALLOC(inodedep->id_savedino, struct dinode *,
3140 sizeof(struct dinode), M_INODEDEP, M_SOFTDEP_FLAGS);
3141 *inodedep->id_savedino = *dp;
3142 bzero((caddr_t)dp, sizeof(struct dinode));
3143 return;
3144 }
3145 /*
3146 * If no dependencies, then there is nothing to roll back.
3147 */
3148 inodedep->id_savedsize = dp->di_size;
3149 if (TAILQ_FIRST(&inodedep->id_inoupdt) == NULL)
3150 return;
3151 /*
3152 * Set the dependencies to busy.
3153 */
3154 ACQUIRE_LOCK(&lk);
3155 for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
3156 adp = TAILQ_NEXT(adp, ad_next)) {
3157#ifdef DIAGNOSTIC
3158 if (deplist != 0 && prevlbn >= adp->ad_lbn) {
3159 FREE_LOCK(&lk);
3160 panic("softdep_write_inodeblock: lbn order");
3161 }
3162 prevlbn = adp->ad_lbn;
3163 if (adp->ad_lbn < NDADDR &&
3164 dp->di_db[adp->ad_lbn] != adp->ad_newblkno) {
3165 FREE_LOCK(&lk);
3166 panic("%s: direct pointer #%ld mismatch %d != %d",
3167 "softdep_write_inodeblock", adp->ad_lbn,
3168 dp->di_db[adp->ad_lbn], adp->ad_newblkno);
3169 }
3170 if (adp->ad_lbn >= NDADDR &&
3171 dp->di_ib[adp->ad_lbn - NDADDR] != adp->ad_newblkno) {
3172 FREE_LOCK(&lk);
3173 panic("%s: indirect pointer #%ld mismatch %d != %d",
3174 "softdep_write_inodeblock", adp->ad_lbn - NDADDR,
3175 dp->di_ib[adp->ad_lbn - NDADDR], adp->ad_newblkno);
3176 }
3177 deplist |= 1 << adp->ad_lbn;
3178 if ((adp->ad_state & ATTACHED) == 0) {
3179 FREE_LOCK(&lk);
3180 panic("softdep_write_inodeblock: Unknown state 0x%x",
3181 adp->ad_state);
3182 }
3183#endif /* DIAGNOSTIC */
3184 adp->ad_state &= ~ATTACHED;
3185 adp->ad_state |= UNDONE;
3186 }
3187 /*
3188 * The on-disk inode cannot claim to be any larger than the last
3189 * fragment that has been written. Otherwise, the on-disk inode
3190 * might have fragments that were not the last block in the file
3191 * which would corrupt the filesystem.
3192 */
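	/*
	 * For example, if the rollback for direct block 5 is to a 2K
	 * fragment on an 8K filesystem, di_size is rolled back to
	 * 5 * 8192 + 2048 = 43008 and all later direct and indirect
	 * pointers are cleared.
	 */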
3193 for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
3194 lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
3195 if (adp->ad_lbn >= NDADDR)
3196 break;
3197 dp->di_db[adp->ad_lbn] = adp->ad_oldblkno;
3198 /* keep going until hitting a rollback to a frag */
3199 if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
3200 continue;
3201 dp->di_size = fs->fs_bsize * adp->ad_lbn + adp->ad_oldsize;
3202 for (i = adp->ad_lbn + 1; i < NDADDR; i++) {
3203#ifdef DIAGNOSTIC
3204 if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0) {
3205 FREE_LOCK(&lk);
3206 panic("softdep_write_inodeblock: lost dep1");
3207 }
3208#endif /* DIAGNOSTIC */
3209 dp->di_db[i] = 0;
3210 }
3211 for (i = 0; i < NIADDR; i++) {
3212#ifdef DIAGNOSTIC
3213 if (dp->di_ib[i] != 0 &&
3214 (deplist & ((1 << NDADDR) << i)) == 0) {
3215 FREE_LOCK(&lk);
3216 panic("softdep_write_inodeblock: lost dep2");
3217 }
3218#endif /* DIAGNOSTIC */
3219 dp->di_ib[i] = 0;
3220 }
3221 FREE_LOCK(&lk);
3222 return;
3223 }
3224 /*
3225 * If we have zero'ed out the last allocated block of the file,
3226 * roll back the size to the last currently allocated block.
3227 	 * We know that this last allocated block is full-sized, as
3228 * we already checked for fragments in the loop above.
3229 */
3230 if (lastadp != NULL &&
3231 dp->di_size <= (lastadp->ad_lbn + 1) * fs->fs_bsize) {
3232 for (i = lastadp->ad_lbn; i >= 0; i--)
3233 if (dp->di_db[i] != 0)
3234 break;
3235 dp->di_size = (i + 1) * fs->fs_bsize;
3236 }
3237 /*
3238 * The only dependencies are for indirect blocks.
3239 *
3240 * The file size for indirect block additions is not guaranteed.
3241 * Such a guarantee would be non-trivial to achieve. The conventional
3242 * synchronous write implementation also does not make this guarantee.
3243 * Fsck should catch and fix discrepancies. Arguably, the file size
3244 * can be over-estimated without destroying integrity when the file
3245 * moves into the indirect blocks (i.e., is large). If we want to
3246 * postpone fsck, we are stuck with this argument.
3247 */
3248 for (; adp; adp = TAILQ_NEXT(adp, ad_next))
3249 dp->di_ib[adp->ad_lbn - NDADDR] = 0;
3250 FREE_LOCK(&lk);
3251}
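/*
 * Illustrative sketch (hypothetical helper, not part of this file):
 * the size rollback above clamps di_size so that the on-disk inode
 * ends at the last fragment whose old contents are already on disk.
 * The arithmetic matches the di_size assignment in the loop above.
 */
#if 0	/* example only */
static long
rollback_size_example(long bsize, long lbn, long oldsize)
{
	/* lbn full blocks precede the fragment, which keeps oldsize */
	return (bsize * lbn + oldsize);
}
/* e.g. bsize 8192, lbn 3, oldsize 1024 => size rolls back to 25600 */
#endif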
3252
3253/*
3254 * This routine is called during the completion interrupt
3255 * service routine for a disk write (from the procedure called
f719c866 3256 * by the device driver to inform the filesystem caches of
3257 * a request completion). It should be called early in this
3258 * procedure, before the block is made available to other
3259 * processes or other routines are called.
3260 */
3261static void
3262softdep_disk_write_complete(bp)
3263 struct buf *bp; /* describes the completed disk write */
3264{
3265 struct worklist *wk;
3266 struct workhead reattach;
3267 struct newblk *newblk;
3268 struct allocindir *aip;
3269 struct allocdirect *adp;
3270 struct indirdep *indirdep;
3271 struct inodedep *inodedep;
3272 struct bmsafemap *bmsafemap;
3273
3274#ifdef DEBUG
dadab5e9 3275 if (lk.lkt_held != NOHOLDER)
984263bc 3276 panic("softdep_disk_write_complete: lock is held");
dadab5e9 3277 lk.lkt_held = SPECIAL_FLAG;
3278#endif
3279 LIST_INIT(&reattach);
3280 while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) {
3281 WORKLIST_REMOVE(wk);
3282 switch (wk->wk_type) {
3283
3284 case D_PAGEDEP:
3285 if (handle_written_filepage(WK_PAGEDEP(wk), bp))
3286 WORKLIST_INSERT(&reattach, wk);
3287 continue;
3288
3289 case D_INODEDEP:
3290 if (handle_written_inodeblock(WK_INODEDEP(wk), bp))
3291 WORKLIST_INSERT(&reattach, wk);
3292 continue;
3293
3294 case D_BMSAFEMAP:
3295 bmsafemap = WK_BMSAFEMAP(wk);
3296 while ((newblk = LIST_FIRST(&bmsafemap->sm_newblkhd))) {
3297 newblk->nb_state |= DEPCOMPLETE;
3298 newblk->nb_bmsafemap = NULL;
3299 LIST_REMOVE(newblk, nb_deps);
3300 }
3301 while ((adp =
3302 LIST_FIRST(&bmsafemap->sm_allocdirecthd))) {
3303 adp->ad_state |= DEPCOMPLETE;
3304 adp->ad_buf = NULL;
3305 LIST_REMOVE(adp, ad_deps);
3306 handle_allocdirect_partdone(adp);
3307 }
3308 while ((aip =
3309 LIST_FIRST(&bmsafemap->sm_allocindirhd))) {
3310 aip->ai_state |= DEPCOMPLETE;
3311 aip->ai_buf = NULL;
3312 LIST_REMOVE(aip, ai_deps);
3313 handle_allocindir_partdone(aip);
3314 }
3315 while ((inodedep =
3316 LIST_FIRST(&bmsafemap->sm_inodedephd)) != NULL) {
3317 inodedep->id_state |= DEPCOMPLETE;
3318 LIST_REMOVE(inodedep, id_deps);
3319 inodedep->id_buf = NULL;
3320 }
3321 WORKITEM_FREE(bmsafemap, D_BMSAFEMAP);
3322 continue;
3323
3324 case D_MKDIR:
3325 handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY);
3326 continue;
3327
3328 case D_ALLOCDIRECT:
3329 adp = WK_ALLOCDIRECT(wk);
3330 adp->ad_state |= COMPLETE;
3331 handle_allocdirect_partdone(adp);
3332 continue;
3333
3334 case D_ALLOCINDIR:
3335 aip = WK_ALLOCINDIR(wk);
3336 aip->ai_state |= COMPLETE;
3337 handle_allocindir_partdone(aip);
3338 continue;
3339
3340 case D_INDIRDEP:
3341 indirdep = WK_INDIRDEP(wk);
3342 if (indirdep->ir_state & GOINGAWAY) {
dadab5e9 3343 lk.lkt_held = NOHOLDER;
3344 panic("disk_write_complete: indirdep gone");
3345 }
3346 bcopy(indirdep->ir_saveddata, bp->b_data, bp->b_bcount);
3347 FREE(indirdep->ir_saveddata, M_INDIRDEP);
3348 indirdep->ir_saveddata = 0;
3349 indirdep->ir_state &= ~UNDONE;
3350 indirdep->ir_state |= ATTACHED;
3351 while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != 0) {
3352 handle_allocindir_partdone(aip);
3353 if (aip == LIST_FIRST(&indirdep->ir_donehd)) {
dadab5e9 3354 lk.lkt_held = NOHOLDER;
3355 panic("disk_write_complete: not gone");
3356 }
3357 }
3358 WORKLIST_INSERT(&reattach, wk);
3359 if ((bp->b_flags & B_DELWRI) == 0)
3360 stat_indir_blk_ptrs++;
3361 bdirty(bp);
3362 continue;
3363
3364 default:
dadab5e9 3365 lk.lkt_held = NOHOLDER;
3366 panic("handle_disk_write_complete: Unknown type %s",
3367 TYPENAME(wk->wk_type));
3368 /* NOTREACHED */
3369 }
3370 }
3371 /*
3372 * Reattach any requests that must be redone.
3373 */
3374 while ((wk = LIST_FIRST(&reattach)) != NULL) {
3375 WORKLIST_REMOVE(wk);
3376 WORKLIST_INSERT(&bp->b_dep, wk);
3377 }
3378#ifdef DEBUG
dadab5e9 3379 if (lk.lkt_held != SPECIAL_FLAG)
984263bc 3380 panic("softdep_disk_write_complete: lock lost");
dadab5e9 3381 lk.lkt_held = NOHOLDER;
3382#endif
3383}
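/*
 * Illustrative sketch (assumptions noted): this handler is not called
 * directly; the buffer cache dispatches to it from biodone() through
 * the bio_ops completion hook.  example_biodone() below is a
 * hypothetical stand-in for that call site, simplified to show only
 * the ordering requirement stated in the comment above.
 */
#if 0	/* example only */
static void
example_biodone(struct buf *bp)
{
	/* run dependency processing before the buffer is reusable */
	if (LIST_FIRST(&bp->b_dep) != NULL)
		softdep_disk_write_complete(bp);
	/* ... only then release the buffer and wake up waiters ... */
}
#endif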
3384
3385/*
3386 * Called from within softdep_disk_write_complete above. Note that
3387 * this routine is always called from interrupt level with further
3388 * splbio interrupts blocked.
3389 */
3390static void
3391handle_allocdirect_partdone(adp)
3392 struct allocdirect *adp; /* the completed allocdirect */
3393{
3394 struct allocdirect *listadp;
3395 struct inodedep *inodedep;
3396 long bsize;
3397
3398 if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
3399 return;
3400 if (adp->ad_buf != NULL) {
dadab5e9 3401 lk.lkt_held = NOHOLDER;
3402 panic("handle_allocdirect_partdone: dangling dep");
3403 }
3404 /*
3405 * The on-disk inode cannot claim to be any larger than the last
3406 * fragment that has been written. Otherwise, the on-disk inode
3407 * might have fragments that were not the last block in the file,
3408 * which would corrupt the filesystem. Thus, we cannot free any
3409 * allocdirects after one whose ad_oldblkno claims a fragment as
3410 * these blocks must be rolled back to zero before writing the inode.
3411 * We check the currently active set of allocdirects in id_inoupdt.
3412 */
3413 inodedep = adp->ad_inodedep;
3414 bsize = inodedep->id_fs->fs_bsize;
3415 TAILQ_FOREACH(listadp, &inodedep->id_inoupdt, ad_next) {
3416 /* found our block */
3417 if (listadp == adp)
3418 break;
3419 /* continue if ad_oldsize is not a fragment */
3420 if (listadp->ad_oldsize == 0 ||
3421 listadp->ad_oldsize == bsize)
3422 continue;
3423 /* hit a fragment */
3424 return;
3425 }
3426 /*
3427 * If we have reached the end of the current list without
3428 * finding the just finished dependency, then it must be
3429 * on the future dependency list. Future dependencies cannot
3430 * be freed until they are moved to the current list.
3431 */
3432 if (listadp == NULL) {
3433#ifdef DEBUG
3434 TAILQ_FOREACH(listadp, &inodedep->id_newinoupdt, ad_next)
3435 /* found our block */
3436 if (listadp == adp)
3437 break;
3438 if (listadp == NULL) {
dadab5e9 3439 lk.lkt_held = NOHOLDER;
3440 panic("handle_allocdirect_partdone: lost dep");
3441 }
3442#endif /* DEBUG */
3443 return;
3444 }
3445 /*
3446 * If we have found the just finished dependency, then free
3447 * it along with anything that follows it that is complete.
3448 */
3449 for (; adp; adp = listadp) {
3450 listadp = TAILQ_NEXT(adp, ad_next);
3451 if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
3452 return;
3453 free_allocdirect(&inodedep->id_inoupdt, adp, 1);
3454 }
3455}
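/*
 * Illustrative sketch: the scan above enforces the fragment rule.  A
 * hypothetical predicate restating it: an allocdirect may be freed
 * only if no earlier entry on the current list still rolls back to a
 * fragment.
 */
#if 0	/* example only */
static int
may_free_allocdirect_example(struct allocdirect *adp,
    struct inodedep *inodedep, long bsize)
{
	struct allocdirect *listadp;

	TAILQ_FOREACH(listadp, &inodedep->id_inoupdt, ad_next) {
		if (listadp == adp)
			return (1);	/* reached it, no fragment seen */
		if (listadp->ad_oldsize != 0 &&
		    listadp->ad_oldsize != bsize)
			return (0);	/* earlier fragment blocks the free */
	}
	return (0);	/* not on the current list */
}
#endif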
3456
3457/*
3458 * Called from within softdep_disk_write_complete above. Note that
3459 * this routine is always called from interrupt level with further
3460 * splbio interrupts blocked.
3461 */
3462static void
3463handle_allocindir_partdone(aip)
3464 struct allocindir *aip; /* the completed allocindir */
3465{
3466 struct indirdep *indirdep;
3467
3468 if ((aip->ai_state & ALLCOMPLETE) != ALLCOMPLETE)
3469 return;
3470 if (aip->ai_buf != NULL) {
dadab5e9 3471 lk.lkt_held = NOHOLDER;
3472 panic("handle_allocindir_partdone: dangling dependency");
3473 }
3474 indirdep = aip->ai_indirdep;
3475 if (indirdep->ir_state & UNDONE) {
3476 LIST_REMOVE(aip, ai_next);
3477 LIST_INSERT_HEAD(&indirdep->ir_donehd, aip, ai_next);
3478 return;
3479 }
3480 ((ufs_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] =
3481 aip->ai_newblkno;
3482 LIST_REMOVE(aip, ai_next);
3483 if (aip->ai_freefrag != NULL)
3484 add_to_worklist(&aip->ai_freefrag->ff_list);
3485 WORKITEM_FREE(aip, D_ALLOCINDIR);
3486}
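/*
 * Illustrative sketch: once the indirect block is no longer UNDONE,
 * committing a pointer is plain array indexing into the saved buffer,
 * exactly as done above.  Hypothetical standalone form:
 */
#if 0	/* example only */
static void
commit_indir_ptr_example(struct buf *savebp, int offset,
    ufs_daddr_t newblkno)
{
	/* each slot of an indirect block holds one disk address */
	((ufs_daddr_t *)savebp->b_data)[offset] = newblkno;
}
#endif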
3487
3488/*
3489 * Called from within softdep_disk_write_complete above to restore
3490 * in-memory inode block contents to their most up-to-date state. Note
3491 * that this routine is always called from interrupt level with further
3492 * splbio interrupts blocked.
3493 */
3494static int
3495handle_written_inodeblock(inodedep, bp)
3496 struct inodedep *inodedep;
3497 struct buf *bp; /* buffer containing the inode block */
3498{
3499 struct worklist *wk, *filefree;
3500 struct allocdirect *adp, *nextadp;
3501 struct dinode *dp;
3502 int hadchanges;
3503
3504 if ((inodedep->id_state & IOSTARTED) == 0) {
dadab5e9 3505 lk.lkt_held = NOHOLDER;
3506 panic("handle_written_inodeblock: not started");
3507 }
3508 inodedep->id_state &= ~IOSTARTED;
3509 inodedep->id_state |= COMPLETE;
3510 dp = (struct dinode *)bp->b_data +
3511 ino_to_fsbo(inodedep->id_fs, inodedep->id_ino);
3512 /*
3513 * If we had to rollback the inode allocation because of
3514 * bitmaps being incomplete, then simply restore it.
3515 * Keep the block dirty so that it will not be reclaimed until
3516 * all associated dependencies have been cleared and the
3517 * corresponding updates written to disk.
3518 */
3519 if (inodedep->id_savedino != NULL) {
3520 *dp = *inodedep->id_savedino;
3521 FREE(inodedep->id_savedino, M_INODEDEP);
3522 inodedep->id_savedino = NULL;
3523 if ((bp->b_flags & B_DELWRI) == 0)
3524 stat_inode_bitmap++;
3525 bdirty(bp);
3526 return (1);
3527 }
3528 /*
3529 * Roll forward anything that had to be rolled back before
3530 * the inode could be updated.
3531 */
3532 hadchanges = 0;
3533 for (adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; adp = nextadp) {
3534 nextadp = TAILQ_NEXT(adp, ad_next);
3535 if (adp->ad_state & ATTACHED) {
dadab5e9 3536 lk.lkt_held = NOHOLDER;
3537 panic("handle_written_inodeblock: new entry");
3538 }
3539 if (adp->ad_lbn < NDADDR) {
3540 if (dp->di_db[adp->ad_lbn] != adp->ad_oldblkno) {
dadab5e9 3541 lk.lkt_held = NOHOLDER;
3542 panic("%s: %s #%ld mismatch %d != %d",
3543 "handle_written_inodeblock",
3544 "direct pointer", adp->ad_lbn,
3545 dp->di_db[adp->ad_lbn], adp->ad_oldblkno);
3546 }
3547 dp->di_db[adp->ad_lbn] = adp->ad_newblkno;
3548 } else {
3549 if (dp->di_ib[adp->ad_lbn - NDADDR] != 0) {
dadab5e9 3550 lk.lkt_held = NOHOLDER;
3551 panic("%s: %s #%ld allocated as %d",
3552 "handle_written_inodeblock",
3553 "indirect pointer", adp->ad_lbn - NDADDR,
3554 dp->di_ib[adp->ad_lbn - NDADDR]);
3555 }
3556 dp->di_ib[adp->ad_lbn - NDADDR] = adp->ad_newblkno;
3557 }
3558 adp->ad_state &= ~UNDONE;
3559 adp->ad_state |= ATTACHED;
3560 hadchanges = 1;
3561 }
3562 if (hadchanges && (bp->b_flags & B_DELWRI) == 0)
3563 stat_direct_blk_ptrs++;
3564 /*
3565 * Reset the file size to its most up-to-date value.
3566 */
3567 if (inodedep->id_savedsize == -1) {
dadab5e9 3568 lk.lkt_held = NOHOLDER;
3569 panic("handle_written_inodeblock: bad size");
3570 }
3571 if (dp->di_size != inodedep->id_savedsize) {
3572 dp->di_size = inodedep->id_savedsize;
3573 hadchanges = 1;
3574 }
3575 inodedep->id_savedsize = -1;
3576 /*
3577 * If there were any rollbacks in the inode block, then it must be
3578 * marked dirty so that it will eventually get written back in
3579 * its correct form.
3580 */
3581 if (hadchanges)
3582 bdirty(bp);
3583 /*
3584 * Process any allocdirects that completed during the update.
3585 */
3586 if ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL)
3587 handle_allocdirect_partdone(adp);
3588 /*
3589 * Process deallocations that were held pending until the
3590 * inode had been written to disk. Freeing of the inode
3591 * is delayed until after all blocks have been freed to
3592 * avoid creation of new <vfsid, inum, lbn> triples
3593 * before the old ones have been deleted.
3594 */
3595 filefree = NULL;
3596 while ((wk = LIST_FIRST(&inodedep->id_bufwait)) != NULL) {
3597 WORKLIST_REMOVE(wk);
3598 switch (wk->wk_type) {
3599
3600 case D_FREEFILE:
3601 /*
3602 * We defer adding filefree to the worklist until
3603 * all other additions have been made to ensure
3604 * that it will be done after all the old blocks
3605 * have been freed.
3606 */
3607 if (filefree != NULL) {
dadab5e9 3608 lk.lkt_held = NOHOLDER;
3609 panic("handle_written_inodeblock: filefree");
3610 }
3611 filefree = wk;
3612 continue;
3613
3614 case D_MKDIR:
3615 handle_written_mkdir(WK_MKDIR(wk), MKDIR_PARENT);
3616 continue;
3617
3618 case D_DIRADD:
3619 diradd_inode_written(WK_DIRADD(wk), inodedep);
3620 continue;
3621
3622 case D_FREEBLKS:
3623 wk->wk_state |= COMPLETE;
3624 if ((wk->wk_state & ALLCOMPLETE) != ALLCOMPLETE)
3625 continue;
3626 /* -- fall through -- */
3627 case D_FREEFRAG:
3628 case D_DIRREM:
3629 add_to_worklist(wk);
3630 continue;
3631
3632 default:
dadab5e9 3633 lk.lkt_held = NOHOLDER;
3634 panic("handle_written_inodeblock: Unknown type %s",
3635 TYPENAME(wk->wk_type));
3636 /* NOTREACHED */
3637 }
3638 }
3639 if (filefree != NULL) {
3640 if (free_inodedep(inodedep) == 0) {
dadab5e9 3641 lk.lkt_held = NOHOLDER;
3642 panic("handle_written_inodeblock: live inodedep");
3643 }
3644 add_to_worklist(filefree);
3645 return (0);
3646 }
3647
3648 /*
3649 * If no outstanding dependencies, free it.
3650 */
3651 if (free_inodedep(inodedep) || TAILQ_FIRST(&inodedep->id_inoupdt) == 0)
3652 return (0);
3653 return (hadchanges);
3654}
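/*
 * Illustrative sketch: the rollback done before the write and the
 * roll-forward performed above are symmetric flips of the same state
 * bits on a dependency.  Hypothetical helpers showing the pairing:
 */
#if 0	/* example only */
static void
rollback_example(struct allocdirect *adp)	/* before the write */
{
	adp->ad_state &= ~ATTACHED;
	adp->ad_state |= UNDONE;
}

static void
rollforward_example(struct allocdirect *adp)	/* after the write */
{
	adp->ad_state &= ~UNDONE;
	adp->ad_state |= ATTACHED;
}
#endif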
3655
3656/*
3657 * Process a diradd entry after its dependent inode has been written.
3658 * This routine must be called with splbio interrupts blocked.
3659 */
3660static void
3661diradd_inode_written(dap, inodedep)
3662 struct diradd *dap;
3663 struct inodedep *inodedep;
3664{
3665 struct pagedep *pagedep;
3666
3667 dap->da_state |= COMPLETE;
3668 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
3669 if (dap->da_state & DIRCHG)
3670 pagedep = dap->da_previous->dm_pagedep;
3671 else
3672 pagedep = dap->da_pagedep;
3673 LIST_REMOVE(dap, da_pdlist);
3674 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
3675 }
3676 WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list);
3677}
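/*
 * Illustrative sketch: a DIRCHG diradd is overwriting an existing
 * entry, so its page dependency hangs off the dirrem it supersedes;
 * otherwise it is on the diradd itself, as selected above.
 * Hypothetical accessor:
 */
#if 0	/* example only */
static struct pagedep *
diradd_pagedep_example(struct diradd *dap)
{
	return ((dap->da_state & DIRCHG) ?
	    dap->da_previous->dm_pagedep : dap->da_pagedep);
}
#endif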
3678
3679/*
3680 * Handle the completion of a mkdir dependency.
3681 */
3682static void
3683handle_written_mkdir(mkdir, type)
3684 struct mkdir *mkdir;
3685 int type;
3686{
3687 struct diradd *dap;
3688 struct pagedep *pagedep;
3689
3690 if (mkdir->md_state != type) {
dadab5e9 3691 lk.lkt_held = NOHOLDER;
3692 panic("handle_written_mkdir: bad type");
3693 }
3694 dap = mkdir->md_diradd;
3695 dap->da_state &= ~type;
3696 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0)
3697 dap->da_state |= DEPCOMPLETE;
3698 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
3699 if (dap->da_state & DIRCHG)
3700 pagedep = dap->da_previous->dm_pagedep;
3701 else
3702 pagedep = dap->da_pagedep;
3703 LIST_REMOVE(dap, da_pdlist);
3704 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
3705 }
3706 LIST_REMOVE(mkdir, md_mkdirs);
3707 WORKITEM_FREE(mkdir, D_MKDIR);
3708}
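/*
 * Illustrative sketch: a mkdir's directory entry depends on two
 * distinct writes, the new directory's first block (MKDIR_BODY,
 * cleared from softdep_disk_write_complete) and the parent linkage
 * in the inode block (MKDIR_PARENT, cleared from
 * handle_written_inodeblock).  The diradd only becomes DEPCOMPLETE
 * once both bits are gone, e.g.:
 */
#if 0	/* example only */
	dap->da_state &= ~MKDIR_BODY;	/* directory block written */
	dap->da_state &= ~MKDIR_PARENT;	/* parent inode written */
	if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0)
		dap->da_state |= DEPCOMPLETE;
#endif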
3709
3710/*
3711 * Called from within softdep_disk_write_complete above.
3712 * A write operation was just completed. Removed inodes can
3713 * now be freed and associated block pointers may be committed.
3714 * Note that this routine is always called from interrupt level
3715 * with further splbio interrupts blocked.
3716 */
3717static int
3718handle_written_filepage(pagedep, bp)
3719 struct pagedep *pagedep;
3720 struct buf *bp; /* buffer containing the written page */
3721{
3722 struct dirrem *dirrem;
3723 struct diradd *dap, *nextdap;
3724 struct direct *ep;
3725 int i, chgs;
3726
3727 if ((pagedep->pd_state & IOSTARTED) == 0) {
dadab5e9 3728 lk.lkt_held = NOHOLDER;
3729 panic("handle_written_filepage: not started");
3730 }
3731 pagedep->pd_state &= ~IOSTARTED;
3732 /*
3733 * Process any directory removals that have been committed.
3734 */
3735 while ((dirrem = LIST_FIRST(&pagedep->pd_dirremhd)) != NULL) {
3736 LIST_REMOVE(dirrem, dm_next);
3737 dirrem->dm_dirinum = pagedep->pd_ino;
3738 add_to_worklist(&dirrem->dm_list);
3739 }
3740 /*
3741 * Free any directory additions that have been committed.
3742 */
3743 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL)
3744 free_diradd(dap);
3745 /*
3746 * Uncommitted directory entries must be restored.
3747 */
3748 for (chgs = 0, i = 0; i < DAHASHSZ; i++) {
3749 for (dap = LIST_FIRST(&pagedep->pd_diraddhd[i]); dap;
3750 dap = nextdap) {
3751 nextdap = LIST_NEXT(dap, da_pdlist);
3752 if (dap->da_state & ATTACHED) {
dadab5e9 3753 lk.lkt_held = NOHOLDER;
3754 panic("handle_written_filepage: attached");
3755 }
3756 ep = (struct direct *)
3757 ((char *)bp->b_data + dap->da_offset);
3758 ep->d_ino = dap->da_newinum;
3759 dap->da_state &= ~UNDONE;
3760 dap->da_state |= ATTACHED;
3761 chgs = 1;
3762 /*
3763 * If the inode referenced by the directory has
3764 * been written out, then the dependency can be
3765 * moved to the pending list.
3766 */
3767 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
3768 LIST_REMOVE(dap, da_pdlist);
3769 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap,
3770 da_pdlist);
3771 }
3772 }
3773 }
3774 /*
3775 * If there were any rollbacks in the directory, then it must be
3776 * marked dirty so that it will eventually get written back in
3777 * its correct form.
3778 */
3779 if (chgs) {
3780 if ((bp->b_flags & B_DELWRI) == 0)
3781 stat_dir_entry++;
3782 bdirty(bp);
3783 }
3784 /*
3785 * If no dependencies remain, the pagedep will be freed.
3786 * Otherwise it will remain to update the page before it
3787 * is written back to disk.
3788 */
3789 if (LIST_FIRST(&pagedep->pd_pendinghd) == 0) {
3790 for (i = 0; i < DAHASHSZ; i++)
3791 if (LIST_FIRST(&pagedep->pd_diraddhd[i]) != NULL)
3792 break;
3793 if (i == DAHASHSZ) {
3794 LIST_REMOVE(pagedep, pd_hash);
3795 WORKITEM_FREE(pagedep, D_PAGEDEP);
3796 return (0);
3797 }
3798 }
3799 return (1);
3800}
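/*
 * Illustrative sketch: an uncommitted directory entry was written
 * with its inode number rolled back to zero; the restore above just
 * stores the real number back at the entry's offset in the page.
 * Hypothetical standalone form:
 */
#if 0	/* example only */
static void
restore_dirent_example(struct buf *bp, struct diradd *dap)
{
	struct direct *ep;

	ep = (struct direct *)((char *)bp->b_data + dap->da_offset);
	ep->d_ino = dap->da_newinum;
}
#endif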
3801
3802/*
3803 * Writing back in-core inode structures.
3804 *
f719c866 3805 * The filesystem only accesses an inode's contents when it occupies an
3806 * "in-core" inode structure. These "in-core" structures are separate from
3807 * the page frames used to cache inode blocks. Only the latter are
3808 * transferred to/from the disk. So, when the updated contents of the
3809 * "in-core" inode structure are copied to the corresponding in-memory inode
3810 * block, the dependencies are also transferred. The following procedure is
3811 * called when copying a dirty "in-core" inode to a cached inode block.
3812 */
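/*
 * Illustrative sketch (field names are assumptions from the classic
 * UFS in-core inode layout): locating an inode's slot within a
 * cached inode block uses ino_to_fsbo(), as in
 * handle_written_inodeblock above; copying the in-core dinode into
 * that slot is the moment the dependencies transfer.
 */
#if 0	/* example only */
	dp = (struct dinode *)bp->b_data +
	    ino_to_fsbo(fs, ip->i_number);
	*dp = ip->i_din;	/* dependencies move with the copy */
#endif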
3813
3814/*
3815 * Called when an inode is loaded from disk. If the effective link count
3816 * differed from the actual link count when it was last flushed, then we
3817 * need to ensure that the correct effective link count is put back.
3818 */
3819void
3820softdep_load_inodeblock(ip)
3821 struct inode *ip; /* the "in_core" copy of the inode */
3822{
3823 struct inodedep *inodedep;
3824