hammer2 - Change from splay -> red-black tree
[dragonfly.git] / sys / vfs / hammer2 / hammer2_chain.c
1 /*
2  * Copyright (c) 2011-2012 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 /*
36  * This subsystem handles direct and indirect block searches, recursions,
37  * creation, and deletion.  Chains of blockrefs are tracked and modifications
38  * are flagged for propagation... eventually all the way back to the volume
39  * header.
40  */
41
42 #include <sys/cdefs.h>
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/types.h>
46 #include <sys/lock.h>
47 #include <sys/uuid.h>
48
49 #include "hammer2.h"
50
51 static int hammer2_indirect_optimize;   /* XXX SYSCTL */
52
53 static hammer2_chain_t *hammer2_chain_create_indirect(
54                         hammer2_mount_t *hmp, hammer2_chain_t *parent,
55                         hammer2_key_t key, int keybits);
56
57 /*
58  * We use a red-black tree to guarantee safe lookups under shared locks.
59  */
60 RB_GENERATE(hammer2_chain_tree, hammer2_chain, rbnode, hammer2_chain_cmp);
61
62 int
63 hammer2_chain_cmp(hammer2_chain_t *chain1, hammer2_chain_t *chain2)
64 {
65         return(chain2->index - chain1->index);
66 }
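/*
 * For reference, a minimal stand-alone sketch of the <sys/tree.h>
 * red-black tree pattern used above (RB_ENTRY linkage embedded in the
 * structure, a comparison function keyed on an integer index, and a
 * key-only lookup via a dummy node, as hammer2_chain_find() does).  The
 * "example_*" names are hypothetical and the block is guarded out; it is
 * not part of hammer2.
 */
#if 0
#include <sys/tree.h>
#include <stdlib.h>
#include <stdio.h>

struct example_node {
	RB_ENTRY(example_node) entry;		/* embedded RB linkage */
	int index;				/* sort key */
};

static int
example_cmp(struct example_node *n1, struct example_node *n2)
{
	return (n2->index - n1->index);		/* same convention as above */
}

RB_HEAD(example_tree, example_node);
RB_GENERATE(example_tree, example_node, entry, example_cmp);

static void
example(void)
{
	struct example_tree head = RB_INITIALIZER(&head);
	struct example_node *np;
	struct example_node dummy;
	int i;

	for (i = 0; i < 4; ++i) {
		np = calloc(1, sizeof(*np));
		np->index = i;
		RB_INSERT(example_tree, &head, np);
	}
	dummy.index = 2;			/* key-only lookup */
	np = RB_FIND(example_tree, &head, &dummy);
	if (np)
		printf("found index %d\n", np->index);
}
#endif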
67
68 /*
69  * Recursively mark the parent chain elements so flushes can find
70  * modified elements.  Stop when we hit a chain already flagged
71  * SUBMODIFIED, but ignore the SUBMODIFIED bit that might be set
72  * in chain itself.
73  *
74  * SUBMODIFIED is not set on the chain passed in.
75  *
76  * XXX a rename of the parent can create an SMP race
77  */
78 static void
79 hammer2_chain_parent_setsubmod(hammer2_mount_t *hmp, hammer2_chain_t *chain)
80 {
81         hammer2_chain_t *parent;
82
83         parent = chain->parent;
84         while (parent && (parent->flags & HAMMER2_CHAIN_SUBMODIFIED) == 0) {
85                 atomic_set_int(&parent->flags, HAMMER2_CHAIN_SUBMODIFIED);
86                 parent = parent->parent;
87         }
88 }
89
90 /*
91  * Allocate a new disconnected chain element representing the specified
92  * bref.  The chain element is locked exclusively and refs is set to 1.
93  *
94  * This essentially allocates a system memory structure representing one
95  * of the media structure types, including inodes.
96  */
97 hammer2_chain_t *
98 hammer2_chain_alloc(hammer2_mount_t *hmp, hammer2_blockref_t *bref)
99 {
100         hammer2_chain_t *chain;
101         hammer2_inode_t *ip;
102         hammer2_indblock_t *np;
103         hammer2_data_t *dp;
104         u_int bytes = 1U << (int)(bref->data_off & HAMMER2_OFF_MASK_RADIX);
105
106         /*
107          * Construct the appropriate system structure.
108          */
109         switch(bref->type) {
110         case HAMMER2_BREF_TYPE_INODE:
111                 ip = kmalloc(sizeof(*ip), hmp->minode, M_WAITOK | M_ZERO);
112                 chain = &ip->chain;
113                 chain->u.ip = ip;
114                 ip->hmp = hmp;
115                 break;
116         case HAMMER2_BREF_TYPE_INDIRECT:
117                 np = kmalloc(sizeof(*np), hmp->mchain, M_WAITOK | M_ZERO);
118                 chain = &np->chain;
119                 chain->u.np = np;
120                 break;
121         case HAMMER2_BREF_TYPE_DATA:
122                 dp = kmalloc(sizeof(*dp), hmp->mchain, M_WAITOK | M_ZERO);
123                 chain = &dp->chain;
124                 chain->u.dp = dp;
125                 break;
126         case HAMMER2_BREF_TYPE_VOLUME:
127                 chain = NULL;
128                 panic("hammer2_chain_alloc volume type illegal for op");
129         default:
130                 chain = NULL;
131                 panic("hammer2_chain_alloc: unrecognized blockref type: %d",
132                       bref->type);
133         }
134
135         /*
136          * Only set bref_flush if the bref has a real media offset, otherwise
137          * the caller has to wait for the chain to be modified/block-allocated
138          * before a blockref can be synchronized with its (future) parent.
139          */
140         chain->bref = *bref;
141         if (bref->data_off & ~HAMMER2_OFF_MASK_RADIX)
142                 chain->bref_flush = *bref;
143         chain->index = -1;              /* not yet assigned */
144         chain->refs = 1;
145         chain->bytes = bytes;
146         ccms_cst_init(&chain->cst, chain);
147         ccms_thread_lock(&chain->cst, CCMS_STATE_EXCLUSIVE);
148
149         return (chain);
150 }
151
152 /*
153  * Free a disconnected chain element
154  */
155 void
156 hammer2_chain_free(hammer2_mount_t *hmp, hammer2_chain_t *chain)
157 {
158         void *mem;
159
160         if (chain->bref.type == HAMMER2_BREF_TYPE_INODE ||
161             chain->bref.type == HAMMER2_BREF_TYPE_VOLUME) {
162                 chain->data = NULL;
163         }
164
165         KKASSERT(chain->bp == NULL);
166         KKASSERT(chain->data == NULL);
167         KKASSERT(chain->bref.type != HAMMER2_BREF_TYPE_INODE ||
168                  chain->u.ip->vp == NULL);
169
170         if ((mem = chain->u.mem) != NULL) {
171                 chain->u.mem = NULL;
172                 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE)
173                         kfree(mem, hmp->minode);
174                 else
175                         kfree(mem, hmp->mchain);
176         }
177 }
178
179 /*
180  * Add a reference to a chain element (for shared access).  The chain
181  * element must already have at least 1 ref controlled by the caller.
182  */
183 void
184 hammer2_chain_ref(hammer2_mount_t *hmp, hammer2_chain_t *chain)
185 {
186         KKASSERT(chain->refs > 0);
187         atomic_add_int(&chain->refs, 1);
188 }
189
190 /*
191  * Drop the callers reference to the chain element.  If the ref count
192  * reaches zero the chain element and its related structure (typically an
193  * inode or indirect block) will be freed and the parent will be
194  * recursively dropped.
195  *
196  * MOVED and MODIFIED elements hold additional references so it should not
197  * be possible for the count on a modified element to drop to 0.
198  *
199  * The chain element must NOT be locked by the caller.
200  *
201  * The parent might or might not be locked by the caller but if so it
202  * will also be referenced so we shouldn't recurse upward.
203  */
204 void
205 hammer2_chain_drop(hammer2_mount_t *hmp, hammer2_chain_t *chain)
206 {
207         hammer2_chain_t *parent;
208         hammer2_inode_t *ip;
209         u_int refs;
210
211         while (chain) {
212                 refs = chain->refs;
213                 cpu_ccfence();
214                 KKASSERT(refs > 0);
215                 if (refs == 1) {
216                         KKASSERT(chain != &hmp->vchain);
217                         parent = chain->parent;
218                         if (parent) {
219                                 ccms_thread_lock(&parent->cst,
220                                                 CCMS_STATE_EXCLUSIVE);
221                         }
222                         if (atomic_cmpset_int(&chain->refs, 1, 0)) {
223                                 /*
224                                  * Succeeded, recurse and drop parent.
225                                  * These chain elements should be synchronized
226                                  * so no delta data or inode count updates
227                                  * should be needed.
228                                  */
229                                 KKASSERT((chain->flags &
230                                           (HAMMER2_CHAIN_MOVED |
231                                            HAMMER2_CHAIN_MODIFIED)) == 0);
232
233                                 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE)
234                                         ip = chain->u.ip;
235                                 else
236                                         ip = NULL;
237
238                                 /*
239                                  * Delete interlock
240                                  */
241                                 if (!(chain->flags & HAMMER2_CHAIN_DELETED)) {
242                                         /*
243                                          * Disconnect the chain and clear
244                                          * pip if it was an inode.
245                                          */
246                                         RB_REMOVE(hammer2_chain_tree,
247                                                   &parent->rbhead, chain);
248                                         atomic_set_int(&chain->flags,
249                                                        HAMMER2_CHAIN_DELETED);
250                                         if (ip)
251                                                 ip->pip = NULL;
252                                         /* parent refs dropped via recursion */
253                                 }
254
255                                 /*
256                                  * When cleaning out a hammer2_inode we must
257                                  * also clean out the related ccms_inode.
258                                  */
259                                 if (ip)
260                                         ccms_cst_uninit(&ip->topo_cst);
261                                 chain->parent = NULL;
262                                 if (parent)
263                                         ccms_thread_unlock(&parent->cst);
264                                 hammer2_chain_free(hmp, chain);
265                                 chain = parent;
266                                 /* recurse on parent */
267                         } else {
268                                 if (parent)
269                                         ccms_thread_unlock(&parent->cst);
270                                 /* retry the same chain */
271                         }
272                 } else {
273                         if (atomic_cmpset_int(&chain->refs, refs, refs - 1)) {
274                                 /*
275                                  * Succeeded, count did not reach zero so
276                                  * cut out of the loop.
277                                  */
278                                 break;
279                         }
280                         /* retry the same chain */
281                 }
282         }
283 }
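/*
 * The 1->0 transition above is the classic compare-and-set release loop:
 * the final reference must win an atomic race before tearing the object
 * down, while any other reference simply decrements.  Below is a tiny
 * userland analogue of that pattern using C11 atomics; the names are
 * hypothetical and the block is guarded out for reference only.
 */
#if 0
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int example_refs = 2;

static void
example_drop(void)
{
	unsigned int refs;

	for (;;) {
		refs = atomic_load(&example_refs);
		if (refs == 1) {
			/* last ref: winning the 1->0 race frees the object */
			if (atomic_compare_exchange_strong(&example_refs,
							   &refs, 0)) {
				printf("final reference dropped\n");
				return;
			}
		} else {
			/* not the last ref: just decrement and return */
			if (atomic_compare_exchange_strong(&example_refs,
							   &refs, refs - 1))
				return;
		}
		/* lost a race against another ref/drop, retry */
	}
}
#endif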
284
285 /*
286  * Ref and lock a chain element, acquiring its data with I/O if necessary,
287  * and specify how you would like the data to be resolved.
288  *
289  * Returns 0 on success or an error code if the data could not be acquired.
290  * The chain element is locked either way.
291  *
292  * The lock is allowed to recurse; multiple locking ops will aggregate
293  * the requested resolve types.  Once data is assigned it will not be
294  * removed until the last unlock.
295  *
296  * HAMMER2_RESOLVE_NEVER - Do not resolve the data element.
297  *                         (typically used to avoid device/logical buffer
298  *                          aliasing for data)
299  *
300  * HAMMER2_RESOLVE_MAYBE - Do not resolve data elements for chains in
301  *                         the INITIAL-create state (indirect blocks only).
302  *
303  *                         Do not resolve data elements for DATA chains.
304  *                         (typically used to avoid device/logical buffer
305  *                          aliasing for data)
306  *
307  * HAMMER2_RESOLVE_ALWAYS - Always resolve the data element.
308  *
309  *
310  * NOTE: Embedded elements (volume header, inodes) are always resolved
311  *       regardless.
312  *
313  * NOTE: Specifying HAMMER2_RESOLVE_ALWAYS on a newly-created non-embedded
314  *       element will instantiate and zero its buffer, and flush it on
315  *       release.
316  *
317  * NOTE: (data) elements are normally locked RESOLVE_NEVER or RESOLVE_MAYBE
318  *       so as not to instantiate a device buffer, which could alias against
319  *       a logical file buffer.  However, if ALWAYS is specified the
320  *       device buffer will be instantiated anyway.
321  */
322 int
323 hammer2_chain_lock(hammer2_mount_t *hmp, hammer2_chain_t *chain, int how)
324 {
325         hammer2_blockref_t *bref;
326         hammer2_off_t pbase;
327         hammer2_off_t peof;
328         size_t boff;
329         size_t bbytes;
330         int error;
331         char *bdata;
332
333         /*
334          * Lock the element.  Under certain conditions this might end up
335          * being a recursive lock.
336          */
337         KKASSERT(chain->refs > 0);
338         atomic_add_int(&chain->refs, 1);
339         ccms_thread_lock(&chain->cst, CCMS_STATE_EXCLUSIVE);
340
341         /*
342          * If we already have a valid data pointer no further action is
343          * necessary.
344          */
345         if (chain->data)
346                 return (0);
347
348         /*
349          * Do we have to resolve the data?
350          */
351         switch(how) {
352         case HAMMER2_RESOLVE_NEVER:
353                 return(0);
354         case HAMMER2_RESOLVE_MAYBE:
355                 if (chain->flags & HAMMER2_CHAIN_INITIAL)
356                         return(0);
357                 if (chain->bref.type == HAMMER2_BREF_TYPE_DATA)
358                         return(0);
359                 /* fall through */
360         case HAMMER2_RESOLVE_ALWAYS:
361                 break;
362         }
363
364         /*
365          * We must resolve to a device buffer, either by issuing I/O or
366          * by creating a zero-fill element.  We do not mark the buffer
367          * dirty when creating a zero-fill element (the hammer2_chain_modify()
368          * API must still be used to do that).
369          *
370          * The device buffer is variable-sized in powers of 2 down
371          * to HAMMER2_MINALLOCSIZE (typically 1K).  A 64K physical storage
372          * chunk always contains buffers of the same size. (XXX)
373          *
374          * The minimum physical IO size may be larger than the variable
375          * block size.
376          */
377         bref = &chain->bref;
378
379         if ((bbytes = chain->bytes) < HAMMER2_MINIOSIZE)
380                 bbytes = HAMMER2_MINIOSIZE;
381         pbase = bref->data_off & ~(hammer2_off_t)(bbytes - 1);
382         peof = (pbase + HAMMER2_PBUFSIZE64) & ~HAMMER2_PBUFMASK64;
383         boff = bref->data_off & HAMMER2_OFF_MASK & (bbytes - 1);
384         KKASSERT(pbase != 0);
385
386         /*
387          * The getblk() optimization can only be used on newly created
388          * elements if the physical block size matches the request.
389          */
390         if ((chain->flags & HAMMER2_CHAIN_INITIAL) &&
391             chain->bytes == bbytes) {
392                 chain->bp = getblk(hmp->devvp, pbase, bbytes, 0, 0);
393                 error = 0;
394         } else if (hammer2_cluster_enable) {
395                 error = cluster_read(hmp->devvp, peof, pbase, bbytes,
396                                      HAMMER2_PBUFSIZE, HAMMER2_PBUFSIZE,
397                                      &chain->bp);
398         } else {
399                 error = bread(hmp->devvp, pbase, bbytes, &chain->bp);
400         }
401
402         if (error) {
403                 kprintf("hammer2_chain_lock: I/O error %016jx: %d\n",
404                         (intmax_t)pbase, error);
405                 bqrelse(chain->bp);
406                 chain->bp = NULL;
407                 return (error);
408         }
409
410         /*
411          * Zero the data area if the chain is in the INITIAL-create state.
412          * Mark the buffer for bdwrite().
413          */
414         bdata = (char *)chain->bp->b_data + boff;
415         if (chain->flags & HAMMER2_CHAIN_INITIAL) {
416                 bzero(bdata, chain->bytes);
417                 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
418         }
419
420         /*
421          * Setup the data pointer, either pointing it to an embedded data
422          * structure and copying the data from the buffer, or pointing it
423          * into the buffer.
424          *
425          * The buffer is not retained when copying to an embedded data
426          * structure in order to avoid potential deadlocks or recursions
427          * on the same physical buffer.
428          */
429         switch (bref->type) {
430         case HAMMER2_BREF_TYPE_VOLUME:
431                 /*
432                  * Copy data from bp to embedded buffer
433                  */
434                 panic("hammer2_chain_lock: called on unresolved volume header");
435 #if 0
436                 /* NOT YET */
437                 KKASSERT(pbase == 0);
438                 KKASSERT(chain->bytes == HAMMER2_PBUFSIZE);
439                 bcopy(bdata, &hmp->voldata, chain->bytes);
440                 chain->data = (void *)&hmp->voldata;
441                 bqrelse(chain->bp);
442                 chain->bp = NULL;
443 #endif
444                 break;
445         case HAMMER2_BREF_TYPE_INODE:
446                 /*
447                  * Copy data from bp to embedded buffer, do not retain the
448                  * device buffer.
449                  */
450                 bcopy(bdata, &chain->u.ip->ip_data, chain->bytes);
451                 chain->data = (void *)&chain->u.ip->ip_data;
452                 bqrelse(chain->bp);
453                 chain->bp = NULL;
454                 break;
455         case HAMMER2_BREF_TYPE_INDIRECT:
456         case HAMMER2_BREF_TYPE_DATA:
457         default:
458                 /*
459                  * Point data at the device buffer and leave bp intact.
460                  */
461                 chain->data = (void *)bdata;
462                 break;
463         }
464         return (0);
465 }
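/*
 * The buffer addressing math above relies on the low bits of
 * bref->data_off encoding the allocation size as a power-of-2 radix,
 * with the device buffer rounded up to the minimum I/O size and the
 * in-buffer offset recovered by masking.  A stand-alone illustration of
 * that arithmetic follows; the EX_* constants are assumptions standing
 * in for the HAMMER2_* equivalents and the block is guarded out.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define EX_OFF_MASK_RADIX	0x3FULL		/* low bits: size radix */
#define EX_OFF_MASK		(~EX_OFF_MASK_RADIX)
#define EX_MINIOSIZE		1024ULL		/* minimum device I/O size */

int
main(void)
{
	uint64_t data_off = 0x0000000000423A8AULL; /* hypothetical data_off */
	uint64_t bytes, bbytes, pbase, boff;

	/* low bits encode log2 of the allocation size (radix 10 -> 1KB) */
	bytes = 1ULL << (data_off & EX_OFF_MASK_RADIX);

	/* device buffers are at least EX_MINIOSIZE and power-of-2 aligned */
	bbytes = (bytes < EX_MINIOSIZE) ? EX_MINIOSIZE : bytes;
	pbase = data_off & ~(bbytes - 1);
	boff = data_off & EX_OFF_MASK & (bbytes - 1);

	printf("bytes=%ju bbytes=%ju pbase=%#jx boff=%#jx\n",
	       (uintmax_t)bytes, (uintmax_t)bbytes,
	       (uintmax_t)pbase, (uintmax_t)boff);
	return (0);
}
#endif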
466
467 /*
468  * Unlock and deref a chain element.
469  *
470  * On the last lock release any non-embedded data (chain->bp) will be
471  * retired.
472  */
473 void
474 hammer2_chain_unlock(hammer2_mount_t *hmp, hammer2_chain_t *chain)
475 {
476         long *counterp;
477
478         /*
479          * Release the CST lock but with a special 1->0 transition case.
480          *
481          * Returns non-zero if lock references remain.  When zero is
482          * returned the last lock reference is retained and any shared
483          * lock is upgraded to an exclusive lock for final disposition.
484          */
485         if (ccms_thread_unlock_zero(&chain->cst)) {
486                 KKASSERT(chain->refs > 1);
487                 atomic_add_int(&chain->refs, -1);
488                 return;
489         }
490
491         /*
492          * Shortcut the case if the data is embedded or not resolved.
493          *
494          * Do NOT null-out pointers to embedded data (e.g. inode).
495          *
496          * The DIRTYBP flag is non-applicable in this situation and can
497          * be cleared to keep the flags state clean.
498          */
499         if (chain->bp == NULL) {
500                 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
501                 ccms_thread_unlock(&chain->cst);
502                 hammer2_chain_drop(hmp, chain);
503                 return;
504         }
505
506         /*
507          * Statistics
508          */
509         if ((chain->flags & HAMMER2_CHAIN_DIRTYBP) == 0) {
510                 ;
511         } else if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
512                 switch(chain->bref.type) {
513                 case HAMMER2_BREF_TYPE_DATA:
514                         counterp = &hammer2_ioa_file_write;
515                         break;
516                 case HAMMER2_BREF_TYPE_INODE:
517                         counterp = &hammer2_ioa_meta_write;
518                         break;
519                 case HAMMER2_BREF_TYPE_INDIRECT:
520                         counterp = &hammer2_ioa_indr_write;
521                         break;
522                 default:
523                         counterp = &hammer2_ioa_volu_write;
524                         break;
525                 }
526                 ++*counterp;
527         } else {
528                 switch(chain->bref.type) {
529                 case HAMMER2_BREF_TYPE_DATA:
530                         counterp = &hammer2_iod_file_write;
531                         break;
532                 case HAMMER2_BREF_TYPE_INODE:
533                         counterp = &hammer2_iod_meta_write;
534                         break;
535                 case HAMMER2_BREF_TYPE_INDIRECT:
536                         counterp = &hammer2_iod_indr_write;
537                         break;
538                 default:
539                         counterp = &hammer2_iod_volu_write;
540                         break;
541                 }
542                 ++*counterp;
543         }
544
545         /*
546          * Clean out the bp.
547          *
548          * If a device buffer was used for data be sure to destroy the
549          * buffer when we are done to avoid aliases (XXX what about the
550          * underlying VM pages?).
551          */
552         if (chain->bref.type == HAMMER2_BREF_TYPE_DATA)
553                 chain->bp->b_flags |= B_RELBUF;
554
555         /*
556          * The DIRTYBP flag tracks whether we have to bdwrite() the buffer
557          * or not.  The flag will get re-set when chain_modify() is called,
558          * even if MODIFIED is already set, allowing the OS to retire the
559  * buffer independent of a hammer2 flush.
560          */
561         chain->data = NULL;
562         if (chain->flags & HAMMER2_CHAIN_DIRTYBP) {
563                 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
564                 if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
565                         atomic_clear_int(&chain->flags,
566                                          HAMMER2_CHAIN_IOFLUSH);
567                         chain->bp->b_flags |= B_RELBUF;
568                         cluster_awrite(chain->bp);
569                 } else {
570                         chain->bp->b_flags |= B_CLUSTEROK;
571                         bdwrite(chain->bp);
572                 }
573         } else {
574                 if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
575                         atomic_clear_int(&chain->flags,
576                                          HAMMER2_CHAIN_IOFLUSH);
577                         chain->bp->b_flags |= B_RELBUF;
578                         brelse(chain->bp);
579                 } else {
580                         /* bp might still be dirty */
581                         bqrelse(chain->bp);
582                 }
583         }
584         chain->bp = NULL;
585         ccms_thread_unlock(&chain->cst);
586         hammer2_chain_drop(hmp, chain);
587 }
588
589 /*
590  * Resize the chain's physical storage allocation.  The storage is
591  * reallocated whether the new size is smaller or larger, because blocks
592  * of different sizes may reside in different storage regions.
593  *
594  * Must be passed a locked chain.
595  *
596  * If you want the resize code to copy the data to the new block then the
597  * caller should lock the chain RESOLVE_MAYBE or RESOLVE_ALWAYS.
598  *
599  * If the caller already holds a logical buffer containing the data and
600  * intends to bdwrite() that buffer resolve with RESOLVE_NEVER.  The resize
601  * operation will then not copy the data.
602  *
603  * This function is mostly used with DATA blocks locked RESOLVE_NEVER in order
604  * to avoid instantiating a device buffer that conflicts with the vnode
605  * data buffer.
606  *
607  * XXX flags currently ignored, uses chain->bp to detect data/no-data.
608  */
609 void
610 hammer2_chain_resize(hammer2_inode_t *ip, hammer2_chain_t *chain,
611                      int nradix, int flags)
612 {
613         hammer2_mount_t *hmp = ip->hmp;
614         struct buf *nbp;
615         hammer2_off_t pbase;
616         size_t obytes;
617         size_t nbytes;
618         size_t bbytes;
619         int boff;
620         char *bdata;
621         int error;
622
623         /*
624          * Only data and indirect blocks can be resized for now
625          */
626         KKASSERT(chain != &hmp->vchain);
627         KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_DATA ||
628                  chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT);
629
630         /*
631          * Nothing to do if the element is already the proper size
632          */
633         obytes = chain->bytes;
634         nbytes = 1U << nradix;
635         if (obytes == nbytes)
636                 return;
637
638         /*
639          * Set MODIFIED and add a chain ref to prevent destruction.  Both
640          * modified flags share the same ref.
641          *
642          * If the chain is already marked MODIFIED then we can safely
643          * return the previous allocation to the pool without having to
644          * worry about snapshots.
645          */
646         if ((chain->flags & HAMMER2_CHAIN_MODIFIED) == 0) {
647                 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MODIFIED |
648                                               HAMMER2_CHAIN_MODIFY_TID);
649                 hammer2_chain_ref(hmp, chain);
650         } else {
651                 hammer2_freemap_free(hmp, chain->bref.data_off,
652                                      chain->bref.type);
653         }
654
655         /*
656          * Relocate the block, even if making it smaller (because different
657          * block sizes may be in different regions).
658          */
659         chain->bref.data_off = hammer2_freemap_alloc(hmp, chain->bref.type,
660                                                      nbytes);
661         chain->bytes = nbytes;
662         ip->delta_dcount += (ssize_t)(nbytes - obytes); /* XXX atomic */
663
664         /*
665          * The device buffer may be larger than the allocation size.
666          */
667         if ((bbytes = chain->bytes) < HAMMER2_MINIOSIZE)
668                 bbytes = HAMMER2_MINIOSIZE;
669         pbase = chain->bref.data_off & ~(hammer2_off_t)(bbytes - 1);
670         boff = chain->bref.data_off & HAMMER2_OFF_MASK & (bbytes - 1);
671
672         /*
673          * Only copy the data if resolved, otherwise the caller is
674          * responsible.
675          */
676         if (chain->bp) {
677                 KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
678                          chain->bref.type == HAMMER2_BREF_TYPE_DATA);
679                 KKASSERT(chain != &hmp->vchain);        /* safety */
680
681                 /*
682                  * The getblk() optimization can only be used if the
683                  * physical block size matches the request.
684                  */
685                 if (nbytes == bbytes) {
686                         nbp = getblk(hmp->devvp, pbase, bbytes, 0, 0);
687                         error = 0;
688                 } else {
689                         error = bread(hmp->devvp, pbase, bbytes, &nbp);
690                         KKASSERT(error == 0);
691                 }
692                 bdata = (char *)nbp->b_data + boff;
693
694                 if (nbytes < obytes) {
695                         bcopy(chain->data, bdata, nbytes);
696                 } else {
697                         bcopy(chain->data, bdata, obytes);
698                         bzero(bdata + obytes, nbytes - obytes);
699                 }
700
701                 /*
702                  * NOTE: The INITIAL state of the chain is left intact.
703                  *       We depend on hammer2_chain_modify() to do the
704                  *       right thing.
705                  *
706                  * NOTE: We set B_NOCACHE to throw away the previous bp and
707                  *       any VM backing store, even if it was dirty.
708                  *       Otherwise we run the risk of a logical/device
709                  *       conflict on reallocation.
710                  */
711                 chain->bp->b_flags |= B_RELBUF | B_NOCACHE;
712                 brelse(chain->bp);
713                 chain->bp = nbp;
714                 chain->data = (void *)bdata;
715                 hammer2_chain_modify(hmp, chain, 0);
716         }
717
718         /*
719          * Make sure the chain is marked MOVED and SUBMOD is set in the
720          * parent(s) so the adjustments are picked up by flush.
721          */
722         if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
723                 hammer2_chain_ref(hmp, chain);
724                 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
725         }
726         hammer2_chain_parent_setsubmod(hmp, chain);
727 }
728
729 /*
730  * Convert a locked chain that was retrieved read-only to read-write.
731  *
732  * If not already marked modified a new physical block will be allocated
733  * and assigned to the bref.
734  *
735  * Non-data blocks - The chain should be locked to at least the RESOLVE_MAYBE
736  *                   level or the COW operation will not work.
737  *
738  * Data blocks     - The chain is usually locked RESOLVE_NEVER so as not to
739  *                   run the data through the device buffers.
740  */
741 void
742 hammer2_chain_modify(hammer2_mount_t *hmp, hammer2_chain_t *chain, int flags)
743 {
744         struct buf *nbp;
745         int error;
746         hammer2_off_t pbase;
747         size_t bbytes;
748         size_t boff;
749         void *bdata;
750
751         /*
752          * Tells flush that modify_tid must be updated, otherwise only
753          * mirror_tid is updated.  This is the default.
754          */
755         if ((flags & HAMMER2_MODIFY_NO_MODIFY_TID) == 0)
756                 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MODIFY_TID);
757
758         /*
759          * If the chain is already marked MODIFIED we can just return.
760          *
761          * However, it is possible that a prior lock/modify sequence
762          * retired the buffer.  During this lock/modify sequence MODIFIED
763          * may still be set but the buffer could wind up clean.  Since
764          * the caller is going to modify the buffer further we have to
765          * be sure that DIRTYBP is set again.
766          */
767         if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
768                 if ((flags & HAMMER2_MODIFY_OPTDATA) == 0 &&
769                     chain->bp == NULL) {
770                         goto skip1;
771                 }
772                 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
773                 return;
774         }
775
776         /*
777          * Set MODIFIED and add a chain ref to prevent destruction.  Both
778          * modified flags share the same ref.
779          */
780         atomic_set_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
781         hammer2_chain_ref(hmp, chain);
782
783         /*
784          * We must allocate the copy-on-write block.
785          *
786          * If the data is embedded no other action is required.
787          *
788          * If the data is not embedded we acquire and clear the
789          * new block.  If chain->data is not NULL we then do the
790          * copy-on-write.  chain->data will then be repointed to the new
791          * buffer and the old buffer will be released.
792          *
793          * For newly created elements with no prior allocation we go
794          * through the copy-on-write steps except without the copying part.
795          */
796         if (chain != &hmp->vchain) {
797                 if ((hammer2_debug & 0x0001) &&
798                     (chain->bref.data_off & HAMMER2_OFF_MASK)) {
799                         kprintf("Replace %d\n", chain->bytes);
800                 }
801                 chain->bref.data_off =
802                         hammer2_freemap_alloc(hmp, chain->bref.type,
803                                               chain->bytes);
804                 /* XXX failed allocation */
805         }
806
807         /*
808          * If data instantiation is optional and the chain has no current
809          * data association (typical for DATA and newly-created INDIRECT
810          * elements), don't instantiate the buffer now.
811          */
812         if ((flags & HAMMER2_MODIFY_OPTDATA) && chain->bp == NULL)
813                 goto skip2;
814
815 skip1:
816         /*
817          * Setting the DIRTYBP flag will cause the buffer to be dirtied or
818          * written-out on unlock.  This bit is independent of the MODIFIED
819          * bit because the chain may still need meta-data adjustments done
820          * by virtue of MODIFIED for its parent, and the buffer can be
821          * flushed out (possibly multiple times) by the OS before that.
822          *
823          * Clearing the INITIAL flag (for indirect blocks) indicates that
824          * a zero-fill buffer has been instantiated.
825          */
826         atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
827         atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
828
829         /*
830          * We currently should never instantiate a device buffer for a
831          * data chain.
832          */
833         KKASSERT(chain->bref.type != HAMMER2_BREF_TYPE_DATA);
834
835         /*
836          * Execute COW operation
837          */
838         switch(chain->bref.type) {
839         case HAMMER2_BREF_TYPE_VOLUME:
840         case HAMMER2_BREF_TYPE_INODE:
841                 /*
842                  * The data is embedded, no copy-on-write operation is
843                  * needed.
844                  */
845                 KKASSERT(chain->bp == NULL);
846                 break;
847         case HAMMER2_BREF_TYPE_DATA:
848         case HAMMER2_BREF_TYPE_INDIRECT:
849                 /*
850                  * Perform the copy-on-write operation
851                  */
852                 KKASSERT(chain != &hmp->vchain);        /* safety */
853                 /*
854                  * The device buffer may be larger than the allocation size.
855                  */
856                 if ((bbytes = chain->bytes) < HAMMER2_MINIOSIZE)
857                         bbytes = HAMMER2_MINIOSIZE;
858                 pbase = chain->bref.data_off & ~(hammer2_off_t)(bbytes - 1);
859                 boff = chain->bref.data_off & HAMMER2_OFF_MASK & (bbytes - 1);
860
861                 /*
862                  * The getblk() optimization can only be used if the
863                  * physical block size matches the request.
864                  */
865                 if (chain->bytes == bbytes) {
866                         nbp = getblk(hmp->devvp, pbase, bbytes, 0, 0);
867                         error = 0;
868                 } else {
869                         error = bread(hmp->devvp, pbase, bbytes, &nbp);
870                         KKASSERT(error == 0);
871                 }
872                 bdata = (char *)nbp->b_data + boff;
873
874                 /*
875                  * Copy or zero-fill on write depending on whether
876                  * chain->data exists or not.
877                  */
878                 if (chain->data) {
879                         bcopy(chain->data, bdata, chain->bytes);
880                         KKASSERT(chain->bp != NULL);
881                 } else {
882                         bzero(bdata, chain->bytes);
883                 }
884                 if (chain->bp) {
885                         chain->bp->b_flags |= B_RELBUF;
886                         brelse(chain->bp);
887                 }
888                 chain->bp = nbp;
889                 chain->data = bdata;
890                 break;
891         default:
892                 panic("hammer2_chain_modify: illegal non-embedded type %d",
893                       chain->bref.type);
894                 break;
895
896         }
897 skip2:
898         if ((flags & HAMMER2_MODIFY_NOSUB) == 0)
899                 hammer2_chain_parent_setsubmod(hmp, chain);
900 }
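/*
 * The core of the copy-on-write step above is: acquire fresh backing
 * store, then either copy the old contents into it or zero-fill it if
 * the element was newly created and never had data resolved.  A minimal
 * userland analogue of just that copy-or-zero decision is sketched
 * below; cow_block() is a hypothetical helper, not a hammer2 function,
 * and the block is guarded out.
 */
#if 0
#include <stdlib.h>
#include <string.h>

static void *
cow_block(const void *olddata, size_t bytes)
{
	void *newdata = malloc(bytes);

	if (newdata == NULL)
		return (NULL);
	if (olddata)
		memcpy(newdata, olddata, bytes);	/* copy-on-write */
	else
		memset(newdata, 0, bytes);		/* zero-fill create */
	return (newdata);
}
#endif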
901
902 /*
903  * Mark the volume as having been modified.  This short-cut version
904  * does not have to lock the volume's chain, which allows the ioctl
905  * code to make adjustments to connections without deadlocking.
906  */
907 void
908 hammer2_modify_volume(hammer2_mount_t *hmp)
909 {
910         hammer2_voldata_lock(hmp);
911         atomic_set_int(&hmp->vchain.flags, HAMMER2_CHAIN_MODIFIED_AUX);
912         hammer2_voldata_unlock(hmp);
913 }
914
915 /*
916  * Locate an in-memory chain.  The parent must be locked.  The in-memory
917  * chain is returned or NULL if no in-memory chain is present.
918  *
919  * NOTE: A chain on-media might exist for this index when NULL is returned.
920  */
921 hammer2_chain_t *
922 hammer2_chain_find(hammer2_mount_t *hmp, hammer2_chain_t *parent, int index)
923 {
924         hammer2_chain_t dummy;
925         hammer2_chain_t *chain;
926
927         dummy.index = index;
928         chain = RB_FIND(hammer2_chain_tree, &parent->rbhead, &dummy);
929         return (chain);
930 }
931
932 /*
933  * Return a locked chain structure with all associated data acquired.
934  *
935  * Caller must lock the parent on call, the returned child will be locked.
936  */
937 hammer2_chain_t *
938 hammer2_chain_get(hammer2_mount_t *hmp, hammer2_chain_t *parent,
939                   int index, int flags)
940 {
941         hammer2_blockref_t *bref;
942         hammer2_inode_t *ip;
943         hammer2_chain_t *chain;
944         hammer2_chain_t dummy;
945         int how;
946
947         /*
948          * Figure out how to lock.  MAYBE can be used to optimize
949          * the initial-create state for indirect blocks.
950          */
951         if (flags & (HAMMER2_LOOKUP_NODATA | HAMMER2_LOOKUP_NOLOCK))
952                 how = HAMMER2_RESOLVE_NEVER;
953         else
954                 how = HAMMER2_RESOLVE_MAYBE;
955
956         /*
957          * First see if we have a (possibly modified) chain element cached
958          * for this (parent, index).  Acquire the data if necessary.
959          *
960          * If chain->data is non-NULL the chain should already be marked
961          * modified.
962          */
963         dummy.index = index;
964         chain = RB_FIND(hammer2_chain_tree, &parent->rbhead, &dummy);
965         if (chain) {
966                 if (flags & HAMMER2_LOOKUP_NOLOCK)
967                         hammer2_chain_ref(hmp, chain);
968                 else
969                         hammer2_chain_lock(hmp, chain, how);
970                 return(chain);
971         }
972
973         /*
974          * The get function must always succeed, panic if there's no
975          * data to index.
976          */
977         if (parent->flags & HAMMER2_CHAIN_INITIAL) {
978                 panic("hammer2_chain_get: Missing bref(1)");
979                 /* NOT REACHED */
980         }
981
982         /*
983          * Otherwise lookup the bref and issue I/O (switch on the parent)
984          */
985         switch(parent->bref.type) {
986         case HAMMER2_BREF_TYPE_INODE:
987                 KKASSERT(index >= 0 && index < HAMMER2_SET_COUNT);
988                 bref = &parent->data->ipdata.u.blockset.blockref[index];
989                 break;
990         case HAMMER2_BREF_TYPE_INDIRECT:
991                 KKASSERT(parent->data != NULL);
992                 KKASSERT(index >= 0 &&
993                          index < parent->bytes / sizeof(hammer2_blockref_t));
994                 bref = &parent->data->npdata.blockref[index];
995                 break;
996         case HAMMER2_BREF_TYPE_VOLUME:
997                 KKASSERT(index >= 0 && index < HAMMER2_SET_COUNT);
998                 bref = &hmp->voldata.sroot_blockset.blockref[index];
999                 break;
1000         default:
1001                 bref = NULL;
1002                 panic("hammer2_chain_get: unrecognized blockref type: %d",
1003                       parent->bref.type);
1004         }
1005         if (bref->type == 0) {
1006                 panic("hammer2_chain_get: Missing bref(2)");
1007                 /* NOT REACHED */
1008         }
1009
1010         /*
1011          * Allocate a chain structure representing the existing media
1012          * entry.
1013          *
1014          * The locking operation we do later will issue I/O to read it.
1015          */
1016         chain = hammer2_chain_alloc(hmp, bref);
1017
1018         /*
1019          * Link the chain into its parent.  Caller is expected to hold an
1020          * exclusive lock on the parent.
1021          */
1022         chain->parent = parent;
1023         chain->index = index;
1024         if (RB_INSERT(hammer2_chain_tree, &parent->rbhead, chain))
1025                 panic("hammer2_chain_get: collision");
1026         KKASSERT(parent->refs > 0);
1027         atomic_add_int(&parent->refs, 1);       /* for RB tree entry */
1028
1029         /*
1030          * Additional linkage for inodes.  Reuse the parent pointer to
1031          * find the parent directory.
1032          *
1033          * The ccms_inode is initialized from its parent directory.  The
1034          * chain of ccms_inode's is seeded by the mount code.
1035          */
1036         if (bref->type == HAMMER2_BREF_TYPE_INODE) {
1037                 ip = chain->u.ip;
1038                 while (parent->bref.type == HAMMER2_BREF_TYPE_INDIRECT)
1039                         parent = parent->parent;
1040                 if (parent->bref.type == HAMMER2_BREF_TYPE_INODE) {
1041                         ip->pip = parent->u.ip;
1042                         ip->pmp = parent->u.ip->pmp;
1043                         ip->depth = parent->u.ip->depth + 1;
1044                         ccms_cst_init(&ip->topo_cst, &ip->chain);
1045                 }
1046         }
1047
1048         /*
1049          * Our new chain structure has already been referenced and locked
1050          * but the lock code handles the I/O so call it to resolve the data.
1051          * Then release one of our two exclusive locks.
1052          *
1053          * If NOLOCK is set the release will release the one-and-only lock.
1054          */
1055         if ((flags & HAMMER2_LOOKUP_NOLOCK) == 0) {
1056                 hammer2_chain_lock(hmp, chain, how);    /* recursive lock */
1057                 hammer2_chain_drop(hmp, chain);         /* excess ref */
1058         }
1059         ccms_thread_unlock(&chain->cst);                        /* from alloc */
1060
1061         return (chain);
1062 }
1063
1064 /*
1065  * Locate any key between key_beg and key_end inclusive.  (*parentp)
1066  * typically points to an inode but can also point to a related indirect
1067  * block and this function will recurse upwards and find the inode again.
1068  *
1069  * WARNING!  THIS DOES NOT RETURN KEYS IN LOGICAL KEY ORDER!  ANY KEY
1070  *           WITHIN THE RANGE CAN BE RETURNED.  HOWEVER, AN ITERATION
1071  *           WHICH PICKS UP WHERE WE LEFT OFF WILL CONTINUE THE SCAN.
1072  *
1073  * (*parentp) must be exclusively locked and referenced and can be an inode
1074  * or an existing indirect block within the inode.
1075  *
1076  * On return (*parentp) will be modified to point at the deepest parent chain
1077  * element encountered during the search, as a helper for an insertion or
1078  * deletion.   The new (*parentp) will be locked and referenced and the old
1079  * will be unlocked and dereferenced (no change if they are both the same).
1080  *
1081  * The matching chain will be returned exclusively locked and referenced.
1082  *
1083  * NULL is returned if no match was found, but (*parentp) will still
1084  * potentially be adjusted.
1085  *
1086  * This function will also recurse up the chain if the key is not within the
1087  * current parent's range.  (*parentp) can never be set to NULL.  An iteration
1088  * can simply allow (*parentp) to float inside the loop.
1089  */
1090 hammer2_chain_t *
1091 hammer2_chain_lookup(hammer2_mount_t *hmp, hammer2_chain_t **parentp,
1092                      hammer2_key_t key_beg, hammer2_key_t key_end,
1093                      int flags)
1094 {
1095         hammer2_chain_t *parent;
1096         hammer2_chain_t *chain;
1097         hammer2_chain_t *tmp;
1098         hammer2_blockref_t *base;
1099         hammer2_blockref_t *bref;
1100         hammer2_key_t scan_beg;
1101         hammer2_key_t scan_end;
1102         int count = 0;
1103         int i;
1104
1105         /*
1106          * Recurse (*parentp) upward if necessary until the parent completely
1107          * encloses the key range or we hit the inode.
1108          */
1109         parent = *parentp;
1110         while (parent->bref.type == HAMMER2_BREF_TYPE_INDIRECT) {
1111                 scan_beg = parent->bref.key;
1112                 scan_end = scan_beg +
1113                            ((hammer2_key_t)1 << parent->bref.keybits) - 1;
1114                 if (key_beg >= scan_beg && key_end <= scan_end)
1115                         break;
1116                 hammer2_chain_ref(hmp, parent);         /* ref old parent */
1117                 hammer2_chain_unlock(hmp, parent);      /* unlock old parent */
1118                 parent = parent->parent;
1119                                                         /* lock new parent */
1120                 hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_MAYBE);
1121                 hammer2_chain_drop(hmp, *parentp);      /* drop old parent */
1122                 *parentp = parent;                      /* new parent */
1123         }
1124
1125 again:
1126         /*
1127          * Locate the blockref array.  Currently we do a fully associative
1128          * search through the array.
1129          */
1130         switch(parent->bref.type) {
1131         case HAMMER2_BREF_TYPE_INODE:
1132                 /*
1133                  * Special shortcut for embedded data returns the inode
1134                  * itself.  Callers must detect this condition and access
1135                  * the embedded data (the strategy code does this for us).
1136                  *
1137                  * This is only applicable to regular files and softlinks.
1138                  */
1139                 if (parent->data->ipdata.op_flags & HAMMER2_OPFLAG_DIRECTDATA) {
1140                         if (flags & HAMMER2_LOOKUP_NOLOCK)
1141                                 hammer2_chain_ref(hmp, parent);
1142                         else
1143                                 hammer2_chain_lock(hmp, parent,
1144                                                    HAMMER2_RESOLVE_ALWAYS);
1145                         return (parent);
1146                 }
1147                 base = &parent->data->ipdata.u.blockset.blockref[0];
1148                 count = HAMMER2_SET_COUNT;
1149                 break;
1150         case HAMMER2_BREF_TYPE_INDIRECT:
1151                 /*
1152                  * Optimize indirect blocks in the INITIAL state to avoid
1153                  * I/O.
1154                  */
1155                 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1156                         base = NULL;
1157                 } else {
1158                         if (parent->data == NULL)
1159                                 panic("parent->data is NULL");
1160                         base = &parent->data->npdata.blockref[0];
1161                 }
1162                 count = parent->bytes / sizeof(hammer2_blockref_t);
1163                 break;
1164         case HAMMER2_BREF_TYPE_VOLUME:
1165                 base = &hmp->voldata.sroot_blockset.blockref[0];
1166                 count = HAMMER2_SET_COUNT;
1167                 break;
1168         default:
1169                 panic("hammer2_chain_lookup: unrecognized blockref type: %d",
1170                       parent->bref.type);
1171                 base = NULL;    /* safety */
1172                 count = 0;      /* safety */
1173         }
1174
1175         /*
1176          * If the element and key overlap we use the element.
1177          */
1178         bref = NULL;
1179         for (i = 0; i < count; ++i) {
1180                 tmp = hammer2_chain_find(hmp, parent, i);
1181                 if (tmp) {
1182                         bref = &tmp->bref;
1183                         KKASSERT(bref->type != 0);
1184                 } else if (base == NULL || base[i].type == 0) {
1185                         continue;
1186                 } else {
1187                         bref = &base[i];
1188                 }
1189                 scan_beg = bref->key;
1190                 scan_end = scan_beg + ((hammer2_key_t)1 << bref->keybits) - 1;
1191                 if (key_beg <= scan_end && key_end >= scan_beg)
1192                         break;
1193         }
1194         if (i == count) {
1195                 if (key_beg == key_end)
1196                         return (NULL);
1197                 return (hammer2_chain_next(hmp, parentp, NULL,
1198                                            key_beg, key_end, flags));
1199         }
1200
1201         /*
1202          * Acquire the new chain element.  If the chain element is an
1203          * indirect block we must search recursively.
1204          */
1205         chain = hammer2_chain_get(hmp, parent, i, flags);
1206         if (chain == NULL)
1207                 return (NULL);
1208
1209         /*
1210          * If the chain element is an indirect block it becomes the new
1211          * parent and we loop on it.
1212          *
1213          * The parent always has to be locked with at least RESOLVE_MAYBE,
1214          * so it might need a fixup if the caller passed incompatible flags.
1215          */
1216         if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT) {
1217                 hammer2_chain_unlock(hmp, parent);
1218                 *parentp = parent = chain;
1219                 if (flags & HAMMER2_LOOKUP_NOLOCK) {
1220                         hammer2_chain_lock(hmp, chain, HAMMER2_RESOLVE_MAYBE);
1221                         hammer2_chain_drop(hmp, chain); /* excess ref */
1222                 } else if (flags & HAMMER2_LOOKUP_NODATA) {
1223                         hammer2_chain_lock(hmp, chain, HAMMER2_RESOLVE_MAYBE);
1224                         hammer2_chain_unlock(hmp, chain);
1225                 }
1226                 goto again;
1227         }
1228
1229         /*
1230          * All done, return chain
1231          */
1232         return (chain);
1233 }
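/*
 * Both the lookup above and the iterator below treat a blockref as
 * covering the key range [key, key + 2^keybits - 1] and accept any
 * element whose range overlaps the requested [key_beg, key_end].  A
 * stand-alone version of that overlap test is shown below (assuming
 * keybits < 64); the names are hypothetical and the block is guarded
 * out.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static int
range_overlaps(uint64_t key, int keybits,
	       uint64_t key_beg, uint64_t key_end)
{
	uint64_t scan_beg = key;
	uint64_t scan_end = scan_beg + ((uint64_t)1 << keybits) - 1;

	return (key_beg <= scan_end && key_end >= scan_beg);
}

int
main(void)
{
	/* a 64KB element at key 0x10000 vs. a point lookup at 0x12345 */
	printf("%d\n", range_overlaps(0x10000, 16, 0x12345, 0x12345));
	return (0);
}
#endif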
1234
1235 /*
1236  * After having issued a lookup we can iterate all matching keys.
1237  *
1238  * If chain is non-NULL we continue the iteration from just after its index.
1239  *
1240  * If chain is NULL we assume the parent was exhausted and continue the
1241  * iteration at the next parent.
1242  *
1243  * parent must be locked on entry and remains locked throughout.  chain's
1244  * lock status must match flags.
1245  */
1246 hammer2_chain_t *
1247 hammer2_chain_next(hammer2_mount_t *hmp, hammer2_chain_t **parentp,
1248                    hammer2_chain_t *chain,
1249                    hammer2_key_t key_beg, hammer2_key_t key_end,
1250                    int flags)
1251 {
1252         hammer2_chain_t *parent;
1253         hammer2_chain_t *tmp;
1254         hammer2_blockref_t *base;
1255         hammer2_blockref_t *bref;
1256         hammer2_key_t scan_beg;
1257         hammer2_key_t scan_end;
1258         int i;
1259         int count;
1260
1261         parent = *parentp;
1262
1263 again:
1264         /*
1265          * Calculate the next index and recalculate the parent if necessary.
1266          */
1267         if (chain) {
1268                 /*
1269                  * Continue iteration within current parent.  If not NULL
1270                  * the passed-in chain may or may not be locked, based on
1271                  * the LOOKUP_NOLOCK flag (passed in as returned from lookup
1272                  * or a prior next).
1273                  */
1274                 i = chain->index + 1;
1275                 if (flags & HAMMER2_LOOKUP_NOLOCK)
1276                         hammer2_chain_drop(hmp, chain);
1277                 else
1278                         hammer2_chain_unlock(hmp, chain);
1279
1280                 /*
1281                  * Any scan where the lookup returned degenerate data embedded
1282                  * in the inode has an invalid index and must terminate.
1283                  */
1284                 if (chain == parent)
1285                         return(NULL);
1286                 chain = NULL;
1287         } else if (parent->bref.type != HAMMER2_BREF_TYPE_INDIRECT) {
1288                 /*
1289                  * We reached the end of the iteration.
1290                  */
1291                 return (NULL);
1292         } else {
1293                 /*
1294                  * Continue iteration with next parent unless the current
1295                  * parent covers the range.
1296                  */
1297                 hammer2_chain_t *nparent;
1298
1299                 scan_beg = parent->bref.key;
1300                 scan_end = scan_beg +
1301                             ((hammer2_key_t)1 << parent->bref.keybits) - 1;
1302                 if (key_beg >= scan_beg && key_end <= scan_end)
1303                         return (NULL);
1304
1305                 i = parent->index + 1;
1306                 nparent = parent->parent;
1307                 hammer2_chain_ref(hmp, nparent);        /* ref new parent */
1308                 hammer2_chain_unlock(hmp, parent);      /* unlock old parent */
1309                                                         /* lock new parent */
1310                 hammer2_chain_lock(hmp, nparent, HAMMER2_RESOLVE_MAYBE);
1311                 hammer2_chain_drop(hmp, nparent);       /* drop excess ref */
1312                 *parentp = parent = nparent;
1313         }
1314
1315 again2:
1316         /*
1317          * Locate the blockref array.  Currently we do a fully associative
1318          * search through the array.
1319          */
1320         switch(parent->bref.type) {
1321         case HAMMER2_BREF_TYPE_INODE:
1322                 base = &parent->data->ipdata.u.blockset.blockref[0];
1323                 count = HAMMER2_SET_COUNT;
1324                 break;
1325         case HAMMER2_BREF_TYPE_INDIRECT:
1326                 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1327                         base = NULL;
1328                 } else {
1329                         KKASSERT(parent->data != NULL);
1330                         base = &parent->data->npdata.blockref[0];
1331                 }
1332                 count = parent->bytes / sizeof(hammer2_blockref_t);
1333                 break;
1334         case HAMMER2_BREF_TYPE_VOLUME:
1335                 base = &hmp->voldata.sroot_blockset.blockref[0];
1336                 count = HAMMER2_SET_COUNT;
1337                 break;
1338         default:
1339                 panic("hammer2_chain_next: unrecognized blockref type: %d",
1340                       parent->bref.type);
1341                 base = NULL;    /* safety */
1342                 count = 0;      /* safety */
1343                 break;
1344         }
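        /*
         * Illustrative note (added, not from the original source): for the
         * INDIRECT case the slot count is derived from the block size.
         * Assuming, for example, a 64-byte hammer2_blockref_t, a 16KB
         * indirect block would yield count = 16384 / 64 = 256 slots.  The
         * 64-byte size is an assumption made only for this example.
         */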
1345         KKASSERT(i <= count);
1346
1347         /*
1348          * Look for the key.  If we are unable to find a match and an exact
1349          * match was requested we return NULL.  If a range was requested we
1350          * run hammer2_chain_next() to iterate.
1351          */
1352         bref = NULL;
1353         while (i < count) {
1354                 tmp = hammer2_chain_find(hmp, parent, i);
1355                 if (tmp) {
1356                         bref = &tmp->bref;
1357                 } else if (base == NULL || base[i].type == 0) {
1358                         ++i;
1359                         continue;
1360                 } else {
1361                         bref = &base[i];
1362                 }
1363                 scan_beg = bref->key;
1364                 scan_end = scan_beg + ((hammer2_key_t)1 << bref->keybits) - 1;
1365                 if (key_beg <= scan_end && key_end >= scan_beg)
1366                         break;
1367                 ++i;
1368         }
1369
1370         /*
1371          * If we couldn't find a match recurse up a parent to continue the
1372          * search.
1373          */
1374         if (i == count)
1375                 goto again;
1376
1377         /*
1378          * Acquire the new chain element.  If the chain element is an
1379          * indirect block we must search recursively.
1380          */
1381         chain = hammer2_chain_get(hmp, parent, i, flags);
1382         if (chain == NULL)
1383                 return (NULL);
1384
1385         /*
1386          * If the chain element is an indirect block it becomes the new
1387          * parent and we loop on it.
1388          *
1389          * The parent always has to be locked with at least RESOLVE_MAYBE,
1390          * so it might need a fixup if the caller passed incompatible flags.
1391          */
1392         if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT) {
1393                 hammer2_chain_unlock(hmp, parent);
1394                 *parentp = parent = chain;
1395                 chain = NULL;
1396                 if (flags & HAMMER2_LOOKUP_NOLOCK) {
1397                         hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_MAYBE);
1398                         hammer2_chain_drop(hmp, parent);        /* excess ref */
1399                 } else if (flags & HAMMER2_LOOKUP_NODATA) {
1400                         hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_MAYBE);
1401                         hammer2_chain_unlock(hmp, parent);
1402                 }
1403                 i = 0;
1404                 goto again2;
1405         }
1406
1407         /*
1408          * All done, return chain
1409          */
1410         return (chain);
1411 }
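/*
 * Hedged usage sketch (added for illustration; the exact lookup/next
 * argument order is an assumption inferred from the code above, not
 * quoted from the headers).  A typical ranged scan pairs a lookup with
 * repeated calls to hammer2_chain_next():
 *
 *      chain = hammer2_chain_lookup(hmp, &parent, key_beg, key_end, flags);
 *      while (chain) {
 *              ...process chain...
 *              chain = hammer2_chain_next(hmp, &parent, chain,
 *                                         key_beg, key_end, flags);
 *      }
 *
 * parent is passed by pointer because the iteration may replace it with
 * an indirect block, as seen in the code above.
 */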
1412
1413 /*
1414  * Create and return a new hammer2 system memory structure of the specified
1415  * key, type and size and insert it RELATIVE TO (PARENT).
1416  *
1417  * (parent) is typically either an inode or an indirect block, acquired
1418  * as a side effect of issuing a prior failed lookup.  parent
1419  * must be locked and held.  Do not pass the inode chain to this function
1420  * unless that is the chain returned by the failed lookup.
1421  *
1422  * Non-indirect types will automatically allocate indirect blocks as required
1423  * if the new item does not fit in the current (parent).
1424  *
1425  * Indirect types will move a portion of the existing blockref array in
1426  * (parent) into the new indirect type and then use one of the free slots
1427  * to emplace the new indirect type.
1428  *
1429  * A new locked, referenced chain element is returned of the specified type.
1430  * The element may or may not have a data area associated with it:
1431  *
1432  *      VOLUME          not allowed here
1433  *      INODE           embedded data area will be set-up
1434  *      INDIRECT        not allowed here
1435  *      DATA            no data area will be set-up (caller is expected
1436  *                      to have logical buffers, we don't want to alias
1437  *                      the data onto device buffers!).
1438  */
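/*
 * Hedged usage sketch (added for illustration; the lookup call shape and
 * the lhc key variable are assumptions).  A caller normally looks the key
 * up first and only creates the element when the lookup fails, relative
 * to the parent the failed lookup left behind:
 *
 *      chain = hammer2_chain_lookup(hmp, &parent, lhc, lhc, 0);
 *      if (chain == NULL)
 *              chain = hammer2_chain_create(hmp, parent, NULL, lhc, 0,
 *                                           HAMMER2_BREF_TYPE_INODE,
 *                                           HAMMER2_INODE_BYTES);
 */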
1439 hammer2_chain_t *
1440 hammer2_chain_create(hammer2_mount_t *hmp, hammer2_chain_t *parent,
1441                      hammer2_chain_t *chain,
1442                      hammer2_key_t key, int keybits, int type, size_t bytes)
1443 {
1444         hammer2_blockref_t dummy;
1445         hammer2_blockref_t *base;
1446         hammer2_chain_t dummy_chain;
1447         int unlock_parent = 0;
1448         int allocated = 0;
1449         int count;
1450         int i;
1451
1452         if (chain == NULL) {
1453                 /*
1454                  * First allocate media space and construct the dummy bref,
1455                  * then allocate the in-memory chain structure.
1456                  */
1457                 bzero(&dummy, sizeof(dummy));
1458                 dummy.type = type;
1459                 dummy.key = key;
1460                 dummy.keybits = keybits;
1461                 dummy.data_off = hammer2_bytes_to_radix(bytes);
1462                 chain = hammer2_chain_alloc(hmp, &dummy);
1463                 allocated = 1;
1464
1465                 /*
1466                  * We do NOT set INITIAL here (yet).  INITIAL is only
1467                  * used for indirect blocks.
1468                  *
1469                  * Recalculate bytes to reflect the actual media block
1470                  * allocation.
1471                  */
1472                 bytes = (hammer2_off_t)1 <<
1473                         (int)(chain->bref.data_off & HAMMER2_OFF_MASK_RADIX);
1474                 chain->bytes = bytes;
1475
1476                 switch(type) {
1477                 case HAMMER2_BREF_TYPE_VOLUME:
1478                         panic("hammer2_chain_create: called with volume type");
1479                         break;
1480                 case HAMMER2_BREF_TYPE_INODE:
1481                         KKASSERT(bytes == HAMMER2_INODE_BYTES);
1482                         chain->data = (void *)&chain->u.ip->ip_data;
1483                         break;
1484                 case HAMMER2_BREF_TYPE_INDIRECT:
1485                         panic("hammer2_chain_create: cannot be used to "
1486                               "create indirect block");
1487                         break;
1488                 case HAMMER2_BREF_TYPE_DATA:
1489                 default:
1490                         /* leave chain->data NULL */
1491                         KKASSERT(chain->data == NULL);
1492                         break;
1493                 }
1494         } else {
1495                 /*
1496                  * Potentially update the chain's key/keybits.
1497                  */
1498                 chain->bref.key = key;
1499                 chain->bref.keybits = keybits;
1500         }
1501
1502 again:
1503         /*
1504          * Locate a free blockref in the parent's array
1505          */
1506         switch(parent->bref.type) {
1507         case HAMMER2_BREF_TYPE_INODE:
1508                 KKASSERT((parent->u.ip->ip_data.op_flags &
1509                           HAMMER2_OPFLAG_DIRECTDATA) == 0);
1510                 KKASSERT(parent->data != NULL);
1511                 base = &parent->data->ipdata.u.blockset.blockref[0];
1512                 count = HAMMER2_SET_COUNT;
1513                 break;
1514         case HAMMER2_BREF_TYPE_INDIRECT:
1515                 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1516                         base = NULL;
1517                 } else {
1518                         KKASSERT(parent->data != NULL);
1519                         base = &parent->data->npdata.blockref[0];
1520                 }
1521                 count = parent->bytes / sizeof(hammer2_blockref_t);
1522                 break;
1523         case HAMMER2_BREF_TYPE_VOLUME:
1524                 KKASSERT(parent->data != NULL);
1525                 base = &hmp->voldata.sroot_blockset.blockref[0];
1526                 count = HAMMER2_SET_COUNT;
1527                 break;
1528         default:
1529                 panic("hammer2_chain_create: unrecognized blockref type: %d",
1530                       parent->bref.type);
1531                 count = 0;
1532                 break;
1533         }
1534
1535         /*
1536          * Scan for an unallocated bref, also skipping any slots occupied
1537          * by in-memory chain elements that may not yet have been updated
1538          * in the parent's bref array.
1539          */
1540         bzero(&dummy_chain, sizeof(dummy_chain));
1541         for (i = 0; i < count; ++i) {
1542                 if (base == NULL) {
1543                         dummy_chain.index = i;
1544                         if (RB_FIND(hammer2_chain_tree,
1545                                     &parent->rbhead, &dummy_chain) == NULL) {
1546                                 break;
1547                         }
1548                 } else if (base[i].type == 0) {
1549                         dummy_chain.index = i;
1550                         if (RB_FIND(hammer2_chain_tree,
1551                                     &parent->rbhead, &dummy_chain) == NULL) {
1552                                 break;
1553                         }
1554                 }
1555         }
1556
1557         /*
1558          * If no free blockref could be found we must create an indirect
1559          * block and move a number of blockrefs into it.  With the parent
1560          * locked we can safely lock each child in order to move it without
1561          * causing a deadlock.
1562          *
1563          * This may return the new indirect block or the old parent depending
1564          * on where the key falls.
1565          */
1566         if (i == count) {
1567                 hammer2_chain_t *nparent;
1568
1569                 nparent = hammer2_chain_create_indirect(hmp, parent,
1570                                                         key, keybits);
1571                 if (nparent == NULL) {
1572                         if (allocated)
1573                                 hammer2_chain_free(hmp, chain);
1574                         chain = NULL;
1575                         goto done;
1576                 }
1577                 if (parent != nparent) {
1578                         if (unlock_parent)
1579                                 hammer2_chain_unlock(hmp, parent);
1580                         parent = nparent;
1581                         unlock_parent = 1;
1582                 }
1583                 goto again;
1584         }
1585
1586         /*
1587          * Link the chain into its parent.  Later on we will have to set
1588          * the MOVED bit in situations where we don't mark the new chain
1589          * as being modified.
1590          */
1591         if (chain->parent != NULL)
1592                 panic("hammer2: hammer2_chain_create: chain already connected");
1593         KKASSERT(chain->parent == NULL);
1594         chain->parent = parent;
1595         chain->index = i;
1596         if (RB_INSERT(hammer2_chain_tree, &parent->rbhead, chain))
1597                 panic("hammer2_chain_link: collision");
1598         atomic_clear_int(&chain->flags, HAMMER2_CHAIN_DELETED);
1599         KKASSERT(parent->refs > 0);
1600         atomic_add_int(&parent->refs, 1);
1601
1602         /*
1603          * Additional linkage for inodes.  Reuse the parent pointer to
1604          * find the parent directory.
1605          *
1606          * Cumulative adjustments are inherited on [re]attach and will
1607          * propagate up the tree on the next flush.
1608          *
1609          * The ccms_inode is initialized from its parent directory.  The
1610          * chain of ccms_inode's is seeded by the mount code.
1611          */
1612         if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
1613                 hammer2_chain_t *scan = parent;
1614                 hammer2_inode_t *ip = chain->u.ip;
1615
1616                 while (scan->bref.type == HAMMER2_BREF_TYPE_INDIRECT)
1617                         scan = scan->parent;
1618                 if (scan->bref.type == HAMMER2_BREF_TYPE_INODE) {
1619                         ip->pip = scan->u.ip;
1620                         ip->pmp = scan->u.ip->pmp;
1621                         ip->depth = scan->u.ip->depth + 1;
1622                         ip->pip->delta_icount += ip->ip_data.inode_count;
1623                         ip->pip->delta_dcount += ip->ip_data.data_count;
1624                         ++ip->pip->delta_icount;
1625                         ccms_cst_init(&ip->topo_cst, &ip->chain);
1626                 }
1627         }
1628
1629         /*
1630          * (allocated) indicates that this is a newly-created chain element
1631          * rather than a renamed chain element.  In this situation we want
1632          * to place the chain element in the MODIFIED state.
1633          *
1634          * The data area will be set up as follows:
1635          *
1636          *      VOLUME          not allowed here.
1637          *
1638          *      INODE           embedded data area will be set-up.
1639          *
1640          *      INDIRECT        not allowed here.
1641          *
1642          *      DATA            no data area will be set-up (caller is expected
1643          *                      to have logical buffers, we don't want to alias
1644          *                      the data onto device buffers!).
1645          */
1646         if (allocated) {
1647                 if (chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
1648                         hammer2_chain_modify(hmp, chain,
1649                                              HAMMER2_MODIFY_OPTDATA);
1650                 } else if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT) {
1651                         /* not supported in this function */
1652                         panic("hammer2_chain_create: bad type");
1653                         atomic_set_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
1654                         hammer2_chain_modify(hmp, chain,
1655                                              HAMMER2_MODIFY_OPTDATA);
1656                 } else {
1657                         hammer2_chain_modify(hmp, chain, 0);
1658                 }
1659         } else {
1660                 /*
1661                  * When reconnecting inodes we have to call setsubmod()
1662                  * to ensure that its state propagates up the newly
1663                  * connected parent.
1664                  *
1665                  * Make sure MOVED is set but do not update bref_flush.  If
1666                  * the chain is undergoing modification bref_flush will be
1667                  * updated when it gets flushed.  If it is not then the
1668                  * bref may not have been flushed yet and we do not want to
1669                  * set MODIFIED here as this could result in unnecessary
1670                  * reallocations.
1671                  */
1672                 if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
1673                         hammer2_chain_ref(hmp, chain);
1674                         atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
1675                 }
1676                 hammer2_chain_parent_setsubmod(hmp, chain);
1677         }
1678
1679 done:
1680         if (unlock_parent)
1681                 hammer2_chain_unlock(hmp, parent);
1682         return (chain);
1683 }
1684
1685 /*
1686  * Create an indirect block that covers one or more of the elements in the
1687  * current parent.  Either returns the existing parent with no locking or
1688  * ref changes, or returns the new indirect block locked and referenced,
1689  * leaving the original parent's lock/ref intact as well.
1690  *
1691  * The returned chain depends on where the specified key falls.
1692  *
1693  * The key/keybits for the indirect mode only needs to follow a few rules:
1694  *
1695  * (1) That all elements underneath it fit within its key space and
1696  *
1697  * (2) That all elements outside it are outside its key space.
1698  *
1699  * (3) When creating the new indirect block any elements in the current
1700  *     parent that fit within the new indirect block's keyspace must be
1701  *     moved into the new indirect block.
1702  *
1703  * (4) The keyspace chosen for the inserted indirect block CAN cover a wider
1704  *     keyspace than the current parent, but lookup/iteration rules will
1705  *     ensure (and must ensure) that rule (2) for all parents leading up
1706  *     to the nearest inode or the root volume header is adhered to.  This
1707  *     is accomplished by always recursing through matching keyspaces in
1708  *     the hammer2_chain_lookup() and hammer2_chain_next() API.
1709  *
1710  * The current implementation calculates the worst-case keyspace by
1711  * iterating the current parent and then divides it into two halves, choosing
1712  * whichever half has the most elements (not necessarily the half containing
1713  * the requested key).
1714  *
1715  * We can also opt to use the half with the least number of elements.  This
1716  * causes lower-numbered keys (aka logical file offsets) to recurse through
1717  * fewer indirect blocks and higher-numbered keys to recurse through more.
1718  * This also has the risk of not moving enough elements to the new indirect
1719  * block and being forced to create several indirect blocks before the element
1720  * can be inserted.
1721  */
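/*
 * Worked example (added for exposition; the values are hypothetical):
 * suppose the scan in the function below expands (key, keybits) to
 * keybits = 16, so the full range is (key & ~0xFFFF) .. (key | 0xFFFF).
 * After the "--keybits" step each half spans 1 << 15 keys and bit 15
 * selects the half:
 *
 *      lo half:  base              (bit 15 clear, locount elements)
 *      hi half:  base | (1 << 15)  (bit 15 set,   hicount elements)
 *
 * The default policy adopts the more heavily populated half as the new
 * indirect block's keyspace; with hammer2_indirect_optimize set the
 * policy flips to prefer the less populated half.
 */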
1722 static
1723 hammer2_chain_t *
1724 hammer2_chain_create_indirect(hammer2_mount_t *hmp, hammer2_chain_t *parent,
1725                               hammer2_key_t create_key, int create_bits)
1726 {
1727         hammer2_blockref_t *base;
1728         hammer2_blockref_t *bref;
1729         hammer2_chain_t *chain;
1730         hammer2_chain_t *ichain;
1731         hammer2_chain_t dummy;
1732         hammer2_key_t key = create_key;
1733         int keybits = create_bits;
1734         int locount = 0;
1735         int hicount = 0;
1736         int count;
1737         int nbytes;
1738         int i;
1739
1740         /*
1741          * Calculate the base blockref pointer or NULL if the parent
1742          * is known to be empty.  We need to calculate the array count
1743          * for RB lookups either way.
1744          */
1745         hammer2_chain_modify(hmp, parent, HAMMER2_MODIFY_OPTDATA);
1746         if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1747                 base = NULL;
1748
1749                 switch(parent->bref.type) {
1750                 case HAMMER2_BREF_TYPE_INODE:
1751                         count = HAMMER2_SET_COUNT;
1752                         break;
1753                 case HAMMER2_BREF_TYPE_INDIRECT:
1754                         count = parent->bytes / sizeof(hammer2_blockref_t);
1755                         break;
1756                 case HAMMER2_BREF_TYPE_VOLUME:
1757                         count = HAMMER2_SET_COUNT;
1758                         break;
1759                 default:
1760                         panic("hammer2_chain_create_indirect: "
1761                               "unrecognized blockref type: %d",
1762                               parent->bref.type);
1763                         count = 0;
1764                         break;
1765                 }
1766         } else {
1767                 switch(parent->bref.type) {
1768                 case HAMMER2_BREF_TYPE_INODE:
1769                         base = &parent->data->ipdata.u.blockset.blockref[0];
1770                         count = HAMMER2_SET_COUNT;
1771                         break;
1772                 case HAMMER2_BREF_TYPE_INDIRECT:
1773                         base = &parent->data->npdata.blockref[0];
1774                         count = parent->bytes / sizeof(hammer2_blockref_t);
1775                         break;
1776                 case HAMMER2_BREF_TYPE_VOLUME:
1777                         base = &hmp->voldata.sroot_blockset.blockref[0];
1778                         count = HAMMER2_SET_COUNT;
1779                         break;
1780                 default:
1781                         panic("hammer2_chain_create_indirect: "
1782                               "unrecognized blockref type: %d",
1783                               parent->bref.type);
1784                         count = 0;
1785                         break;
1786                 }
1787         }
1788
1789         /*
1790          * Scan for an unallocated bref, also skipping any slots occupied
1791          * by in-memory chain elements which may not yet have been updated
1792          * in the parent's bref array.
1793          */
1794         bzero(&dummy, sizeof(dummy));
1795         for (i = 0; i < count; ++i) {
1796                 int nkeybits;
1797
1798                 dummy.index = i;
1799                 chain = RB_FIND(hammer2_chain_tree, &parent->rbhead, &dummy);
1800                 if (chain) {
1801                         bref = &chain->bref;
1802                 } else if (base && base[i].type) {
1803                         bref = &base[i];
1804                 } else {
1805                         continue;
1806                 }
1807
1808                 /*
1809                  * Expand our calculated key range (key, keybits) to fit
1810                  * the scanned key.  nkeybits represents the full range
1811                  * that we will later cut in half (two halves @ nkeybits - 1).
1812                  */
1813                 nkeybits = keybits;
1814                 if (nkeybits < bref->keybits)
1815                         nkeybits = bref->keybits;
1816                 while (nkeybits < 64 &&
1817                        (~(((hammer2_key_t)1 << nkeybits) - 1) &
1818                         (key ^ bref->key)) != 0) {
1819                         ++nkeybits;
1820                 }
1821
1822                 /*
1823                  * If the new key range is larger we have to determine
1824                  * which side of the new key range the existing keys fall
1825                  * under by checking the high bit, then collapsing the
1826          * locount into the hicount or vice-versa.
1827                  */
1828                 if (keybits != nkeybits) {
1829                         if (((hammer2_key_t)1 << (nkeybits - 1)) & key) {
1830                                 hicount += locount;
1831                                 locount = 0;
1832                         } else {
1833                                 locount += hicount;
1834                                 hicount = 0;
1835                         }
1836                         keybits = nkeybits;
1837                 }
1838
1839                 /*
1840                  * The newly scanned key will be in the lower half or the
1841                  * higher half of the (new) key range.
1842                  */
1843                 if (((hammer2_key_t)1 << (nkeybits - 1)) & bref->key)
1844                         ++hicount;
1845                 else
1846                         ++locount;
1847         }
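        /*
         * Worked example of the range expansion in the loop above (added
         * for exposition, hypothetical values): with key = 0x1000 and a
         * scanned bref->key = 0x1F00 the XOR is 0x0F00, so nkeybits is
         * bumped until ~(((hammer2_key_t)1 << nkeybits) - 1) no longer
         * overlaps 0x0F00, i.e. nkeybits = 12.  Both keys then fall in
         * the same 1 << 12 aligned range (0x1000..0x1FFF).
         */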
1848
1849         /*
1850          * Adjust keybits to represent half of the full range calculated
1851          * above (radix 63 max)
1852          */
1853         --keybits;
1854
1855         /*
1856          * Select whichever half contains the most elements.  Theoretically
1857          * we can select either side as long as it contains at least one
1858          * element (in order to ensure that a free slot is present to hold
1859          * the indirect block).
1860          */
1861         key &= ~(((hammer2_key_t)1 << keybits) - 1);
1862         if (hammer2_indirect_optimize) {
1863                 /*
1864                  * Insert node for the least number of keys; this will arrange
1865                  * the first few blocks of a large file or the first few
1866                  * inodes in a directory with fewer indirect blocks when
1867                  * created linearly.
1868                  */
1869                 if (hicount < locount && hicount != 0)
1870                         key |= (hammer2_key_t)1 << keybits;
1871                 else
1872                         key &= ~(hammer2_key_t)1 << keybits;
1873         } else {
1874                 /*
1875                  * Insert node for the largest number of keys, best for heavily
1876                  * fragmented files.
1877                  */
1878                 if (hicount > locount)
1879                         key |= (hammer2_key_t)1 << keybits;
1880                 else
1881                         key &= ~(hammer2_key_t)1 << keybits;
1882         }
1883
1884         /*
1885          * How big should our new indirect block be?  It has to be at least
1886          * as large as its parent.
1887          */
1888         if (parent->bref.type == HAMMER2_BREF_TYPE_INODE)
1889                 nbytes = HAMMER2_IND_BYTES_MIN;
1890         else
1891                 nbytes = HAMMER2_IND_BYTES_MAX;
1892         if (nbytes < count * sizeof(hammer2_blockref_t))
1893                 nbytes = count * sizeof(hammer2_blockref_t);
1894
1895         /*
1896          * Ok, create our new indirect block
1897          */
1898         dummy.bref.type = HAMMER2_BREF_TYPE_INDIRECT;
1899         dummy.bref.key = key;
1900         dummy.bref.keybits = keybits;
1901         dummy.bref.data_off = hammer2_bytes_to_radix(nbytes);
1902         ichain = hammer2_chain_alloc(hmp, &dummy.bref);
1903         atomic_set_int(&ichain->flags, HAMMER2_CHAIN_INITIAL);
1904
1905         /*
1906          * Iterate the original parent and move the matching brefs into
1907          * the new indirect block.
1908          */
1909         for (i = 0; i < count; ++i) {
1910                 /*
1911                  * For keying purposes access the bref from the media or
1912                  * from our in-memory cache.  In cases where the in-memory
1913                  * cache overrides the media the keyrefs will be the same
1914                  * anyway so we can avoid checking the cache when the media
1915                  * has a key.
1916                  */
1917                 dummy.index = i;
1918                 chain = RB_FIND(hammer2_chain_tree, &parent->rbhead, &dummy);
1919                 if (chain) {
1920                         bref = &chain->bref;
1921                 } else if (base && base[i].type) {
1922                         bref = &base[i];
1923                 } else {
1924                         if (ichain->index < 0)
1925                                 ichain->index = i;
1926                         continue;
1927                 }
1928
1929                 /*
1930                  * Skip keys not in the chosen half (low or high), only bit
1931                  * (keybits - 1) needs to be compared but for safety we
1932                  * will compare all msb bits plus that bit again.
1933                  */
1934                 if ((~(((hammer2_key_t)1 << keybits) - 1) &
1935                     (key ^ bref->key)) != 0) {
1936                         continue;
1937                 }
1938
1939                 /*
1940                  * This element is being moved from the parent, its slot
1941                  * is available for our new indirect block.
1942                  */
1943                 if (ichain->index < 0)
1944                         ichain->index = i;
1945
1946                 /*
1947                  * Load the new indirect block by acquiring or allocating
1948                  * the related chain entries, then simply move them to the
1949                  * new parent (ichain).
1950                  *
1951                  * When adjusting the parent/child relationship we must
1952                  * set the MOVED bit but we do NOT update bref_flush
1953                  * because otherwise we might synchronize a bref that has
1954                  * not yet been flushed.  We depend on chain's bref_flush
1955                  * either being correct or the chain being in a MODIFIED
1956                  * state.
1957                  *
1958                  * We do not want to set MODIFIED here as this would result
1959                  * in unnecessary reallocations.
1960                  *
1961                  * We must still set SUBMODIFIED in the parent but we do
1962                  * that after the loop.
1963                  *
1964                  * XXX we really need a lock here but we don't need the
1965                  *     data.  NODATA feature needed.
1966                  */
1967                 chain = hammer2_chain_get(hmp, parent, i,
1968                                           HAMMER2_LOOKUP_NODATA);
1969                 RB_REMOVE(hammer2_chain_tree, &parent->rbhead, chain);
1970                 if (RB_INSERT(hammer2_chain_tree, &ichain->rbhead, chain))
1971                         panic("hammer2_chain_create_indirect: collision");
1972                 chain->parent = ichain;
1973                 if (base)
1974                         bzero(&base[i], sizeof(base[i]));
1975                 atomic_add_int(&parent->refs, -1);
1976                 atomic_add_int(&ichain->refs, 1);
1977                 if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
1978                         hammer2_chain_ref(hmp, chain);
1979                         atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
1980                 }
1981                 hammer2_chain_unlock(hmp, chain);
1982                 KKASSERT(parent->refs > 0);
1983                 chain = NULL;
1984         }
1985
1986         /*
1987          * Insert the new indirect block into the parent now that we've
1988          * cleared out some entries in the parent.  We calculated a good
1989          * insertion index in the loop above (ichain->index).
1990          *
1991          * We don't have to set MOVED here because we mark ichain modified
1992          * down below (so the normal modified -> flush -> set-moved sequence
1993          * applies).
1994          */
1995         KKASSERT(ichain->index >= 0);
1996         if (RB_INSERT(hammer2_chain_tree, &parent->rbhead, ichain))
1997                 panic("hammer2_chain_create_indirect: ichain insertion");
1998         ichain->parent = parent;
1999         atomic_add_int(&parent->refs, 1);
2000
2001         /*
2002          * Mark the new indirect block modified after insertion, which
2003          * will propagate up through the parent all the way to the root and
2004          * also allocate the physical block in ichain for our caller,
2005          * and assign ichain->data to a pre-zero'd space (because there
2006          * is no prior data to copy into it).
2007          *
2008          * We have to set SUBMODIFIED in ichain's flags manually so the
2009          * flusher knows it has to recurse through it to get to all of
2010          * our moved blocks, then call setsubmod() to set the bit
2011          * recursively.
2012          */
2013         hammer2_chain_modify(hmp, ichain, HAMMER2_MODIFY_OPTDATA);
2014         hammer2_chain_parent_setsubmod(hmp, ichain);
2015         atomic_set_int(&ichain->flags, HAMMER2_CHAIN_SUBMODIFIED);
2016
2017         /*
2018          * Figure out what to return.
2019          */
2020         if (create_bits > keybits) {
2021                 /*
2022                  * Key being created is way outside the key range,
2023                  * return the original parent.
2024                  */
2025                 hammer2_chain_unlock(hmp, ichain);
2026         } else if (~(((hammer2_key_t)1 << keybits) - 1) &
2027                    (create_key ^ key)) {
2028                 /*
2029                  * Key being created is outside the key range,
2030                  * return the original parent.
2031                  */
2032                 hammer2_chain_unlock(hmp, ichain);
2033         } else {
2034                 /*
2035          * Otherwise it's in the range, return the new parent.
2036                  * (leave both the new and old parent locked).
2037                  */
2038                 parent = ichain;
2039         }
2040
2041         return(parent);
2042 }
2043
2044 /*
2045  * Physically delete the specified chain element.  Note that inodes with
2046  * open descriptors should not be deleted (as with other filesystems) until
2047  * the last open descriptor is closed.
2048  *
2049  * This routine will remove the chain element from its parent and potentially
2050  * also recurse upward and delete indirect blocks which become empty as a
2051  * side effect.
2052  *
2053  * The caller must pass a pointer to the chain's parent, also locked and
2054  * referenced.  (*parentp) will be modified in a manner similar to a lookup
2055  * or iteration when indirect blocks are also deleted as a side effect.
2056  *
2057  * XXX This currently does not adhere to the MOVED flag protocol in that
2058  *     the removal is immediately indicated in the parent's blockref[]
2059  *     array.
2060  */
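/*
 * Hedged illustration (added; the lookup call shape is an assumption).
 * A permanent removal typically locates the chain under its parent and
 * deletes it with retain == 0, while an inode that still has open
 * descriptors would be deleted with a non-zero retain so the final close
 * can dispose of the disconnected chain:
 *
 *      chain = hammer2_chain_lookup(hmp, &parent, lhc, lhc, 0);
 *      if (chain)
 *              hammer2_chain_delete(hmp, parent, chain, 0);
 */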
2061 void
2062 hammer2_chain_delete(hammer2_mount_t *hmp, hammer2_chain_t *parent,
2063                      hammer2_chain_t *chain, int retain)
2064 {
2065         hammer2_blockref_t *base;
2066         hammer2_inode_t *ip;
2067         int count;
2068
2069         if (chain->parent != parent)
2070                 panic("hammer2_chain_delete: parent mismatch");
2071
2072         /*
2073          * Mark the parent modified so our base[] pointer remains valid
2074          * while we move entries.  For the optimized indirect block
2075          * case mark the parent moved instead.
2076          *
2077          * Calculate the blockref reference in the parent
2078          */
2079         switch(parent->bref.type) {
2080         case HAMMER2_BREF_TYPE_INODE:
2081                 hammer2_chain_modify(hmp, parent, HAMMER2_MODIFY_NO_MODIFY_TID);
2082                 base = &parent->data->ipdata.u.blockset.blockref[0];
2083                 count = HAMMER2_SET_COUNT;
2084                 break;
2085         case HAMMER2_BREF_TYPE_INDIRECT:
2086                 hammer2_chain_modify(hmp, parent, HAMMER2_MODIFY_OPTDATA |
2087                                                   HAMMER2_MODIFY_NO_MODIFY_TID);
2088                 if (parent->flags & HAMMER2_CHAIN_INITIAL)
2089                         base = NULL;
2090                 else
2091                         base = &parent->data->npdata.blockref[0];
2092                 count = parent->bytes / sizeof(hammer2_blockref_t);
2093                 break;
2094         case HAMMER2_BREF_TYPE_VOLUME:
2095                 hammer2_chain_modify(hmp, parent, HAMMER2_MODIFY_NO_MODIFY_TID);
2096                 base = &hmp->voldata.sroot_blockset.blockref[0];
2097                 count = HAMMER2_SET_COUNT;
2098                 break;
2099         default:
2100                 panic("hammer2_chain_delete: unrecognized blockref type: %d",
2101                       parent->bref.type);
2102                 count = 0;
2103                 break;
2104         }
2105
2106         /*
2107          * Disconnect the bref in the parent, remove the chain, and
2108          * disconnect in-memory fields from the parent.
2109          */
2110         KKASSERT(chain->index >= 0 && chain->index < count);
2111         if (base)
2112                 bzero(&base[chain->index], sizeof(*base));
2113
2114         RB_REMOVE(hammer2_chain_tree, &parent->rbhead, chain);
2115         atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELETED);
2116         atomic_add_int(&parent->refs, -1);      /* for RB tree entry */
2117         chain->index = -1;
2118         chain->parent = NULL;
2119
2120         /*
2121          * Cumulative adjustments must be propagated to the parent inode
2122          * when deleting and synchronized to ip.
2123          *
2124          * NOTE:  We do not propagate ip->delta_*count to the parent because
2125          *        these represent adjustments that have not yet been
2126          *        propagated upward, so we don't need to remove them from
2127          *        the parent.
2128          *
2129          * Clear the pointer to the parent inode.
2130          */
2131         if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
2132                 ip = chain->u.ip;
2133                 if (ip->pip) {
2134                         ip->pip->delta_icount -= ip->ip_data.inode_count;
2135                         ip->pip->delta_dcount -= ip->ip_data.data_count;
2136                         ip->ip_data.inode_count += ip->delta_icount;
2137                         ip->ip_data.data_count += ip->delta_dcount;
2138                         ip->delta_icount = 0;
2139                         ip->delta_dcount = 0;
2140                         --ip->pip->delta_icount;
2141                         ip->pip = NULL;
2142                 }
2143                 chain->u.ip->depth = 0;
2144         }
2145
2146         /*
2147          * If retain is 0 the deletion is permanent.  Because the chain is
2148          * no longer connected to the topology a flush will have no
2149          * visibility into it.  We must dispose of the references related
2150          * to the MODIFIED and MOVED flags, otherwise the ref count will
2151          * never transition to 0.
2152          *
2153          * If retain is non-zero the deleted element is likely an inode
2154          * which the vnops frontend will mark DESTROYED and flush.  In that
2155          * situation we must retain the flags for any open file descriptors
2156          * on the (removed) inode.  The final close will destroy the
2157          * disconnected chain.
2158          */
2159         if (retain == 0) {
2160                 if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
2161                         atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
2162                         hammer2_chain_drop(hmp, chain);
2163                 }
2164                 if (chain->flags & HAMMER2_CHAIN_MOVED) {
2165                         atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MOVED);
2166                         hammer2_chain_drop(hmp, chain);
2167                 }
2168         }
2169
2170         /*
2171          * The chain is still likely referenced, possibly even by a vnode
2172          * (if an inode), so defer further action until the chain gets
2173          * dropped.
2174          */
2175 }
2176
2177 /*
2178  * Recursively flush the specified chain.  The chain is locked and
2179  * referenced by the caller and will remain so on return.  The chain
2180  * will remain referenced throughout but can temporarily lose its
2181  * lock during the recursion to avoid unnecessarily stalling user
2182  * processes.
2185  */
2186 TAILQ_HEAD(flush_deferral_list, hammer2_chain);
2187
2188 struct hammer2_flush_info {
2189         struct flush_deferral_list flush_list;
2190         int             depth;
2191         hammer2_tid_t   modify_tid;
2192 };
2193
2194 typedef struct hammer2_flush_info hammer2_flush_info_t;
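/*
 * Hedged sketch (added for exposition; this is not the controller code
 * from this file, just one way the deferral protocol described in
 * hammer2_chain_flush_pass1() could be driven).  The owner of the info
 * structure drains the deferral list and retries each deferred chain:
 *
 *      while ((scan = TAILQ_FIRST(&info.flush_list)) != NULL) {
 *              TAILQ_REMOVE(&info.flush_list, scan, flush_node);
 *              atomic_clear_int(&scan->flags, HAMMER2_CHAIN_DEFERRED);
 *              hammer2_chain_lock(hmp, scan, HAMMER2_RESOLVE_MAYBE);
 *              hammer2_chain_flush_pass1(hmp, scan, &info);
 *              hammer2_chain_unlock(hmp, scan);
 *              hammer2_chain_drop(hmp, scan);  (ref taken at deferral)
 *      }
 */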
2195
2196 static void
2197 hammer2_chain_flush_pass1(hammer2_mount_t *hmp, hammer2_chain_t *chain,
2198                           hammer2_flush_info_t *info)
2199 {
2200         hammer2_blockref_t *bref;
2201         hammer2_off_t pbase;
2202         size_t bbytes;
2203         size_t boff;
2204         char *bdata;
2205         struct buf *bp;
2206         int error;
2207         int wasmodified;
2208
2209         /*
2210          * If we hit the stack recursion depth limit defer the operation.
2211          * The controller of the info structure will execute the deferral
2212          * list and then retry.
2213          *
2214          * This is only applicable if SUBMODIFIED is set.  After a reflush
2215          * SUBMODIFIED will probably be cleared and we want to drop through
2216          * to finish processing the current element so our direct parent
2217          * can process the results.
2218          */
2219         if (info->depth == HAMMER2_FLUSH_DEPTH_LIMIT &&
2220             (chain->flags & HAMMER2_CHAIN_SUBMODIFIED)) {
2221                 if ((chain->flags & HAMMER2_CHAIN_DEFERRED) == 0) {
2222                         hammer2_chain_ref(hmp, chain);
2223                         TAILQ_INSERT_TAIL(&info->flush_list,
2224                                           chain, flush_node);
2225                         atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEFERRED);
2226                 }
2227                 return;
2228         }
2229
2230         if (hammer2_debug & 0x0008)
2231                 kprintf("%*.*sCHAIN type=%d@%08jx %p/%d %04x {\n",
2232                         info->depth, info->depth, "",
2233                         chain->bref.type, chain->bref.data_off,
2234                         chain, chain->refs, chain->flags);
2235
2236         /*
2237          * If SUBMODIFIED is set we recurse the flush and adjust the
2238          * blockrefs accordingly.
2239          *
2240          * NOTE: Looping on SUBMODIFIED can prevent a flush from ever
2241          *       finishing in the face of filesystem activity.
2242          */
2243         if (chain->flags & HAMMER2_CHAIN_SUBMODIFIED) {
2244                 hammer2_chain_t *child;
2245                 hammer2_chain_t *next;
2246                 hammer2_blockref_t *base;
2247                 int count;
2248
2249                 /*
2250                  * Clear SUBMODIFIED to catch races.  Note that if any
2251                  * child has to be flushed SUBMODIFIED will wind up being
2252                  * set again (for next time), but this does not stop us from
2253                  * synchronizing block updates which occurred.
2254                  *
2255                  * We don't want to set our chain to MODIFIED gratuitously.
2256                  */
2257                 /* XXX SUBMODIFIED not interlocked, can race */
2258                 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_SUBMODIFIED);
2259
2260                 /*
2261                  * Flush the children and update the blockrefs in the chain.
2262                  * Be careful of ripouts during the loop.
2263                  */
2264                 next = RB_MIN(hammer2_chain_tree, &chain->rbhead);
2265                 if (next)
2266                         hammer2_chain_ref(hmp, next);
2267                 while ((child = next) != NULL) {
2268                         next = RB_NEXT(hammer2_chain_tree,
2269                                        &chain->rbhead, child);
2270                         if (next)
2271                                 hammer2_chain_ref(hmp, next);
2272                         /*
2273                          * We only recurse if SUBMODIFIED (internal node)
2274                          * or MODIFIED (internal node or leaf) is set.
2275                          * However, we must still track whether any MOVED
2276                          * entries are present to determine if the chain's
2277                          * blockref's need updating or not.
2278                          */
2279                         if ((child->flags & (HAMMER2_CHAIN_SUBMODIFIED |
2280                                              HAMMER2_CHAIN_MODIFIED |
2281                                             HAMMER2_CHAIN_MODIFIED_AUX)) == 0) {
2282                                 hammer2_chain_drop(hmp, child);
2283                                 continue;
2284                         }
2285                         hammer2_chain_lock(hmp, child, HAMMER2_RESOLVE_MAYBE);
2286                         hammer2_chain_drop(hmp, child);
2287                         if (child->parent != chain ||
2288                             (child->flags & (HAMMER2_CHAIN_SUBMODIFIED |
2289                                              HAMMER2_CHAIN_MODIFIED |
2290                                             HAMMER2_CHAIN_MODIFIED_AUX)) == 0) {
2291                                 hammer2_chain_unlock(hmp, child);
2292                                 continue;
2293                         }
2294
2295                         /*
2296                          * Propagate the DESTROYED flag if found set, then
2297                          * recurse the flush.
2298                          */
2299                         if ((chain->flags & HAMMER2_CHAIN_DESTROYED) &&
2300                             (child->flags & HAMMER2_CHAIN_DESTROYED) == 0) {
2301                                 atomic_set_int(&child->flags,
2302                                                HAMMER2_CHAIN_DESTROYED |
2303                                                HAMMER2_CHAIN_SUBMODIFIED);
2304                         }
2305                         ++info->depth;
2306                         hammer2_chain_flush_pass1(hmp, child, info);
2307                         --info->depth;
2308                         hammer2_chain_unlock(hmp, child);
2309                 }
2310
2311                 /*
2312                  * Now synchronize any block updates.
2313                  */
2314                 next = RB_MIN(hammer2_chain_tree, &chain->rbhead);
2315                 if (next)
2316                         hammer2_chain_ref(hmp, next);
2317                 while ((child = next) != NULL) {
2318                         next = RB_NEXT(hammer2_chain_tree,
2319                                        &chain->rbhead, child);
2320                         if (next)
2321                                 hammer2_chain_ref(hmp, next);
2322                         if ((child->flags & HAMMER2_CHAIN_MOVED) == 0) {
2323                                 hammer2_chain_drop(hmp, child);
2324                                 continue;
2325                         }
2326                         hammer2_chain_lock(hmp, child, HAMMER2_RESOLVE_NEVER);
2327                         hammer2_chain_drop(hmp, child);
2328                         if (child->parent != chain ||
2329                             (child->flags & HAMMER2_CHAIN_MOVED) == 0) {
2330                                 hammer2_chain_unlock(hmp, child);
2331                                 continue;
2332                         }
2333
2334                         hammer2_chain_modify(hmp, chain,
2335                                              HAMMER2_MODIFY_NO_MODIFY_TID);
2336
2337                         switch(chain->bref.type) {
2338                         case HAMMER2_BREF_TYPE_INODE:
2339                                 KKASSERT((chain->data->ipdata.op_flags &
2340                                           HAMMER2_OPFLAG_DIRECTDATA) == 0);
2341                                 base = &chain->data->ipdata.u.blockset.
2342                                         blockref[0];
2343                                 count = HAMMER2_SET_COUNT;
2344                                 break;
2345                         case HAMMER2_BREF_TYPE_INDIRECT:
2346                                 base = &chain->data->npdata.blockref[0];
2347                                 count = chain->bytes /
2348                                         sizeof(hammer2_blockref_t);
2349                                 break;
2350                         case HAMMER2_BREF_TYPE_VOLUME:
2351                                 base = &hmp->voldata.sroot_blockset.blockref[0];
2352                                 count = HAMMER2_SET_COUNT;
2353                                 break;
2354                         default:
2355                                 base = NULL;
2356                                 panic("hammer2_chain_flush_pass1: "
2357                                       "unrecognized blockref type: %d",
2358                                       chain->bref.type);
2359                         }
2360
2361                         KKASSERT(child->index >= 0);
2362                         base[child->index] = child->bref_flush;
2363
2364                         if (chain->bref.mirror_tid <
2365                             child->bref_flush.mirror_tid) {
2366                                 chain->bref.mirror_tid =
2367                                         child->bref_flush.mirror_tid;
2368                         }
2369
2370                         if (chain->bref.type == HAMMER2_BREF_TYPE_VOLUME &&
2371                             hmp->voldata.mirror_tid <
2372                             child->bref_flush.mirror_tid) {
2373                                 hmp->voldata.mirror_tid =
2374                                         child->bref_flush.mirror_tid;
2375                         }
2376                         atomic_clear_int(&child->flags, HAMMER2_CHAIN_MOVED);
2377                         hammer2_chain_drop(hmp, child); /* MOVED flag */
2378                         hammer2_chain_unlock(hmp, child);
2379                 }
2380         }
2381
2382         /*
2383          * If destroying the object we unconditionally clear the MODIFIED
2384          * and MOVED bits, and we destroy the buffer without writing it
2385          * out.
2386          *
2387          * We don't bother updating the hash/crc or the chain bref.
2388          *
2389          * NOTE: The destroy'd object's bref has already been updated,
2390          *       so we can clear MOVED without propagating mirror_tid
2391          *       or modify_tid upward.
2392          *
2393          * XXX allocations for unflushed data can be returned to the
2394          *     free pool.
2395          */
2396         if (chain->flags & HAMMER2_CHAIN_DESTROYED) {
2397                 if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
2398                         if (chain->bp) {
2399                                 chain->bp->b_flags |= B_INVAL|B_RELBUF;
2400                         }
2401                         atomic_clear_int(&chain->flags,
2402                                          HAMMER2_CHAIN_MODIFIED |
2403                                          HAMMER2_CHAIN_MODIFY_TID);
2404                         hammer2_chain_drop(hmp, chain);
2405                 }
2406                 if (chain->flags & HAMMER2_CHAIN_MODIFIED_AUX) {
2407                         atomic_clear_int(&chain->flags,
2408                                          HAMMER2_CHAIN_MODIFIED_AUX);
2409                 }
2410                 if (chain->flags & HAMMER2_CHAIN_MOVED) {
2411                         atomic_clear_int(&chain->flags,
2412                                          HAMMER2_CHAIN_MOVED);
2413                         hammer2_chain_drop(hmp, chain);
2414                 }
2415                 return;
2416         }
2417
2418         /*
2419          * Flush this chain entry only if it is marked modified.
2420          */
2421         if ((chain->flags & (HAMMER2_CHAIN_MODIFIED |
2422                              HAMMER2_CHAIN_MODIFIED_AUX)) == 0) {
2423                 goto done;
2424         }
2425
2426         /*
2427          * Synchronize cumulative data and inode count adjustments to
2428          * the inode and propagate the deltas upward to the parent.
2429          */
2430         if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
2431                 hammer2_inode_t *ip;
2432
2433                 ip = chain->u.ip;
2434                 ip->ip_data.inode_count += ip->delta_icount;
2435                 ip->ip_data.data_count += ip->delta_dcount;
2436                 if (ip->pip) {
2437                         ip->pip->delta_icount += ip->delta_icount;
2438                         ip->pip->delta_dcount += ip->delta_dcount;
2439                 }
2440                 ip->delta_icount = 0;
2441                 ip->delta_dcount = 0;
2442         }
2443
2444         /*
2445          * Flush if MODIFIED or MODIFIED_AUX is set.  MODIFIED_AUX is only
2446          * used by the volume header (&hmp->vchain).
2447          */
2448         if ((chain->flags & (HAMMER2_CHAIN_MODIFIED |
2449                              HAMMER2_CHAIN_MODIFIED_AUX)) == 0) {
2450                 goto done;
2451         }
2452         atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED_AUX);
2453
2454         /*
2455          * Clear MODIFIED and set HAMMER2_CHAIN_MOVED.  The caller
2456          * will re-test the MOVED bit.  We must also update the mirror_tid
2457          * and modify_tid fields as appropriate.
2458          *
2459          * The MODIFIED bit owns a single chain ref and the MOVED bit owns
2460          * its own chain ref.
2461          */
2462         chain->bref.mirror_tid = info->modify_tid;
2463         if (chain->flags & HAMMER2_CHAIN_MODIFY_TID)
2464                 chain->bref.modify_tid = info->modify_tid;
2465         wasmodified = (chain->flags & HAMMER2_CHAIN_MODIFIED) != 0;
2466         atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED |
2467                                         HAMMER2_CHAIN_MODIFY_TID);
2468
2469         if (chain->flags & HAMMER2_CHAIN_MOVED) {
2470                 /*
2471                  * Drop the ref from the MODIFIED bit we cleared.
2472                  */
2473                 if (wasmodified)
2474                         hammer2_chain_drop(hmp, chain);
2475         } else {
2476                 /*
2477                  * If we were MODIFIED we inherit the ref from clearing
2478                  * that bit, otherwise we need another ref.
2479                  */
2480                 if (wasmodified == 0)
2481                         hammer2_chain_ref(hmp, chain);
2482                 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
2483         }
2484         chain->bref_flush = chain->bref;
2485
2486         /*
2487          * If this is part of a recursive flush we can go ahead and write
2488          * out the buffer cache buffer and pass a new bref back up the chain.
2489          *
2490          * This will never be a volume header.
2491          */
2492         switch(chain->bref.type) {
2493         case HAMMER2_BREF_TYPE_VOLUME:
2494                 /*
2495                  * The volume header is flushed manually by the syncer, not
2496                  * here.
2497                  */
2498                 break;
2499         case HAMMER2_BREF_TYPE_DATA:
2500                 /*
2501                  * Data elements have already been flushed via the logical
2502                  * file buffer cache.  Their hash was set in the bref by
2503                  * the vop_write code.
2504                  *
2505                  * Make sure the buffer(s) have been flushed out here.
2506                  */
2507                 bbytes = chain->bytes;
2508                 pbase = chain->bref.data_off & ~(hammer2_off_t)(bbytes - 1);
2509                 boff = chain->bref.data_off & HAMMER2_OFF_MASK & (bbytes - 1);
2510
2511                 bp = getblk(hmp->devvp, pbase, bbytes, GETBLK_NOWAIT, 0);
2512                 if (bp) {
2513                         if ((bp->b_flags & (B_CACHE | B_DIRTY)) ==
2514                             (B_CACHE | B_DIRTY)) {
2515                                 kprintf("x");
2516                                 cluster_awrite(bp);
2517                         } else {
2518                                 bp->b_flags |= B_RELBUF;
2519                                 brelse(bp);
2520                         }
2521                 }
2522                 break;
2523         case HAMMER2_BREF_TYPE_INDIRECT:
2524                 /*
2525                  * Indirect blocks may be in an INITIAL state.  Use the
2526                  * chain_lock() call to ensure that the buffer has been
2527                  * instantiated (even though it is already locked the buffer
2528                  * might not have been instantiated).
2529                  *
2530                  * Only write the buffer out if it is dirty, it is possible
2531                  * the operating system had already written out the buffer.
2532                  */
2533                 hammer2_chain_lock(hmp, chain, HAMMER2_RESOLVE_ALWAYS);
2534                 KKASSERT(chain->bp != NULL);
2535
2536                 bp = chain->bp;
2537                 if ((chain->flags & HAMMER2_CHAIN_DIRTYBP) ||
2538                     (bp->b_flags & B_DIRTY)) {
2539                         bawrite(chain->bp);
2540                 } else {
2541                         brelse(chain->bp);
2542                 }
2543                 chain->bp = NULL;
2544                 chain->data = NULL;
2545                 hammer2_chain_unlock(hmp, chain);
2546                 break;
2547         default:
2548                 /*
2549                  * Embedded elements have to be flushed out.
2550                  */
2551                 KKASSERT(chain->data != NULL);
2552                 KKASSERT(chain->bp == NULL);
2553                 bref = &chain->bref;
2554
2555                 KKASSERT((bref->data_off & HAMMER2_OFF_MASK) != 0);
2556
2557                 if (chain->bp == NULL) {
2558                         /*
2559                          * The data is embedded, we have to acquire the
2560                          * buffer cache buffer and copy the data into it.
2561                          */
2562                         if ((bbytes = chain->bytes) < HAMMER2_MINIOSIZE)
2563                                 bbytes = HAMMER2_MINIOSIZE;
2564                         pbase = bref->data_off & ~(hammer2_off_t)(bbytes - 1);
2565                         boff = bref->data_off & HAMMER2_OFF_MASK & (bbytes - 1);
2566
2567                         /*
2568                          * The getblk() optimization can only be used if the
2569                          * physical block size matches the request.
2570                          */
2571                         if (chain->bytes == bbytes) {
2572                                 bp = getblk(hmp->devvp, pbase, bbytes, 0, 0);
2573                                 error = 0;
2574                         } else {
2575                                 error = bread(hmp->devvp, pbase, bbytes, &bp);
2576                                 KKASSERT(error == 0);
2577                         }
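                             /*
                              * When the chain is smaller than the physical
                              * buffer, other elements may share that buffer,
                              * so it must be read in before we copy our
                              * portion into it.  getblk() alone would leave
                              * the rest of the buffer contents undefined.
                              */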
2578                         bdata = (char *)bp->b_data + boff;
2579
2580                         /*
2581                          * Copy the data to the buffer, mark the buffer
2582                          * dirty, and convert the chain to unmodified.
2583                          *
2584                          * We expect we might have to make adjustments to
2585                          * non-data delayed-write buffers when doing an
2586                          * actual flush, so use bawrite() instead of
2587                          * cluster_awrite() here.
2588                          */
2589                         bcopy(chain->data, bdata, chain->bytes);
2590                         bp->b_flags |= B_CLUSTEROK;
2591                         bawrite(bp);
2592                         bp = NULL;
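                             /*
                              * Recompute the blockref check code from the
                              * in-memory copy so the parent records a CRC
                              * matching the data just written.  The iod
                              * counters separate inode (meta) writes from
                              * indirect block writes for statistics.
                              */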
2593                         chain->bref.check.iscsi32.value =
2594                                 hammer2_icrc32(chain->data, chain->bytes);
2595                         if (chain->bref.type == HAMMER2_BREF_TYPE_INODE)
2596                                 ++hammer2_iod_meta_write;
2597                         else
2598                                 ++hammer2_iod_indr_write;
2599                 } else {
2600                         chain->bref.check.iscsi32.value =
2601                                 hammer2_icrc32(chain->data, chain->bytes);
2602                 }
2603         }
2604
2605         /*
2606          * Adjustments to the bref.  The caller uses the updated bref to
2607          * fix up the parent's blockref pointing at this chain element.
2608          */
2609         bref = &chain->bref;
2610
2611         switch(bref->type) {
2612         case HAMMER2_BREF_TYPE_VOLUME:
2613                 KKASSERT(chain->data != NULL);
2614                 KKASSERT(chain->bp == NULL);
2615
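                     /*
                      * Update the volume header check codes.  The two section
                      * CRCs are computed first; icrc_volheader covers the
                      * region containing them, so it is computed last.
                      */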
2616         hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT1] =
2617                         hammer2_icrc32(
2618                                 (char *)&hmp->voldata +
2619                                  HAMMER2_VOLUME_ICRC1_OFF,
2620                                 HAMMER2_VOLUME_ICRC1_SIZE);
2621         hmp->voldata.icrc_sects[HAMMER2_VOL_ICRC_SECT0] =
2622                         hammer2_icrc32(
2623                                 (char *)&hmp->voldata +
2624                                  HAMMER2_VOLUME_ICRC0_OFF,
2625                                 HAMMER2_VOLUME_ICRC0_SIZE);
2626                 hmp->voldata.icrc_volheader =
2627                         hammer2_icrc32(
2628                                 (char *)&hmp->voldata +
2629                                  HAMMER2_VOLUME_ICRCVH_OFF,
2630                                 HAMMER2_VOLUME_ICRCVH_SIZE);
2631                 break;
2632         default:
2633                 break;
2634
2635         }
2636 done:
2637         if (hammer2_debug & 0x0008) {
2638                 kprintf("%*.*s} %p/%d %04x ",
2639                         info->depth, info->depth, "",
2640                         chain, chain->refs, chain->flags);
2641         }
2642 }
2643
2644 #if 0
2645 /*
2646  * PASS2 - not yet implemented (should be called only with the root chain?)
2647  */
2648 static void
2649 hammer2_chain_flush_pass2(hammer2_mount_t *hmp, hammer2_chain_t *chain)
2650 {
2651 }
2652 #endif
2653
2654 /*
2655  * Stand-alone flush.  If the chain cannot be completely flushed we have
2656  * to be sure that SUBMODIFIED propagates up the parent chain.  We must not
2657  * clear the MOVED bit after flushing in this situation or our desynchronized
2658  * bref will never be properly updated in the parent.
2659  *
2660  * This routine can be called from several places but the most important
2661  * is from the hammer2_vop_reclaim() function.  We want to try to completely
2662  * clean out the inode structure to prevent disconnected inodes from
2663  * building up and blowing out the kmalloc pool.
2664  *
2665  * If modify_tid is 0 (usual case), a new modify_tid is allocated and
2666  * applied to the flush.  The depth-limit handling code is the only
2667  * code which passes a non-zero modify_tid to hammer2_chain_flush().
2668  */
2669 void
2670 hammer2_chain_flush(hammer2_mount_t *hmp, hammer2_chain_t *chain,
2671                     hammer2_tid_t modify_tid)
2672 {
2673         hammer2_chain_t *parent;
2674         hammer2_chain_t *scan;
2675         hammer2_blockref_t *base;
2676         hammer2_flush_info_t info;
2677         int count;
2678         int reflush;
2679
2680         /*
2681          * Execute the recursive flush and handle deferrals.
2682          *
2683          * Chains can be ridiculously long (thousands deep), so to
2684          * avoid blowing out the kernel stack the recursive flush has a
2685          * depth limit.  Elements at the limit are placed on a list
2686          * for re-execution after the stack has been popped.
2687          */
2688         bzero(&info, sizeof(info));
2689         TAILQ_INIT(&info.flush_list);
2690
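             /*
              * The flush transaction id is carved out of voldata.alloc_tid
              * under the volume data lock.  Flagging the volume chain
              * MODIFIED_AUX presumably ensures the bumped alloc_tid is
              * written out with the next volume header sync.
              */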
2691         if (modify_tid == 0) {
2692                 hammer2_voldata_lock(hmp);
2693                 info.modify_tid = hmp->voldata.alloc_tid++;
2694                 atomic_set_int(&hmp->vchain.flags, HAMMER2_CHAIN_MODIFIED_AUX);
2695                 hammer2_voldata_unlock(hmp);
2696         } else {
2697                 info.modify_tid = modify_tid;
2698         }
2699         reflush = 1;
2700
2701         while (reflush) {
2702                 /*
2703                  * Primary recursion
2704                  */
2705                 hammer2_chain_flush_pass1(hmp, chain, &info);
2706                 reflush = 0;
2707
2708                 while ((scan = TAILQ_FIRST(&info.flush_list)) != NULL) {
2709                         /*
2710                          * Secondary recursion.  Note that a reference is
2711                          * retained from the element's presence on the
2712                          * deferral list.
2713                          */
2714                         KKASSERT(scan->flags & HAMMER2_CHAIN_DEFERRED);
2715                         TAILQ_REMOVE(&info.flush_list, scan, flush_node);
2716                         atomic_clear_int(&scan->flags, HAMMER2_CHAIN_DEFERRED);
2717
2718                         /*
2719                          * Now that we've popped back up we can do a secondary
2720                          * recursion on the deferred elements.
2721                          */
2722                         if (hammer2_debug & 0x0040)
2723                                 kprintf("deferred flush %p\n", scan);
2724                         hammer2_chain_lock(hmp, scan, HAMMER2_RESOLVE_MAYBE);
2725                         hammer2_chain_flush(hmp, scan, info.modify_tid);
2726                         hammer2_chain_unlock(hmp, scan);
2727
2728                         /*
2729                          * Only flag a reflush if SUBMODIFIED is no longer
2730                          * Only flag a reflush if the element is now clean.
2731                          * If it is still marked modified or submodified it
2732                          * will just wind up on our flush_list again.
2733                         if ((scan->flags & (HAMMER2_CHAIN_SUBMODIFIED |
2734                                             HAMMER2_CHAIN_MODIFIED |
2735                                             HAMMER2_CHAIN_MODIFIED_AUX)) == 0) {
2736                                 reflush = 1;
2737                         }
2738                         hammer2_chain_drop(hmp, scan);
2739                 }
2740                 if ((hammer2_debug & 0x0040) && reflush)
2741                         kprintf("reflush %p\n", chain);
2742         }
2743
2744         /*
2745          * The SUBMODIFIED bit must propagate upward if the chain could not
2746          * be completely flushed.
2747          */
2748         if (chain->flags & (HAMMER2_CHAIN_SUBMODIFIED |
2749                             HAMMER2_CHAIN_MODIFIED |
2750                             HAMMER2_CHAIN_MODIFIED_AUX |
2751                             HAMMER2_CHAIN_MOVED)) {
2752                 hammer2_chain_parent_setsubmod(hmp, chain);
2753         }
2754
2755         /*
2756          * If the only thing left is a simple bref update, try to
2757          * proactively update the parent; otherwise return early.
2758          */
2759         parent = chain->parent;
2760         if (parent == NULL) {
2761                 return;
2762         }
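             /*
              * Proceed only if the chain is an inode whose sole remaining
              * dirty state is MOVED, i.e. all that is left is copying its
              * bref into the parent's blockref array.
              */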
2763         if (chain->bref.type != HAMMER2_BREF_TYPE_INODE ||
2764             (chain->flags & (HAMMER2_CHAIN_SUBMODIFIED |
2765                              HAMMER2_CHAIN_MODIFIED |
2766                              HAMMER2_CHAIN_MODIFIED_AUX |
2767                              HAMMER2_CHAIN_MOVED)) != HAMMER2_CHAIN_MOVED) {
2768                 return;
2769         }
2770
2771         /*
2772          * We are locking backwards so allow the lock to fail.
2773          */
2774         if (ccms_thread_lock_nonblock(&parent->cst, CCMS_STATE_EXCLUSIVE))
2775                 return;
2776
2777         /*
2778          * We are updating brefs but we have to call chain_modify()
2779          * because our caller is not being run from a recursive flush.
2780          *
2781          * This will also chain up the parent list and set the SUBMODIFIED
2782          * flag.
2783          *
2784          * We do not want to set HAMMER2_CHAIN_MODIFY_TID here because the
2785          * modification is only related to updating a bref in the parent.
2786          *
2787          * When updating the blockset embedded in the volume header we must
2788          * also update voldata.mirror_tid.
2789          */
2790         hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_MAYBE);
2791         hammer2_chain_modify(hmp, parent, HAMMER2_MODIFY_NO_MODIFY_TID);
2792
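             /*
              * Locate the parent's blockref array and entry count.  Inodes
              * embed a fixed-size blockset, indirect blocks use their data
              * buffer, and the volume root uses the super-root blockset in
              * the volume header.
              */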
2793         switch(parent->bref.type) {
2794         case HAMMER2_BREF_TYPE_INODE:
2795                 base = &parent->data->ipdata.u.blockset.
2796                         blockref[0];
2797                 count = HAMMER2_SET_COUNT;
2798                 break;
2799         case HAMMER2_BREF_TYPE_INDIRECT:
2800                 base = &parent->data->npdata.blockref[0];
2801                 count = parent->bytes /
2802                         sizeof(hammer2_blockref_t);
2803                 break;
2804         case HAMMER2_BREF_TYPE_VOLUME:
2805                 base = &hmp->voldata.sroot_blockset.blockref[0];
2806                 count = HAMMER2_SET_COUNT;
2807                 if (chain->flags & HAMMER2_CHAIN_MOVED) {
2808                         if (hmp->voldata.mirror_tid < chain->bref.mirror_tid) {
2809                                 hmp->voldata.mirror_tid =
2810                                         chain->bref.mirror_tid;
2811                         }
2812                 }
2813                 break;
2814         default:
2815                 base = NULL;
2816                 panic("hammer2_chain_flush: "
2817                       "unrecognized blockref type: %d",
2818                       parent->bref.type);
2819         }
2820
2821         /*
2822          * Update the blockref in the parent.  We do not have to set
2823          * MOVED in the parent because the parent has been marked modified,
2824          * so the flush sequence will pick up the bref change.
2825          *
2826          * We do have to propagate mirror_tid upward.
2827          */
2828         KKASSERT(chain->index >= 0 &&
2829                  chain->index < count);
2830         KKASSERT(chain->parent == parent);
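             /*
              * Clearing MOVED also releases the reference that accompanies
              * the flag, hence the chain_drop() below.
              */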
2831         if (chain->flags & HAMMER2_CHAIN_MOVED) {
2832                 base[chain->index] = chain->bref_flush;
2833                 if (parent->bref.mirror_tid < chain->bref_flush.mirror_tid)
2834                         parent->bref.mirror_tid = chain->bref_flush.mirror_tid;
2835                 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MOVED);
2836                 hammer2_chain_drop(hmp, chain);
2837         } else if (bcmp(&base[chain->index], &chain->bref_flush,
2838                    sizeof(chain->bref)) != 0) {
2839                 panic("hammer2: unflagged bref update(2)");
2840         }
2841         ccms_thread_unlock(&parent->cst);               /* release manual op */
2842         hammer2_chain_unlock(hmp, parent);
2843 }