hammer2 - major simplification 1/many (stabilization)
[dragonfly.git] / sys / vfs / hammer2 / hammer2_chain.c
1 /*
2  * Copyright (c) 2011-2014 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  * and Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 /*
36  * This subsystem implements most of the core support functions for
37  * the hammer2_chain structure.
38  *
39  * Chains are the in-memory version on media objects (volume header, inodes,
40  * indirect blocks, data blocks, etc).  Chains represent a portion of the
41  * HAMMER2 topology.
42  *
43  * Chains are no-longer delete-duplicated.  Instead, the original in-memory
44  * chain will be moved along with its block reference (e.g. for things like
45  * renames, hardlink operations, modifications, etc), and will be indexed
46  * on a secondary list for flush handling instead of propagating a flag
47  * upward to the root.
48  *
49  * Concurrent front-end operations can still run against backend flushes
50  * as long as they do not cross the current flush boundary.  An operation
51  * running above the current flush (in areas not yet flushed) can become
52  * part of the current flush while ano peration running below the current
53  * flush can become part of the next flush.
54  */
55 #include <sys/cdefs.h>
56 #include <sys/param.h>
57 #include <sys/systm.h>
58 #include <sys/types.h>
59 #include <sys/lock.h>
60 #include <sys/kern_syscall.h>
61 #include <sys/uuid.h>
62
63 #include "hammer2.h"
64
65 static int hammer2_indirect_optimize;   /* XXX SYSCTL */
66
67 static hammer2_chain_t *hammer2_chain_create_indirect(
68                 hammer2_trans_t *trans, hammer2_chain_t *parent,
69                 hammer2_key_t key, int keybits, int for_type, int *errorp);
70 static void hammer2_chain_drop_data(hammer2_chain_t *chain, int lastdrop);
71 static hammer2_chain_t *hammer2_combined_find(
72                 hammer2_chain_t *parent,
73                 hammer2_blockref_t *base, int count,
74                 int *cache_indexp, hammer2_key_t *key_nextp,
75                 hammer2_key_t key_beg, hammer2_key_t key_end,
76                 hammer2_blockref_t **bresp);
77
78 /*
79  * Basic RBTree for chains (core->rbtree and core->dbtree).  Chains cannot
80  * overlap in the RB trees.  Deleted chains are moved from rbtree to either
81  * dbtree or to dbq.
82  *
83  * Chains in delete-duplicate sequences can always iterate through core_entry
84  * to locate the live version of the chain.
85  */
86 RB_GENERATE(hammer2_chain_tree, hammer2_chain, rbnode, hammer2_chain_cmp);
87
88 int
89 hammer2_chain_cmp(hammer2_chain_t *chain1, hammer2_chain_t *chain2)
90 {
91         hammer2_key_t c1_beg;
92         hammer2_key_t c1_end;
93         hammer2_key_t c2_beg;
94         hammer2_key_t c2_end;
95
96         /*
97          * Compare chains.  Overlaps are not supposed to happen and catch
98          * any software issues early we count overlaps as a match.
99          */
100         c1_beg = chain1->bref.key;
101         c1_end = c1_beg + ((hammer2_key_t)1 << chain1->bref.keybits) - 1;
102         c2_beg = chain2->bref.key;
103         c2_end = c2_beg + ((hammer2_key_t)1 << chain2->bref.keybits) - 1;
104
105         if (c1_end < c2_beg)    /* fully to the left */
106                 return(-1);
107         if (c1_beg > c2_end)    /* fully to the right */
108                 return(1);
109         return(0);              /* overlap (must not cross edge boundary) */
110 }
111
112 static __inline
113 int
114 hammer2_isclusterable(hammer2_chain_t *chain)
115 {
116         if (hammer2_cluster_enable) {
117                 if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
118                     chain->bref.type == HAMMER2_BREF_TYPE_INODE ||
119                     chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
120                         return(1);
121                 }
122         }
123         return(0);
124 }
125
126 /*
127  * Make a chain visible to the flusher.  The flusher needs to be able to
128  * do flushes of a subdirectory chains or single files so it does a top-down
129  * recursion using the ONFLUSH flag for the recursion.  It locates MODIFIED
130  * or UPDATE chains and flushes back up the chain to the root.
131  */
132 void
133 hammer2_chain_setflush(hammer2_trans_t *trans, hammer2_chain_t *chain)
134 {
135         hammer2_chain_t *parent;
136
137         if ((chain->flags & HAMMER2_CHAIN_ONFLUSH) == 0) {
138                 spin_lock(&chain->core.cst.spin);
139                 while ((chain->flags & HAMMER2_CHAIN_ONFLUSH) == 0) {
140                         atomic_set_int(&chain->flags, HAMMER2_CHAIN_ONFLUSH);
141                         if ((parent = chain->parent) == NULL)
142                                 break;
143                         spin_lock(&parent->core.cst.spin);
144                         spin_unlock(&chain->core.cst.spin);
145                         chain = parent;
146                 }
147                 spin_unlock(&chain->core.cst.spin);
148         }
149 }
150
151 /*
152  * Allocate a new disconnected chain element representing the specified
153  * bref.  chain->refs is set to 1 and the passed bref is copied to
154  * chain->bref.  chain->bytes is derived from the bref.
155  *
156  * chain->core is NOT allocated and the media data and bp pointers are left
157  * NULL.  The caller must call chain_core_alloc() to allocate or associate
158  * a core with the chain.
159  *
160  * chain->pmp inherits pmp unless the chain is an inode (other than the
161  * super-root inode).
162  *
163  * NOTE: Returns a referenced but unlocked (because there is no core) chain.
164  */
165 hammer2_chain_t *
166 hammer2_chain_alloc(hammer2_mount_t *hmp, hammer2_pfsmount_t *pmp,
167                     hammer2_trans_t *trans, hammer2_blockref_t *bref)
168 {
169         hammer2_chain_t *chain;
170         u_int bytes = 1U << (int)(bref->data_off & HAMMER2_OFF_MASK_RADIX);
171
172         /*
173          * Construct the appropriate system structure.
174          */
175         switch(bref->type) {
176         case HAMMER2_BREF_TYPE_INODE:
177         case HAMMER2_BREF_TYPE_INDIRECT:
178         case HAMMER2_BREF_TYPE_FREEMAP_NODE:
179         case HAMMER2_BREF_TYPE_DATA:
180         case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
181                 /*
182                  * Chain's are really only associated with the hmp but we
183                  * maintain a pmp association for per-mount memory tracking
184                  * purposes.  The pmp can be NULL.
185                  */
186                 chain = kmalloc(sizeof(*chain), hmp->mchain, M_WAITOK | M_ZERO);
187                 break;
188         case HAMMER2_BREF_TYPE_VOLUME:
189         case HAMMER2_BREF_TYPE_FREEMAP:
190                 chain = NULL;
191                 panic("hammer2_chain_alloc volume type illegal for op");
192         default:
193                 chain = NULL;
194                 panic("hammer2_chain_alloc: unrecognized blockref type: %d",
195                       bref->type);
196         }
197
198         /*
199          * Initialize the new chain structure.
200          */
201         chain->pmp = pmp;
202         chain->hmp = hmp;
203         chain->bref = *bref;
204         chain->bytes = bytes;
205         chain->refs = 1;
206         chain->flags = HAMMER2_CHAIN_ALLOCATED;
207
208         /*
209          * Set the PFS boundary flag if this chain represents a PFS root.
210          */
211         if (bref->flags & HAMMER2_BREF_FLAG_PFSROOT)
212                 chain->flags |= HAMMER2_CHAIN_PFSBOUNDARY;
213
214         return (chain);
215 }
216
217 /*
218  * Associate an existing core with the chain or allocate a new core.
219  *
220  * The core is not locked.  No additional refs on the chain are made.
221  * (trans) must not be NULL if (core) is not NULL.
222  *
223  * When chains are delete-duplicated during flushes we insert nchain on
224  * the ownerq after ochain instead of at the end in order to give the
225  * drop code visibility in the correct order, otherwise drops can be missed.
226  */
227 void
228 hammer2_chain_core_alloc(hammer2_trans_t *trans, hammer2_chain_t *chain)
229 {
230         hammer2_chain_core_t *core = &chain->core;
231
232         /*
233          * Fresh core under nchain (no multi-homing of ochain's
234          * sub-tree).
235          */
236         RB_INIT(&core->rbtree); /* live chains */
237         ccms_cst_init(&core->cst, chain);
238 }
239
240 /*
241  * Add a reference to a chain element, preventing its destruction.
242  *
243  * (can be called with spinlock held)
244  */
245 void
246 hammer2_chain_ref(hammer2_chain_t *chain)
247 {
248         atomic_add_int(&chain->refs, 1);
249 }
250
251 /*
252  * Insert the chain in the core rbtree.
253  *
254  * Normal insertions are placed in the live rbtree.  Insertion of a deleted
255  * chain is a special case used by the flush code that is placed on the
256  * unstaged deleted list to avoid confusing the live view.
257  */
258 #define HAMMER2_CHAIN_INSERT_SPIN       0x0001
259 #define HAMMER2_CHAIN_INSERT_LIVE       0x0002
260 #define HAMMER2_CHAIN_INSERT_RACE       0x0004
261
262 static
263 int
264 hammer2_chain_insert(hammer2_chain_t *parent, hammer2_chain_t *chain,
265                      int flags, int generation)
266 {
267         hammer2_chain_t *xchain;
268         int error = 0;
269
270         if (flags & HAMMER2_CHAIN_INSERT_SPIN)
271                 spin_lock(&parent->core.cst.spin);
272
273         /*
274          * Interlocked by spinlock, check for race
275          */
276         if ((flags & HAMMER2_CHAIN_INSERT_RACE) &&
277             parent->core.generation != generation) {
278                 error = EAGAIN;
279                 goto failed;
280         }
281
282         /*
283          * Insert chain
284          */
285         xchain = RB_INSERT(hammer2_chain_tree, &parent->core.rbtree, chain);
286         KASSERT(xchain == NULL,
287                 ("hammer2_chain_insert: collision %p %p", chain, xchain));
288         atomic_set_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
289         chain->parent = parent;
290         ++parent->core.chain_count;
291         ++parent->core.generation;      /* XXX incs for _get() too, XXX */
292
293         /*
294          * We have to keep track of the effective live-view blockref count
295          * so the create code knows when to push an indirect block.
296          */
297         if (flags & HAMMER2_CHAIN_INSERT_LIVE)
298                 atomic_add_int(&parent->core.live_count, 1);
299 failed:
300         if (flags & HAMMER2_CHAIN_INSERT_SPIN)
301                 spin_unlock(&parent->core.cst.spin);
302         return error;
303 }
304
305 /*
306  * Drop the caller's reference to the chain.  When the ref count drops to
307  * zero this function will try to disassociate the chain from its parent and
308  * deallocate it, then recursely drop the parent using the implied ref
309  * from the chain's chain->parent.
310  */
311 static hammer2_chain_t *hammer2_chain_lastdrop(hammer2_chain_t *chain);
312
313 void
314 hammer2_chain_drop(hammer2_chain_t *chain)
315 {
316         u_int refs;
317         u_int need = 0;
318
319         if (hammer2_debug & 0x200000)
320                 Debugger("drop");
321
322         if (chain->flags & HAMMER2_CHAIN_UPDATE)
323                 ++need;
324         if (chain->flags & HAMMER2_CHAIN_MODIFIED)
325                 ++need;
326         KKASSERT(chain->refs > need);
327
328         while (chain) {
329                 refs = chain->refs;
330                 cpu_ccfence();
331                 KKASSERT(refs > 0);
332
333                 if (refs == 1) {
334                         chain = hammer2_chain_lastdrop(chain);
335                 } else {
336                         if (atomic_cmpset_int(&chain->refs, refs, refs - 1))
337                                 break;
338                         /* retry the same chain */
339                 }
340         }
341 }
342
343 /*
344  * Safe handling of the 1->0 transition on chain.  Returns a chain for
345  * recursive drop or NULL, possibly returning the same chain if the atomic
346  * op fails.
347  *
348  * Whem two chains need to be recursively dropped we use the chain
349  * we would otherwise free to placehold the additional chain.  It's a bit
350  * convoluted but we can't just recurse without potentially blowing out
351  * the kernel stack.
352  *
353  * The chain cannot be freed if it has a non-empty core (children) or
354  * it is not at the head of ownerq.
355  *
356  * The cst spinlock is allowed nest child-to-parent (not parent-to-child).
357  */
358 static
359 hammer2_chain_t *
360 hammer2_chain_lastdrop(hammer2_chain_t *chain)
361 {
362         hammer2_pfsmount_t *pmp;
363         hammer2_mount_t *hmp;
364         hammer2_chain_t *parent;
365         hammer2_chain_t *rdrop;
366
367         /*
368          * Spinlock the core and check to see if it is empty.  If it is
369          * not empty we leave chain intact with refs == 0.  The elements
370          * in core->rbtree are associated with other chains contemporary
371          * with ours but not with our chain directly.
372          */
373         spin_lock(&chain->core.cst.spin);
374
375         /*
376          * We can't free non-stale chains with children until we are
377          * able to free the children because there might be a flush
378          * dependency.  Flushes of stale children (which should also
379          * have their deleted flag set) short-cut recursive flush
380          * dependencies and can be freed here.  Any flushes which run
381          * through stale children due to the flush synchronization
382          * point should have a FLUSH_* bit set in the chain and not
383          * reach lastdrop at this time.
384          *
385          * NOTE: We return (chain) on failure to retry.
386          */
387         if (chain->core.chain_count) {
388                 if (atomic_cmpset_int(&chain->refs, 1, 0)) {
389                         spin_unlock(&chain->core.cst.spin);
390                         chain = NULL;   /* success */
391                 } else {
392                         spin_unlock(&chain->core.cst.spin);
393                 }
394                 return(chain);
395         }
396         /* no chains left under us */
397
398         /*
399          * chain->core has no children left so no accessors can get to our
400          * chain from there.  Now we have to lock the parent core to interlock
401          * remaining possible accessors that might bump chain's refs before
402          * we can safely drop chain's refs with intent to free the chain.
403          */
404         hmp = chain->hmp;
405         pmp = chain->pmp;       /* can be NULL */
406         rdrop = NULL;
407
408         /*
409          * Spinlock the parent and try to drop the last ref on chain.
410          * On success remove chain from its parent, otherwise return NULL.
411          *
412          * (normal core locks are top-down recursive but we define core
413          *  spinlocks as bottom-up recursive, so this is safe).
414          */
415         if ((parent = chain->parent) != NULL) {
416                 spin_lock(&parent->core.cst.spin);
417                 if (atomic_cmpset_int(&chain->refs, 1, 0) == 0) {
418                         /* 1->0 transition failed */
419                         spin_unlock(&parent->core.cst.spin);
420                         spin_unlock(&chain->core.cst.spin);
421                         return(chain);  /* retry */
422                 }
423
424                 /*
425                  * 1->0 transition successful, remove chain from its
426                  * above core.
427                  */
428                 if (chain->flags & HAMMER2_CHAIN_ONRBTREE) {
429                         RB_REMOVE(hammer2_chain_tree,
430                                   &parent->core.rbtree, chain);
431                         atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
432                         --parent->core.chain_count;
433                         chain->parent = NULL;
434                 }
435
436                 /*
437                  * If our chain was the last chain in the parent's core the
438                  * core is now empty and its parent might have to be
439                  * re-dropped if it has 0 refs.
440                  */
441                 if (parent->core.chain_count == 0) {
442                         rdrop = parent;
443                         if (atomic_cmpset_int(&rdrop->refs, 0, 1) == 0) {
444                                 rdrop = NULL;
445                         }
446                 }
447                 spin_unlock(&parent->core.cst.spin);
448                 parent = NULL;  /* safety */
449         }
450
451         /*
452          * Successful 1->0 transition and the chain can be destroyed now.
453          *
454          * We still have the core spinlock, and core's chain_count is 0.
455          * Any parent spinlock is gone.
456          */
457         spin_unlock(&chain->core.cst.spin);
458         KKASSERT(RB_EMPTY(&chain->core.rbtree) &&
459                  chain->core.chain_count == 0);
460         KKASSERT(chain->core.cst.count == 0);
461         KKASSERT(chain->core.cst.upgrade == 0);
462
463         /*
464          * All spin locks are gone, finish freeing stuff.
465          */
466         KKASSERT((chain->flags & (HAMMER2_CHAIN_UPDATE |
467                                   HAMMER2_CHAIN_MODIFIED)) == 0);
468         hammer2_chain_drop_data(chain, 1);
469
470         KKASSERT(chain->dio == NULL);
471
472         /*
473          * Once chain resources are gone we can use the now dead chain
474          * structure to placehold what might otherwise require a recursive
475          * drop, because we have potentially two things to drop and can only
476          * return one directly.
477          */
478         if (chain->flags & HAMMER2_CHAIN_ALLOCATED) {
479                 chain->flags &= ~HAMMER2_CHAIN_ALLOCATED;
480                 chain->hmp = NULL;
481                 kfree(chain, hmp->mchain);
482         }
483
484         /*
485          * Possible chaining loop when parent re-drop needed.
486          */
487         return(rdrop);
488 }
489
490 /*
491  * On either last lock release or last drop
492  */
493 static void
494 hammer2_chain_drop_data(hammer2_chain_t *chain, int lastdrop)
495 {
496         /*hammer2_mount_t *hmp = chain->hmp;*/
497
498         switch(chain->bref.type) {
499         case HAMMER2_BREF_TYPE_VOLUME:
500         case HAMMER2_BREF_TYPE_FREEMAP:
501                 if (lastdrop)
502                         chain->data = NULL;
503                 break;
504         default:
505                 KKASSERT(chain->data == NULL);
506                 break;
507         }
508 }
509
510 /*
511  * Ref and lock a chain element, acquiring its data with I/O if necessary,
512  * and specify how you would like the data to be resolved.
513  *
514  * Returns 0 on success or an error code if the data could not be acquired.
515  * The chain element is locked on return regardless of whether an error
516  * occurred or not.
517  *
518  * The lock is allowed to recurse, multiple locking ops will aggregate
519  * the requested resolve types.  Once data is assigned it will not be
520  * removed until the last unlock.
521  *
522  * HAMMER2_RESOLVE_NEVER - Do not resolve the data element.
523  *                         (typically used to avoid device/logical buffer
524  *                          aliasing for data)
525  *
526  * HAMMER2_RESOLVE_MAYBE - Do not resolve data elements for chains in
527  *                         the INITIAL-create state (indirect blocks only).
528  *
529  *                         Do not resolve data elements for DATA chains.
530  *                         (typically used to avoid device/logical buffer
531  *                          aliasing for data)
532  *
533  * HAMMER2_RESOLVE_ALWAYS- Always resolve the data element.
534  *
535  * HAMMER2_RESOLVE_SHARED- (flag) The chain is locked shared, otherwise
536  *                         it will be locked exclusive.
537  *
538  * NOTE: Embedded elements (volume header, inodes) are always resolved
539  *       regardless.
540  *
541  * NOTE: Specifying HAMMER2_RESOLVE_ALWAYS on a newly-created non-embedded
542  *       element will instantiate and zero its buffer, and flush it on
543  *       release.
544  *
545  * NOTE: (data) elements are normally locked RESOLVE_NEVER or RESOLVE_MAYBE
546  *       so as not to instantiate a device buffer, which could alias against
547  *       a logical file buffer.  However, if ALWAYS is specified the
548  *       device buffer will be instantiated anyway.
549  *
550  * WARNING! If data must be fetched a shared lock will temporarily be
551  *          upgraded to exclusive.  However, a deadlock can occur if
552  *          the caller owns more than one shared lock.
553  */
554 int
555 hammer2_chain_lock(hammer2_chain_t *chain, int how)
556 {
557         hammer2_mount_t *hmp;
558         hammer2_blockref_t *bref;
559         ccms_state_t ostate;
560         char *bdata;
561         int error;
562
563         /*
564          * Ref and lock the element.  Recursive locks are allowed.
565          */
566         if ((how & HAMMER2_RESOLVE_NOREF) == 0)
567                 hammer2_chain_ref(chain);
568         atomic_add_int(&chain->lockcnt, 1);
569
570         hmp = chain->hmp;
571         KKASSERT(hmp != NULL);
572
573         /*
574          * Get the appropriate lock.
575          */
576         if (how & HAMMER2_RESOLVE_SHARED)
577                 ccms_thread_lock(&chain->core.cst, CCMS_STATE_SHARED);
578         else
579                 ccms_thread_lock(&chain->core.cst, CCMS_STATE_EXCLUSIVE);
580
581         /*
582          * If we already have a valid data pointer no further action is
583          * necessary.
584          */
585         if (chain->data)
586                 return (0);
587
588         /*
589          * Do we have to resolve the data?
590          */
591         switch(how & HAMMER2_RESOLVE_MASK) {
592         case HAMMER2_RESOLVE_NEVER:
593                 return(0);
594         case HAMMER2_RESOLVE_MAYBE:
595                 if (chain->flags & HAMMER2_CHAIN_INITIAL)
596                         return(0);
597                 if (chain->bref.type == HAMMER2_BREF_TYPE_DATA)
598                         return(0);
599 #if 0
600                 if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE)
601                         return(0);
602 #endif
603                 if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF)
604                         return(0);
605                 /* fall through */
606         case HAMMER2_RESOLVE_ALWAYS:
607                 break;
608         }
609
610         /*
611          * Upgrade to an exclusive lock so we can safely manipulate the
612          * buffer cache.  If another thread got to it before us we
613          * can just return.
614          */
615         ostate = ccms_thread_lock_upgrade(&chain->core.cst);
616         if (chain->data) {
617                 ccms_thread_lock_downgrade(&chain->core.cst, ostate);
618                 return (0);
619         }
620
621         /*
622          * We must resolve to a device buffer, either by issuing I/O or
623          * by creating a zero-fill element.  We do not mark the buffer
624          * dirty when creating a zero-fill element (the hammer2_chain_modify()
625          * API must still be used to do that).
626          *
627          * The device buffer is variable-sized in powers of 2 down
628          * to HAMMER2_MIN_ALLOC (typically 1K).  A 64K physical storage
629          * chunk always contains buffers of the same size. (XXX)
630          *
631          * The minimum physical IO size may be larger than the variable
632          * block size.
633          */
634         bref = &chain->bref;
635
636         /*
637          * The getblk() optimization can only be used on newly created
638          * elements if the physical block size matches the request.
639          */
640         if (chain->flags & HAMMER2_CHAIN_INITIAL) {
641                 error = hammer2_io_new(hmp, bref->data_off, chain->bytes,
642                                         &chain->dio);
643         } else {
644                 error = hammer2_io_bread(hmp, bref->data_off, chain->bytes,
645                                          &chain->dio);
646                 hammer2_adjreadcounter(&chain->bref, chain->bytes);
647         }
648
649         if (error) {
650                 kprintf("hammer2_chain_lock: I/O error %016jx: %d\n",
651                         (intmax_t)bref->data_off, error);
652                 hammer2_io_bqrelse(&chain->dio);
653                 ccms_thread_lock_downgrade(&chain->core.cst, ostate);
654                 return (error);
655         }
656
657 #if 0
658         /*
659          * No need for this, always require that hammer2_chain_modify()
660          * be called before any modifying operations.
661          */
662         if ((chain->flags & HAMMER2_CHAIN_MODIFIED) &&
663             !hammer2_io_isdirty(chain->dio)) {
664                 hammer2_io_setdirty(chain->dio);
665         }
666 #endif
667
668         /*
669          * Clear INITIAL.  In this case we used io_new() and the buffer has
670          * been zero'd and marked dirty.
671          */
672         bdata = hammer2_io_data(chain->dio, chain->bref.data_off);
673         if (chain->flags & HAMMER2_CHAIN_INITIAL)
674                 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
675
676         /*
677          * Setup the data pointer, either pointing it to an embedded data
678          * structure and copying the data from the buffer, or pointing it
679          * into the buffer.
680          *
681          * The buffer is not retained when copying to an embedded data
682          * structure in order to avoid potential deadlocks or recursions
683          * on the same physical buffer.
684          */
685         switch (bref->type) {
686         case HAMMER2_BREF_TYPE_VOLUME:
687         case HAMMER2_BREF_TYPE_FREEMAP:
688                 /*
689                  * Copy data from bp to embedded buffer
690                  */
691                 panic("hammer2_chain_lock: called on unresolved volume header");
692                 break;
693         case HAMMER2_BREF_TYPE_INODE:
694         case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
695         case HAMMER2_BREF_TYPE_INDIRECT:
696         case HAMMER2_BREF_TYPE_DATA:
697         case HAMMER2_BREF_TYPE_FREEMAP_NODE:
698         default:
699                 /*
700                  * Point data at the device buffer and leave dio intact.
701                  */
702                 chain->data = (void *)bdata;
703                 break;
704         }
705         ccms_thread_lock_downgrade(&chain->core.cst, ostate);
706         return (0);
707 }
708
709 /*
710  * This basically calls hammer2_io_breadcb() but does some pre-processing
711  * of the chain first to handle certain cases.
712  */
713 void
714 hammer2_chain_load_async(hammer2_cluster_t *cluster,
715                          void (*callback)(hammer2_io_t *dio,
716                                           hammer2_cluster_t *cluster,
717                                           hammer2_chain_t *chain,
718                                           void *arg_p, off_t arg_o),
719                          void *arg_p)
720 {
721         hammer2_chain_t *chain;
722         hammer2_mount_t *hmp;
723         struct hammer2_io *dio;
724         hammer2_blockref_t *bref;
725         int error;
726         int i;
727
728         /*
729          * If no chain specified see if any chain data is available and use
730          * that, otherwise begin an I/O iteration using the first chain.
731          */
732         chain = NULL;
733         for (i = 0; i < cluster->nchains; ++i) {
734                 chain = cluster->array[i];
735                 if (chain && chain->data)
736                         break;
737         }
738         if (i == cluster->nchains) {
739                 chain = cluster->array[0];
740                 i = 0;
741         }
742
743         if (chain->data) {
744                 callback(NULL, cluster, chain, arg_p, (off_t)i);
745                 return;
746         }
747
748         /*
749          * We must resolve to a device buffer, either by issuing I/O or
750          * by creating a zero-fill element.  We do not mark the buffer
751          * dirty when creating a zero-fill element (the hammer2_chain_modify()
752          * API must still be used to do that).
753          *
754          * The device buffer is variable-sized in powers of 2 down
755          * to HAMMER2_MIN_ALLOC (typically 1K).  A 64K physical storage
756          * chunk always contains buffers of the same size. (XXX)
757          *
758          * The minimum physical IO size may be larger than the variable
759          * block size.
760          */
761         bref = &chain->bref;
762         hmp = chain->hmp;
763
764         /*
765          * The getblk() optimization can only be used on newly created
766          * elements if the physical block size matches the request.
767          */
768         if ((chain->flags & HAMMER2_CHAIN_INITIAL) &&
769             chain->bytes == hammer2_devblksize(chain->bytes)) {
770                 error = hammer2_io_new(hmp, bref->data_off, chain->bytes, &dio);
771                 KKASSERT(error == 0);
772                 callback(dio, cluster, chain, arg_p, (off_t)i);
773                 return;
774         }
775
776         /*
777          * Otherwise issue a read
778          */
779         hammer2_adjreadcounter(&chain->bref, chain->bytes);
780         hammer2_io_breadcb(hmp, bref->data_off, chain->bytes,
781                            callback, cluster, chain, arg_p, (off_t)i);
782 }
783
784 /*
785  * Unlock and deref a chain element.
786  *
787  * On the last lock release any non-embedded data (chain->dio) will be
788  * retired.
789  */
790 void
791 hammer2_chain_unlock(hammer2_chain_t *chain)
792 {
793         ccms_state_t ostate;
794         long *counterp;
795         u_int lockcnt;
796
797         /*
798          * The core->cst lock can be shared across several chains so we
799          * need to track the per-chain lockcnt separately.
800          *
801          * If multiple locks are present (or being attempted) on this
802          * particular chain we can just unlock, drop refs, and return.
803          *
804          * Otherwise fall-through on the 1->0 transition.
805          */
806         for (;;) {
807                 lockcnt = chain->lockcnt;
808                 KKASSERT(lockcnt > 0);
809                 cpu_ccfence();
810                 if (lockcnt > 1) {
811                         if (atomic_cmpset_int(&chain->lockcnt,
812                                               lockcnt, lockcnt - 1)) {
813                                 ccms_thread_unlock(&chain->core.cst);
814                                 hammer2_chain_drop(chain);
815                                 return;
816                         }
817                 } else {
818                         if (atomic_cmpset_int(&chain->lockcnt, 1, 0))
819                                 break;
820                 }
821                 /* retry */
822         }
823
824         /*
825          * On the 1->0 transition we upgrade the core lock (if necessary)
826          * to exclusive for terminal processing.  If after upgrading we find
827          * that lockcnt is non-zero, another thread is racing us and will
828          * handle the unload for us later on, so just cleanup and return
829          * leaving the data/io intact
830          *
831          * Otherwise if lockcnt is still 0 it is possible for it to become
832          * non-zero and race, but since we hold the core->cst lock
833          * exclusively all that will happen is that the chain will be
834          * reloaded after we unload it.
835          */
836         ostate = ccms_thread_lock_upgrade(&chain->core.cst);
837         if (chain->lockcnt) {
838                 ccms_thread_unlock_upgraded(&chain->core.cst, ostate);
839                 hammer2_chain_drop(chain);
840                 return;
841         }
842
843         /*
844          * Shortcut the case if the data is embedded or not resolved.
845          *
846          * Do NOT NULL out chain->data (e.g. inode data), it might be
847          * dirty.
848          */
849         if (chain->dio == NULL) {
850                 if ((chain->flags & HAMMER2_CHAIN_MODIFIED) == 0)
851                         hammer2_chain_drop_data(chain, 0);
852                 ccms_thread_unlock_upgraded(&chain->core.cst, ostate);
853                 hammer2_chain_drop(chain);
854                 return;
855         }
856
857         /*
858          * Statistics
859          */
860         if (hammer2_io_isdirty(chain->dio) == 0) {
861                 ;
862         } else if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
863                 switch(chain->bref.type) {
864                 case HAMMER2_BREF_TYPE_DATA:
865                         counterp = &hammer2_ioa_file_write;
866                         break;
867                 case HAMMER2_BREF_TYPE_INODE:
868                         counterp = &hammer2_ioa_meta_write;
869                         break;
870                 case HAMMER2_BREF_TYPE_INDIRECT:
871                         counterp = &hammer2_ioa_indr_write;
872                         break;
873                 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
874                 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
875                         counterp = &hammer2_ioa_fmap_write;
876                         break;
877                 default:
878                         counterp = &hammer2_ioa_volu_write;
879                         break;
880                 }
881                 *counterp += chain->bytes;
882         } else {
883                 switch(chain->bref.type) {
884                 case HAMMER2_BREF_TYPE_DATA:
885                         counterp = &hammer2_iod_file_write;
886                         break;
887                 case HAMMER2_BREF_TYPE_INODE:
888                         counterp = &hammer2_iod_meta_write;
889                         break;
890                 case HAMMER2_BREF_TYPE_INDIRECT:
891                         counterp = &hammer2_iod_indr_write;
892                         break;
893                 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
894                 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
895                         counterp = &hammer2_iod_fmap_write;
896                         break;
897                 default:
898                         counterp = &hammer2_iod_volu_write;
899                         break;
900                 }
901                 *counterp += chain->bytes;
902         }
903
904         /*
905          * Clean out the dio.
906          *
907          * If a device buffer was used for data be sure to destroy the
908          * buffer when we are done to avoid aliases (XXX what about the
909          * underlying VM pages?).
910          *
911          * NOTE: Freemap leaf's use reserved blocks and thus no aliasing
912          *       is possible.
913          *
914          * NOTE: The isdirty check tracks whether we have to bdwrite() the
915          *       buffer or not.  The buffer might already be dirty.  The
916          *       flag is re-set when chain_modify() is called, even if
917          *       MODIFIED is already set, allowing the OS to retire the
918          *       buffer independent of a hammer2 flush.
919          */
920         chain->data = NULL;
921         if ((chain->flags & HAMMER2_CHAIN_IOFLUSH) &&
922             hammer2_io_isdirty(chain->dio)) {
923                 hammer2_io_bawrite(&chain->dio);
924         } else {
925                 hammer2_io_bqrelse(&chain->dio);
926         }
927         ccms_thread_unlock_upgraded(&chain->core.cst, ostate);
928         hammer2_chain_drop(chain);
929 }
930
931 /*
932  * This counts the number of live blockrefs in a block array and
933  * also calculates the point at which all remaining blockrefs are empty.
934  * This routine can only be called on a live chain (DUPLICATED flag not set).
935  *
936  * NOTE: Flag is not set until after the count is complete, allowing
937  *       callers to test the flag without holding the spinlock.
938  *
939  * NOTE: If base is NULL the related chain is still in the INITIAL
940  *       state and there are no blockrefs to count.
941  *
942  * NOTE: live_count may already have some counts accumulated due to
943  *       creation and deletion and could even be initially negative.
944  */
945 void
946 hammer2_chain_countbrefs(hammer2_chain_t *chain,
947                          hammer2_blockref_t *base, int count)
948 {
949         spin_lock(&chain->core.cst.spin);
950         if ((chain->core.flags & HAMMER2_CORE_COUNTEDBREFS) == 0) {
951                 if (base) {
952                         while (--count >= 0) {
953                                 if (base[count].type)
954                                         break;
955                         }
956                         chain->core.live_zero = count + 1;
957                         while (count >= 0) {
958                                 if (base[count].type)
959                                         atomic_add_int(&chain->core.live_count,
960                                                        1);
961                                 --count;
962                         }
963                 } else {
964                         chain->core.live_zero = 0;
965                 }
966                 /* else do not modify live_count */
967                 atomic_set_int(&chain->core.flags, HAMMER2_CORE_COUNTEDBREFS);
968         }
969         spin_unlock(&chain->core.cst.spin);
970 }
971
972 /*
973  * Resize the chain's physical storage allocation in-place.  This will
974  * modify the passed-in chain.  Chains can be resized smaller without
975  * reallocating the storage.  Resizing larger will reallocate the storage.
976  * Excess or prior storage is reclaimed asynchronously at a later time.
977  *
978  * Must be passed an exclusively locked parent and chain.
979  *
980  * This function is mostly used with DATA blocks locked RESOLVE_NEVER in order
981  * to avoid instantiating a device buffer that conflicts with the vnode data
982  * buffer.  That is, the passed-in bp is a logical buffer, whereas any
983  * chain-oriented bp would be a device buffer.
984  *
985  * XXX return error if cannot resize.
986  */
987 void
988 hammer2_chain_resize(hammer2_trans_t *trans, hammer2_inode_t *ip,
989                      hammer2_chain_t *parent, hammer2_chain_t *chain,
990                      int nradix, int flags)
991 {
992         hammer2_mount_t *hmp;
993         size_t obytes;
994         size_t nbytes;
995
996         hmp = chain->hmp;
997
998         /*
999          * Only data and indirect blocks can be resized for now.
1000          * (The volu root, inodes, and freemap elements use a fixed size).
1001          */
1002         KKASSERT(chain != &hmp->vchain);
1003         KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_DATA ||
1004                  chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT);
1005
1006         /*
1007          * Nothing to do if the element is already the proper size
1008          */
1009         obytes = chain->bytes;
1010         nbytes = 1U << nradix;
1011         if (obytes == nbytes)
1012                 return;
1013
1014         /*
1015          * The parent does not have to be locked for the delete/duplicate call,
1016          * but is in this particular code path.
1017          *
1018          * NOTE: If we are not crossing a synchronization point the
1019          *       duplication code will simply reuse the existing chain
1020          *       structure.
1021          *
1022          * NOTE: The modify will set BMAPUPD for us if BMAPPED is set.
1023          */
1024         hammer2_chain_modify(trans, chain, 0);
1025
1026         /*
1027          * Relocate the block, even if making it smaller (because different
1028          * block sizes may be in different regions).
1029          *
1030          * (data blocks only, we aren't copying the storage here).
1031          */
1032         hammer2_freemap_alloc(trans, chain, nbytes);
1033         chain->bytes = nbytes;
1034         atomic_clear_int(&chain->flags, HAMMER2_CHAIN_FORCECOW);
1035         /*ip->delta_dcount += (ssize_t)(nbytes - obytes);*/ /* XXX atomic */
1036
1037         /*
1038          * For now just support it on DATA chains (and not on indirect
1039          * blocks).
1040          */
1041         KKASSERT(chain->dio == NULL);
1042 }
1043
1044 #if 0
1045
1046 /*
1047  * REMOVED - see cluster code
1048  *
1049  * Set a chain modified, making it read-write and duplicating it if necessary.
1050  * This function will assign a new physical block to the chain if necessary
1051  *
1052  * Duplication of already-modified chains is possible when the modification
1053  * crosses a flush synchronization boundary.
1054  *
1055  * Non-data blocks - The chain should be locked to at least the RESOLVE_MAYBE
1056  *                   level or the COW operation will not work.
1057  *
1058  * Data blocks     - The chain is usually locked RESOLVE_NEVER so as not to
1059  *                   run the data through the device buffers.
1060  *
1061  * This function may return a different chain than was passed, in which case
1062  * the old chain will be unlocked and the new chain will be locked.
1063  *
1064  * ip->chain may be adjusted by hammer2_chain_modify_ip().
1065  */
1066 hammer2_inode_data_t *
1067 hammer2_chain_modify_ip(hammer2_trans_t *trans, hammer2_inode_t *ip,
1068                         hammer2_chain_t **chainp, int flags)
1069 {
1070         atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
1071         hammer2_chain_modify(trans, chainp, flags);
1072         if (ip->chain != *chainp)
1073                 hammer2_inode_repoint(ip, NULL, *chainp);
1074         if (ip->vp)
1075                 vsetisdirty(ip->vp);
1076         return(&ip->chain->data->ipdata);
1077 }
1078
1079 #endif
1080
1081 void
1082 hammer2_chain_modify(hammer2_trans_t *trans, hammer2_chain_t *chain, int flags)
1083 {
1084         hammer2_mount_t *hmp;
1085         hammer2_io_t *dio;
1086         int error;
1087         int wasinitial;
1088         int newmod;
1089         char *bdata;
1090
1091         hmp = chain->hmp;
1092
1093         /*
1094          * data is not optional for freemap chains (we must always be sure
1095          * to copy the data on COW storage allocations).
1096          */
1097         if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
1098             chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
1099                 KKASSERT((chain->flags & HAMMER2_CHAIN_INITIAL) ||
1100                          (flags & HAMMER2_MODIFY_OPTDATA) == 0);
1101         }
1102
1103         /*
1104          * Data must be resolved if already assigned unless explicitly
1105          * flagged otherwise.
1106          */
1107         if (chain->data == NULL && (flags & HAMMER2_MODIFY_OPTDATA) == 0 &&
1108             (chain->bref.data_off & ~HAMMER2_OFF_MASK_RADIX)) {
1109                 hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);
1110                 hammer2_chain_unlock(chain);
1111         }
1112
1113         /*
1114          * Otherwise do initial-chain handling.  Set MODIFIED to indicate
1115          * that the chain has been modified.  Set UPDATE to ensure that
1116          * the blockref is updated in the parent.
1117          */
1118         if ((chain->flags & HAMMER2_CHAIN_MODIFIED) == 0) {
1119                 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
1120                 hammer2_chain_ref(chain);
1121                 hammer2_pfs_memory_inc(chain->pmp);
1122                 newmod = 1;
1123         } else {
1124                 newmod = 0;
1125         }
1126         if ((chain->flags & HAMMER2_CHAIN_UPDATE) == 0) {
1127                 atomic_set_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
1128                 hammer2_chain_ref(chain);
1129         }
1130
1131         /*
1132          * The modification or re-modification requires an allocation and
1133          * possible COW.
1134          *
1135          * We normally always allocate new storage here.  If storage exists
1136          * and MODIFY_NOREALLOC is passed in, we do not allocate new storage.
1137          */
1138         if (chain != &hmp->vchain && chain != &hmp->fchain) {
1139                 if ((chain->bref.data_off & ~HAMMER2_OFF_MASK_RADIX) == 0 ||
1140                      ((flags & HAMMER2_MODIFY_NOREALLOC) == 0 && newmod)
1141                 ) {
1142                         hammer2_freemap_alloc(trans, chain, chain->bytes);
1143                         /* XXX failed allocation */
1144                 } else if (chain->flags & HAMMER2_CHAIN_FORCECOW) {
1145                         hammer2_freemap_alloc(trans, chain, chain->bytes);
1146                         /* XXX failed allocation */
1147                 }
1148                 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_FORCECOW);
1149         }
1150
1151         /*
1152          * Set BMAPUPD to tell the flush code that an existing blockmap entry
1153          * requires updating as well as to tell the delete code that the
1154          * chain's blockref might not exactly match (in terms of physical size
1155          * or block offset) the one in the parent's blocktable.  The base key
1156          * of course will still match.
1157          */
1158         if (chain->flags & HAMMER2_CHAIN_BMAPPED)
1159                 atomic_set_int(&chain->flags, HAMMER2_CHAIN_BMAPUPD);
1160
1161         /*
1162          * Do not COW BREF_TYPE_DATA when OPTDATA is set.  This is because
1163          * data modifications are done via the logical buffer cache so COWing
1164          * it here would result in unnecessary extra copies (and possibly extra
1165          * block reallocations).  The INITIAL flag remains unchanged in this
1166          * situation.
1167          *
1168          * (This is a bit of a hack).
1169          */
1170         if (chain->bref.type == HAMMER2_BREF_TYPE_DATA &&
1171             (flags & HAMMER2_MODIFY_OPTDATA)) {
1172                 goto skip2;
1173         }
1174
1175         /*
1176          * Clearing the INITIAL flag (for indirect blocks) indicates that
1177          * we've processed the uninitialized storage allocation.
1178          *
1179          * If this flag is already clear we are likely in a copy-on-write
1180          * situation but we have to be sure NOT to bzero the storage if
1181          * no data is present.
1182          */
1183         if (chain->flags & HAMMER2_CHAIN_INITIAL) {
1184                 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
1185                 wasinitial = 1;
1186         } else {
1187                 wasinitial = 0;
1188         }
1189
1190         /*
1191          * Instantiate data buffer and possibly execute COW operation
1192          */
1193         switch(chain->bref.type) {
1194         case HAMMER2_BREF_TYPE_VOLUME:
1195         case HAMMER2_BREF_TYPE_FREEMAP:
1196                 /*
1197                  * The data is embedded, no copy-on-write operation is
1198                  * needed.
1199                  */
1200                 KKASSERT(chain->dio == NULL);
1201                 break;
1202         case HAMMER2_BREF_TYPE_INODE:
1203         case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
1204         case HAMMER2_BREF_TYPE_DATA:
1205         case HAMMER2_BREF_TYPE_INDIRECT:
1206         case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1207                 /*
1208                  * Perform the copy-on-write operation
1209                  *
1210                  * zero-fill or copy-on-write depending on whether
1211                  * chain->data exists or not and set the dirty state for
1212                  * the new buffer.  hammer2_io_new() will handle the
1213                  * zero-fill.
1214                  */
1215                 KKASSERT(chain != &hmp->vchain && chain != &hmp->fchain);
1216
1217                 if (wasinitial) {
1218                         error = hammer2_io_new(hmp, chain->bref.data_off,
1219                                                chain->bytes, &dio);
1220                 } else {
1221                         error = hammer2_io_bread(hmp, chain->bref.data_off,
1222                                                  chain->bytes, &dio);
1223                 }
1224                 hammer2_adjreadcounter(&chain->bref, chain->bytes);
1225                 KKASSERT(error == 0);
1226
1227                 bdata = hammer2_io_data(dio, chain->bref.data_off);
1228
1229                 if (chain->data) {
1230                         KKASSERT(chain->dio != NULL);
1231                         if (chain->data != (void *)bdata) {
1232                                 bcopy(chain->data, bdata, chain->bytes);
1233                         }
1234                 } else if (wasinitial == 0) {
1235                         /*
1236                          * We have a problem.  We were asked to COW but
1237                          * we don't have any data to COW with!
1238                          */
1239                         panic("hammer2_chain_modify: having a COW %p\n",
1240                               chain);
1241                 }
1242
1243                 /*
1244                  * Retire the old buffer, replace with the new
1245                  */
1246                 if (chain->dio)
1247                         hammer2_io_brelse(&chain->dio);
1248                 chain->data = (void *)bdata;
1249                 chain->dio = dio;
1250                 hammer2_io_setdirty(dio);       /* modified by bcopy above */
1251                 break;
1252         default:
1253                 panic("hammer2_chain_modify: illegal non-embedded type %d",
1254                       chain->bref.type);
1255                 break;
1256
1257         }
1258 skip2:
1259         /*
1260          * setflush on parent indicating that the parent must recurse down
1261          * to us.  Do not call on chain itself which might already have it
1262          * set.
1263          */
1264         if (chain->parent)
1265                 hammer2_chain_setflush(trans, chain->parent);
1266 }
1267
1268 /*
1269  * Volume header data locks
1270  */
1271 void
1272 hammer2_voldata_lock(hammer2_mount_t *hmp)
1273 {
1274         lockmgr(&hmp->vollk, LK_EXCLUSIVE);
1275 }
1276
1277 void
1278 hammer2_voldata_unlock(hammer2_mount_t *hmp)
1279 {
1280         lockmgr(&hmp->vollk, LK_RELEASE);
1281 }
1282
1283 void
1284 hammer2_voldata_modify(hammer2_mount_t *hmp)
1285 {
1286         if ((hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED) == 0) {
1287                 atomic_set_int(&hmp->vchain.flags, HAMMER2_CHAIN_MODIFIED);
1288                 hammer2_chain_ref(&hmp->vchain);
1289                 hammer2_pfs_memory_inc(hmp->vchain.pmp);
1290         }
1291 }
1292
1293 /*
1294  * This function returns the chain at the nearest key within the specified
1295  * range.  The returned chain will be referenced but not locked.
1296  *
1297  * This function will recurse through chain->rbtree as necessary and will
1298  * return a *key_nextp suitable for iteration.  *key_nextp is only set if
1299  * the iteration value is less than the current value of *key_nextp.
1300  *
1301  * The caller should use (*key_nextp) to calculate the actual range of
1302  * the returned element, which will be (key_beg to *key_nextp - 1), because
1303  * there might be another element which is superior to the returned element
1304  * and overlaps it.
1305  *
1306  * (*key_nextp) can be passed as key_beg in an iteration only while non-NULL
1307  * chains continue to be returned.  On EOF (*key_nextp) may overflow since
1308  * it will wind up being (key_end + 1).
1309  *
1310  * WARNING!  Must be called with child's spinlock held.  Spinlock remains
1311  *           held through the operation.
1312  */
1313 struct hammer2_chain_find_info {
1314         hammer2_chain_t         *best;
1315         hammer2_key_t           key_beg;
1316         hammer2_key_t           key_end;
1317         hammer2_key_t           key_next;
1318 };
1319
1320 static int hammer2_chain_find_cmp(hammer2_chain_t *child, void *data);
1321 static int hammer2_chain_find_callback(hammer2_chain_t *child, void *data);
1322
1323 static
1324 hammer2_chain_t *
1325 hammer2_chain_find(hammer2_chain_t *parent, hammer2_key_t *key_nextp,
1326                           hammer2_key_t key_beg, hammer2_key_t key_end)
1327 {
1328         struct hammer2_chain_find_info info;
1329
1330         info.best = NULL;
1331         info.key_beg = key_beg;
1332         info.key_end = key_end;
1333         info.key_next = *key_nextp;
1334
1335         RB_SCAN(hammer2_chain_tree, &parent->core.rbtree,
1336                 hammer2_chain_find_cmp, hammer2_chain_find_callback,
1337                 &info);
1338         *key_nextp = info.key_next;
1339 #if 0
1340         kprintf("chain_find %p %016jx:%016jx next=%016jx\n",
1341                 parent, key_beg, key_end, *key_nextp);
1342 #endif
1343
1344         return (info.best);
1345 }
1346
1347 static
1348 int
1349 hammer2_chain_find_cmp(hammer2_chain_t *child, void *data)
1350 {
1351         struct hammer2_chain_find_info *info = data;
1352         hammer2_key_t child_beg;
1353         hammer2_key_t child_end;
1354
1355         child_beg = child->bref.key;
1356         child_end = child_beg + ((hammer2_key_t)1 << child->bref.keybits) - 1;
1357
1358         if (child_end < info->key_beg)
1359                 return(-1);
1360         if (child_beg > info->key_end)
1361                 return(1);
1362         return(0);
1363 }
1364
1365 static
1366 int
1367 hammer2_chain_find_callback(hammer2_chain_t *child, void *data)
1368 {
1369         struct hammer2_chain_find_info *info = data;
1370         hammer2_chain_t *best;
1371         hammer2_key_t child_end;
1372
1373         /*
1374          * WARNING! Do not discard DUPLICATED chains, it is possible that
1375          *          we are catching an insertion half-way done.  If a
1376          *          duplicated chain turns out to be the best choice the
1377          *          caller will re-check its flags after locking it.
1378          *
1379          * WARNING! Layerq is scanned forwards, exact matches should keep
1380          *          the existing info->best.
1381          */
1382         if ((best = info->best) == NULL) {
1383                 /*
1384                  * No previous best.  Assign best
1385                  */
1386                 info->best = child;
1387         } else if (best->bref.key <= info->key_beg &&
1388                    child->bref.key <= info->key_beg) {
1389                 /*
1390                  * Illegal overlap.
1391                  */
1392                 KKASSERT(0);
1393                 /*info->best = child;*/
1394         } else if (child->bref.key < best->bref.key) {
1395                 /*
1396                  * Child has a nearer key and best is not flush with key_beg.
1397                  * Set best to child.  Truncate key_next to the old best key.
1398                  */
1399                 info->best = child;
1400                 if (info->key_next > best->bref.key || info->key_next == 0)
1401                         info->key_next = best->bref.key;
1402         } else if (child->bref.key == best->bref.key) {
1403                 /*
1404                  * If our current best is flush with the child then this
1405                  * is an illegal overlap.
1406                  *
1407                  * key_next will automatically be limited to the smaller of
1408                  * the two end-points.
1409                  */
1410                 KKASSERT(0);
1411                 info->best = child;
1412         } else {
1413                 /*
1414                  * Keep the current best but truncate key_next to the child's
1415                  * base.
1416                  *
1417                  * key_next will also automatically be limited to the smaller
1418                  * of the two end-points (probably not necessary for this case
1419                  * but we do it anyway).
1420                  */
1421                 if (info->key_next > child->bref.key || info->key_next == 0)
1422                         info->key_next = child->bref.key;
1423         }
1424
1425         /*
1426          * Always truncate key_next based on child's end-of-range.
1427          */
1428         child_end = child->bref.key + ((hammer2_key_t)1 << child->bref.keybits);
1429         if (child_end && (info->key_next > child_end || info->key_next == 0))
1430                 info->key_next = child_end;
1431
1432         return(0);
1433 }
1434
1435 /*
1436  * Retrieve the specified chain from a media blockref, creating the
1437  * in-memory chain structure which reflects it.
1438  *
1439  * To handle insertion races pass the INSERT_RACE flag along with the
1440  * generation number of the core.  NULL will be returned if the generation
1441  * number changes before we have a chance to insert the chain.  Insert
1442  * races can occur because the parent might be held shared.
1443  *
1444  * Caller must hold the parent locked shared or exclusive since we may
1445  * need the parent's bref array to find our block.
1446  *
1447  * WARNING! chain->pmp is left NULL if the bref represents a PFS mount
1448  *          point.
1449  */
1450 hammer2_chain_t *
1451 hammer2_chain_get(hammer2_chain_t *parent, int generation,
1452                   hammer2_blockref_t *bref)
1453 {
1454         hammer2_mount_t *hmp = parent->hmp;
1455         hammer2_chain_t *chain;
1456         int error;
1457
1458         /*
1459          * Allocate a chain structure representing the existing media
1460          * entry.  Resulting chain has one ref and is not locked.
1461          */
1462         if (bref->flags & HAMMER2_BREF_FLAG_PFSROOT)
1463                 chain = hammer2_chain_alloc(hmp, NULL, NULL, bref);
1464         else
1465                 chain = hammer2_chain_alloc(hmp, parent->pmp, NULL, bref);
1466         hammer2_chain_core_alloc(NULL, chain);
1467         /* ref'd chain returned */
1468
1469         /*
1470          * Flag that the chain is in the parent's blockmap so delete/flush
1471          * knows what to do with it.
1472          */
1473         atomic_set_int(&chain->flags, HAMMER2_CHAIN_BMAPPED);
1474
1475         /*
1476          * Link the chain into its parent.  A spinlock is required to safely
1477          * access the RBTREE, and it is possible to collide with another
1478          * hammer2_chain_get() operation because the caller might only hold
1479          * a shared lock on the parent.
1480          */
1481         KKASSERT(parent->refs > 0);
1482         error = hammer2_chain_insert(parent, chain,
1483                                      HAMMER2_CHAIN_INSERT_SPIN |
1484                                      HAMMER2_CHAIN_INSERT_RACE,
1485                                      generation);
1486         if (error) {
1487                 KKASSERT((chain->flags & HAMMER2_CHAIN_ONRBTREE) == 0);
1488                 kprintf("chain %p get race\n", chain);
1489                 hammer2_chain_drop(chain);
1490                 chain = NULL;
1491         } else {
1492                 KKASSERT(chain->flags & HAMMER2_CHAIN_ONRBTREE);
1493         }
1494
1495         /*
1496          * Return our new chain referenced but not locked, or NULL if
1497          * a race occurred.
1498          */
1499         return (chain);
1500 }
1501
1502 /*
1503  * Lookup initialization/completion API
1504  */
1505 hammer2_chain_t *
1506 hammer2_chain_lookup_init(hammer2_chain_t *parent, int flags)
1507 {
1508         if (flags & HAMMER2_LOOKUP_SHARED) {
1509                 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS |
1510                                            HAMMER2_RESOLVE_SHARED);
1511         } else {
1512                 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
1513         }
1514         return (parent);
1515 }
1516
1517 void
1518 hammer2_chain_lookup_done(hammer2_chain_t *parent)
1519 {
1520         if (parent)
1521                 hammer2_chain_unlock(parent);
1522 }
1523
1524 static
1525 hammer2_chain_t *
1526 hammer2_chain_getparent(hammer2_chain_t **parentp, int how)
1527 {
1528         hammer2_chain_t *oparent;
1529         hammer2_chain_t *nparent;
1530
1531         /*
1532          * Be careful of order, oparent must be unlocked before nparent
1533          * is locked below to avoid a deadlock.
1534          */
1535         oparent = *parentp;
1536         spin_lock(&oparent->core.cst.spin);
1537         nparent = oparent->parent;
1538         hammer2_chain_ref(nparent);
1539         spin_unlock(&oparent->core.cst.spin);
1540         if (oparent) {
1541                 hammer2_chain_unlock(oparent);
1542                 oparent = NULL;
1543         }
1544
1545         hammer2_chain_lock(nparent, how | HAMMER2_RESOLVE_NOREF);
1546         *parentp = nparent;
1547
1548         return (nparent);
1549 }
1550
1551 /*
1552  * Locate the first chain whos key range overlaps (key_beg, key_end) inclusive.
1553  * (*parentp) typically points to an inode but can also point to a related
1554  * indirect block and this function will recurse upwards and find the inode
1555  * again.
1556  *
1557  * (*parentp) must be exclusively locked and referenced and can be an inode
1558  * or an existing indirect block within the inode.
1559  *
1560  * On return (*parentp) will be modified to point at the deepest parent chain
1561  * element encountered during the search, as a helper for an insertion or
1562  * deletion.   The new (*parentp) will be locked and referenced and the old
1563  * will be unlocked and dereferenced (no change if they are both the same).
1564  *
1565  * The matching chain will be returned exclusively locked.  If NOLOCK is
1566  * requested the chain will be returned only referenced.
1567  *
1568  * NULL is returned if no match was found, but (*parentp) will still
1569  * potentially be adjusted.
1570  *
1571  * On return (*key_nextp) will point to an iterative value for key_beg.
1572  * (If NULL is returned (*key_nextp) is set to key_end).
1573  *
1574  * This function will also recurse up the chain if the key is not within the
1575  * current parent's range.  (*parentp) can never be set to NULL.  An iteration
1576  * can simply allow (*parentp) to float inside the loop.
1577  *
1578  * NOTE!  chain->data is not always resolved.  By default it will not be
1579  *        resolved for BREF_TYPE_DATA, FREEMAP_NODE, or FREEMAP_LEAF.  Use
1580  *        HAMMER2_LOOKUP_ALWAYS to force resolution (but be careful w/
1581  *        BREF_TYPE_DATA as the device buffer can alias the logical file
1582  *        buffer).
1583  */
1584 hammer2_chain_t *
1585 hammer2_chain_lookup(hammer2_chain_t **parentp, hammer2_key_t *key_nextp,
1586                      hammer2_key_t key_beg, hammer2_key_t key_end,
1587                      int *cache_indexp, int flags, int *ddflagp)
1588 {
1589         hammer2_mount_t *hmp;
1590         hammer2_chain_t *parent;
1591         hammer2_chain_t *chain;
1592         hammer2_blockref_t *base;
1593         hammer2_blockref_t *bref;
1594         hammer2_blockref_t bcopy;
1595         hammer2_key_t scan_beg;
1596         hammer2_key_t scan_end;
1597         int count = 0;
1598         int how_always = HAMMER2_RESOLVE_ALWAYS;
1599         int how_maybe = HAMMER2_RESOLVE_MAYBE;
1600         int how;
1601         int generation;
1602         int maxloops = 300000;
1603
1604         *ddflagp = 0;
1605         if (flags & HAMMER2_LOOKUP_ALWAYS) {
1606                 how_maybe = how_always;
1607                 how = HAMMER2_RESOLVE_ALWAYS;
1608         } else if (flags & (HAMMER2_LOOKUP_NODATA | HAMMER2_LOOKUP_NOLOCK)) {
1609                 how = HAMMER2_RESOLVE_NEVER;
1610         } else {
1611                 how = HAMMER2_RESOLVE_MAYBE;
1612         }
1613         if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK)) {
1614                 how_maybe |= HAMMER2_RESOLVE_SHARED;
1615                 how_always |= HAMMER2_RESOLVE_SHARED;
1616                 how |= HAMMER2_RESOLVE_SHARED;
1617         }
1618
1619         /*
1620          * Recurse (*parentp) upward if necessary until the parent completely
1621          * encloses the key range or we hit the inode.
1622          *
1623          * This function handles races against the flusher doing a delete-
1624          * duplicate above us and re-homes the parent to the duplicate in
1625          * that case, otherwise we'd wind up recursing down a stale chain.
1626          */
1627         parent = *parentp;
1628         hmp = parent->hmp;
1629
1630         while (parent->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
1631                parent->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
1632                 scan_beg = parent->bref.key;
1633                 scan_end = scan_beg +
1634                            ((hammer2_key_t)1 << parent->bref.keybits) - 1;
1635                 if (key_beg >= scan_beg && key_end <= scan_end)
1636                         break;
1637                 parent = hammer2_chain_getparent(parentp, how_maybe);
1638         }
1639
1640 again:
1641         if (--maxloops == 0)
1642                 panic("hammer2_chain_lookup: maxloops");
1643         /*
1644          * Locate the blockref array.  Currently we do a fully associative
1645          * search through the array.
1646          */
1647         switch(parent->bref.type) {
1648         case HAMMER2_BREF_TYPE_INODE:
1649                 /*
1650                  * Special shortcut for embedded data returns the inode
1651                  * itself.  Callers must detect this condition and access
1652                  * the embedded data (the strategy code does this for us).
1653                  *
1654                  * This is only applicable to regular files and softlinks.
1655                  */
1656                 if (parent->data->ipdata.op_flags & HAMMER2_OPFLAG_DIRECTDATA) {
1657                         if (flags & HAMMER2_LOOKUP_NOLOCK)
1658                                 hammer2_chain_ref(parent);
1659                         else
1660                                 hammer2_chain_lock(parent, how_always);
1661                         *key_nextp = key_end + 1;
1662                         *ddflagp = 1;
1663                         return (parent);
1664                 }
1665                 base = &parent->data->ipdata.u.blockset.blockref[0];
1666                 count = HAMMER2_SET_COUNT;
1667                 break;
1668         case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1669         case HAMMER2_BREF_TYPE_INDIRECT:
1670                 /*
1671                  * Handle MATCHIND on the parent
1672                  */
1673                 if (flags & HAMMER2_LOOKUP_MATCHIND) {
1674                         scan_beg = parent->bref.key;
1675                         scan_end = scan_beg +
1676                                ((hammer2_key_t)1 << parent->bref.keybits) - 1;
1677                         if (key_beg == scan_beg && key_end == scan_end) {
1678                                 chain = parent;
1679                                 hammer2_chain_lock(chain, how_maybe);
1680                                 *key_nextp = scan_end + 1;
1681                                 goto done;
1682                         }
1683                 }
1684                 /*
1685                  * Optimize indirect blocks in the INITIAL state to avoid
1686                  * I/O.
1687                  */
1688                 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1689                         base = NULL;
1690                 } else {
1691                         if (parent->data == NULL)
1692                                 panic("parent->data is NULL");
1693                         base = &parent->data->npdata[0];
1694                 }
1695                 count = parent->bytes / sizeof(hammer2_blockref_t);
1696                 break;
1697         case HAMMER2_BREF_TYPE_VOLUME:
1698                 base = &hmp->voldata.sroot_blockset.blockref[0];
1699                 count = HAMMER2_SET_COUNT;
1700                 break;
1701         case HAMMER2_BREF_TYPE_FREEMAP:
1702                 base = &hmp->voldata.freemap_blockset.blockref[0];
1703                 count = HAMMER2_SET_COUNT;
1704                 break;
1705         default:
1706                 panic("hammer2_chain_lookup: unrecognized blockref type: %d",
1707                       parent->bref.type);
1708                 base = NULL;    /* safety */
1709                 count = 0;      /* safety */
1710         }
1711
1712         /*
1713          * Merged scan to find next candidate.
1714          *
1715          * hammer2_base_*() functions require the parent->core.live_* fields
1716          * to be synchronized.
1717          *
1718          * We need to hold the spinlock to access the block array and RB tree
1719          * and to interlock chain creation.
1720          */
1721         if ((parent->core.flags & HAMMER2_CORE_COUNTEDBREFS) == 0)
1722                 hammer2_chain_countbrefs(parent, base, count);
1723
1724         /*
1725          * Combined search
1726          */
1727         spin_lock(&parent->core.cst.spin);
1728         chain = hammer2_combined_find(parent, base, count,
1729                                       cache_indexp, key_nextp,
1730                                       key_beg, key_end,
1731                                       &bref);
1732         generation = parent->core.generation;
1733
1734         /*
1735          * Exhausted parent chain, iterate.
1736          */
1737         if (bref == NULL) {
1738                 spin_unlock(&parent->core.cst.spin);
1739                 if (key_beg == key_end) /* short cut single-key case */
1740                         return (NULL);
1741
1742                 /*
1743                  * Stop if we reached the end of the iteration.
1744                  */
1745                 if (parent->bref.type != HAMMER2_BREF_TYPE_INDIRECT &&
1746                     parent->bref.type != HAMMER2_BREF_TYPE_FREEMAP_NODE) {
1747                         return (NULL);
1748                 }
1749
1750                 /*
1751                  * Calculate next key, stop if we reached the end of the
1752                  * iteration, otherwise go up one level and loop.
1753                  */
1754                 key_beg = parent->bref.key +
1755                           ((hammer2_key_t)1 << parent->bref.keybits);
1756                 if (key_beg == 0 || key_beg > key_end)
1757                         return (NULL);
1758                 parent = hammer2_chain_getparent(parentp, how_maybe);
1759                 goto again;
1760         }
1761
1762         /*
1763          * Selected from blockref or in-memory chain.
1764          */
1765         if (chain == NULL) {
1766                 bcopy = *bref;
1767                 spin_unlock(&parent->core.cst.spin);
1768                 chain = hammer2_chain_get(parent, generation,
1769                                           &bcopy);
1770                 if (chain == NULL) {
1771                         kprintf("retry lookup parent %p keys %016jx:%016jx\n",
1772                                 parent, key_beg, key_end);
1773                         goto again;
1774                 }
1775                 if (bcmp(&bcopy, bref, sizeof(bcopy))) {
1776                         hammer2_chain_drop(chain);
1777                         goto again;
1778                 }
1779         } else {
1780                 hammer2_chain_ref(chain);
1781                 spin_unlock(&parent->core.cst.spin);
1782         }
1783
1784         /*
1785          * chain is referenced but not locked.  We must lock the chain
1786          * to obtain definitive DUPLICATED/DELETED state
1787          */
1788         if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
1789             chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
1790                 hammer2_chain_lock(chain, how_maybe | HAMMER2_RESOLVE_NOREF);
1791         } else {
1792                 hammer2_chain_lock(chain, how | HAMMER2_RESOLVE_NOREF);
1793         }
1794
1795         /*
1796          * Skip deleted chains (XXX cache 'i' end-of-block-array? XXX)
1797          *
1798          * NOTE: Chain's key range is not relevant as there might be
1799          *       one-offs within the range that are not deleted.
1800          *
1801          * NOTE: Lookups can race delete-duplicate because
1802          *       delete-duplicate does not lock the parent's core
1803          *       (they just use the spinlock on the core).  We must
1804          *       check for races by comparing the DUPLICATED flag before
1805          *       releasing the spinlock with the flag after locking the
1806          *       chain.
1807          */
1808         if (chain->flags & HAMMER2_CHAIN_DELETED) {
1809                 hammer2_chain_unlock(chain);
1810                 key_beg = *key_nextp;
1811                 if (key_beg == 0 || key_beg > key_end)
1812                         return(NULL);
1813                 goto again;
1814         }
1815
1816         /*
1817          * If the chain element is an indirect block it becomes the new
1818          * parent and we loop on it.  We must maintain our top-down locks
1819          * to prevent the flusher from interfering (i.e. doing a
1820          * delete-duplicate and leaving us recursing down a deleted chain).
1821          *
1822          * The parent always has to be locked with at least RESOLVE_MAYBE
1823          * so we can access its data.  It might need a fixup if the caller
1824          * passed incompatible flags.  Be careful not to cause a deadlock
1825          * as a data-load requires an exclusive lock.
1826          *
1827          * If HAMMER2_LOOKUP_MATCHIND is set and the indirect block's key
1828          * range is within the requested key range we return the indirect
1829          * block and do NOT loop.  This is usually only used to acquire
1830          * freemap nodes.
1831          */
1832         if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
1833             chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
1834                 hammer2_chain_unlock(parent);
1835                 *parentp = parent = chain;
1836                 goto again;
1837         }
1838 done:
1839         /*
1840          * All done, return the chain
1841          */
1842         return (chain);
1843 }
1844
1845 /*
1846  * After having issued a lookup we can iterate all matching keys.
1847  *
1848  * If chain is non-NULL we continue the iteration from just after it's index.
1849  *
1850  * If chain is NULL we assume the parent was exhausted and continue the
1851  * iteration at the next parent.
1852  *
1853  * parent must be locked on entry and remains locked throughout.  chain's
1854  * lock status must match flags.  Chain is always at least referenced.
1855  *
1856  * WARNING!  The MATCHIND flag does not apply to this function.
1857  */
1858 hammer2_chain_t *
1859 hammer2_chain_next(hammer2_chain_t **parentp, hammer2_chain_t *chain,
1860                    hammer2_key_t *key_nextp,
1861                    hammer2_key_t key_beg, hammer2_key_t key_end,
1862                    int *cache_indexp, int flags)
1863 {
1864         hammer2_chain_t *parent;
1865         int how_maybe;
1866         int ddflag;
1867
1868         /*
1869          * Calculate locking flags for upward recursion.
1870          */
1871         how_maybe = HAMMER2_RESOLVE_MAYBE;
1872         if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK))
1873                 how_maybe |= HAMMER2_RESOLVE_SHARED;
1874
1875         parent = *parentp;
1876
1877         /*
1878          * Calculate the next index and recalculate the parent if necessary.
1879          */
1880         if (chain) {
1881                 key_beg = chain->bref.key +
1882                           ((hammer2_key_t)1 << chain->bref.keybits);
1883                 if (flags & HAMMER2_LOOKUP_NOLOCK)
1884                         hammer2_chain_drop(chain);
1885                 else
1886                         hammer2_chain_unlock(chain);
1887
1888                 /*
1889                  * Any scan where the lookup returned degenerate data embedded
1890                  * in the inode has an invalid index and must terminate.
1891                  */
1892                 if (chain == parent)
1893                         return(NULL);
1894                 if (key_beg == 0 || key_beg > key_end)
1895                         return(NULL);
1896                 chain = NULL;
1897         } else if (parent->bref.type != HAMMER2_BREF_TYPE_INDIRECT &&
1898                    parent->bref.type != HAMMER2_BREF_TYPE_FREEMAP_NODE) {
1899                 /*
1900                  * We reached the end of the iteration.
1901                  */
1902                 return (NULL);
1903         } else {
1904                 /*
1905                  * Continue iteration with next parent unless the current
1906                  * parent covers the range.
1907                  */
1908                 key_beg = parent->bref.key +
1909                           ((hammer2_key_t)1 << parent->bref.keybits);
1910                 if (key_beg == 0 || key_beg > key_end)
1911                         return (NULL);
1912                 parent = hammer2_chain_getparent(parentp, how_maybe);
1913         }
1914
1915         /*
1916          * And execute
1917          */
1918         return (hammer2_chain_lookup(parentp, key_nextp,
1919                                      key_beg, key_end,
1920                                      cache_indexp, flags, &ddflag));
1921 }
1922
1923 /*
1924  * The raw scan function is similar to lookup/next but does not seek to a key.
1925  * Blockrefs are iterated via first_chain = (parent, NULL) and
1926  * next_chain = (parent, chain).
1927  *
1928  * The passed-in parent must be locked and its data resolved.  The returned
1929  * chain will be locked.  Pass chain == NULL to acquire the first sub-chain
1930  * under parent and then iterate with the passed-in chain (which this
1931  * function will unlock).
1932  */
1933 hammer2_chain_t *
1934 hammer2_chain_scan(hammer2_chain_t *parent, hammer2_chain_t *chain,
1935                    int *cache_indexp, int flags)
1936 {
1937         hammer2_mount_t *hmp;
1938         hammer2_blockref_t *base;
1939         hammer2_blockref_t *bref;
1940         hammer2_blockref_t bcopy;
1941         hammer2_key_t key;
1942         hammer2_key_t next_key;
1943         int count = 0;
1944         int how_always = HAMMER2_RESOLVE_ALWAYS;
1945         int how_maybe = HAMMER2_RESOLVE_MAYBE;
1946         int how;
1947         int generation;
1948         int maxloops = 300000;
1949
1950         hmp = parent->hmp;
1951
1952         /*
1953          * Scan flags borrowed from lookup
1954          */
1955         if (flags & HAMMER2_LOOKUP_ALWAYS) {
1956                 how_maybe = how_always;
1957                 how = HAMMER2_RESOLVE_ALWAYS;
1958         } else if (flags & (HAMMER2_LOOKUP_NODATA | HAMMER2_LOOKUP_NOLOCK)) {
1959                 how = HAMMER2_RESOLVE_NEVER;
1960         } else {
1961                 how = HAMMER2_RESOLVE_MAYBE;
1962         }
1963         if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK)) {
1964                 how_maybe |= HAMMER2_RESOLVE_SHARED;
1965                 how_always |= HAMMER2_RESOLVE_SHARED;
1966                 how |= HAMMER2_RESOLVE_SHARED;
1967         }
1968
1969         /*
1970          * Calculate key to locate first/next element, unlocking the previous
1971          * element as we go.  Be careful, the key calculation can overflow.
1972          */
1973         if (chain) {
1974                 key = chain->bref.key +
1975                       ((hammer2_key_t)1 << chain->bref.keybits);
1976                 hammer2_chain_unlock(chain);
1977                 chain = NULL;
1978                 if (key == 0)
1979                         goto done;
1980         } else {
1981                 key = 0;
1982         }
1983
1984 again:
1985         if (--maxloops == 0)
1986                 panic("hammer2_chain_scan: maxloops");
1987         /*
1988          * Locate the blockref array.  Currently we do a fully associative
1989          * search through the array.
1990          */
1991         switch(parent->bref.type) {
1992         case HAMMER2_BREF_TYPE_INODE:
1993                 /*
1994                  * An inode with embedded data has no sub-chains.
1995                  */
1996                 if (parent->data->ipdata.op_flags & HAMMER2_OPFLAG_DIRECTDATA)
1997                         goto done;
1998                 base = &parent->data->ipdata.u.blockset.blockref[0];
1999                 count = HAMMER2_SET_COUNT;
2000                 break;
2001         case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2002         case HAMMER2_BREF_TYPE_INDIRECT:
2003                 /*
2004                  * Optimize indirect blocks in the INITIAL state to avoid
2005                  * I/O.
2006                  */
2007                 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
2008                         base = NULL;
2009                 } else {
2010                         if (parent->data == NULL)
2011                                 panic("parent->data is NULL");
2012                         base = &parent->data->npdata[0];
2013                 }
2014                 count = parent->bytes / sizeof(hammer2_blockref_t);
2015                 break;
2016         case HAMMER2_BREF_TYPE_VOLUME:
2017                 base = &hmp->voldata.sroot_blockset.blockref[0];
2018                 count = HAMMER2_SET_COUNT;
2019                 break;
2020         case HAMMER2_BREF_TYPE_FREEMAP:
2021                 base = &hmp->voldata.freemap_blockset.blockref[0];
2022                 count = HAMMER2_SET_COUNT;
2023                 break;
2024         default:
2025                 panic("hammer2_chain_lookup: unrecognized blockref type: %d",
2026                       parent->bref.type);
2027                 base = NULL;    /* safety */
2028                 count = 0;      /* safety */
2029         }
2030
2031         /*
2032          * Merged scan to find next candidate.
2033          *
2034          * hammer2_base_*() functions require the parent->core.live_* fields
2035          * to be synchronized.
2036          *
2037          * We need to hold the spinlock to access the block array and RB tree
2038          * and to interlock chain creation.
2039          */
2040         if ((parent->core.flags & HAMMER2_CORE_COUNTEDBREFS) == 0)
2041                 hammer2_chain_countbrefs(parent, base, count);
2042
2043         next_key = 0;
2044         spin_lock(&parent->core.cst.spin);
2045         chain = hammer2_combined_find(parent, base, count,
2046                                       cache_indexp, &next_key,
2047                                       key, HAMMER2_KEY_MAX,
2048                                       &bref);
2049         generation = parent->core.generation;
2050
2051         /*
2052          * Exhausted parent chain, we're done.
2053          */
2054         if (bref == NULL) {
2055                 spin_unlock(&parent->core.cst.spin);
2056                 KKASSERT(chain == NULL);
2057                 goto done;
2058         }
2059
2060         /*
2061          * Selected from blockref or in-memory chain.
2062          */
2063         if (chain == NULL) {
2064                 bcopy = *bref;
2065                 spin_unlock(&parent->core.cst.spin);
2066                 chain = hammer2_chain_get(parent, generation, &bcopy);
2067                 if (chain == NULL) {
2068                         kprintf("retry scan parent %p keys %016jx\n",
2069                                 parent, key);
2070                         goto again;
2071                 }
2072                 if (bcmp(&bcopy, bref, sizeof(bcopy))) {
2073                         hammer2_chain_drop(chain);
2074                         chain = NULL;
2075                         goto again;
2076                 }
2077         } else {
2078                 hammer2_chain_ref(chain);
2079                 spin_unlock(&parent->core.cst.spin);
2080         }
2081
2082         /*
2083          * chain is referenced but not locked.  We must lock the chain
2084          * to obtain definitive DUPLICATED/DELETED state
2085          */
2086         hammer2_chain_lock(chain, how | HAMMER2_RESOLVE_NOREF);
2087
2088         /*
2089          * Skip deleted chains (XXX cache 'i' end-of-block-array? XXX)
2090          *
2091          * NOTE: chain's key range is not relevant as there might be
2092          *       one-offs within the range that are not deleted.
2093          *
2094          * NOTE: XXX this could create problems with scans used in
2095          *       situations other than mount-time recovery.
2096          *
2097          * NOTE: Lookups can race delete-duplicate because
2098          *       delete-duplicate does not lock the parent's core
2099          *       (they just use the spinlock on the core).  We must
2100          *       check for races by comparing the DUPLICATED flag before
2101          *       releasing the spinlock with the flag after locking the
2102          *       chain.
2103          */
2104         if (chain->flags & HAMMER2_CHAIN_DELETED) {
2105                 hammer2_chain_unlock(chain);
2106                 chain = NULL;
2107
2108                 key = next_key;
2109                 if (key == 0)
2110                         goto done;
2111                 goto again;
2112         }
2113
2114 done:
2115         /*
2116          * All done, return the chain or NULL
2117          */
2118         return (chain);
2119 }
2120
2121 /*
2122  * Create and return a new hammer2 system memory structure of the specified
2123  * key, type and size and insert it under (*parentp).  This is a full
2124  * insertion, based on the supplied key/keybits, and may involve creating
2125  * indirect blocks and moving other chains around via delete/duplicate.
2126  *
2127  * THE CALLER MUST HAVE ALREADY PROPERLY SEEKED (*parentp) TO THE INSERTION
2128  * POINT SANS ANY REQUIRED INDIRECT BLOCK CREATIONS DUE TO THE ARRAY BEING
2129  * FULL.  This typically means that the caller is creating the chain after
2130  * doing a hammer2_chain_lookup().
2131  *
2132  * (*parentp) must be exclusive locked and may be replaced on return
2133  * depending on how much work the function had to do.
2134  *
2135  * (*chainp) usually starts out NULL and returns the newly created chain,
2136  * but if the caller desires the caller may allocate a disconnected chain
2137  * and pass it in instead.
2138  *
2139  * This function should NOT be used to insert INDIRECT blocks.  It is
2140  * typically used to create/insert inodes and data blocks.
2141  *
2142  * Caller must pass-in an exclusively locked parent the new chain is to
2143  * be inserted under, and optionally pass-in a disconnected, exclusively
2144  * locked chain to insert (else we create a new chain).  The function will
2145  * adjust (*parentp) as necessary, create or connect the chain, and
2146  * return an exclusively locked chain in *chainp.
2147  */
2148 int
2149 hammer2_chain_create(hammer2_trans_t *trans, hammer2_chain_t **parentp,
2150                      hammer2_chain_t **chainp, hammer2_pfsmount_t *pmp,
2151                      hammer2_key_t key, int keybits, int type, size_t bytes)
2152 {
2153         hammer2_mount_t *hmp;
2154         hammer2_chain_t *chain;
2155         hammer2_chain_t *parent;
2156         hammer2_blockref_t *base;
2157         hammer2_blockref_t dummy;
2158         int allocated = 0;
2159         int error = 0;
2160         int count;
2161         int maxloops = 300000;
2162
2163         /*
2164          * Topology may be crossing a PFS boundary.
2165          */
2166         parent = *parentp;
2167         KKASSERT(ccms_thread_lock_owned(&parent->core.cst));
2168         hmp = parent->hmp;
2169         chain = *chainp;
2170
2171         if (chain == NULL) {
2172                 /*
2173                  * First allocate media space and construct the dummy bref,
2174                  * then allocate the in-memory chain structure.  Set the
2175                  * INITIAL flag for fresh chains which do not have embedded
2176                  * data.
2177                  */
2178                 bzero(&dummy, sizeof(dummy));
2179                 dummy.type = type;
2180                 dummy.key = key;
2181                 dummy.keybits = keybits;
2182                 dummy.data_off = hammer2_getradix(bytes);
2183                 dummy.methods = parent->bref.methods;
2184                 chain = hammer2_chain_alloc(hmp, pmp, trans, &dummy);
2185                 hammer2_chain_core_alloc(trans, chain);
2186
2187                 /*
2188                  * Lock the chain manually, chain_lock will load the chain
2189                  * which we do NOT want to do.  (note: chain->refs is set
2190                  * to 1 by chain_alloc() for us, but lockcnt is not).
2191                  */
2192                 chain->lockcnt = 1;
2193                 ccms_thread_lock(&chain->core.cst, CCMS_STATE_EXCLUSIVE);
2194                 allocated = 1;
2195
2196                 /*
2197                  * We do NOT set INITIAL here (yet).  INITIAL is only
2198                  * used for indirect blocks.
2199                  *
2200                  * Recalculate bytes to reflect the actual media block
2201                  * allocation.
2202                  */
2203                 bytes = (hammer2_off_t)1 <<
2204                         (int)(chain->bref.data_off & HAMMER2_OFF_MASK_RADIX);
2205                 chain->bytes = bytes;
2206
2207                 switch(type) {
2208                 case HAMMER2_BREF_TYPE_VOLUME:
2209                 case HAMMER2_BREF_TYPE_FREEMAP:
2210                         panic("hammer2_chain_create: called with volume type");
2211                         break;
2212                 case HAMMER2_BREF_TYPE_INDIRECT:
2213                         panic("hammer2_chain_create: cannot be used to"
2214                               "create indirect block");
2215                         break;
2216                 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2217                         panic("hammer2_chain_create: cannot be used to"
2218                               "create freemap root or node");
2219                         break;
2220                 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
2221                         KKASSERT(bytes == sizeof(chain->data->bmdata));
2222                         /* fall through */
2223                 case HAMMER2_BREF_TYPE_INODE:
2224                 case HAMMER2_BREF_TYPE_DATA:
2225                 default:
2226                         /*
2227                          * leave chain->data NULL, set INITIAL
2228                          */
2229                         KKASSERT(chain->data == NULL);
2230                         atomic_set_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
2231                         break;
2232                 }
2233         } else {
2234                 /*
2235                  * We are reattaching a previously deleted chain, possibly
2236                  * under a new parent and possibly with a new key/keybits.
2237                  * The chain does not have to be in a modified state.  The
2238                  * UPDATE flag will be set later on in this routine.
2239                  *
2240                  * Do NOT mess with the current state of the INITIAL flag.
2241                  */
2242                 chain->bref.key = key;
2243                 chain->bref.keybits = keybits;
2244                 if (chain->flags & HAMMER2_CHAIN_DELETED)
2245                         atomic_clear_int(&chain->flags, HAMMER2_CHAIN_DELETED);
2246                 KKASSERT(chain->parent == NULL);
2247         }
2248
2249         /*
2250          * Calculate how many entries we have in the blockref array and
2251          * determine if an indirect block is required.
2252          */
2253 again:
2254         if (--maxloops == 0)
2255                 panic("hammer2_chain_create: maxloops");
2256
2257         switch(parent->bref.type) {
2258         case HAMMER2_BREF_TYPE_INODE:
2259                 KKASSERT((parent->data->ipdata.op_flags &
2260                           HAMMER2_OPFLAG_DIRECTDATA) == 0);
2261                 KKASSERT(parent->data != NULL);
2262                 base = &parent->data->ipdata.u.blockset.blockref[0];
2263                 count = HAMMER2_SET_COUNT;
2264                 break;
2265         case HAMMER2_BREF_TYPE_INDIRECT:
2266         case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2267                 if (parent->flags & HAMMER2_CHAIN_INITIAL)
2268                         base = NULL;
2269                 else
2270                         base = &parent->data->npdata[0];
2271                 count = parent->bytes / sizeof(hammer2_blockref_t);
2272                 break;
2273         case HAMMER2_BREF_TYPE_VOLUME:
2274                 KKASSERT(parent->data != NULL);
2275                 base = &hmp->voldata.sroot_blockset.blockref[0];
2276                 count = HAMMER2_SET_COUNT;
2277                 break;
2278         case HAMMER2_BREF_TYPE_FREEMAP:
2279                 KKASSERT(parent->data != NULL);
2280                 base = &hmp->voldata.freemap_blockset.blockref[0];
2281                 count = HAMMER2_SET_COUNT;
2282                 break;
2283         default:
2284                 panic("hammer2_chain_create: unrecognized blockref type: %d",
2285                       parent->bref.type);
2286                 base = NULL;
2287                 count = 0;
2288                 break;
2289         }
2290
2291         /*
2292          * Make sure we've counted the brefs
2293          */
2294         if ((parent->core.flags & HAMMER2_CORE_COUNTEDBREFS) == 0)
2295                 hammer2_chain_countbrefs(parent, base, count);
2296
2297         KKASSERT(parent->core.live_count >= 0 &&
2298                  parent->core.live_count <= count);
2299
2300         /*
2301          * If no free blockref could be found we must create an indirect
2302          * block and move a number of blockrefs into it.  With the parent
2303          * locked we can safely lock each child in order to delete+duplicate
2304          * it without causing a deadlock.
2305          *
2306          * This may return the new indirect block or the old parent depending
2307          * on where the key falls.  NULL is returned on error.
2308          */
2309         if (parent->core.live_count == count) {
2310                 hammer2_chain_t *nparent;
2311
2312                 nparent = hammer2_chain_create_indirect(trans, parent,
2313                                                         key, keybits,
2314                                                         type, &error);
2315                 if (nparent == NULL) {
2316                         if (allocated)
2317                                 hammer2_chain_drop(chain);
2318                         chain = NULL;
2319                         goto done;
2320                 }
2321                 if (parent != nparent) {
2322                         hammer2_chain_unlock(parent);
2323                         parent = *parentp = nparent;
2324                 }
2325                 goto again;
2326         }
2327
2328         /*
2329          * Link the chain into its parent.
2330          */
2331         if (chain->parent != NULL)
2332                 panic("hammer2: hammer2_chain_create: chain already connected");
2333         KKASSERT(chain->parent == NULL);
2334         hammer2_chain_insert(parent, chain,
2335                              HAMMER2_CHAIN_INSERT_SPIN |
2336                              HAMMER2_CHAIN_INSERT_LIVE,
2337                              0);
2338
2339         if (allocated) {
2340                 /*
2341                  * Mark the newly created chain modified.  This will cause
2342                  * UPDATE to be set.
2343                  *
2344                  * Device buffers are not instantiated for DATA elements
2345                  * as these are handled by logical buffers.
2346                  *
2347                  * Indirect and freemap node indirect blocks are handled
2348                  * by hammer2_chain_create_indirect() and not by this
2349                  * function.
2350                  *
2351                  * Data for all other bref types is expected to be
2352                  * instantiated (INODE, LEAF).
2353                  */
2354                 switch(chain->bref.type) {
2355                 case HAMMER2_BREF_TYPE_DATA:
2356                 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
2357                 case HAMMER2_BREF_TYPE_INODE:
2358                         hammer2_chain_modify(trans, chain,
2359                                              HAMMER2_MODIFY_OPTDATA);
2360                         break;
2361                 default:
2362                         /*
2363                          * Remaining types are not supported by this function.
2364                          * In particular, INDIRECT and LEAF_NODE types are
2365                          * handled by create_indirect().
2366                          */
2367                         panic("hammer2_chain_create: bad type: %d",
2368                               chain->bref.type);
2369                         /* NOT REACHED */
2370                         break;
2371                 }
2372         } else {
2373                 /*
2374                  * When reconnecting a chain we must set UPDATE and
2375                  * setflush so the flush recognizes that it must update
2376                  * the bref in the parent.
2377                  */
2378                 if ((chain->flags & HAMMER2_CHAIN_UPDATE) == 0) {
2379                         hammer2_chain_ref(chain);
2380                         atomic_set_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
2381                 }
2382         }
2383
2384         /*
2385          * We must setflush(parent) to ensure that it recurses through to
2386          * chain.  setflush(chain) might not work because ONFLUSH is possibly
2387          * already set in the chain (so it won't recurse up to set it in the
2388          * parent).
2389          */
2390         hammer2_chain_setflush(trans, parent);
2391
2392 done:
2393         *chainp = chain;
2394
2395         return (error);
2396 }
2397
2398 /*
2399  * Move the chain from its old parent to a new parent.  The chain must have
2400  * already been deleted or already disconnected (or never associated) with
2401  * a parent.  The chain is reassociated with the new parent and the deleted
2402  * flag will be cleared (no longer deleted).  The chain's modification state
2403  * is not altered.
2404  *
2405  * THE CALLER MUST HAVE ALREADY PROPERLY SEEKED (parent) TO THE INSERTION
2406  * POINT SANS ANY REQUIRED INDIRECT BLOCK CREATIONS DUE TO THE ARRAY BEING
2407  * FULL.  This typically means that the caller is creating the chain after
2408  * doing a hammer2_chain_lookup().
2409  *
2410  * A non-NULL bref is typically passed when key and keybits must be overridden.
2411  * Note that hammer2_cluster_duplicate() *ONLY* uses the key and keybits fields
2412  * from a passed-in bref and uses the old chain's bref for everything else.
2413  *
2414  * If (parent) is non-NULL then the new duplicated chain is inserted under
2415  * the parent.
2416  *
2417  * If (parent) is NULL then the newly duplicated chain is not inserted
2418  * anywhere, similar to if it had just been chain_alloc()'d (suitable for
2419  * passing into hammer2_chain_create() after this function returns).
2420  *
2421  * WARNING! This function calls create which means it can insert indirect
2422  *          blocks.  This can cause other unrelated chains in the parent to
2423  *          be moved to a newly inserted indirect block in addition to the
2424  *          specific chain.
2425  */
2426 void
2427 hammer2_chain_rename(hammer2_trans_t *trans, hammer2_blockref_t *bref,
2428                      hammer2_chain_t **parentp, hammer2_chain_t *chain)
2429 {
2430         hammer2_mount_t *hmp;
2431         hammer2_chain_t *parent;
2432         size_t bytes;
2433
2434         /*
2435          * WARNING!  We should never resolve DATA to device buffers
2436          *           (XXX allow it if the caller did?), and since
2437          *           we currently do not have the logical buffer cache
2438          *           buffer in-hand to fix its cached physical offset
2439          *           we also force the modify code to not COW it. XXX
2440          */
2441         hmp = chain->hmp;
2442         KKASSERT(chain->parent == NULL);
2443
2444         /*
2445          * Now create a duplicate of the chain structure, associating
2446          * it with the same core, making it the same size, pointing it
2447          * to the same bref (the same media block).
2448          */
2449         if (bref == NULL)
2450                 bref = &chain->bref;
2451         bytes = (hammer2_off_t)1 <<
2452                 (int)(bref->data_off & HAMMER2_OFF_MASK_RADIX);
2453
2454         /*
2455          * If parent is not NULL the duplicated chain will be entered under
2456          * the parent and the UPDATE bit set to tell flush to update
2457          * the blockref.
2458          *
2459          * We must setflush(parent) to ensure that it recurses through to
2460          * chain.  setflush(chain) might not work because ONFLUSH is possibly
2461          * already set in the chain (so it won't recurse up to set it in the
2462          * parent).
2463          *
2464          * Having both chains locked is extremely important for atomicy.
2465          */
2466         if (parentp && (parent = *parentp) != NULL) {
2467                 KKASSERT(ccms_thread_lock_owned(&parent->core.cst));
2468                 KKASSERT(parent->refs > 0);
2469
2470                 hammer2_chain_create(trans, parentp, &chain, chain->pmp,
2471                                      bref->key, bref->keybits, bref->type,
2472                                      chain->bytes);
2473                 KKASSERT(chain->flags & HAMMER2_CHAIN_UPDATE);
2474                 hammer2_chain_setflush(trans, *parentp);
2475         }
2476 }
2477
2478 /*
2479  * Helper function for deleting chains.
2480  *
2481  * The chain is removed from the live view (the RBTREE) as well as the parent's
2482  * blockmap.  Both chain and its parent must be locked.
2483  */
2484 static void
2485 _hammer2_chain_delete_helper(hammer2_trans_t *trans,
2486                              hammer2_chain_t *parent, hammer2_chain_t *chain)
2487 {
2488         hammer2_mount_t *hmp;
2489
2490         KKASSERT((chain->flags & HAMMER2_CHAIN_DELETED) == 0);
2491         hmp = chain->hmp;
2492
2493         if (chain->flags & HAMMER2_CHAIN_BMAPPED) {
2494                 /*
2495                  * Chain is blockmapped, so there must be a parent.
2496                  * Atomically remove the chain from the parent and remove
2497                  * the blockmap entry.
2498                  */
2499                 hammer2_blockref_t *base;
2500                 int count;
2501
2502                 KKASSERT(parent != NULL);
2503                 KKASSERT((parent->flags & HAMMER2_CHAIN_INITIAL) == 0);
2504                 hammer2_chain_modify(trans, parent,
2505                                      HAMMER2_MODIFY_OPTDATA);
2506
2507                 /*
2508                  * Calculate blockmap pointer
2509                  */
2510                 KKASSERT(chain->flags & HAMMER2_CHAIN_ONRBTREE);
2511                 spin_lock(&parent->core.cst.spin);
2512
2513                 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELETED);
2514                 atomic_add_int(&parent->core.live_count, -1);
2515                 ++parent->core.generation;
2516                 RB_REMOVE(hammer2_chain_tree, &parent->core.rbtree, chain);
2517                 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
2518                 --parent->core.chain_count;
2519                 chain->parent = NULL;
2520
2521                 switch(parent->bref.type) {
2522                 case HAMMER2_BREF_TYPE_INODE:
2523                         /*
2524                          * Access the inode's block array.  However, there
2525                          * is no block array if the inode is flagged
2526                          * DIRECTDATA.  The DIRECTDATA case typicaly only
2527                          * occurs when a hardlink has been shifted up the
2528                          * tree and the original inode gets replaced with
2529                          * an OBJTYPE_HARDLINK placeholding inode.
2530                          */
2531                         if (parent->data &&
2532                             (parent->data->ipdata.op_flags &
2533                              HAMMER2_OPFLAG_DIRECTDATA) == 0) {
2534                                 base =
2535                                    &parent->data->ipdata.u.blockset.blockref[0];
2536                         } else {
2537                                 base = NULL;
2538                         }
2539                         count = HAMMER2_SET_COUNT;
2540                         break;
2541                 case HAMMER2_BREF_TYPE_INDIRECT:
2542                 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2543                         if (parent->data)
2544                                 base = &parent->data->npdata[0];
2545                         else
2546                                 base = NULL;
2547                         count = parent->bytes / sizeof(hammer2_blockref_t);
2548                         break;
2549                 case HAMMER2_BREF_TYPE_VOLUME:
2550                         base = &hmp->voldata.sroot_blockset.blockref[0];
2551                         count = HAMMER2_SET_COUNT;
2552                         break;
2553                 case HAMMER2_BREF_TYPE_FREEMAP:
2554                         base = &parent->data->npdata[0];
2555                         count = HAMMER2_SET_COUNT;
2556                         break;
2557                 default:
2558                         base = NULL;
2559                         count = 0;
2560                         panic("hammer2_flush_pass2: "
2561                               "unrecognized blockref type: %d",
2562                               parent->bref.type);
2563                 }
2564                 if (base) {
2565                         int cache_index = -1;
2566                         hammer2_base_delete(trans, parent, base, count,
2567                                             &cache_index, chain);
2568                 }
2569                 spin_unlock(&parent->core.cst.spin);
2570         } else if (chain->flags & HAMMER2_CHAIN_ONRBTREE) {
2571                 /*
2572                  * Chain is not blockmapped but a parent is present.
2573                  * Atomically remove the chain from the parent.  There is
2574                  * no blockmap entry to remove.
2575                  */
2576                 spin_lock(&parent->core.cst.spin);
2577                 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELETED);
2578                 atomic_add_int(&parent->core.live_count, -1);
2579                 ++parent->core.generation;
2580                 RB_REMOVE(hammer2_chain_tree, &parent->core.rbtree, chain);
2581                 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
2582                 --parent->core.chain_count;
2583                 chain->parent = NULL;
2584                 spin_unlock(&parent->core.cst.spin);
2585         } else {
2586                 /*
2587                  * Chain is not blockmapped and has no parent.  This
2588                  * is a degenerate case.
2589                  */
2590                 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELETED);
2591         }
2592 }
2593
2594 /*
2595  * Create an indirect block that covers one or more of the elements in the
2596  * current parent.  Either returns the existing parent with no locking or
2597  * ref changes or returns the new indirect block locked and referenced
2598  * and leaving the original parent lock/ref intact as well.
2599  *
2600  * If an error occurs, NULL is returned and *errorp is set to the error.
2601  *
2602  * The returned chain depends on where the specified key falls.
2603  *
2604  * The key/keybits for the indirect mode only needs to follow three rules:
2605  *
2606  * (1) That all elements underneath it fit within its key space and
2607  *
2608  * (2) That all elements outside it are outside its key space.
2609  *
2610  * (3) When creating the new indirect block any elements in the current
2611  *     parent that fit within the new indirect block's keyspace must be
2612  *     moved into the new indirect block.
2613  *
2614  * (4) The keyspace chosen for the inserted indirect block CAN cover a wider
2615  *     keyspace the the current parent, but lookup/iteration rules will
2616  *     ensure (and must ensure) that rule (2) for all parents leading up
2617  *     to the nearest inode or the root volume header is adhered to.  This
2618  *     is accomplished by always recursing through matching keyspaces in
2619  *     the hammer2_chain_lookup() and hammer2_chain_next() API.
2620  *
2621  * The current implementation calculates the current worst-case keyspace by
2622  * iterating the current parent and then divides it into two halves, choosing
2623  * whichever half has the most elements (not necessarily the half containing
2624  * the requested key).
2625  *
2626  * We can also opt to use the half with the least number of elements.  This
2627  * causes lower-numbered keys (aka logical file offsets) to recurse through
2628  * fewer indirect blocks and higher-numbered keys to recurse through more.
2629  * This also has the risk of not moving enough elements to the new indirect
2630  * block and being forced to create several indirect blocks before the element
2631  * can be inserted.
2632  *
2633  * Must be called with an exclusively locked parent.
2634  */
2635 static int hammer2_chain_indkey_freemap(hammer2_chain_t *parent,
2636                                 hammer2_key_t *keyp, int keybits,
2637                                 hammer2_blockref_t *base, int count);
2638 static int hammer2_chain_indkey_normal(hammer2_chain_t *parent,
2639                                 hammer2_key_t *keyp, int keybits,
2640                                 hammer2_blockref_t *base, int count);
2641 static
2642 hammer2_chain_t *
2643 hammer2_chain_create_indirect(hammer2_trans_t *trans, hammer2_chain_t *parent,
2644                               hammer2_key_t create_key, int create_bits,
2645                               int for_type, int *errorp)
2646 {
2647         hammer2_mount_t *hmp;
2648         hammer2_blockref_t *base;
2649         hammer2_blockref_t *bref;
2650         hammer2_blockref_t bcopy;
2651         hammer2_chain_t *chain;
2652         hammer2_chain_t *ichain;
2653         hammer2_chain_t dummy;
2654         hammer2_key_t key = create_key;
2655         hammer2_key_t key_beg;
2656         hammer2_key_t key_end;
2657         hammer2_key_t key_next;
2658         int keybits = create_bits;
2659         int count;
2660         int nbytes;
2661         int cache_index;
2662         int loops;
2663         int reason;
2664         int generation;
2665         int maxloops = 300000;
2666
2667         /*
2668          * Calculate the base blockref pointer or NULL if the chain
2669          * is known to be empty.  We need to calculate the array count
2670          * for RB lookups either way.
2671          */
2672         hmp = parent->hmp;
2673         *errorp = 0;
2674         KKASSERT(ccms_thread_lock_owned(&parent->core.cst));
2675
2676         /*hammer2_chain_modify(trans, &parent, HAMMER2_MODIFY_OPTDATA);*/
2677         if (parent->flags & HAMMER2_CHAIN_INITIAL) {
2678                 base = NULL;
2679
2680                 switch(parent->bref.type) {
2681                 case HAMMER2_BREF_TYPE_INODE:
2682                         count = HAMMER2_SET_COUNT;
2683                         break;
2684                 case HAMMER2_BREF_TYPE_INDIRECT:
2685                 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2686                         count = parent->bytes / sizeof(hammer2_blockref_t);
2687                         break;
2688                 case HAMMER2_BREF_TYPE_VOLUME:
2689                         count = HAMMER2_SET_COUNT;
2690                         break;
2691                 case HAMMER2_BREF_TYPE_FREEMAP:
2692                         count = HAMMER2_SET_COUNT;
2693                         break;
2694                 default:
2695                         panic("hammer2_chain_create_indirect: "
2696                               "unrecognized blockref type: %d",
2697                               parent->bref.type);
2698                         count = 0;
2699                         break;
2700                 }
2701         } else {
2702                 switch(parent->bref.type) {
2703                 case HAMMER2_BREF_TYPE_INODE:
2704                         base = &parent->data->ipdata.u.blockset.blockref[0];
2705                         count = HAMMER2_SET_COUNT;
2706                         break;
2707                 case HAMMER2_BREF_TYPE_INDIRECT:
2708                 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2709                         base = &parent->data->npdata[0];
2710                         count = parent->bytes / sizeof(hammer2_blockref_t);
2711                         break;
2712                 case HAMMER2_BREF_TYPE_VOLUME:
2713                         base = &hmp->voldata.sroot_blockset.blockref[0];
2714                         count = HAMMER2_SET_COUNT;
2715                         break;
2716                 case HAMMER2_BREF_TYPE_FREEMAP:
2717                         base = &hmp->voldata.freemap_blockset.blockref[0];
2718                         count = HAMMER2_SET_COUNT;
2719                         break;
2720                 default:
2721                         panic("hammer2_chain_create_indirect: "
2722                               "unrecognized blockref type: %d",
2723                               parent->bref.type);
2724                         count = 0;
2725                         break;
2726                 }
2727         }
2728
2729         /*
2730          * dummy used in later chain allocation (no longer used for lookups).
2731          */
2732         bzero(&dummy, sizeof(dummy));
2733
2734         /*
2735          * When creating an indirect block for a freemap node or leaf
2736          * the key/keybits must be fitted to static radix levels because
2737          * particular radix levels use particular reserved blocks in the
2738          * related zone.
2739          *
2740          * This routine calculates the key/radix of the indirect block
2741          * we need to create, and whether it is on the high-side or the
2742          * low-side.
2743          */
2744         if (for_type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
2745             for_type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
2746                 keybits = hammer2_chain_indkey_freemap(parent, &key, keybits,
2747                                                        base, count);
2748         } else {
2749                 keybits = hammer2_chain_indkey_normal(parent, &key, keybits,
2750                                                       base, count);
2751         }
2752
2753         /*
2754          * Normalize the key for the radix being represented, keeping the
2755          * high bits and throwing away the low bits.
2756          */
2757         key &= ~(((hammer2_key_t)1 << keybits) - 1);
2758
2759         /*
2760          * How big should our new indirect block be?  It has to be at least
2761          * as large as its parent.
2762          */
2763         if (parent->bref.type == HAMMER2_BREF_TYPE_INODE)
2764                 nbytes = HAMMER2_IND_BYTES_MIN;
2765         else
2766                 nbytes = HAMMER2_IND_BYTES_MAX;
2767         if (nbytes < count * sizeof(hammer2_blockref_t))
2768                 nbytes = count * sizeof(hammer2_blockref_t);
2769
2770         /*
2771          * Ok, create our new indirect block
2772          */
2773         if (for_type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
2774             for_type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
2775                 dummy.bref.type = HAMMER2_BREF_TYPE_FREEMAP_NODE;
2776         } else {
2777                 dummy.bref.type = HAMMER2_BREF_TYPE_INDIRECT;
2778         }
2779         dummy.bref.key = key;
2780         dummy.bref.keybits = keybits;
2781         dummy.bref.data_off = hammer2_getradix(nbytes);
2782         dummy.bref.methods = parent->bref.methods;
2783
2784         ichain = hammer2_chain_alloc(hmp, parent->pmp, trans, &dummy.bref);
2785         atomic_set_int(&ichain->flags, HAMMER2_CHAIN_INITIAL);
2786         hammer2_chain_core_alloc(trans, ichain);
2787         hammer2_chain_lock(ichain, HAMMER2_RESOLVE_MAYBE);
2788         hammer2_chain_drop(ichain);     /* excess ref from alloc */
2789
2790         /*
2791          * We have to mark it modified to allocate its block, but use
2792          * OPTDATA to allow it to remain in the INITIAL state.  Otherwise
2793          * it won't be acted upon by the flush code.
2794          */
2795         hammer2_chain_modify(trans, ichain, HAMMER2_MODIFY_OPTDATA);
2796
2797         /*
2798          * Iterate the original parent and move the matching brefs into
2799          * the new indirect block.
2800          *
2801          * XXX handle flushes.
2802          */
2803         key_beg = 0;
2804         key_end = HAMMER2_KEY_MAX;
2805         cache_index = 0;
2806         spin_lock(&parent->core.cst.spin);
2807         loops = 0;
2808         reason = 0;
2809
2810         for (;;) {
2811                 if (++loops > 100000) {
2812                     spin_unlock(&parent->core.cst.spin);
2813                     panic("excessive loops r=%d p=%p base/count %p:%d %016jx\n",
2814                           reason, parent, base, count, key_next);
2815                 }
2816
2817                 /*
2818                  * NOTE: spinlock stays intact, returned chain (if not NULL)
2819                  *       is not referenced or locked which means that we
2820                  *       cannot safely check its flagged / deletion status
2821                  *       until we lock it.
2822                  */
2823                 chain = hammer2_combined_find(parent, base, count,
2824                                               &cache_index, &key_next,
2825                                               key_beg, key_end,
2826                                               &bref);
2827                 generation = parent->core.generation;
2828                 if (bref == NULL)
2829                         break;
2830                 key_next = bref->key + ((hammer2_key_t)1 << bref->keybits);
2831
2832                 /*
2833                  * Skip keys that are not within the key/radix of the new
2834                  * indirect block.  They stay in the parent.
2835                  */
2836                 if ((~(((hammer2_key_t)1 << keybits) - 1) &
2837                     (key ^ bref->key)) != 0) {
2838                         goto next_key_spinlocked;
2839                 }
2840
2841                 /*
2842                  * Load the new indirect block by acquiring the related
2843                  * chains (potentially from media as it might not be
2844                  * in-memory).  Then move it to the new parent (ichain)
2845                  * via DELETE-DUPLICATE.
2846                  *
2847                  * chain is referenced but not locked.  We must lock the
2848                  * chain to obtain definitive DUPLICATED/DELETED state
2849                  */
2850                 if (chain) {
2851                         /*
2852                          * Use chain already present in the RBTREE
2853                          */
2854                         hammer2_chain_ref(chain);
2855                         spin_unlock(&parent->core.cst.spin);
2856                         hammer2_chain_lock(chain, HAMMER2_RESOLVE_NEVER |
2857                                                   HAMMER2_RESOLVE_NOREF);
2858                 } else {
2859                         /*
2860                          * Get chain for blockref element.  _get returns NULL
2861                          * on insertion race.
2862                          */
2863                         bcopy = *bref;
2864                         spin_unlock(&parent->core.cst.spin);
2865                         chain = hammer2_chain_get(parent, generation, &bcopy);
2866                         if (chain == NULL) {
2867                                 reason = 1;
2868                                 spin_lock(&parent->core.cst.spin);
2869                                 continue;
2870                         }
2871                         if (bcmp(&bcopy, bref, sizeof(bcopy))) {
2872                                 kprintf("REASON 2\n");
2873                                 reason = 2;
2874                                 hammer2_chain_drop(chain);
2875                                 spin_lock(&parent->core.cst.spin);
2876                                 continue;
2877                         }
2878                         hammer2_chain_lock(chain, HAMMER2_RESOLVE_NEVER |
2879                                                   HAMMER2_RESOLVE_NOREF);
2880                 }
2881
2882                 /*
2883                  * This is always live so if the chain has been deleted
2884                  * we raced someone and we have to retry.
2885                  *
2886                  * NOTE: Lookups can race delete-duplicate because
2887                  *       delete-duplicate does not lock the parent's core
2888                  *       (they just use the spinlock on the core).  We must
2889                  *       check for races by comparing the DUPLICATED flag before
2890                  *       releasing the spinlock with the flag after locking the
2891                  *       chain.
2892                  *
2893                  *       (note reversed logic for this one)
2894                  */
2895                 if (chain->flags & HAMMER2_CHAIN_DELETED) {
2896                         hammer2_chain_unlock(chain);
2897                         goto next_key;
2898                 }
2899
2900                 /*
2901                  * Shift the chain to the indirect block.
2902                  *
2903                  * WARNING! Can cause held-over chains to require a refactor.
2904                  *          Fortunately we have none (our locked chains are
2905                  *          passed into and modified by the call).
2906                  */
2907                 hammer2_chain_delete(trans, parent, chain, 0);
2908                 hammer2_chain_rename(trans, NULL, &ichain, chain);
2909                 hammer2_chain_unlock(chain);
2910                 KKASSERT(parent->refs > 0);
2911                 chain = NULL;
2912 next_key:
2913                 spin_lock(&parent->core.cst.spin);
2914 next_key_spinlocked:
2915                 if (--maxloops == 0)
2916                         panic("hammer2_chain_create_indirect: maxloops");
2917                 reason = 4;
2918                 if (key_next == 0 || key_next > key_end)
2919                         break;
2920                 key_beg = key_next;
2921                 /* loop */
2922         }
2923         spin_unlock(&parent->core.cst.spin);
2924
2925         /*
2926          * Insert the new indirect block into the parent now that we've
2927          * cleared out some entries in the parent.  We calculated a good
2928          * insertion index in the loop above (ichain->index).
2929          *
2930          * We don't have to set UPDATE here because we mark ichain
2931          * modified down below (so the normal modified -> flush -> set-moved
2932          * sequence applies).
2933          *
2934          * The insertion shouldn't race as this is a completely new block
2935          * and the parent is locked.
2936          */
2937         KKASSERT((ichain->flags & HAMMER2_CHAIN_ONRBTREE) == 0);
2938         hammer2_chain_insert(parent, ichain,
2939                              HAMMER2_CHAIN_INSERT_SPIN |
2940                              HAMMER2_CHAIN_INSERT_LIVE,
2941                              0);
2942
2943         /*
2944          * Make sure flushes propogate after our manual insertion.
2945          */
2946         hammer2_chain_setflush(trans, ichain);
2947         hammer2_chain_setflush(trans, parent);
2948
2949         /*
2950          * Figure out what to return.
2951          */
2952         if (~(((hammer2_key_t)1 << keybits) - 1) &
2953                    (create_key ^ key)) {
2954                 /*
2955                  * Key being created is outside the key range,
2956                  * return the original parent.
2957                  */
2958                 hammer2_chain_unlock(ichain);
2959         } else {
2960                 /*
2961                  * Otherwise its in the range, return the new parent.
2962                  * (leave both the new and old parent locked).
2963                  */
2964                 parent = ichain;
2965         }
2966
2967         return(parent);
2968 }
2969
2970 /*
2971  * Calculate the keybits and highside/lowside of the freemap node the
2972  * caller is creating.
2973  *
2974  * This routine will specify the next higher-level freemap key/radix
2975  * representing the lowest-ordered set.  By doing so, eventually all
2976  * low-ordered sets will be moved one level down.
2977  *
2978  * We have to be careful here because the freemap reserves a limited
2979  * number of blocks for a limited number of levels.  So we can't just
2980  * push indiscriminately.
2981  */
2982 int
2983 hammer2_chain_indkey_freemap(hammer2_chain_t *parent, hammer2_key_t *keyp,
2984                              int keybits, hammer2_blockref_t *base, int count)
2985 {
2986         hammer2_chain_t *chain;
2987         hammer2_blockref_t *bref;
2988         hammer2_key_t key;
2989         hammer2_key_t key_beg;
2990         hammer2_key_t key_end;
2991         hammer2_key_t key_next;
2992         int cache_index;
2993         int locount;
2994         int hicount;
2995         int maxloops = 300000;
2996
2997         key = *keyp;
2998         locount = 0;
2999         hicount = 0;
3000         keybits = 64;
3001
3002         /*
3003          * Calculate the range of keys in the array being careful to skip
3004          * slots which are overridden with a deletion.
3005          */
3006         key_beg = 0;
3007         key_end = HAMMER2_KEY_MAX;
3008         cache_index = 0;
3009         spin_lock(&parent->core.cst.spin);
3010
3011         for (;;) {
3012                 if (--maxloops == 0) {
3013                         panic("indkey_freemap shit %p %p:%d\n",
3014                               parent, base, count);
3015                 }
3016                 chain = hammer2_combined_find(parent, base, count,
3017                                               &cache_index, &key_next,
3018                                               key_beg, key_end,
3019                                               &bref);
3020
3021                 /*
3022                  * Exhausted search
3023                  */
3024                 if (bref == NULL)
3025                         break;
3026
3027                 /*
3028                  * Skip deleted chains.
3029                  */
3030                 if (chain && (chain->flags & HAMMER2_CHAIN_DELETED)) {
3031                         if (key_next == 0 || key_next > key_end)
3032                                 break;
3033                         key_beg = key_next;
3034                         continue;
3035                 }
3036
3037                 /*
3038                  * Use the full live (not deleted) element for the scan
3039                  * iteration.  HAMMER2 does not allow partial replacements.
3040                  *
3041                  * XXX should be built into hammer2_combined_find().
3042                  */
3043                 key_next = bref->key + ((hammer2_key_t)1 << bref->keybits);
3044
3045                 if (keybits > bref->keybits) {
3046                         key = bref->key;
3047                         keybits = bref->keybits;
3048                 } else if (keybits == bref->keybits && bref->key < key) {
3049                         key = bref->key;
3050                 }
3051                 if (key_next == 0)
3052                         break;
3053                 key_beg = key_next;
3054         }
3055         spin_unlock(&parent->core.cst.spin);
3056
3057         /*
3058          * Return the keybits for a higher-level FREEMAP_NODE covering
3059          * this node.
3060          */
3061         switch(keybits) {
3062         case HAMMER2_FREEMAP_LEVEL0_RADIX:
3063                 keybits = HAMMER2_FREEMAP_LEVEL1_RADIX;
3064                 break;
3065         case HAMMER2_FREEMAP_LEVEL1_RADIX:
3066                 keybits = HAMMER2_FREEMAP_LEVEL2_RADIX;
3067                 break;
3068         case HAMMER2_FREEMAP_LEVEL2_RADIX:
3069                 keybits = HAMMER2_FREEMAP_LEVEL3_RADIX;
3070                 break;
3071         case HAMMER2_FREEMAP_LEVEL3_RADIX:
3072                 keybits = HAMMER2_FREEMAP_LEVEL4_RADIX;
3073                 break;
3074         case HAMMER2_FREEMAP_LEVEL4_RADIX:
3075                 panic("hammer2_chain_indkey_freemap: level too high");
3076                 break;
3077         default:
3078                 panic("hammer2_chain_indkey_freemap: bad radix");
3079                 break;
3080         }
3081         *keyp = key;
3082
3083         return (keybits);
3084 }
3085
3086 /*
3087  * Calculate the keybits and highside/lowside of the indirect block the
3088  * caller is creating.
3089  */
3090 static int
3091 hammer2_chain_indkey_normal(hammer2_chain_t *parent, hammer2_key_t *keyp,
3092                             int keybits, hammer2_blockref_t *base, int count)
3093 {
3094         hammer2_blockref_t *bref;
3095         hammer2_chain_t *chain;
3096         hammer2_key_t key_beg;
3097         hammer2_key_t key_end;
3098         hammer2_key_t key_next;
3099         hammer2_key_t key;
3100         int nkeybits;
3101         int locount;
3102         int hicount;
3103         int cache_index;
3104         int maxloops = 300000;
3105
3106         key = *keyp;
3107         locount = 0;
3108         hicount = 0;
3109
3110         /*
3111          * Calculate the range of keys in the array being careful to skip
3112          * slots which are overridden with a deletion.  Once the scan
3113          * completes we will cut the key range in half and shift half the
3114          * range into the new indirect block.
3115          */
3116         key_beg = 0;
3117         key_end = HAMMER2_KEY_MAX;
3118         cache_index = 0;
3119         spin_lock(&parent->core.cst.spin);
3120
3121         for (;;) {
3122                 if (--maxloops == 0) {
3123                         panic("indkey_freemap shit %p %p:%d\n",
3124                               parent, base, count);
3125                 }
3126                 chain = hammer2_combined_find(parent, base, count,
3127                                               &cache_index, &key_next,
3128                                               key_beg, key_end,
3129                                               &bref);
3130
3131                 /*
3132                  * Exhausted search
3133                  */
3134                 if (bref == NULL)
3135                         break;
3136
3137                 /*
3138                  * NOTE: No need to check DUPLICATED here because we do
3139                  *       not release the spinlock.
3140                  */
3141                 if (chain && (chain->flags & HAMMER2_CHAIN_DELETED)) {
3142                         if (key_next == 0 || key_next > key_end)
3143                                 break;
3144                         key_beg = key_next;
3145                         continue;
3146                 }
3147
3148                 /*
3149                  * Use the full live (not deleted) element for the scan
3150                  * iteration.  HAMMER2 does not allow partial replacements.
3151                  *
3152                  * XXX should be built into hammer2_combined_find().
3153                  */
3154                 key_next = bref->key + ((hammer2_key_t)1 << bref->keybits);
3155
3156                 /*
3157                  * Expand our calculated key range (key, keybits) to fit
3158                  * the scanned key.  nkeybits represents the full range
3159                  * that we will later cut in half (two halves @ nkeybits - 1).
3160                  */
3161                 nkeybits = keybits;
3162                 if (nkeybits < bref->keybits) {
3163                         if (bref->keybits > 64) {
3164                                 kprintf("bad bref chain %p bref %p\n",
3165                                         chain, bref);
3166                                 Debugger("fubar");
3167                         }
3168                         nkeybits = bref->keybits;
3169                 }
3170                 while (nkeybits < 64 &&
3171                        (~(((hammer2_key_t)1 << nkeybits) - 1) &
3172                         (key ^ bref->key)) != 0) {
3173                         ++nkeybits;
3174                 }
3175
3176                 /*
3177                  * If the new key range is larger we have to determine
3178                  * which side of the new key range the existing keys fall
3179                  * under by checking the high bit, then collapsing the
3180                  * locount into the hicount or vise-versa.
3181                  */
3182                 if (keybits != nkeybits) {
3183                         if (((hammer2_key_t)1 << (nkeybits - 1)) & key) {
3184                                 hicount += locount;
3185                                 locount = 0;
3186                         } else {
3187                                 locount += hicount;
3188                                 hicount = 0;
3189                         }
3190                         keybits = nkeybits;
3191                 }
3192
3193                 /*
3194                  * The newly scanned key will be in the lower half or the
3195                  * upper half of the (new) key range.
3196                  */
3197                 if (((hammer2_key_t)1 << (nkeybits - 1)) & bref->key)
3198                         ++hicount;
3199                 else
3200                         ++locount;
3201
3202                 if (key_next == 0)
3203                         break;
3204                 key_beg = key_next;
3205         }
3206         spin_unlock(&parent->core.cst.spin);
3207         bref = NULL;    /* now invalid (safety) */
3208
3209         /*
3210          * Adjust keybits to represent half of the full range calculated
3211          * above (radix 63 max)
3212          */
3213         --keybits;
3214
3215         /*
3216          * Select whichever half contains the most elements.  Theoretically
3217          * we can select either side as long as it contains at least one
3218          * element (in order to ensure that a free slot is present to hold
3219          * the indirect block).
3220          */
3221         if (hammer2_indirect_optimize) {
3222                 /*
3223                  * Insert node for least number of keys, this will arrange
3224                  * the first few blocks of a large file or the first few
3225                  * inodes in a directory with fewer indirect blocks when
3226                  * created linearly.
3227                  */
3228                 if (hicount < locount && hicount != 0)
3229                         key |= (hammer2_key_t)1 << keybits;
3230                 else
3231                         key &= ~(hammer2_key_t)1 << keybits;
3232         } else {
3233                 /*
3234                  * Insert node for most number of keys, best for heavily
3235                  * fragmented files.
3236                  */
3237                 if (hicount > locount)
3238                         key |= (hammer2_key_t)1 << keybits;
3239                 else
3240                         key &= ~(hammer2_key_t)1 << keybits;
3241         }
3242         *keyp = key;
3243
3244         return (keybits);
3245 }
3246
3247 /*
3248  * Sets CHAIN_DELETED and remove the chain's blockref from the parent if
3249  * it exists.
3250  *
3251  * Both parent and chain must be locked exclusively.
3252  *
3253  * This function will modify the parent if the blockref requires removal
3254  * from the parent's block table.
3255  *
3256  * This function is NOT recursive.  Any entity already pushed into the
3257  * chain (such as an inode) may still need visibility into its contents,
3258  * as well as the ability to read and modify the contents.  For example,
3259  * for an unlinked file which is still open.
3260  */
3261 void
3262 hammer2_chain_delete(hammer2_trans_t *trans, hammer2_chain_t *parent,
3263                      hammer2_chain_t *chain, int flags)
3264 {
3265         KKASSERT(ccms_thread_lock_owned(&chain->core.cst));
3266
3267         /*
3268          * Nothing to do if already marked.
3269          *
3270          * We need the spinlock on the core whos RBTREE contains chain
3271          * to protect against races.
3272          */
3273         if ((chain->flags & HAMMER2_CHAIN_DELETED) == 0) {
3274                 KKASSERT((chain->flags & HAMMER2_CHAIN_DELETED) == 0 &&
3275                          chain->parent == parent);
3276                 _hammer2_chain_delete_helper(trans, parent, chain);
3277         }
3278
3279         if (flags & HAMMER2_DELETE_PERMANENT) {
3280                 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DESTROY);
3281                 hammer2_flush(trans, chain);
3282         } else {
3283                 /* XXX might not be needed */
3284                 hammer2_chain_setflush(trans, chain);
3285         }
3286 }
3287
3288 /*
3289  * Returns the index of the nearest element in the blockref array >= elm.
3290  * Returns (count) if no element could be found.
3291  *
3292  * Sets *key_nextp to the next key for loop purposes but does not modify
3293  * it if the next key would be higher than the current value of *key_nextp.
3294  * Note that *key_nexp can overflow to 0, which should be tested by the
3295  * caller.
3296  *
3297  * (*cache_indexp) is a heuristic and can be any value without effecting
3298  * the result.
3299  *
3300  * WARNING!  Must be called with parent's spinlock held.  Spinlock remains
3301  *           held through the operation.
3302  */
3303 static int
3304 hammer2_base_find(hammer2_chain_t *parent,
3305                   hammer2_blockref_t *base, int count,
3306                   int *cache_indexp, hammer2_key_t *key_nextp,
3307                   hammer2_key_t key_beg, hammer2_key_t key_end)
3308 {
3309         hammer2_blockref_t *scan;
3310         hammer2_key_t scan_end;
3311         int i;
3312         int limit;
3313
3314         /*
3315          * Require the live chain's already have their core's counted
3316          * so we can optimize operations.
3317          */
3318         KKASSERT(parent->core.flags & HAMMER2_CORE_COUNTEDBREFS);
3319
3320         /*
3321          * Degenerate case
3322          */
3323         if (count == 0 || base == NULL)
3324                 return(count);
3325
3326         /*
3327          * Sequential optimization using *cache_indexp.  This is the most
3328          * likely scenario.
3329          *
3330          * We can avoid trailing empty entries on live chains, otherwise
3331          * we might have to check the whole block array.
3332          */
3333         i = *cache_indexp;
3334         cpu_ccfence();
3335         limit = parent->core.live_zero;
3336         if (i >= limit)
3337                 i = limit - 1;
3338         if (i < 0)
3339                 i = 0;
3340         KKASSERT(i < count);
3341
3342         /*
3343          * Search backwards
3344          */
3345         scan = &base[i];
3346         while (i > 0 && (scan->type == 0 || scan->key > key_beg)) {
3347                 --scan;
3348                 --i;
3349         }
3350         *cache_indexp = i;
3351
3352         /*
3353          * Search forwards, stop when we find a scan element which
3354          * encloses the key or until we know that there are no further
3355          * elements.
3356          */
3357         while (i < count) {
3358                 if (scan->type != 0) {
3359                         scan_end = scan->key +
3360                                    ((hammer2_key_t)1 << scan->keybits) - 1;
3361                         if (scan->key > key_beg || scan_end >= key_beg)
3362                                 break;
3363                 }
3364                 if (i >= limit)
3365                         return (count);
3366                 ++scan;
3367                 ++i;
3368         }
3369         if (i != count) {
3370                 *cache_indexp = i;
3371                 if (i >= limit) {
3372                         i = count;
3373                 } else {
3374                         scan_end = scan->key +
3375                                    ((hammer2_key_t)1 << scan->keybits);
3376                         if (scan_end && (*key_nextp > scan_end ||
3377                                          *key_nextp == 0)) {
3378                                 *key_nextp = scan_end;
3379                         }
3380                 }
3381         }
3382         return (i);
3383 }
3384
3385 /*
3386  * Do a combined search and return the next match either from the blockref
3387  * array or from the in-memory chain.  Sets *bresp to the returned bref in
3388  * both cases, or sets it to NULL if the search exhausted.  Only returns
3389  * a non-NULL chain if the search matched from the in-memory chain.
3390  *
3391  * When no in-memory chain has been found and a non-NULL bref is returned
3392  * in *bresp.
3393  *
3394  *
3395  * The returned chain is not locked or referenced.  Use the returned bref
3396  * to determine if the search exhausted or not.  Iterate if the base find
3397  * is chosen but matches a deleted chain.
3398  *
3399  * WARNING!  Must be called with parent's spinlock held.  Spinlock remains
3400  *           held through the operation.
3401  */
3402 static hammer2_chain_t *
3403 hammer2_combined_find(hammer2_chain_t *parent,
3404                       hammer2_blockref_t *base, int count,
3405                       int *cache_indexp, hammer2_key_t *key_nextp,
3406                       hammer2_key_t key_beg, hammer2_key_t key_end,
3407                       hammer2_blockref_t **bresp)
3408 {
3409         hammer2_blockref_t *bref;
3410         hammer2_chain_t *chain;
3411         int i;
3412
3413         /*
3414          * Lookup in block array and in rbtree.
3415          */
3416         *key_nextp = key_end + 1;
3417         i = hammer2_base_find(parent, base, count, cache_indexp,
3418                               key_nextp, key_beg, key_end);
3419         chain = hammer2_chain_find(parent, key_nextp, key_beg, key_end);
3420
3421         /*
3422          * Neither matched
3423          */
3424         if (i == count && chain == NULL) {
3425                 *bresp = NULL;
3426                 return(NULL);
3427         }
3428
3429         /*
3430          * Only chain matched.
3431          */
3432         if (i == count) {
3433                 bref = &chain->bref;
3434                 goto found;
3435         }
3436
3437         /*
3438          * Only blockref matched.
3439          */
3440         if (chain == NULL) {
3441                 bref = &base[i];
3442                 goto found;
3443         }
3444
3445         /*
3446          * Both in-memory and blockref matched, select the nearer element.
3447          *
3448          * If both are flush with the left-hand side or both are the
3449          * same distance away, select the chain.  In this situation the
3450          * chain must have been loaded from the matching blockmap.
3451          */
3452         if ((chain->bref.key <= key_beg && base[i].key <= key_beg) ||
3453             chain->bref.key == base[i].key) {
3454                 KKASSERT(chain->bref.key == base[i].key);
3455                 bref = &chain->bref;
3456                 goto found;
3457         }
3458
3459         /*
3460          * Select the nearer key
3461          */
3462         if (chain->bref.key < base[i].key) {
3463                 bref = &chain->bref;
3464         } else {
3465                 bref = &base[i];
3466                 chain = NULL;
3467         }
3468
3469         /*
3470          * If the bref is out of bounds we've exhausted our search.
3471          */
3472 found:
3473         if (bref->key > key_end) {
3474                 *bresp = NULL;
3475                 chain = NULL;
3476         } else {
3477                 *bresp = bref;
3478         }
3479         return(chain);
3480 }
3481
3482 /*
3483  * Locate the specified block array element and delete it.  The element
3484  * must exist.
3485  *
3486  * The spin lock on the related chain must be held.
3487  *
3488  * NOTE: live_count was adjusted when the chain was deleted, so it does not
3489  *       need to be adjusted when we commit the media change.
3490  */
3491 void
3492 hammer2_base_delete(hammer2_trans_t *trans, hammer2_chain_t *parent,
3493                     hammer2_blockref_t *base, int count,
3494                     int *cache_indexp, hammer2_chain_t *chain)
3495 {
3496         hammer2_blockref_t *elm = &chain->bref;
3497         hammer2_key_t key_next;
3498         int i;
3499
3500         /*
3501          * Delete element.  Expect the element to exist.
3502          *
3503          * XXX see caller, flush code not yet sophisticated enough to prevent
3504          *     re-flushed in some cases.
3505          */
3506         key_next = 0; /* max range */
3507         i = hammer2_base_find(parent, base, count, cache_indexp,
3508                               &key_next, elm->key, elm->key);
3509         if (i == count || base[i].type == 0 ||
3510             base[i].key != elm->key ||
3511             ((chain->flags & HAMMER2_CHAIN_BMAPUPD) == 0 &&
3512              base[i].keybits != elm->keybits)) {
3513                 spin_unlock(&parent->core.cst.spin);
3514                 panic("delete base %p element not found at %d/%d elm %p\n",
3515                       base, i, count, elm);
3516                 return;
3517         }
3518         bzero(&base[i], sizeof(*base));
3519
3520         /*
3521          * We can only optimize parent->core.live_zero for live chains.
3522          */
3523         if (parent->core.live_zero == i + 1) {
3524                 while (--i >= 0 && base[i].type == 0)
3525                         ;
3526                 parent->core.live_zero = i + 1;
3527         }
3528
3529         /*
3530          * Clear appropriate blockmap flags in chain.
3531          */
3532         atomic_clear_int(&chain->flags, HAMMER2_CHAIN_BMAPPED |
3533                                         HAMMER2_CHAIN_BMAPUPD);
3534 }
3535
3536 /*
3537  * Insert the specified element.  The block array must not already have the
3538  * element and must have space available for the insertion.
3539  *
3540  * The spin lock on the related chain must be held.
3541  *
3542  * NOTE: live_count was adjusted when the chain was deleted, so it does not
3543  *       need to be adjusted when we commit the media change.
3544  */
3545 void
3546 hammer2_base_insert(hammer2_trans_t *trans __unused, hammer2_chain_t *parent,
3547                     hammer2_blockref_t *base, int count,
3548                     int *cache_indexp, hammer2_chain_t *chain)
3549 {
3550         hammer2_blockref_t *elm = &chain->bref;
3551         hammer2_key_t key_next;
3552         hammer2_key_t xkey;
3553         int i;
3554         int j;
3555         int k;
3556         int l;
3557         int u = 1;
3558
3559         /*
3560          * Insert new element.  Expect the element to not already exist
3561          * unless we are replacing it.
3562          *
3563          * XXX see caller, flush code not yet sophisticated enough to prevent
3564          *     re-flushed in some cases.
3565          */
3566         key_next = 0; /* max range */
3567         i = hammer2_base_find(parent, base, count, cache_indexp,
3568                               &key_next, elm->key, elm->key);
3569
3570         /*
3571          * Shortcut fill optimization, typical ordered insertion(s) may not
3572          * require a search.
3573          */
3574         KKASSERT(i >= 0 && i <= count);
3575
3576         /*
3577          * Set appropriate blockmap flags in chain.
3578          */
3579         atomic_set_int(&chain->flags, HAMMER2_CHAIN_BMAPPED);
3580
3581         /*
3582          * We can only optimize parent->core.live_zero for live chains.
3583          */
3584         if (i == count && parent->core.live_zero < count) {
3585                 i = parent->core.live_zero++;
3586                 base[i] = *elm;
3587                 return;
3588         }
3589
3590         xkey = elm->key + ((hammer2_key_t)1 << elm->keybits) - 1;
3591         if (i != count && (base[i].key < elm->key || xkey >= base[i].key)) {
3592                 spin_unlock(&parent->core.cst.spin);
3593                 panic("insert base %p overlapping elements at %d elm %p\n",
3594                       base, i, elm);
3595         }
3596
3597         /*
3598          * Try to find an empty slot before or after.
3599          */
3600         j = i;
3601         k = i;
3602         while (j > 0 || k < count) {
3603                 --j;
3604                 if (j >= 0 && base[j].type == 0) {
3605                         if (j == i - 1) {
3606                                 base[j] = *elm;
3607                         } else {
3608                                 bcopy(&base[j+1], &base[j],
3609                                       (i - j - 1) * sizeof(*base));
3610                                 base[i - 1] = *elm;
3611                         }
3612                         goto validate;
3613                 }
3614                 ++k;
3615                 if (k < count && base[k].type == 0) {
3616                         bcopy(&base[i], &base[i+1],
3617                               (k - i) * sizeof(hammer2_blockref_t));
3618                         base[i] = *elm;
3619
3620                         /*
3621                          * We can only update parent->core.live_zero for live
3622                          * chains.
3623                          */
3624                         if (parent->core.live_zero <= k)
3625                                 parent->core.live_zero = k + 1;
3626                         u = 2;
3627                         goto validate;
3628                 }
3629         }
3630         panic("hammer2_base_insert: no room!");
3631
3632         /*
3633          * Debugging
3634          */
3635 validate:
3636         key_next = 0;
3637         for (l = 0; l < count; ++l) {
3638                 if (base[l].type) {
3639                         key_next = base[l].key +
3640                                    ((hammer2_key_t)1 << base[l].keybits) - 1;
3641                         break;
3642                 }
3643         }
3644         while (++l < count) {
3645                 if (base[l].type) {
3646                         if (base[l].key <= key_next)
3647                                 panic("base_insert %d %d,%d,%d fail %p:%d", u, i, j, k, base, l);
3648                         key_next = base[l].key +
3649                                    ((hammer2_key_t)1 << base[l].keybits) - 1;
3650
3651                 }
3652         }
3653
3654 }
3655
3656 #if 0
3657
3658 /*
3659  * Sort the blockref array for the chain.  Used by the flush code to
3660  * sort the blockref[] array.
3661  *
3662  * The chain must be exclusively locked AND spin-locked.
3663  */
3664 typedef hammer2_blockref_t *hammer2_blockref_p;
3665
3666 static
3667 int
3668 hammer2_base_sort_callback(const void *v1, const void *v2)
3669 {
3670         hammer2_blockref_p bref1 = *(const hammer2_blockref_p *)v1;
3671         hammer2_blockref_p bref2 = *(const hammer2_blockref_p *)v2;
3672
3673         /*
3674          * Make sure empty elements are placed at the end of the array
3675          */
3676         if (bref1->type == 0) {
3677                 if (bref2->type == 0)
3678                         return(0);
3679                 return(1);
3680         } else if (bref2->type == 0) {
3681                 return(-1);
3682         }
3683
3684         /*
3685          * Sort by key
3686          */
3687         if (bref1->key < bref2->key)
3688                 return(-1);
3689         if (bref1->key > bref2->key)
3690                 return(1);
3691         return(0);
3692 }
3693
3694 void
3695 hammer2_base_sort(hammer2_chain_t *chain)
3696 {
3697         hammer2_blockref_t *base;
3698         int count;
3699
3700         switch(chain->bref.type) {
3701         case HAMMER2_BREF_TYPE_INODE:
3702                 /*
3703                  * Special shortcut for embedded data returns the inode
3704                  * itself.  Callers must detect this condition and access
3705                  * the embedded data (the strategy code does this for us).
3706                  *
3707                  * This is only applicable to regular files and softlinks.
3708                  */
3709                 if (chain->data->ipdata.op_flags & HAMMER2_OPFLAG_DIRECTDATA)
3710                         return;
3711                 base = &chain->data->ipdata.u.blockset.blockref[0];
3712                 count = HAMMER2_SET_COUNT;
3713                 break;
3714         case HAMMER2_BREF_TYPE_FREEMAP_NODE:
3715         case HAMMER2_BREF_TYPE_INDIRECT:
3716                 /*
3717                  * Optimize indirect blocks in the INITIAL state to avoid
3718                  * I/O.
3719                  */
3720                 KKASSERT((chain->flags & HAMMER2_CHAIN_INITIAL) == 0);
3721                 base = &chain->data->npdata[0];
3722                 count = chain->bytes / sizeof(hammer2_blockref_t);
3723                 break;
3724         case HAMMER2_BREF_TYPE_VOLUME:
3725                 base = &chain->hmp->voldata.sroot_blockset.blockref[0];
3726                 count = HAMMER2_SET_COUNT;
3727                 break;
3728         case HAMMER2_BREF_TYPE_FREEMAP:
3729                 base = &chain->hmp->voldata.freemap_blockset.blockref[0];
3730                 count = HAMMER2_SET_COUNT;
3731                 break;
3732         default:
3733                 panic("hammer2_chain_lookup: unrecognized blockref type: %d",
3734                       chain->bref.type);
3735                 base = NULL;    /* safety */
3736                 count = 0;      /* safety */
3737         }
3738         kqsort(base, count, sizeof(*base), hammer2_base_sort_callback);
3739 }
3740
3741 #endif
3742
3743 /*
3744  * Chain memory management
3745  */
3746 void
3747 hammer2_chain_wait(hammer2_chain_t *chain)
3748 {
3749         tsleep(chain, 0, "chnflw", 1);
3750 }