sys/vfs/hammer2/hammer2_chain.c
1 /*
2  * Copyright (c) 2011-2014 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  * and Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 /*
36  * This subsystem implements most of the core support functions for
37  * the hammer2_chain structure.
38  *
39  * Chains are the in-memory version of media objects (volume header, inodes,
40  * indirect blocks, data blocks, etc).  Chains represent a portion of the
41  * HAMMER2 topology.
42  *
43  * Chains are no longer delete-duplicated.  Instead, the original in-memory
44  * chain will be moved along with its block reference (e.g. for things like
45  * renames, hardlink operations, modifications, etc), and will be indexed
46  * on a secondary list for flush handling instead of propagating a flag
47  * upward to the root.
48  *
49  * Concurrent front-end operations can still run against backend flushes
50  * as long as they do not cross the current flush boundary.  An operation
51  * running above the current flush (in areas not yet flushed) can become
52  * part of the current flush while ano peration running below the current
53  * flush can become part of the next flush.
54  */
55 #include <sys/cdefs.h>
56 #include <sys/param.h>
57 #include <sys/systm.h>
58 #include <sys/types.h>
59 #include <sys/lock.h>
60 #include <sys/kern_syscall.h>
61 #include <sys/uuid.h>
62
63 #include "hammer2.h"
64
65 static int hammer2_indirect_optimize;   /* XXX SYSCTL */
66
67 static hammer2_chain_t *hammer2_chain_create_indirect(
68                 hammer2_trans_t *trans, hammer2_chain_t *parent,
69                 hammer2_key_t key, int keybits, int for_type, int *errorp);
70 static void hammer2_chain_drop_data(hammer2_chain_t *chain, int lastdrop);
71 static hammer2_chain_t *hammer2_combined_find(
72                 hammer2_chain_t *parent,
73                 hammer2_blockref_t *base, int count,
74                 int *cache_indexp, hammer2_key_t *key_nextp,
75                 hammer2_key_t key_beg, hammer2_key_t key_end,
76                 hammer2_blockref_t **bresp);
77
78 /*
79  * Basic RBTree for chains (core->rbtree and core->dbtree).  Chains cannot
80  * overlap in the RB trees.  Deleted chains are moved from rbtree to either
81  * dbtree or to dbq.
82  *
83  * Chains in delete-duplicate sequences can always iterate through core_entry
84  * to locate the live version of the chain.
85  */
86 RB_GENERATE(hammer2_chain_tree, hammer2_chain, rbnode, hammer2_chain_cmp);
87
88 int
89 hammer2_chain_cmp(hammer2_chain_t *chain1, hammer2_chain_t *chain2)
90 {
91         hammer2_key_t c1_beg;
92         hammer2_key_t c1_end;
93         hammer2_key_t c2_beg;
94         hammer2_key_t c2_end;
95
96         /*
 97          * Compare chains.  Overlaps are not supposed to happen; to catch
 98          * any software issues early we count overlaps as a match.
99          */
100         c1_beg = chain1->bref.key;
101         c1_end = c1_beg + ((hammer2_key_t)1 << chain1->bref.keybits) - 1;
102         c2_beg = chain2->bref.key;
103         c2_end = c2_beg + ((hammer2_key_t)1 << chain2->bref.keybits) - 1;
104
105         if (c1_end < c2_beg)    /* fully to the left */
106                 return(-1);
107         if (c1_beg > c2_end)    /* fully to the right */
108                 return(1);
109         return(0);              /* overlap (must not cross edge boundary) */
110 }
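
/*
 * Hedged illustration (not compiled): how the comparator's ranges are
 * derived from bref.key/bref.keybits.  The key values below are purely
 * hypothetical.
 */
#if 0
static void
hammer2_chain_cmp_example(hammer2_chain_t *a, hammer2_chain_t *b)
{
        /*
         * 'a' covers keys 0x1000-0x1FFF (key=0x1000, keybits=12), 'b'
         * covers 0x2000-0x3FFF (key=0x2000, keybits=13).  The ranges do
         * not intersect, so 'a' sorts fully to the left of 'b'.
         */
        a->bref.key = 0x1000;
        a->bref.keybits = 12;
        b->bref.key = 0x2000;
        b->bref.keybits = 13;
        KKASSERT(hammer2_chain_cmp(a, b) == -1);
        KKASSERT(hammer2_chain_cmp(b, a) == 1);
        /* any intersecting ranges would compare as 0 (treated as a match) */
}
#endif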
111
112 static __inline
113 int
114 hammer2_isclusterable(hammer2_chain_t *chain)
115 {
116         if (hammer2_cluster_enable) {
117                 if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
118                     chain->bref.type == HAMMER2_BREF_TYPE_INODE ||
119                     chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
120                         return(1);
121                 }
122         }
123         return(0);
124 }
125
126 /*
127  * Make a chain visible to the flusher.  The flusher needs to be able to
 128  * do flushes of subdirectory chains or single files, so it does a top-down
129  * recursion using the ONFLUSH flag for the recursion.  It locates MODIFIED
130  * or UPDATE chains and flushes back up the chain to the root.
131  */
132 void
133 hammer2_chain_setflush(hammer2_trans_t *trans, hammer2_chain_t *chain)
134 {
135         hammer2_chain_t *parent;
136
137         if ((chain->flags & HAMMER2_CHAIN_ONFLUSH) == 0) {
138                 spin_lock(&chain->core.cst.spin);
139                 while ((chain->flags & HAMMER2_CHAIN_ONFLUSH) == 0) {
140                         atomic_set_int(&chain->flags, HAMMER2_CHAIN_ONFLUSH);
141                         if ((parent = chain->parent) == NULL)
142                                 break;
143                         spin_lock(&parent->core.cst.spin);
144                         spin_unlock(&chain->core.cst.spin);
145                         chain = parent;
146                 }
147                 spin_unlock(&chain->core.cst.spin);
148         }
149 }
150
151 /*
152  * Allocate a new disconnected chain element representing the specified
153  * bref.  chain->refs is set to 1 and the passed bref is copied to
154  * chain->bref.  chain->bytes is derived from the bref.
155  *
156  * chain->core is NOT allocated and the media data and bp pointers are left
157  * NULL.  The caller must call chain_core_alloc() to allocate or associate
158  * a core with the chain.
159  *
160  * chain->pmp inherits pmp unless the chain is an inode (other than the
161  * super-root inode).
162  *
163  * NOTE: Returns a referenced but unlocked (because there is no core) chain.
164  */
165 hammer2_chain_t *
166 hammer2_chain_alloc(hammer2_mount_t *hmp, hammer2_pfsmount_t *pmp,
167                     hammer2_trans_t *trans, hammer2_blockref_t *bref)
168 {
169         hammer2_chain_t *chain;
170         u_int bytes = 1U << (int)(bref->data_off & HAMMER2_OFF_MASK_RADIX);
171
172         /*
173          * Construct the appropriate system structure.
174          */
175         switch(bref->type) {
176         case HAMMER2_BREF_TYPE_INODE:
177         case HAMMER2_BREF_TYPE_INDIRECT:
178         case HAMMER2_BREF_TYPE_FREEMAP_NODE:
179         case HAMMER2_BREF_TYPE_DATA:
180         case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
181                 /*
182                  * Chains are really only associated with the hmp but we
183                  * maintain a pmp association for per-mount memory tracking
184                  * purposes.  The pmp can be NULL.
185                  */
186                 chain = kmalloc(sizeof(*chain), hmp->mchain, M_WAITOK | M_ZERO);
187                 break;
188         case HAMMER2_BREF_TYPE_VOLUME:
189         case HAMMER2_BREF_TYPE_FREEMAP:
190                 chain = NULL;
191                 panic("hammer2_chain_alloc volume type illegal for op");
192         default:
193                 chain = NULL;
194                 panic("hammer2_chain_alloc: unrecognized blockref type: %d",
195                       bref->type);
196         }
197
198         /*
199          * Initialize the new chain structure.
200          */
201         chain->pmp = pmp;
202         chain->hmp = hmp;
203         chain->bref = *bref;
204         chain->bytes = bytes;
205         chain->refs = 1;
206         chain->flags = HAMMER2_CHAIN_ALLOCATED;
207
208         /*
209          * Set the PFS boundary flag if this chain represents a PFS root.
210          */
211         if (bref->flags & HAMMER2_BREF_FLAG_PFSROOT)
212                 chain->flags |= HAMMER2_CHAIN_PFSBOUNDARY;
213
214         return (chain);
215 }
216
217 /*
218  * Associate an existing core with the chain or allocate a new core.
219  *
220  * The core is not locked.  No additional refs on the chain are made.
221  * (trans) must not be NULL if (core) is not NULL.
222  *
223  * When chains are delete-duplicated during flushes we insert nchain on
224  * the ownerq after ochain instead of at the end in order to give the
 225  * drop code visibility in the correct order; otherwise drops can be missed.
226  */
227 void
228 hammer2_chain_core_alloc(hammer2_trans_t *trans, hammer2_chain_t *chain)
229 {
230         hammer2_chain_core_t *core = &chain->core;
231
232         /*
233          * Fresh core under nchain (no multi-homing of ochain's
234          * sub-tree).
235          */
236         RB_INIT(&core->rbtree); /* live chains */
237         ccms_cst_init(&core->cst, chain);
238 }
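
/*
 * Hedged usage sketch (illustration only): the allocation sequence implied
 * by the comments above.  hammer2_chain_alloc() returns a referenced but
 * unlocked chain; the caller attaches a core and eventually drops its
 * reference.  The bref is assumed to already describe a media object of a
 * type accepted by hammer2_chain_alloc().
 */
#if 0
static hammer2_chain_t *
example_chain_setup(hammer2_mount_t *hmp, hammer2_pfsmount_t *pmp,
                    hammer2_trans_t *trans, hammer2_blockref_t *bref)
{
        hammer2_chain_t *chain;

        chain = hammer2_chain_alloc(hmp, pmp, trans, bref); /* ref'd, unlocked */
        hammer2_chain_core_alloc(trans, chain);             /* fresh core */
        /*
         * The caller is responsible for calling hammer2_chain_drop() on
         * the returned chain when it is no longer needed.
         */
        return (chain);
}
#endif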
239
240 /*
241  * Add a reference to a chain element, preventing its destruction.
242  *
243  * (can be called with spinlock held)
244  */
245 void
246 hammer2_chain_ref(hammer2_chain_t *chain)
247 {
248         atomic_add_int(&chain->refs, 1);
249 }
250
251 /*
252  * Insert the chain in the core rbtree.
253  *
254  * Normal insertions are placed in the live rbtree.  Insertion of a deleted
 255  * chain is a special case used by the flush code; the deleted chain is
 256  * placed on the unstaged deleted list to avoid confusing the live view.
257  */
258 #define HAMMER2_CHAIN_INSERT_SPIN       0x0001
259 #define HAMMER2_CHAIN_INSERT_LIVE       0x0002
260 #define HAMMER2_CHAIN_INSERT_RACE       0x0004
261
262 static
263 int
264 hammer2_chain_insert(hammer2_chain_t *parent, hammer2_chain_t *chain,
265                      int flags, int generation)
266 {
267         hammer2_chain_t *xchain;
268         int error = 0;
269
270         if (flags & HAMMER2_CHAIN_INSERT_SPIN)
271                 spin_lock(&parent->core.cst.spin);
272
273         /*
274          * Interlocked by spinlock, check for race
275          */
276         if ((flags & HAMMER2_CHAIN_INSERT_RACE) &&
277             parent->core.generation != generation) {
278                 error = EAGAIN;
279                 goto failed;
280         }
281
282         /*
283          * Insert chain
284          */
285         xchain = RB_INSERT(hammer2_chain_tree, &parent->core.rbtree, chain);
286         KASSERT(xchain == NULL,
287                 ("hammer2_chain_insert: collision %p %p", chain, xchain));
288         atomic_set_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
289         chain->parent = parent;
290         ++parent->core.chain_count;
291         ++parent->core.generation;      /* XXX incs for _get() too, XXX */
292
293         /*
294          * We have to keep track of the effective live-view blockref count
295          * so the create code knows when to push an indirect block.
296          */
297         if (flags & HAMMER2_CHAIN_INSERT_LIVE)
298                 atomic_add_int(&parent->core.live_count, 1);
299 failed:
300         if (flags & HAMMER2_CHAIN_INSERT_SPIN)
301                 spin_unlock(&parent->core.cst.spin);
302         return error;
303 }
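
/*
 * Hedged illustration of the INSERT_RACE protocol: a caller that sampled
 * parent->core.generation while holding the parent shared passes that
 * value back in so a concurrent insertion is detected as EAGAIN.  The
 * helper name and the retry policy are assumptions for the example.
 */
#if 0
static int
example_insert_with_race_check(hammer2_chain_t *parent, hammer2_chain_t *chain,
                               int generation)
{
        int error;

        error = hammer2_chain_insert(parent, chain,
                                     HAMMER2_CHAIN_INSERT_SPIN |
                                     HAMMER2_CHAIN_INSERT_LIVE |
                                     HAMMER2_CHAIN_INSERT_RACE,
                                     generation);
        if (error == EAGAIN) {
                /* generation changed under us; caller should redo the lookup */
        }
        return (error);
}
#endif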
304
305 /*
306  * Drop the caller's reference to the chain.  When the ref count drops to
307  * zero this function will try to disassociate the chain from its parent and
 308  * deallocate it, then recursively drop the parent using the implied ref
309  * from the chain's chain->parent.
310  */
311 static hammer2_chain_t *hammer2_chain_lastdrop(hammer2_chain_t *chain);
312
313 void
314 hammer2_chain_drop(hammer2_chain_t *chain)
315 {
316         u_int refs;
317         u_int need = 0;
318
319         if (hammer2_debug & 0x200000)
320                 Debugger("drop");
321
322         if (chain->flags & HAMMER2_CHAIN_UPDATE)
323                 ++need;
324         if (chain->flags & HAMMER2_CHAIN_MODIFIED)
325                 ++need;
326         KKASSERT(chain->refs > need);
327
328         while (chain) {
329                 refs = chain->refs;
330                 cpu_ccfence();
331                 KKASSERT(refs > 0);
332
333                 if (refs == 1) {
334                         chain = hammer2_chain_lastdrop(chain);
335                 } else {
336                         if (atomic_cmpset_int(&chain->refs, refs, refs - 1))
337                                 break;
338                         /* retry the same chain */
339                 }
340         }
341 }
342
343 /*
344  * Safe handling of the 1->0 transition on chain.  Returns a chain for
345  * recursive drop or NULL, possibly returning the same chain if the atomic
346  * op fails.
347  *
 348  * When two chains need to be recursively dropped we use the chain
349  * we would otherwise free to placehold the additional chain.  It's a bit
350  * convoluted but we can't just recurse without potentially blowing out
351  * the kernel stack.
352  *
353  * The chain cannot be freed if it has a non-empty core (children) or
354  * it is not at the head of ownerq.
355  *
 356  * The cst spinlock is allowed to nest child-to-parent (not parent-to-child).
357  */
358 static
359 hammer2_chain_t *
360 hammer2_chain_lastdrop(hammer2_chain_t *chain)
361 {
362         hammer2_pfsmount_t *pmp;
363         hammer2_mount_t *hmp;
364         hammer2_chain_t *parent;
365         hammer2_chain_t *rdrop;
366
367         /*
368          * Spinlock the core and check to see if it is empty.  If it is
369          * not empty we leave chain intact with refs == 0.  The elements
370          * in core->rbtree are associated with other chains contemporary
371          * with ours but not with our chain directly.
372          */
373         spin_lock(&chain->core.cst.spin);
374
375         /*
376          * We can't free non-stale chains with children until we are
377          * able to free the children because there might be a flush
378          * dependency.  Flushes of stale children (which should also
379          * have their deleted flag set) short-cut recursive flush
380          * dependencies and can be freed here.  Any flushes which run
381          * through stale children due to the flush synchronization
382          * point should have a FLUSH_* bit set in the chain and not
383          * reach lastdrop at this time.
384          *
385          * NOTE: We return (chain) on failure to retry.
386          */
387         if (chain->core.chain_count) {
388                 if (atomic_cmpset_int(&chain->refs, 1, 0)) {
389                         spin_unlock(&chain->core.cst.spin);
390                         chain = NULL;   /* success */
391                 } else {
392                         spin_unlock(&chain->core.cst.spin);
393                 }
394                 return(chain);
395         }
396         /* no chains left under us */
397
398         /*
399          * chain->core has no children left so no accessors can get to our
400          * chain from there.  Now we have to lock the parent core to interlock
401          * remaining possible accessors that might bump chain's refs before
402          * we can safely drop chain's refs with intent to free the chain.
403          */
404         hmp = chain->hmp;
405         pmp = chain->pmp;       /* can be NULL */
406         rdrop = NULL;
407
408         /*
409          * Spinlock the parent and try to drop the last ref on chain.
 410          * On success remove chain from its parent, on failure return chain (retry).
411          *
412          * (normal core locks are top-down recursive but we define core
413          *  spinlocks as bottom-up recursive, so this is safe).
414          */
415         if ((parent = chain->parent) != NULL) {
416                 spin_lock(&parent->core.cst.spin);
417                 if (atomic_cmpset_int(&chain->refs, 1, 0) == 0) {
418                         /* 1->0 transition failed */
419                         spin_unlock(&parent->core.cst.spin);
420                         spin_unlock(&chain->core.cst.spin);
421                         return(chain);  /* retry */
422                 }
423
424                 /*
425                  * 1->0 transition successful, remove chain from its
426                  * above core.
427                  */
428                 if (chain->flags & HAMMER2_CHAIN_ONRBTREE) {
429                         RB_REMOVE(hammer2_chain_tree,
430                                   &parent->core.rbtree, chain);
431                         atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
432                         --parent->core.chain_count;
433                         chain->parent = NULL;
434                 }
435
436                 /*
437                  * If our chain was the last chain in the parent's core the
438                  * core is now empty and its parent might have to be
439                  * re-dropped if it has 0 refs.
440                  */
441                 if (parent->core.chain_count == 0) {
442                         rdrop = parent;
443                         if (atomic_cmpset_int(&rdrop->refs, 0, 1) == 0) {
444                                 rdrop = NULL;
445                         }
446                 }
447                 spin_unlock(&parent->core.cst.spin);
448                 parent = NULL;  /* safety */
449         }
450
451         /*
452          * Successful 1->0 transition and the chain can be destroyed now.
453          *
454          * We still have the core spinlock, and core's chain_count is 0.
455          * Any parent spinlock is gone.
456          */
457         spin_unlock(&chain->core.cst.spin);
458         KKASSERT(RB_EMPTY(&chain->core.rbtree) &&
459                  chain->core.chain_count == 0);
460         KKASSERT(chain->core.cst.count == 0);
461         KKASSERT(chain->core.cst.upgrade == 0);
462
463         /*
464          * All spin locks are gone, finish freeing stuff.
465          */
466         KKASSERT((chain->flags & (HAMMER2_CHAIN_UPDATE |
467                                   HAMMER2_CHAIN_MODIFIED)) == 0);
468         hammer2_chain_drop_data(chain, 1);
469
470         KKASSERT(chain->dio == NULL);
471
472         /*
473          * Once chain resources are gone we can use the now dead chain
474          * structure to placehold what might otherwise require a recursive
475          * drop, because we have potentially two things to drop and can only
476          * return one directly.
477          */
478         if (chain->flags & HAMMER2_CHAIN_ALLOCATED) {
479                 chain->flags &= ~HAMMER2_CHAIN_ALLOCATED;
480                 chain->hmp = NULL;
481                 kfree(chain, hmp->mchain);
482         }
483
484         /*
485          * Possible chaining loop when parent re-drop needed.
486          */
487         return(rdrop);
488 }
489
490 /*
491  * On either last lock release or last drop
492  */
493 static void
494 hammer2_chain_drop_data(hammer2_chain_t *chain, int lastdrop)
495 {
496         /*hammer2_mount_t *hmp = chain->hmp;*/
497
498         switch(chain->bref.type) {
499         case HAMMER2_BREF_TYPE_VOLUME:
500         case HAMMER2_BREF_TYPE_FREEMAP:
501                 if (lastdrop)
502                         chain->data = NULL;
503                 break;
504         default:
505                 KKASSERT(chain->data == NULL);
506                 break;
507         }
508 }
509
510 /*
511  * Ref and lock a chain element, acquiring its data with I/O if necessary,
512  * and specify how you would like the data to be resolved.
513  *
514  * Returns 0 on success or an error code if the data could not be acquired.
515  * The chain element is locked on return regardless of whether an error
516  * occurred or not.
517  *
 518  * The lock is allowed to recurse; multiple locking ops will aggregate
519  * the requested resolve types.  Once data is assigned it will not be
520  * removed until the last unlock.
521  *
522  * HAMMER2_RESOLVE_NEVER - Do not resolve the data element.
523  *                         (typically used to avoid device/logical buffer
524  *                          aliasing for data)
525  *
526  * HAMMER2_RESOLVE_MAYBE - Do not resolve data elements for chains in
527  *                         the INITIAL-create state (indirect blocks only).
528  *
529  *                         Do not resolve data elements for DATA chains.
530  *                         (typically used to avoid device/logical buffer
531  *                          aliasing for data)
532  *
533  * HAMMER2_RESOLVE_ALWAYS- Always resolve the data element.
534  *
535  * HAMMER2_RESOLVE_SHARED- (flag) The chain is locked shared, otherwise
536  *                         it will be locked exclusive.
537  *
538  * NOTE: Embedded elements (volume header, inodes) are always resolved
539  *       regardless.
540  *
541  * NOTE: Specifying HAMMER2_RESOLVE_ALWAYS on a newly-created non-embedded
542  *       element will instantiate and zero its buffer, and flush it on
543  *       release.
544  *
545  * NOTE: (data) elements are normally locked RESOLVE_NEVER or RESOLVE_MAYBE
546  *       so as not to instantiate a device buffer, which could alias against
547  *       a logical file buffer.  However, if ALWAYS is specified the
548  *       device buffer will be instantiated anyway.
549  *
550  * WARNING! If data must be fetched a shared lock will temporarily be
551  *          upgraded to exclusive.  However, a deadlock can occur if
552  *          the caller owns more than one shared lock.
553  */
554 int
555 hammer2_chain_lock(hammer2_chain_t *chain, int how)
556 {
557         hammer2_mount_t *hmp;
558         hammer2_blockref_t *bref;
559         ccms_state_t ostate;
560         char *bdata;
561         int error;
562
563         /*
564          * Ref and lock the element.  Recursive locks are allowed.
565          */
566         if ((how & HAMMER2_RESOLVE_NOREF) == 0)
567                 hammer2_chain_ref(chain);
568         atomic_add_int(&chain->lockcnt, 1);
569
570         hmp = chain->hmp;
571         KKASSERT(hmp != NULL);
572
573         /*
574          * Get the appropriate lock.
575          */
576         if (how & HAMMER2_RESOLVE_SHARED)
577                 ccms_thread_lock(&chain->core.cst, CCMS_STATE_SHARED);
578         else
579                 ccms_thread_lock(&chain->core.cst, CCMS_STATE_EXCLUSIVE);
580
581         /*
582          * If we already have a valid data pointer no further action is
583          * necessary.
584          */
585         if (chain->data)
586                 return (0);
587
588         /*
589          * Do we have to resolve the data?
590          */
591         switch(how & HAMMER2_RESOLVE_MASK) {
592         case HAMMER2_RESOLVE_NEVER:
593                 return(0);
594         case HAMMER2_RESOLVE_MAYBE:
595                 if (chain->flags & HAMMER2_CHAIN_INITIAL)
596                         return(0);
597                 if (chain->bref.type == HAMMER2_BREF_TYPE_DATA)
598                         return(0);
599 #if 0
600                 if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE)
601                         return(0);
602 #endif
603                 if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF)
604                         return(0);
605                 /* fall through */
606         case HAMMER2_RESOLVE_ALWAYS:
607                 break;
608         }
609
610         /*
611          * Upgrade to an exclusive lock so we can safely manipulate the
612          * buffer cache.  If another thread got to it before us we
613          * can just return.
614          */
615         ostate = ccms_thread_lock_upgrade(&chain->core.cst);
616         if (chain->data) {
617                 ccms_thread_lock_downgrade(&chain->core.cst, ostate);
618                 return (0);
619         }
620
621         /*
622          * We must resolve to a device buffer, either by issuing I/O or
623          * by creating a zero-fill element.  We do not mark the buffer
624          * dirty when creating a zero-fill element (the hammer2_chain_modify()
625          * API must still be used to do that).
626          *
627          * The device buffer is variable-sized in powers of 2 down
628          * to HAMMER2_MIN_ALLOC (typically 1K).  A 64K physical storage
629          * chunk always contains buffers of the same size. (XXX)
630          *
631          * The minimum physical IO size may be larger than the variable
632          * block size.
633          */
634         bref = &chain->bref;
635
636         /*
637          * The getblk() optimization can only be used on newly created
638          * elements if the physical block size matches the request.
639          */
640         if (chain->flags & HAMMER2_CHAIN_INITIAL) {
641                 error = hammer2_io_new(hmp, bref->data_off, chain->bytes,
642                                         &chain->dio);
643         } else {
644                 error = hammer2_io_bread(hmp, bref->data_off, chain->bytes,
645                                          &chain->dio);
646                 hammer2_adjreadcounter(&chain->bref, chain->bytes);
647         }
648
649         if (error) {
650                 kprintf("hammer2_chain_lock: I/O error %016jx: %d\n",
651                         (intmax_t)bref->data_off, error);
652                 hammer2_io_bqrelse(&chain->dio);
653                 ccms_thread_lock_downgrade(&chain->core.cst, ostate);
654                 return (error);
655         }
656
657 #if 0
658         /*
659          * No need for this, always require that hammer2_chain_modify()
660          * be called before any modifying operations.
661          */
662         if ((chain->flags & HAMMER2_CHAIN_MODIFIED) &&
663             !hammer2_io_isdirty(chain->dio)) {
664                 hammer2_io_setdirty(chain->dio);
665         }
666 #endif
667
668         /*
669          * Clear INITIAL.  In this case we used io_new() and the buffer has
670          * been zero'd and marked dirty.
671          */
672         bdata = hammer2_io_data(chain->dio, chain->bref.data_off);
673         if (chain->flags & HAMMER2_CHAIN_INITIAL)
674                 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
675
676         /*
677          * Setup the data pointer, either pointing it to an embedded data
678          * structure and copying the data from the buffer, or pointing it
679          * into the buffer.
680          *
681          * The buffer is not retained when copying to an embedded data
682          * structure in order to avoid potential deadlocks or recursions
683          * on the same physical buffer.
684          */
685         switch (bref->type) {
686         case HAMMER2_BREF_TYPE_VOLUME:
687         case HAMMER2_BREF_TYPE_FREEMAP:
688                 /*
689                  * Copy data from bp to embedded buffer
690                  */
691                 panic("hammer2_chain_lock: called on unresolved volume header");
692                 break;
693         case HAMMER2_BREF_TYPE_INODE:
694         case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
695         case HAMMER2_BREF_TYPE_INDIRECT:
696         case HAMMER2_BREF_TYPE_DATA:
697         case HAMMER2_BREF_TYPE_FREEMAP_NODE:
698         default:
699                 /*
700                  * Point data at the device buffer and leave dio intact.
701                  */
702                 chain->data = (void *)bdata;
703                 break;
704         }
705         ccms_thread_lock_downgrade(&chain->core.cst, ostate);
706         return (0);
707 }
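
/*
 * Hedged usage sketch: pairing hammer2_chain_lock() with
 * hammer2_chain_unlock() under the resolve modes documented above.  The
 * chain is assumed to already be referenced by the caller; lock adds its
 * own ref (NOREF not given) and unlock drops it again.
 */
#if 0
static void
example_chain_read(hammer2_chain_t *chain)
{
        /*
         * RESOLVE_ALWAYS forces chain->data to be instantiated (issuing
         * I/O if needed).  RESOLVE_SHARED could be or'd in for a shared
         * lock; RESOLVE_NEVER/MAYBE avoid instantiating device buffers
         * for DATA chains.
         */
        if (hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS) == 0) {
                /* ... inspect chain->data ... */
        }
        hammer2_chain_unlock(chain);    /* required even if lock errored */
}
#endif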
708
709 /*
710  * This basically calls hammer2_io_breadcb() but does some pre-processing
711  * of the chain first to handle certain cases.
712  */
713 void
714 hammer2_chain_load_async(hammer2_cluster_t *cluster,
715                          void (*callback)(hammer2_io_t *dio,
716                                           hammer2_cluster_t *cluster,
717                                           hammer2_chain_t *chain,
718                                           void *arg_p, off_t arg_o),
719                          void *arg_p)
720 {
721         hammer2_chain_t *chain;
722         hammer2_mount_t *hmp;
723         struct hammer2_io *dio;
724         hammer2_blockref_t *bref;
725         int error;
726         int i;
727
728         /*
 729          * See if any chain already has its data available and use that;
 730          * otherwise begin an I/O iteration using the first chain.
731          */
732         chain = NULL;
733         for (i = 0; i < cluster->nchains; ++i) {
734                 chain = cluster->array[i];
735                 if (chain && chain->data)
736                         break;
737         }
738         if (i == cluster->nchains) {
739                 chain = cluster->array[0];
740                 i = 0;
741         }
742
743         if (chain->data) {
744                 callback(NULL, cluster, chain, arg_p, (off_t)i);
745                 return;
746         }
747
748         /*
749          * We must resolve to a device buffer, either by issuing I/O or
750          * by creating a zero-fill element.  We do not mark the buffer
751          * dirty when creating a zero-fill element (the hammer2_chain_modify()
752          * API must still be used to do that).
753          *
754          * The device buffer is variable-sized in powers of 2 down
755          * to HAMMER2_MIN_ALLOC (typically 1K).  A 64K physical storage
756          * chunk always contains buffers of the same size. (XXX)
757          *
758          * The minimum physical IO size may be larger than the variable
759          * block size.
760          */
761         bref = &chain->bref;
762         hmp = chain->hmp;
763
764         /*
765          * The getblk() optimization can only be used on newly created
766          * elements if the physical block size matches the request.
767          */
768         if ((chain->flags & HAMMER2_CHAIN_INITIAL) &&
769             chain->bytes == hammer2_devblksize(chain->bytes)) {
770                 error = hammer2_io_new(hmp, bref->data_off, chain->bytes, &dio);
771                 KKASSERT(error == 0);
772                 callback(dio, cluster, chain, arg_p, (off_t)i);
773                 return;
774         }
775
776         /*
777          * Otherwise issue a read
778          */
779         hammer2_adjreadcounter(&chain->bref, chain->bytes);
780         hammer2_io_breadcb(hmp, bref->data_off, chain->bytes,
781                            callback, cluster, chain, arg_p, (off_t)i);
782 }
783
784 /*
785  * Unlock and deref a chain element.
786  *
787  * On the last lock release any non-embedded data (chain->dio) will be
788  * retired.
789  */
790 void
791 hammer2_chain_unlock(hammer2_chain_t *chain)
792 {
793         ccms_state_t ostate;
794         long *counterp;
795         u_int lockcnt;
796
797         /*
798          * The core->cst lock can be shared across several chains so we
799          * need to track the per-chain lockcnt separately.
800          *
801          * If multiple locks are present (or being attempted) on this
802          * particular chain we can just unlock, drop refs, and return.
803          *
804          * Otherwise fall-through on the 1->0 transition.
805          */
806         for (;;) {
807                 lockcnt = chain->lockcnt;
808                 KKASSERT(lockcnt > 0);
809                 cpu_ccfence();
810                 if (lockcnt > 1) {
811                         if (atomic_cmpset_int(&chain->lockcnt,
812                                               lockcnt, lockcnt - 1)) {
813                                 ccms_thread_unlock(&chain->core.cst);
814                                 hammer2_chain_drop(chain);
815                                 return;
816                         }
817                 } else {
818                         if (atomic_cmpset_int(&chain->lockcnt, 1, 0))
819                                 break;
820                 }
821                 /* retry */
822         }
823
824         /*
825          * On the 1->0 transition we upgrade the core lock (if necessary)
826          * to exclusive for terminal processing.  If after upgrading we find
827          * that lockcnt is non-zero, another thread is racing us and will
 828          * handle the unload for us later on, so just clean up and return,
 829          * leaving the data/io intact.
830          *
831          * Otherwise if lockcnt is still 0 it is possible for it to become
832          * non-zero and race, but since we hold the core->cst lock
833          * exclusively all that will happen is that the chain will be
834          * reloaded after we unload it.
835          */
836         ostate = ccms_thread_lock_upgrade(&chain->core.cst);
837         if (chain->lockcnt) {
838                 ccms_thread_unlock_upgraded(&chain->core.cst, ostate);
839                 hammer2_chain_drop(chain);
840                 return;
841         }
842
843         /*
844          * Shortcut the case if the data is embedded or not resolved.
845          *
846          * Do NOT NULL out chain->data (e.g. inode data), it might be
847          * dirty.
848          */
849         if (chain->dio == NULL) {
850                 if ((chain->flags & HAMMER2_CHAIN_MODIFIED) == 0)
851                         hammer2_chain_drop_data(chain, 0);
852                 ccms_thread_unlock_upgraded(&chain->core.cst, ostate);
853                 hammer2_chain_drop(chain);
854                 return;
855         }
856
857         /*
858          * Statistics
859          */
860         if (hammer2_io_isdirty(chain->dio) == 0) {
861                 ;
862         } else if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
863                 switch(chain->bref.type) {
864                 case HAMMER2_BREF_TYPE_DATA:
865                         counterp = &hammer2_ioa_file_write;
866                         break;
867                 case HAMMER2_BREF_TYPE_INODE:
868                         counterp = &hammer2_ioa_meta_write;
869                         break;
870                 case HAMMER2_BREF_TYPE_INDIRECT:
871                         counterp = &hammer2_ioa_indr_write;
872                         break;
873                 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
874                 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
875                         counterp = &hammer2_ioa_fmap_write;
876                         break;
877                 default:
878                         counterp = &hammer2_ioa_volu_write;
879                         break;
880                 }
881                 *counterp += chain->bytes;
882         } else {
883                 switch(chain->bref.type) {
884                 case HAMMER2_BREF_TYPE_DATA:
885                         counterp = &hammer2_iod_file_write;
886                         break;
887                 case HAMMER2_BREF_TYPE_INODE:
888                         counterp = &hammer2_iod_meta_write;
889                         break;
890                 case HAMMER2_BREF_TYPE_INDIRECT:
891                         counterp = &hammer2_iod_indr_write;
892                         break;
893                 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
894                 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
895                         counterp = &hammer2_iod_fmap_write;
896                         break;
897                 default:
898                         counterp = &hammer2_iod_volu_write;
899                         break;
900                 }
901                 *counterp += chain->bytes;
902         }
903
904         /*
905          * Clean out the dio.
906          *
907          * If a device buffer was used for data be sure to destroy the
908          * buffer when we are done to avoid aliases (XXX what about the
909          * underlying VM pages?).
910          *
911          * NOTE: Freemap leaf's use reserved blocks and thus no aliasing
912          *       is possible.
913          *
914          * NOTE: The isdirty check tracks whether we have to bdwrite() the
915          *       buffer or not.  The buffer might already be dirty.  The
916          *       flag is re-set when chain_modify() is called, even if
917          *       MODIFIED is already set, allowing the OS to retire the
918          *       buffer independent of a hammer2 flush.
919          */
920         chain->data = NULL;
921         if ((chain->flags & HAMMER2_CHAIN_IOFLUSH) &&
922             hammer2_io_isdirty(chain->dio)) {
923                 hammer2_io_bawrite(&chain->dio);
924         } else {
925                 hammer2_io_bqrelse(&chain->dio);
926         }
927         ccms_thread_unlock_upgraded(&chain->core.cst, ostate);
928         hammer2_chain_drop(chain);
929 }
930
931 /*
932  * This counts the number of live blockrefs in a block array and
933  * also calculates the point at which all remaining blockrefs are empty.
934  * This routine can only be called on a live chain (DUPLICATED flag not set).
935  *
936  * NOTE: Flag is not set until after the count is complete, allowing
937  *       callers to test the flag without holding the spinlock.
938  *
939  * NOTE: If base is NULL the related chain is still in the INITIAL
940  *       state and there are no blockrefs to count.
941  *
942  * NOTE: live_count may already have some counts accumulated due to
943  *       creation and deletion and could even be initially negative.
944  */
945 void
946 hammer2_chain_countbrefs(hammer2_chain_t *chain,
947                          hammer2_blockref_t *base, int count)
948 {
949         spin_lock(&chain->core.cst.spin);
950         if ((chain->core.flags & HAMMER2_CORE_COUNTEDBREFS) == 0) {
951                 if (base) {
952                         while (--count >= 0) {
953                                 if (base[count].type)
954                                         break;
955                         }
956                         chain->core.live_zero = count + 1;
957                         while (count >= 0) {
958                                 if (base[count].type)
959                                         atomic_add_int(&chain->core.live_count,
960                                                        1);
961                                 --count;
962                         }
963                 } else {
964                         chain->core.live_zero = 0;
965                 }
966                 /* else do not modify live_count */
967                 atomic_set_int(&chain->core.flags, HAMMER2_CORE_COUNTEDBREFS);
968         }
969         spin_unlock(&chain->core.cst.spin);
970 }
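
/*
 * Hedged worked example of the scan above, using a hypothetical 4-entry
 * blockref array: only base[0] and base[2] are live, so live_zero becomes
 * 3 (index of the highest live blockref + 1) and live_count is advanced
 * by 2.
 */
#if 0
static void
example_countbrefs(hammer2_chain_t *chain)
{
        hammer2_blockref_t base[4];

        bzero(base, sizeof(base));
        base[0].type = HAMMER2_BREF_TYPE_INODE;
        base[2].type = HAMMER2_BREF_TYPE_DATA;
        hammer2_chain_countbrefs(chain, base, 4);
        /* now chain->core.live_zero == 3, live_count advanced by 2 */
}
#endif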
971
972 /*
973  * Resize the chain's physical storage allocation in-place.  This function does
974  * not adjust the data pointer and must be followed by (typically) a
975  * hammer2_chain_modify() call to copy any old data over and adjust the
976  * data pointer.
977  *
978  * Chains can be resized smaller without reallocating the storage.  Resizing
979  * larger will reallocate the storage.  Excess or prior storage is reclaimed
980  * asynchronously at a later time.
981  *
982  * Must be passed an exclusively locked parent and chain.
983  *
984  * This function is mostly used with DATA blocks locked RESOLVE_NEVER in order
985  * to avoid instantiating a device buffer that conflicts with the vnode data
986  * buffer.  However, because H2 can compress or encrypt data, the chain may
987  * have a dio assigned to it in those situations, and they do not conflict.
988  *
989  * XXX return error if cannot resize.
990  */
991 void
992 hammer2_chain_resize(hammer2_trans_t *trans, hammer2_inode_t *ip,
993                      hammer2_chain_t *parent, hammer2_chain_t *chain,
994                      int nradix, int flags)
995 {
996         hammer2_mount_t *hmp;
997         size_t obytes;
998         size_t nbytes;
999
1000         hmp = chain->hmp;
1001
1002         /*
1003          * Only data and indirect blocks can be resized for now.
1004          * (The volu root, inodes, and freemap elements use a fixed size).
1005          */
1006         KKASSERT(chain != &hmp->vchain);
1007         KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_DATA ||
1008                  chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT);
1009
1010         /*
1011          * Nothing to do if the element is already the proper size
1012          */
1013         obytes = chain->bytes;
1014         nbytes = 1U << nradix;
1015         if (obytes == nbytes)
1016                 return;
1017
1018         /*
1019          * Make sure the old data is instantiated so we can copy it.  If this
1020          * is a data block, the device data may be superfluous since the data
1021          * might be in a logical block, but compressed or encrypted data is
1022          * another matter.
1023          *
1024          * NOTE: The modify will set BMAPUPD for us if BMAPPED is set.
1025          */
1026         hammer2_chain_modify(trans, chain, 0);
1027
1028         /*
1029          * Relocate the block, even if making it smaller (because different
1030          * block sizes may be in different regions).
1031          *
1032          * (data blocks only, we aren't copying the storage here).
1033          */
1034         hammer2_freemap_alloc(trans, chain, nbytes);
1035         chain->bytes = nbytes;
1036         atomic_clear_int(&chain->flags, HAMMER2_CHAIN_FORCECOW);
1037         /*ip->delta_dcount += (ssize_t)(nbytes - obytes);*/ /* XXX atomic */
1038
1039         /*
1040          * We don't want the followup chain_modify() to try to copy data
1041          * from the old (wrong-sized) buffer.  It won't know how much to
1042          * copy.  This case should only occur during writes when the
1043          * originator already has the data to write in-hand.
1044          */
1045         if (chain->dio) {
1046                 KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_DATA);
1047                 hammer2_io_brelse(&chain->dio);
1048                 chain->data = NULL;
1049         }
1050 }
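
/*
 * Hedged usage sketch of the resize protocol described above: resize the
 * physical allocation, then follow up with hammer2_chain_modify() as the
 * comment requires.  The chain is assumed to be a DATA chain and the radix
 * (14 -> 16KB) is a hypothetical target size.
 */
#if 0
static void
example_chain_resize(hammer2_trans_t *trans, hammer2_inode_t *ip,
                     hammer2_chain_t *parent, hammer2_chain_t *chain)
{
        /* parent and chain must be exclusively locked by the caller */
        hammer2_chain_resize(trans, ip, parent, chain, 14, 0);
        hammer2_chain_modify(trans, chain, 0);  /* followup per protocol above */
}
#endif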
1051
1052 #if 0
1053
1054 /*
1055  * REMOVED - see cluster code
1056  *
1057  * Set a chain modified, making it read-write and duplicating it if necessary.
1058  * This function will assign a new physical block to the chain if necessary
1059  *
1060  * Duplication of already-modified chains is possible when the modification
1061  * crosses a flush synchronization boundary.
1062  *
1063  * Non-data blocks - The chain should be locked to at least the RESOLVE_MAYBE
1064  *                   level or the COW operation will not work.
1065  *
1066  * Data blocks     - The chain is usually locked RESOLVE_NEVER so as not to
1067  *                   run the data through the device buffers.
1068  *
1069  * This function may return a different chain than was passed, in which case
1070  * the old chain will be unlocked and the new chain will be locked.
1071  *
1072  * ip->chain may be adjusted by hammer2_chain_modify_ip().
1073  */
1074 hammer2_inode_data_t *
1075 hammer2_chain_modify_ip(hammer2_trans_t *trans, hammer2_inode_t *ip,
1076                         hammer2_chain_t **chainp, int flags)
1077 {
1078         atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
1079         hammer2_chain_modify(trans, chainp, flags);
1080         if (ip->chain != *chainp)
1081                 hammer2_inode_repoint(ip, NULL, *chainp);
1082         if (ip->vp)
1083                 vsetisdirty(ip->vp);
1084         return(&ip->chain->data->ipdata);
1085 }
1086
1087 #endif
1088
1089 void
1090 hammer2_chain_modify(hammer2_trans_t *trans, hammer2_chain_t *chain, int flags)
1091 {
1092         hammer2_mount_t *hmp;
1093         hammer2_io_t *dio;
1094         int error;
1095         int wasinitial;
1096         int newmod;
1097         char *bdata;
1098
1099         hmp = chain->hmp;
1100
1101         /*
1102          * data is not optional for freemap chains (we must always be sure
1103          * to copy the data on COW storage allocations).
1104          */
1105         if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
1106             chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
1107                 KKASSERT((chain->flags & HAMMER2_CHAIN_INITIAL) ||
1108                          (flags & HAMMER2_MODIFY_OPTDATA) == 0);
1109         }
1110
1111         /*
1112          * Data must be resolved if already assigned unless explicitly
1113          * flagged otherwise.
1114          */
1115         if (chain->data == NULL && (flags & HAMMER2_MODIFY_OPTDATA) == 0 &&
1116             (chain->bref.data_off & ~HAMMER2_OFF_MASK_RADIX)) {
1117                 hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);
1118                 hammer2_chain_unlock(chain);
1119         }
1120
1121         /*
1122          * Otherwise do initial-chain handling.  Set MODIFIED to indicate
1123          * that the chain has been modified.  Set UPDATE to ensure that
1124          * the blockref is updated in the parent.
1125          */
1126         if ((chain->flags & HAMMER2_CHAIN_MODIFIED) == 0) {
1127                 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
1128                 hammer2_chain_ref(chain);
1129                 hammer2_pfs_memory_inc(chain->pmp);
1130                 newmod = 1;
1131         } else {
1132                 newmod = 0;
1133         }
1134         if ((chain->flags & HAMMER2_CHAIN_UPDATE) == 0) {
1135                 atomic_set_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
1136                 hammer2_chain_ref(chain);
1137         }
1138
1139         /*
1140          * The modification or re-modification requires an allocation and
1141          * possible COW.
1142          *
1143          * We normally always allocate new storage here.  If storage exists
1144          * and MODIFY_NOREALLOC is passed in, we do not allocate new storage.
1145          */
1146         if (chain != &hmp->vchain && chain != &hmp->fchain) {
1147                 if ((chain->bref.data_off & ~HAMMER2_OFF_MASK_RADIX) == 0 ||
1148                      ((flags & HAMMER2_MODIFY_NOREALLOC) == 0 && newmod)
1149                 ) {
1150                         hammer2_freemap_alloc(trans, chain, chain->bytes);
1151                         /* XXX failed allocation */
1152                 } else if (chain->flags & HAMMER2_CHAIN_FORCECOW) {
1153                         hammer2_freemap_alloc(trans, chain, chain->bytes);
1154                         /* XXX failed allocation */
1155                 }
1156                 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_FORCECOW);
1157         }
1158
1159         /*
1160          * Set BMAPUPD to tell the flush code that an existing blockmap entry
1161          * requires updating as well as to tell the delete code that the
1162          * chain's blockref might not exactly match (in terms of physical size
1163          * or block offset) the one in the parent's blocktable.  The base key
1164          * of course will still match.
1165          */
1166         if (chain->flags & HAMMER2_CHAIN_BMAPPED)
1167                 atomic_set_int(&chain->flags, HAMMER2_CHAIN_BMAPUPD);
1168
1169         /*
1170          * Do not COW BREF_TYPE_DATA when OPTDATA is set.  This is because
1171          * data modifications are done via the logical buffer cache so COWing
1172          * it here would result in unnecessary extra copies (and possibly extra
1173          * block reallocations).  The INITIAL flag remains unchanged in this
1174          * situation.
1175          *
1176          * (This is a bit of a hack).
1177          */
1178         if (chain->bref.type == HAMMER2_BREF_TYPE_DATA &&
1179             (flags & HAMMER2_MODIFY_OPTDATA)) {
1180                 goto skip2;
1181         }
1182
1183         /*
1184          * Clearing the INITIAL flag (for indirect blocks) indicates that
1185          * we've processed the uninitialized storage allocation.
1186          *
1187          * If this flag is already clear we are likely in a copy-on-write
1188          * situation but we have to be sure NOT to bzero the storage if
1189          * no data is present.
1190          */
1191         if (chain->flags & HAMMER2_CHAIN_INITIAL) {
1192                 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
1193                 wasinitial = 1;
1194         } else {
1195                 wasinitial = 0;
1196         }
1197
1198         /*
1199          * Instantiate data buffer and possibly execute COW operation
1200          */
1201         switch(chain->bref.type) {
1202         case HAMMER2_BREF_TYPE_VOLUME:
1203         case HAMMER2_BREF_TYPE_FREEMAP:
1204                 /*
1205                  * The data is embedded, no copy-on-write operation is
1206                  * needed.
1207                  */
1208                 KKASSERT(chain->dio == NULL);
1209                 break;
1210         case HAMMER2_BREF_TYPE_INODE:
1211         case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
1212         case HAMMER2_BREF_TYPE_DATA:
1213         case HAMMER2_BREF_TYPE_INDIRECT:
1214         case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1215                 /*
1216                  * Perform the copy-on-write operation
1217                  *
1218                  * zero-fill or copy-on-write depending on whether
1219                  * chain->data exists or not and set the dirty state for
1220                  * the new buffer.  hammer2_io_new() will handle the
1221                  * zero-fill.
1222                  */
1223                 KKASSERT(chain != &hmp->vchain && chain != &hmp->fchain);
1224
1225                 if (wasinitial) {
1226                         error = hammer2_io_new(hmp, chain->bref.data_off,
1227                                                chain->bytes, &dio);
1228                 } else {
1229                         error = hammer2_io_bread(hmp, chain->bref.data_off,
1230                                                  chain->bytes, &dio);
1231                 }
1232                 hammer2_adjreadcounter(&chain->bref, chain->bytes);
1233                 KKASSERT(error == 0);
1234
1235                 bdata = hammer2_io_data(dio, chain->bref.data_off);
1236
1237                 if (chain->data) {
1238                         KKASSERT(chain->dio != NULL);
1239                         if (chain->data != (void *)bdata) {
1240                                 bcopy(chain->data, bdata, chain->bytes);
1241                         }
1242                 } else if (wasinitial == 0) {
1243                         /*
1244                          * We have a problem.  We were asked to COW but
1245                          * we don't have any data to COW with!
1246                          */
1247                         panic("hammer2_chain_modify: having a COW %p\n",
1248                               chain);
1249                 }
1250
1251                 /*
1252                  * Retire the old buffer, replace with the new
1253                  */
1254                 if (chain->dio)
1255                         hammer2_io_brelse(&chain->dio);
1256                 chain->data = (void *)bdata;
1257                 chain->dio = dio;
1258                 hammer2_io_setdirty(dio);       /* modified by bcopy above */
1259                 break;
1260         default:
1261                 panic("hammer2_chain_modify: illegal non-embedded type %d",
1262                       chain->bref.type);
1263                 break;
1264
1265         }
1266 skip2:
1267         /*
1268          * Call setflush on the parent to indicate that it must recurse
1269          * down to us.  Do not call it on the chain itself, which might
1270          * already have the flag set.
1271          */
1272         if (chain->parent)
1273                 hammer2_chain_setflush(trans, chain->parent);
1274 }
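
/*
 * Hedged usage sketch: the modify-before-write rule noted earlier in this
 * file (hammer2_chain_modify() must be called before changing the chain's
 * media data).  Transaction setup and error handling are assumed to happen
 * elsewhere.
 */
#if 0
static void
example_chain_edit(hammer2_trans_t *trans, hammer2_chain_t *chain)
{
        hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);
        hammer2_chain_modify(trans, chain, 0);  /* COW/alloc + mark MODIFIED */
        /* ... edit chain->data here ... */
        hammer2_chain_unlock(chain);
}
#endif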
1275
1276 /*
1277  * Volume header data locks
1278  */
1279 void
1280 hammer2_voldata_lock(hammer2_mount_t *hmp)
1281 {
1282         lockmgr(&hmp->vollk, LK_EXCLUSIVE);
1283 }
1284
1285 void
1286 hammer2_voldata_unlock(hammer2_mount_t *hmp)
1287 {
1288         lockmgr(&hmp->vollk, LK_RELEASE);
1289 }
1290
1291 void
1292 hammer2_voldata_modify(hammer2_mount_t *hmp)
1293 {
1294         if ((hmp->vchain.flags & HAMMER2_CHAIN_MODIFIED) == 0) {
1295                 atomic_set_int(&hmp->vchain.flags, HAMMER2_CHAIN_MODIFIED);
1296                 hammer2_chain_ref(&hmp->vchain);
1297                 hammer2_pfs_memory_inc(hmp->vchain.pmp);
1298         }
1299 }
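
/*
 * Hedged usage sketch of the volume header protocol above: take the
 * volume lock, flag the embedded vchain as modified, update the volume
 * header data, then release the lock.
 */
#if 0
static void
example_voldata_update(hammer2_mount_t *hmp)
{
        hammer2_voldata_lock(hmp);
        hammer2_voldata_modify(hmp);
        /* ... update the volume header data here ... */
        hammer2_voldata_unlock(hmp);
}
#endif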
1300
1301 /*
1302  * This function returns the chain at the nearest key within the specified
1303  * range.  The returned chain will be referenced but not locked.
1304  *
1305  * This function will recurse through chain->rbtree as necessary and will
1306  * return a *key_nextp suitable for iteration.  *key_nextp is only set if
1307  * the iteration value is less than the current value of *key_nextp.
1308  *
1309  * The caller should use (*key_nextp) to calculate the actual range of
1310  * the returned element, which will be (key_beg to *key_nextp - 1), because
1311  * there might be another element which is superior to the returned element
1312  * and overlaps it.
1313  *
1314  * (*key_nextp) can be passed as key_beg in an iteration only while non-NULL
1315  * chains continue to be returned.  On EOF (*key_nextp) may overflow since
1316  * it will wind up being (key_end + 1).
1317  *
1318  * WARNING!  Must be called with child's spinlock held.  Spinlock remains
1319  *           held through the operation.
1320  */
1321 struct hammer2_chain_find_info {
1322         hammer2_chain_t         *best;
1323         hammer2_key_t           key_beg;
1324         hammer2_key_t           key_end;
1325         hammer2_key_t           key_next;
1326 };
1327
1328 static int hammer2_chain_find_cmp(hammer2_chain_t *child, void *data);
1329 static int hammer2_chain_find_callback(hammer2_chain_t *child, void *data);
1330
1331 static
1332 hammer2_chain_t *
1333 hammer2_chain_find(hammer2_chain_t *parent, hammer2_key_t *key_nextp,
1334                           hammer2_key_t key_beg, hammer2_key_t key_end)
1335 {
1336         struct hammer2_chain_find_info info;
1337
1338         info.best = NULL;
1339         info.key_beg = key_beg;
1340         info.key_end = key_end;
1341         info.key_next = *key_nextp;
1342
1343         RB_SCAN(hammer2_chain_tree, &parent->core.rbtree,
1344                 hammer2_chain_find_cmp, hammer2_chain_find_callback,
1345                 &info);
1346         *key_nextp = info.key_next;
1347 #if 0
1348         kprintf("chain_find %p %016jx:%016jx next=%016jx\n",
1349                 parent, key_beg, key_end, *key_nextp);
1350 #endif
1351
1352         return (info.best);
1353 }
1354
1355 static
1356 int
1357 hammer2_chain_find_cmp(hammer2_chain_t *child, void *data)
1358 {
1359         struct hammer2_chain_find_info *info = data;
1360         hammer2_key_t child_beg;
1361         hammer2_key_t child_end;
1362
1363         child_beg = child->bref.key;
1364         child_end = child_beg + ((hammer2_key_t)1 << child->bref.keybits) - 1;
1365
1366         if (child_end < info->key_beg)
1367                 return(-1);
1368         if (child_beg > info->key_end)
1369                 return(1);
1370         return(0);
1371 }
1372
1373 static
1374 int
1375 hammer2_chain_find_callback(hammer2_chain_t *child, void *data)
1376 {
1377         struct hammer2_chain_find_info *info = data;
1378         hammer2_chain_t *best;
1379         hammer2_key_t child_end;
1380
1381         /*
1382          * WARNING! Do not discard DUPLICATED chains, it is possible that
1383          *          we are catching an insertion half-way done.  If a
1384          *          duplicated chain turns out to be the best choice the
1385          *          caller will re-check its flags after locking it.
1386          *
1387          * WARNING! The scan runs forward through the tree; exact matches
1388          *          should keep the existing info->best.
1389          */
1390         if ((best = info->best) == NULL) {
1391                 /*
1392                  * No previous best.  Assign best
1393                  */
1394                 info->best = child;
1395         } else if (best->bref.key <= info->key_beg &&
1396                    child->bref.key <= info->key_beg) {
1397                 /*
1398                  * Illegal overlap.
1399                  */
1400                 KKASSERT(0);
1401                 /*info->best = child;*/
1402         } else if (child->bref.key < best->bref.key) {
1403                 /*
1404                  * Child has a nearer key and best is not flush with key_beg.
1405                  * Set best to child.  Truncate key_next to the old best key.
1406                  */
1407                 info->best = child;
1408                 if (info->key_next > best->bref.key || info->key_next == 0)
1409                         info->key_next = best->bref.key;
1410         } else if (child->bref.key == best->bref.key) {
1411                 /*
1412                  * If our current best is flush with the child then this
1413                  * is an illegal overlap.
1414                  *
1415                  * key_next will automatically be limited to the smaller of
1416                  * the two end-points.
1417                  */
1418                 KKASSERT(0);
1419                 info->best = child;
1420         } else {
1421                 /*
1422                  * Keep the current best but truncate key_next to the child's
1423                  * base.
1424                  *
1425                  * key_next will also automatically be limited to the smaller
1426                  * of the two end-points (probably not necessary for this case
1427                  * but we do it anyway).
1428                  */
1429                 if (info->key_next > child->bref.key || info->key_next == 0)
1430                         info->key_next = child->bref.key;
1431         }
1432
1433         /*
1434          * Always truncate key_next based on child's end-of-range.
1435          */
1436         child_end = child->bref.key + ((hammer2_key_t)1 << child->bref.keybits);
1437         if (child_end && (info->key_next > child_end || info->key_next == 0))
1438                 info->key_next = child_end;
1439
1440         return(0);
1441 }
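
/*
 * Example (illustrative): scanning from key_beg = 0 with children at
 * keys 0x0000 and 0x8000 (both keybits = 12), the callback above keeps
 * the child at 0x0000 as the best match and leaves key_next truncated
 * to 0x1000 (that child's end-of-range), so the caller's next iteration
 * begins at 0x1000 and eventually returns the child at 0x8000.
 */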
1442
1443 /*
1444  * Retrieve the specified chain from a media blockref, creating the
1445  * in-memory chain structure which reflects it.
1446  *
1447  * To handle insertion races pass the INSERT_RACE flag along with the
1448  * generation number of the core.  NULL will be returned if the generation
1449  * number changes before we have a chance to insert the chain.  Insert
1450  * races can occur because the parent might be held shared.
1451  *
1452  * Caller must hold the parent locked shared or exclusive since we may
1453  * need the parent's bref array to find our block.
1454  *
1455  * WARNING! chain->pmp is left NULL if the bref represents a PFS mount
1456  *          point.
1457  */
1458 hammer2_chain_t *
1459 hammer2_chain_get(hammer2_chain_t *parent, int generation,
1460                   hammer2_blockref_t *bref)
1461 {
1462         hammer2_mount_t *hmp = parent->hmp;
1463         hammer2_chain_t *chain;
1464         int error;
1465
1466         /*
1467          * Allocate a chain structure representing the existing media
1468          * entry.  Resulting chain has one ref and is not locked.
1469          */
1470         if (bref->flags & HAMMER2_BREF_FLAG_PFSROOT)
1471                 chain = hammer2_chain_alloc(hmp, NULL, NULL, bref);
1472         else
1473                 chain = hammer2_chain_alloc(hmp, parent->pmp, NULL, bref);
1474         hammer2_chain_core_alloc(NULL, chain);
1475         /* ref'd chain returned */
1476
1477         /*
1478          * Flag that the chain is in the parent's blockmap so delete/flush
1479          * knows what to do with it.
1480          */
1481         atomic_set_int(&chain->flags, HAMMER2_CHAIN_BMAPPED);
1482
1483         /*
1484          * Link the chain into its parent.  A spinlock is required to safely
1485          * access the RBTREE, and it is possible to collide with another
1486          * hammer2_chain_get() operation because the caller might only hold
1487          * a shared lock on the parent.
1488          */
1489         KKASSERT(parent->refs > 0);
1490         error = hammer2_chain_insert(parent, chain,
1491                                      HAMMER2_CHAIN_INSERT_SPIN |
1492                                      HAMMER2_CHAIN_INSERT_RACE,
1493                                      generation);
1494         if (error) {
1495                 KKASSERT((chain->flags & HAMMER2_CHAIN_ONRBTREE) == 0);
1496                 kprintf("chain %p get race\n", chain);
1497                 hammer2_chain_drop(chain);
1498                 chain = NULL;
1499         } else {
1500                 KKASSERT(chain->flags & HAMMER2_CHAIN_ONRBTREE);
1501         }
1502
1503         /*
1504          * Return our new chain referenced but not locked, or NULL if
1505          * a race occurred.
1506          */
1507         return (chain);
1508 }
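
#if 0
/*
 * Illustrative sketch (not compiled): the typical caller-side pattern
 * for the insertion race described above.  The caller snapshots the
 * parent's generation and the blockref under the parent's spinlock and
 * simply retries when hammer2_chain_get() returns NULL.
 * example_get_retry() is hypothetical.
 */
static hammer2_chain_t *
example_get_retry(hammer2_chain_t *parent, hammer2_blockref_t *bref)
{
        hammer2_blockref_t bcopy;
        hammer2_chain_t *chain;
        int generation;

        for (;;) {
                spin_lock(&parent->core.cst.spin);
                generation = parent->core.generation;
                bcopy = *bref;
                spin_unlock(&parent->core.cst.spin);

                chain = hammer2_chain_get(parent, generation, &bcopy);
                if (chain)
                        return (chain);
                /* lost the insertion race, retry with a fresh generation */
        }
}
#endif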
1509
1510 /*
1511  * Lookup initialization/completion API
1512  */
1513 hammer2_chain_t *
1514 hammer2_chain_lookup_init(hammer2_chain_t *parent, int flags)
1515 {
1516         if (flags & HAMMER2_LOOKUP_SHARED) {
1517                 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS |
1518                                            HAMMER2_RESOLVE_SHARED);
1519         } else {
1520                 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
1521         }
1522         return (parent);
1523 }
1524
1525 void
1526 hammer2_chain_lookup_done(hammer2_chain_t *parent)
1527 {
1528         if (parent)
1529                 hammer2_chain_unlock(parent);
1530 }
1531
1532 static
1533 hammer2_chain_t *
1534 hammer2_chain_getparent(hammer2_chain_t **parentp, int how)
1535 {
1536         hammer2_chain_t *oparent;
1537         hammer2_chain_t *nparent;
1538
1539         /*
1540          * Be careful of order, oparent must be unlocked before nparent
1541          * is locked below to avoid a deadlock.
1542          */
1543         oparent = *parentp;
1544         spin_lock(&oparent->core.cst.spin);
1545         nparent = oparent->parent;
1546         hammer2_chain_ref(nparent);
1547         spin_unlock(&oparent->core.cst.spin);
1548         if (oparent) {
1549                 hammer2_chain_unlock(oparent);
1550                 oparent = NULL;
1551         }
1552
1553         hammer2_chain_lock(nparent, how | HAMMER2_RESOLVE_NOREF);
1554         *parentp = nparent;
1555
1556         return (nparent);
1557 }
1558
1559 /*
1560  * Locate the first chain whose key range overlaps (key_beg, key_end) inclusive.
1561  * (*parentp) typically points to an inode but can also point to a related
1562  * indirect block and this function will recurse upwards and find the inode
1563  * again.
1564  *
1565  * (*parentp) must be exclusively locked and referenced and can be an inode
1566  * or an existing indirect block within the inode.
1567  *
1568  * On return (*parentp) will be modified to point at the deepest parent chain
1569  * element encountered during the search, as a helper for an insertion or
1570  * deletion.   The new (*parentp) will be locked and referenced and the old
1571  * will be unlocked and dereferenced (no change if they are both the same).
1572  *
1573  * The matching chain will be returned exclusively locked.  If NOLOCK is
1574  * requested the chain will be returned only referenced.
1575  *
1576  * NULL is returned if no match was found, but (*parentp) will still
1577  * potentially be adjusted.
1578  *
1579  * On return (*key_nextp) will point to an iterative value for key_beg.
1580  * (If NULL is returned (*key_nextp) is set to key_end).
1581  *
1582  * This function will also recurse up the chain if the key is not within the
1583  * current parent's range.  (*parentp) can never be set to NULL.  An iteration
1584  * can simply allow (*parentp) to float inside the loop.
1585  *
1586  * NOTE!  chain->data is not always resolved.  By default it will not be
1587  *        resolved for BREF_TYPE_DATA, FREEMAP_NODE, or FREEMAP_LEAF.  Use
1588  *        HAMMER2_LOOKUP_ALWAYS to force resolution (but be careful w/
1589  *        BREF_TYPE_DATA as the device buffer can alias the logical file
1590  *        buffer).
1591  */
1592 hammer2_chain_t *
1593 hammer2_chain_lookup(hammer2_chain_t **parentp, hammer2_key_t *key_nextp,
1594                      hammer2_key_t key_beg, hammer2_key_t key_end,
1595                      int *cache_indexp, int flags, int *ddflagp)
1596 {
1597         hammer2_mount_t *hmp;
1598         hammer2_chain_t *parent;
1599         hammer2_chain_t *chain;
1600         hammer2_blockref_t *base;
1601         hammer2_blockref_t *bref;
1602         hammer2_blockref_t bcopy;
1603         hammer2_key_t scan_beg;
1604         hammer2_key_t scan_end;
1605         int count = 0;
1606         int how_always = HAMMER2_RESOLVE_ALWAYS;
1607         int how_maybe = HAMMER2_RESOLVE_MAYBE;
1608         int how;
1609         int generation;
1610         int maxloops = 300000;
1611
1612         *ddflagp = 0;
1613         if (flags & HAMMER2_LOOKUP_ALWAYS) {
1614                 how_maybe = how_always;
1615                 how = HAMMER2_RESOLVE_ALWAYS;
1616         } else if (flags & (HAMMER2_LOOKUP_NODATA | HAMMER2_LOOKUP_NOLOCK)) {
1617                 how = HAMMER2_RESOLVE_NEVER;
1618         } else {
1619                 how = HAMMER2_RESOLVE_MAYBE;
1620         }
1621         if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK)) {
1622                 how_maybe |= HAMMER2_RESOLVE_SHARED;
1623                 how_always |= HAMMER2_RESOLVE_SHARED;
1624                 how |= HAMMER2_RESOLVE_SHARED;
1625         }
1626
1627         /*
1628          * Recurse (*parentp) upward if necessary until the parent completely
1629          * encloses the key range or we hit the inode.
1630          *
1631          * This function handles races against the flusher doing a delete-
1632          * duplicate above us and re-homes the parent to the duplicate in
1633          * that case, otherwise we'd wind up recursing down a stale chain.
1634          */
1635         parent = *parentp;
1636         hmp = parent->hmp;
1637
1638         while (parent->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
1639                parent->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
1640                 scan_beg = parent->bref.key;
1641                 scan_end = scan_beg +
1642                            ((hammer2_key_t)1 << parent->bref.keybits) - 1;
1643                 if (key_beg >= scan_beg && key_end <= scan_end)
1644                         break;
1645                 parent = hammer2_chain_getparent(parentp, how_maybe);
1646         }
1647
1648 again:
1649         if (--maxloops == 0)
1650                 panic("hammer2_chain_lookup: maxloops");
1651         /*
1652          * Locate the blockref array.  Currently we do a fully associative
1653          * search through the array.
1654          */
1655         switch(parent->bref.type) {
1656         case HAMMER2_BREF_TYPE_INODE:
1657                 /*
1658                  * Special shortcut for embedded data returns the inode
1659                  * itself.  Callers must detect this condition and access
1660                  * the embedded data (the strategy code does this for us).
1661                  *
1662                  * This is only applicable to regular files and softlinks.
1663                  */
1664                 if (parent->data->ipdata.op_flags & HAMMER2_OPFLAG_DIRECTDATA) {
1665                         if (flags & HAMMER2_LOOKUP_NOLOCK)
1666                                 hammer2_chain_ref(parent);
1667                         else
1668                                 hammer2_chain_lock(parent, how_always);
1669                         *key_nextp = key_end + 1;
1670                         *ddflagp = 1;
1671                         return (parent);
1672                 }
1673                 base = &parent->data->ipdata.u.blockset.blockref[0];
1674                 count = HAMMER2_SET_COUNT;
1675                 break;
1676         case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1677         case HAMMER2_BREF_TYPE_INDIRECT:
1678                 /*
1679                  * Handle MATCHIND on the parent
1680                  */
1681                 if (flags & HAMMER2_LOOKUP_MATCHIND) {
1682                         scan_beg = parent->bref.key;
1683                         scan_end = scan_beg +
1684                                ((hammer2_key_t)1 << parent->bref.keybits) - 1;
1685                         if (key_beg == scan_beg && key_end == scan_end) {
1686                                 chain = parent;
1687                                 hammer2_chain_lock(chain, how_maybe);
1688                                 *key_nextp = scan_end + 1;
1689                                 goto done;
1690                         }
1691                 }
1692                 /*
1693                  * Optimize indirect blocks in the INITIAL state to avoid
1694                  * I/O.
1695                  */
1696                 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1697                         base = NULL;
1698                 } else {
1699                         if (parent->data == NULL)
1700                                 panic("parent->data is NULL");
1701                         base = &parent->data->npdata[0];
1702                 }
1703                 count = parent->bytes / sizeof(hammer2_blockref_t);
1704                 break;
1705         case HAMMER2_BREF_TYPE_VOLUME:
1706                 base = &hmp->voldata.sroot_blockset.blockref[0];
1707                 count = HAMMER2_SET_COUNT;
1708                 break;
1709         case HAMMER2_BREF_TYPE_FREEMAP:
1710                 base = &hmp->voldata.freemap_blockset.blockref[0];
1711                 count = HAMMER2_SET_COUNT;
1712                 break;
1713         default:
1714                 panic("hammer2_chain_lookup: unrecognized blockref type: %d",
1715                       parent->bref.type);
1716                 base = NULL;    /* safety */
1717                 count = 0;      /* safety */
1718         }
1719
1720         /*
1721          * Merged scan to find next candidate.
1722          *
1723          * hammer2_base_*() functions require the parent->core.live_* fields
1724          * to be synchronized.
1725          *
1726          * We need to hold the spinlock to access the block array and RB tree
1727          * and to interlock chain creation.
1728          */
1729         if ((parent->core.flags & HAMMER2_CORE_COUNTEDBREFS) == 0)
1730                 hammer2_chain_countbrefs(parent, base, count);
1731
1732         /*
1733          * Combined search
1734          */
1735         spin_lock(&parent->core.cst.spin);
1736         chain = hammer2_combined_find(parent, base, count,
1737                                       cache_indexp, key_nextp,
1738                                       key_beg, key_end,
1739                                       &bref);
1740         generation = parent->core.generation;
1741
1742         /*
1743          * Exhausted parent chain, iterate.
1744          */
1745         if (bref == NULL) {
1746                 spin_unlock(&parent->core.cst.spin);
1747                 if (key_beg == key_end) /* short cut single-key case */
1748                         return (NULL);
1749
1750                 /*
1751                  * Stop if we reached the end of the iteration.
1752                  */
1753                 if (parent->bref.type != HAMMER2_BREF_TYPE_INDIRECT &&
1754                     parent->bref.type != HAMMER2_BREF_TYPE_FREEMAP_NODE) {
1755                         return (NULL);
1756                 }
1757
1758                 /*
1759                  * Calculate next key, stop if we reached the end of the
1760                  * iteration, otherwise go up one level and loop.
1761                  */
1762                 key_beg = parent->bref.key +
1763                           ((hammer2_key_t)1 << parent->bref.keybits);
1764                 if (key_beg == 0 || key_beg > key_end)
1765                         return (NULL);
1766                 parent = hammer2_chain_getparent(parentp, how_maybe);
1767                 goto again;
1768         }
1769
1770         /*
1771          * Selected from blockref or in-memory chain.
1772          */
1773         if (chain == NULL) {
1774                 bcopy = *bref;
1775                 spin_unlock(&parent->core.cst.spin);
1776                 chain = hammer2_chain_get(parent, generation,
1777                                           &bcopy);
1778                 if (chain == NULL) {
1779                         kprintf("retry lookup parent %p keys %016jx:%016jx\n",
1780                                 parent, key_beg, key_end);
1781                         goto again;
1782                 }
1783                 if (bcmp(&bcopy, bref, sizeof(bcopy))) {
1784                         hammer2_chain_drop(chain);
1785                         goto again;
1786                 }
1787         } else {
1788                 hammer2_chain_ref(chain);
1789                 spin_unlock(&parent->core.cst.spin);
1790         }
1791
1792         /*
1793          * chain is referenced but not locked.  We must lock the chain
1794          * to obtain definitive DUPLICATED/DELETED state
1795          */
1796         if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
1797             chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
1798                 hammer2_chain_lock(chain, how_maybe | HAMMER2_RESOLVE_NOREF);
1799         } else {
1800                 hammer2_chain_lock(chain, how | HAMMER2_RESOLVE_NOREF);
1801         }
1802
1803         /*
1804          * Skip deleted chains (XXX cache 'i' end-of-block-array? XXX)
1805          *
1806          * NOTE: Chain's key range is not relevant as there might be
1807          *       one-offs within the range that are not deleted.
1808          *
1809          * NOTE: Lookups can race delete-duplicate because
1810          *       delete-duplicate does not lock the parent's core
1811          *       (they just use the spinlock on the core).  We must
1812          *       check for races by comparing the DUPLICATED flag before
1813          *       releasing the spinlock with the flag after locking the
1814          *       chain.
1815          */
1816         if (chain->flags & HAMMER2_CHAIN_DELETED) {
1817                 hammer2_chain_unlock(chain);
1818                 key_beg = *key_nextp;
1819                 if (key_beg == 0 || key_beg > key_end)
1820                         return(NULL);
1821                 goto again;
1822         }
1823
1824         /*
1825          * If the chain element is an indirect block it becomes the new
1826          * parent and we loop on it.  We must maintain our top-down locks
1827          * to prevent the flusher from interfering (i.e. doing a
1828          * delete-duplicate and leaving us recursing down a deleted chain).
1829          *
1830          * The parent always has to be locked with at least RESOLVE_MAYBE
1831          * so we can access its data.  It might need a fixup if the caller
1832          * passed incompatible flags.  Be careful not to cause a deadlock
1833          * as a data-load requires an exclusive lock.
1834          *
1835          * If HAMMER2_LOOKUP_MATCHIND is set and the indirect block's key
1836          * range is within the requested key range we return the indirect
1837          * block and do NOT loop.  This is usually only used to acquire
1838          * freemap nodes.
1839          */
1840         if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
1841             chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
1842                 hammer2_chain_unlock(parent);
1843                 *parentp = parent = chain;
1844                 goto again;
1845         }
1846 done:
1847         /*
1848          * All done, return the chain
1849          */
1850         return (chain);
1851 }
1852
1853 /*
1854  * After having issued a lookup we can iterate all matching keys.
1855  *
1856  * If chain is non-NULL we continue the iteration from just after its index.
1857  *
1858  * If chain is NULL we assume the parent was exhausted and continue the
1859  * iteration at the next parent.
1860  *
1861  * parent must be locked on entry and remains locked throughout.  chain's
1862  * lock status must match flags.  Chain is always at least referenced.
1863  *
1864  * WARNING!  The MATCHIND flag does not apply to this function.
1865  */
1866 hammer2_chain_t *
1867 hammer2_chain_next(hammer2_chain_t **parentp, hammer2_chain_t *chain,
1868                    hammer2_key_t *key_nextp,
1869                    hammer2_key_t key_beg, hammer2_key_t key_end,
1870                    int *cache_indexp, int flags)
1871 {
1872         hammer2_chain_t *parent;
1873         int how_maybe;
1874         int ddflag;
1875
1876         /*
1877          * Calculate locking flags for upward recursion.
1878          */
1879         how_maybe = HAMMER2_RESOLVE_MAYBE;
1880         if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK))
1881                 how_maybe |= HAMMER2_RESOLVE_SHARED;
1882
1883         parent = *parentp;
1884
1885         /*
1886          * Calculate the next index and recalculate the parent if necessary.
1887          */
1888         if (chain) {
1889                 key_beg = chain->bref.key +
1890                           ((hammer2_key_t)1 << chain->bref.keybits);
1891                 if (flags & HAMMER2_LOOKUP_NOLOCK)
1892                         hammer2_chain_drop(chain);
1893                 else
1894                         hammer2_chain_unlock(chain);
1895
1896                 /*
1897                  * Any scan where the lookup returned degenerate data embedded
1898                  * in the inode has an invalid index and must terminate.
1899                  */
1900                 if (chain == parent)
1901                         return(NULL);
1902                 if (key_beg == 0 || key_beg > key_end)
1903                         return(NULL);
1904                 chain = NULL;
1905         } else if (parent->bref.type != HAMMER2_BREF_TYPE_INDIRECT &&
1906                    parent->bref.type != HAMMER2_BREF_TYPE_FREEMAP_NODE) {
1907                 /*
1908                  * We reached the end of the iteration.
1909                  */
1910                 return (NULL);
1911         } else {
1912                 /*
1913                  * Continue iteration with next parent unless the current
1914                  * parent covers the range.
1915                  */
1916                 key_beg = parent->bref.key +
1917                           ((hammer2_key_t)1 << parent->bref.keybits);
1918                 if (key_beg == 0 || key_beg > key_end)
1919                         return (NULL);
1920                 parent = hammer2_chain_getparent(parentp, how_maybe);
1921         }
1922
1923         /*
1924          * And execute
1925          */
1926         return (hammer2_chain_lookup(parentp, key_nextp,
1927                                      key_beg, key_end,
1928                                      cache_indexp, flags, &ddflag));
1929 }
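
#if 0
/*
 * Illustrative sketch (not compiled): a typical caller-side iteration
 * over the key range [key_beg, key_end] using the lookup_init/lookup/
 * next/lookup_done API above.  example_scan_range() is hypothetical;
 * ip_chain is assumed to be a referenced inode chain.
 */
static void
example_scan_range(hammer2_chain_t *ip_chain,
                   hammer2_key_t key_beg, hammer2_key_t key_end)
{
        hammer2_chain_t *parent;
        hammer2_chain_t *chain;
        hammer2_key_t key_next;
        int cache_index = -1;
        int ddflag;

        parent = hammer2_chain_lookup_init(ip_chain, 0);
        chain = hammer2_chain_lookup(&parent, &key_next,
                                     key_beg, key_end,
                                     &cache_index,
                                     HAMMER2_LOOKUP_NODATA, &ddflag);
        while (chain) {
                /* chain is returned locked, operate on it here */
                chain = hammer2_chain_next(&parent, chain, &key_next,
                                           key_next, key_end,
                                           &cache_index,
                                           HAMMER2_LOOKUP_NODATA);
        }
        hammer2_chain_lookup_done(parent);
}
#endif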
1930
1931 /*
1932  * The raw scan function is similar to lookup/next but does not seek to a key.
1933  * Blockrefs are iterated via first_chain = (parent, NULL) and
1934  * next_chain = (parent, chain).
1935  *
1936  * The passed-in parent must be locked and its data resolved.  The returned
1937  * chain will be locked.  Pass chain == NULL to acquire the first sub-chain
1938  * under parent and then iterate with the passed-in chain (which this
1939  * function will unlock).
1940  */
1941 hammer2_chain_t *
1942 hammer2_chain_scan(hammer2_chain_t *parent, hammer2_chain_t *chain,
1943                    int *cache_indexp, int flags)
1944 {
1945         hammer2_mount_t *hmp;
1946         hammer2_blockref_t *base;
1947         hammer2_blockref_t *bref;
1948         hammer2_blockref_t bcopy;
1949         hammer2_key_t key;
1950         hammer2_key_t next_key;
1951         int count = 0;
1952         int how_always = HAMMER2_RESOLVE_ALWAYS;
1953         int how_maybe = HAMMER2_RESOLVE_MAYBE;
1954         int how;
1955         int generation;
1956         int maxloops = 300000;
1957
1958         hmp = parent->hmp;
1959
1960         /*
1961          * Scan flags borrowed from lookup
1962          */
1963         if (flags & HAMMER2_LOOKUP_ALWAYS) {
1964                 how_maybe = how_always;
1965                 how = HAMMER2_RESOLVE_ALWAYS;
1966         } else if (flags & (HAMMER2_LOOKUP_NODATA | HAMMER2_LOOKUP_NOLOCK)) {
1967                 how = HAMMER2_RESOLVE_NEVER;
1968         } else {
1969                 how = HAMMER2_RESOLVE_MAYBE;
1970         }
1971         if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK)) {
1972                 how_maybe |= HAMMER2_RESOLVE_SHARED;
1973                 how_always |= HAMMER2_RESOLVE_SHARED;
1974                 how |= HAMMER2_RESOLVE_SHARED;
1975         }
1976
1977         /*
1978          * Calculate key to locate first/next element, unlocking the previous
1979          * element as we go.  Be careful, the key calculation can overflow.
1980          */
1981         if (chain) {
1982                 key = chain->bref.key +
1983                       ((hammer2_key_t)1 << chain->bref.keybits);
1984                 hammer2_chain_unlock(chain);
1985                 chain = NULL;
1986                 if (key == 0)
1987                         goto done;
1988         } else {
1989                 key = 0;
1990         }
1991
1992 again:
1993         if (--maxloops == 0)
1994                 panic("hammer2_chain_scan: maxloops");
1995         /*
1996          * Locate the blockref array.  Currently we do a fully associative
1997          * search through the array.
1998          */
1999         switch(parent->bref.type) {
2000         case HAMMER2_BREF_TYPE_INODE:
2001                 /*
2002                  * An inode with embedded data has no sub-chains.
2003                  */
2004                 if (parent->data->ipdata.op_flags & HAMMER2_OPFLAG_DIRECTDATA)
2005                         goto done;
2006                 base = &parent->data->ipdata.u.blockset.blockref[0];
2007                 count = HAMMER2_SET_COUNT;
2008                 break;
2009         case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2010         case HAMMER2_BREF_TYPE_INDIRECT:
2011                 /*
2012                  * Optimize indirect blocks in the INITIAL state to avoid
2013                  * I/O.
2014                  */
2015                 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
2016                         base = NULL;
2017                 } else {
2018                         if (parent->data == NULL)
2019                                 panic("parent->data is NULL");
2020                         base = &parent->data->npdata[0];
2021                 }
2022                 count = parent->bytes / sizeof(hammer2_blockref_t);
2023                 break;
2024         case HAMMER2_BREF_TYPE_VOLUME:
2025                 base = &hmp->voldata.sroot_blockset.blockref[0];
2026                 count = HAMMER2_SET_COUNT;
2027                 break;
2028         case HAMMER2_BREF_TYPE_FREEMAP:
2029                 base = &hmp->voldata.freemap_blockset.blockref[0];
2030                 count = HAMMER2_SET_COUNT;
2031                 break;
2032         default:
2033                 panic("hammer2_chain_scan: unrecognized blockref type: %d",
2034                       parent->bref.type);
2035                 base = NULL;    /* safety */
2036                 count = 0;      /* safety */
2037         }
2038
2039         /*
2040          * Merged scan to find next candidate.
2041          *
2042          * hammer2_base_*() functions require the parent->core.live_* fields
2043          * to be synchronized.
2044          *
2045          * We need to hold the spinlock to access the block array and RB tree
2046          * and to interlock chain creation.
2047          */
2048         if ((parent->core.flags & HAMMER2_CORE_COUNTEDBREFS) == 0)
2049                 hammer2_chain_countbrefs(parent, base, count);
2050
2051         next_key = 0;
2052         spin_lock(&parent->core.cst.spin);
2053         chain = hammer2_combined_find(parent, base, count,
2054                                       cache_indexp, &next_key,
2055                                       key, HAMMER2_KEY_MAX,
2056                                       &bref);
2057         generation = parent->core.generation;
2058
2059         /*
2060          * Exhausted parent chain, we're done.
2061          */
2062         if (bref == NULL) {
2063                 spin_unlock(&parent->core.cst.spin);
2064                 KKASSERT(chain == NULL);
2065                 goto done;
2066         }
2067
2068         /*
2069          * Selected from blockref or in-memory chain.
2070          */
2071         if (chain == NULL) {
2072                 bcopy = *bref;
2073                 spin_unlock(&parent->core.cst.spin);
2074                 chain = hammer2_chain_get(parent, generation, &bcopy);
2075                 if (chain == NULL) {
2076                         kprintf("retry scan parent %p keys %016jx\n",
2077                                 parent, key);
2078                         goto again;
2079                 }
2080                 if (bcmp(&bcopy, bref, sizeof(bcopy))) {
2081                         hammer2_chain_drop(chain);
2082                         chain = NULL;
2083                         goto again;
2084                 }
2085         } else {
2086                 hammer2_chain_ref(chain);
2087                 spin_unlock(&parent->core.cst.spin);
2088         }
2089
2090         /*
2091          * chain is referenced but not locked.  We must lock the chain
2092          * to obtain definitive DUPLICATED/DELETED state
2093          */
2094         hammer2_chain_lock(chain, how | HAMMER2_RESOLVE_NOREF);
2095
2096         /*
2097          * Skip deleted chains (XXX cache 'i' end-of-block-array? XXX)
2098          *
2099          * NOTE: chain's key range is not relevant as there might be
2100          *       one-offs within the range that are not deleted.
2101          *
2102          * NOTE: XXX this could create problems with scans used in
2103          *       situations other than mount-time recovery.
2104          *
2105          * NOTE: Lookups can race delete-duplicate because
2106          *       delete-duplicate does not lock the parent's core
2107          *       (they just use the spinlock on the core).  We must
2108          *       check for races by comparing the DUPLICATED flag before
2109          *       releasing the spinlock with the flag after locking the
2110          *       chain.
2111          */
2112         if (chain->flags & HAMMER2_CHAIN_DELETED) {
2113                 hammer2_chain_unlock(chain);
2114                 chain = NULL;
2115
2116                 key = next_key;
2117                 if (key == 0)
2118                         goto done;
2119                 goto again;
2120         }
2121
2122 done:
2123         /*
2124          * All done, return the chain or NULL
2125          */
2126         return (chain);
2127 }
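
#if 0
/*
 * Illustrative sketch (not compiled): raw iteration of every sub-chain
 * under a locked parent using hammer2_chain_scan() as described above.
 * example_scan_all() is hypothetical; the parent must be locked with
 * its data resolved.
 */
static void
example_scan_all(hammer2_chain_t *parent, int *cache_indexp)
{
        hammer2_chain_t *chain;

        chain = hammer2_chain_scan(parent, NULL, cache_indexp,
                                   HAMMER2_LOOKUP_NODATA);
        while (chain) {
                /* chain is returned locked, inspect it here */
                chain = hammer2_chain_scan(parent, chain, cache_indexp,
                                           HAMMER2_LOOKUP_NODATA);
        }
}
#endif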
2128
2129 /*
2130  * Create and return a new hammer2 system memory structure of the specified
2131  * key, type and size and insert it under (*parentp).  This is a full
2132  * insertion, based on the supplied key/keybits, and may involve creating
2133  * indirect blocks and moving other chains around via delete/duplicate.
2134  *
2135  * THE CALLER MUST HAVE ALREADY PROPERLY SEEKED (*parentp) TO THE INSERTION
2136  * POINT SANS ANY REQUIRED INDIRECT BLOCK CREATIONS DUE TO THE ARRAY BEING
2137  * FULL.  This typically means that the caller is creating the chain after
2138  * doing a hammer2_chain_lookup().
2139  *
2140  * (*parentp) must be exclusive locked and may be replaced on return
2141  * depending on how much work the function had to do.
2142  *
2143  * (*chainp) usually starts out NULL and returns the newly created chain,
2144  * but if the caller desires the caller may allocate a disconnected chain
2145  * and pass it in instead.
2146  *
2147  * This function should NOT be used to insert INDIRECT blocks.  It is
2148  * typically used to create/insert inodes and data blocks.
2149  *
2150  * Caller must pass in an exclusively locked parent the new chain is to
2151  * be inserted under, and optionally pass in a disconnected, exclusively
2152  * locked chain to insert (else we create a new chain).  The function will
2153  * adjust (*parentp) as necessary, create or connect the chain, and
2154  * return an exclusively locked chain in *chainp.
2155  */
2156 int
2157 hammer2_chain_create(hammer2_trans_t *trans, hammer2_chain_t **parentp,
2158                      hammer2_chain_t **chainp, hammer2_pfsmount_t *pmp,
2159                      hammer2_key_t key, int keybits, int type, size_t bytes)
2160 {
2161         hammer2_mount_t *hmp;
2162         hammer2_chain_t *chain;
2163         hammer2_chain_t *parent;
2164         hammer2_blockref_t *base;
2165         hammer2_blockref_t dummy;
2166         int allocated = 0;
2167         int error = 0;
2168         int count;
2169         int maxloops = 300000;
2170
2171         /*
2172          * Topology may be crossing a PFS boundary.
2173          */
2174         parent = *parentp;
2175         KKASSERT(ccms_thread_lock_owned(&parent->core.cst));
2176         hmp = parent->hmp;
2177         chain = *chainp;
2178
2179         if (chain == NULL) {
2180                 /*
2181                  * First allocate media space and construct the dummy bref,
2182                  * then allocate the in-memory chain structure.  Set the
2183                  * INITIAL flag for fresh chains which do not have embedded
2184                  * data.
2185                  */
2186                 bzero(&dummy, sizeof(dummy));
2187                 dummy.type = type;
2188                 dummy.key = key;
2189                 dummy.keybits = keybits;
2190                 dummy.data_off = hammer2_getradix(bytes);
2191                 dummy.methods = parent->bref.methods;
2192                 chain = hammer2_chain_alloc(hmp, pmp, trans, &dummy);
2193                 hammer2_chain_core_alloc(trans, chain);
2194
2195                 /*
2196                  * Lock the chain manually; chain_lock would load the chain's
2197                  * data, which we do NOT want to do here.  (note: chain->refs
2198                  * is set to 1 by chain_alloc() for us, but lockcnt is not).
2199                  */
2200                 chain->lockcnt = 1;
2201                 ccms_thread_lock(&chain->core.cst, CCMS_STATE_EXCLUSIVE);
2202                 allocated = 1;
2203
2204                 /*
2205                  * We do NOT set INITIAL here (yet).  INITIAL is only
2206                  * used for indirect blocks.
2207                  *
2208                  * Recalculate bytes to reflect the actual media block
2209                  * allocation.
2210                  */
2211                 bytes = (hammer2_off_t)1 <<
2212                         (int)(chain->bref.data_off & HAMMER2_OFF_MASK_RADIX);
2213                 chain->bytes = bytes;
2214
2215                 switch(type) {
2216                 case HAMMER2_BREF_TYPE_VOLUME:
2217                 case HAMMER2_BREF_TYPE_FREEMAP:
2218                         panic("hammer2_chain_create: called with volume type");
2219                         break;
2220                 case HAMMER2_BREF_TYPE_INDIRECT:
2221                         panic("hammer2_chain_create: cannot be used to "
2222                               "create indirect block");
2223                         break;
2224                 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2225                         panic("hammer2_chain_create: cannot be used to "
2226                               "create freemap root or node");
2227                         break;
2228                 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
2229                         KKASSERT(bytes == sizeof(chain->data->bmdata));
2230                         /* fall through */
2231                 case HAMMER2_BREF_TYPE_INODE:
2232                 case HAMMER2_BREF_TYPE_DATA:
2233                 default:
2234                         /*
2235                          * leave chain->data NULL, set INITIAL
2236                          */
2237                         KKASSERT(chain->data == NULL);
2238                         atomic_set_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
2239                         break;
2240                 }
2241         } else {
2242                 /*
2243                  * We are reattaching a previously deleted chain, possibly
2244                  * under a new parent and possibly with a new key/keybits.
2245                  * The chain does not have to be in a modified state.  The
2246                  * UPDATE flag will be set later on in this routine.
2247                  *
2248                  * Do NOT mess with the current state of the INITIAL flag.
2249                  */
2250                 chain->bref.key = key;
2251                 chain->bref.keybits = keybits;
2252                 if (chain->flags & HAMMER2_CHAIN_DELETED)
2253                         atomic_clear_int(&chain->flags, HAMMER2_CHAIN_DELETED);
2254                 KKASSERT(chain->parent == NULL);
2255         }
2256
2257         /*
2258          * Calculate how many entries we have in the blockref array and
2259          * determine if an indirect block is required.
2260          */
2261 again:
2262         if (--maxloops == 0)
2263                 panic("hammer2_chain_create: maxloops");
2264
2265         switch(parent->bref.type) {
2266         case HAMMER2_BREF_TYPE_INODE:
2267                 KKASSERT((parent->data->ipdata.op_flags &
2268                           HAMMER2_OPFLAG_DIRECTDATA) == 0);
2269                 KKASSERT(parent->data != NULL);
2270                 base = &parent->data->ipdata.u.blockset.blockref[0];
2271                 count = HAMMER2_SET_COUNT;
2272                 break;
2273         case HAMMER2_BREF_TYPE_INDIRECT:
2274         case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2275                 if (parent->flags & HAMMER2_CHAIN_INITIAL)
2276                         base = NULL;
2277                 else
2278                         base = &parent->data->npdata[0];
2279                 count = parent->bytes / sizeof(hammer2_blockref_t);
2280                 break;
2281         case HAMMER2_BREF_TYPE_VOLUME:
2282                 KKASSERT(parent->data != NULL);
2283                 base = &hmp->voldata.sroot_blockset.blockref[0];
2284                 count = HAMMER2_SET_COUNT;
2285                 break;
2286         case HAMMER2_BREF_TYPE_FREEMAP:
2287                 KKASSERT(parent->data != NULL);
2288                 base = &hmp->voldata.freemap_blockset.blockref[0];
2289                 count = HAMMER2_SET_COUNT;
2290                 break;
2291         default:
2292                 panic("hammer2_chain_create: unrecognized blockref type: %d",
2293                       parent->bref.type);
2294                 base = NULL;
2295                 count = 0;
2296                 break;
2297         }
2298
2299         /*
2300          * Make sure we've counted the brefs
2301          */
2302         if ((parent->core.flags & HAMMER2_CORE_COUNTEDBREFS) == 0)
2303                 hammer2_chain_countbrefs(parent, base, count);
2304
2305         KKASSERT(parent->core.live_count >= 0 &&
2306                  parent->core.live_count <= count);
2307
2308         /*
2309          * If no free blockref could be found we must create an indirect
2310          * block and move a number of blockrefs into it.  With the parent
2311          * locked we can safely lock each child in order to delete+duplicate
2312          * it without causing a deadlock.
2313          *
2314          * This may return the new indirect block or the old parent depending
2315          * on where the key falls.  NULL is returned on error.
2316          */
2317         if (parent->core.live_count == count) {
2318                 hammer2_chain_t *nparent;
2319
2320                 nparent = hammer2_chain_create_indirect(trans, parent,
2321                                                         key, keybits,
2322                                                         type, &error);
2323                 if (nparent == NULL) {
2324                         if (allocated)
2325                                 hammer2_chain_drop(chain);
2326                         chain = NULL;
2327                         goto done;
2328                 }
2329                 if (parent != nparent) {
2330                         hammer2_chain_unlock(parent);
2331                         parent = *parentp = nparent;
2332                 }
2333                 goto again;
2334         }
2335
2336         /*
2337          * Link the chain into its parent.
2338          */
2339         if (chain->parent != NULL)
2340                 panic("hammer2: hammer2_chain_create: chain already connected");
2341         KKASSERT(chain->parent == NULL);
2342         hammer2_chain_insert(parent, chain,
2343                              HAMMER2_CHAIN_INSERT_SPIN |
2344                              HAMMER2_CHAIN_INSERT_LIVE,
2345                              0);
2346
2347         if (allocated) {
2348                 /*
2349                  * Mark the newly created chain modified.  This will cause
2350                  * UPDATE to be set.
2351                  *
2352                  * Device buffers are not instantiated for DATA elements
2353                  * as these are handled by logical buffers.
2354                  *
2355                  * Indirect and freemap node indirect blocks are handled
2356                  * by hammer2_chain_create_indirect() and not by this
2357                  * function.
2358                  *
2359                  * Data for all other bref types is expected to be
2360                  * instantiated (INODE, LEAF).
2361                  */
2362                 switch(chain->bref.type) {
2363                 case HAMMER2_BREF_TYPE_DATA:
2364                 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
2365                 case HAMMER2_BREF_TYPE_INODE:
2366                         hammer2_chain_modify(trans, chain,
2367                                              HAMMER2_MODIFY_OPTDATA);
2368                         break;
2369                 default:
2370                         /*
2371                          * Remaining types are not supported by this function.
2372                          * In particular, INDIRECT and FREEMAP_NODE types are
2373                          * handled by create_indirect().
2374                          */
2375                         panic("hammer2_chain_create: bad type: %d",
2376                               chain->bref.type);
2377                         /* NOT REACHED */
2378                         break;
2379                 }
2380         } else {
2381                 /*
2382                  * When reconnecting a chain we must set UPDATE and
2383                  * setflush so the flush recognizes that it must update
2384                  * the bref in the parent.
2385                  */
2386                 if ((chain->flags & HAMMER2_CHAIN_UPDATE) == 0) {
2387                         hammer2_chain_ref(chain);
2388                         atomic_set_int(&chain->flags, HAMMER2_CHAIN_UPDATE);
2389                 }
2390         }
2391
2392         /*
2393          * We must setflush(parent) to ensure that it recurses through to
2394          * chain.  setflush(chain) might not work because ONFLUSH is possibly
2395          * already set in the chain (so it won't recurse up to set it in the
2396          * parent).
2397          */
2398         hammer2_chain_setflush(trans, parent);
2399
2400 done:
2401         *chainp = chain;
2402
2403         return (error);
2404 }
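
#if 0
/*
 * Illustrative sketch (not compiled): creating a new DATA chain under
 * an exclusively locked, already-seeked (*parentp), e.g. after a
 * hammer2_chain_lookup() returned NULL for the target key.
 * example_create_data(), lbase, and the use of HAMMER2_PBUFRADIX /
 * HAMMER2_PBUFSIZE here are assumptions for illustration only.
 */
static int
example_create_data(hammer2_trans_t *trans, hammer2_chain_t **parentp,
                    hammer2_pfsmount_t *pmp, hammer2_key_t lbase)
{
        hammer2_chain_t *chain = NULL;
        int error;

        error = hammer2_chain_create(trans, parentp, &chain, pmp,
                                     lbase, HAMMER2_PBUFRADIX,
                                     HAMMER2_BREF_TYPE_DATA,
                                     HAMMER2_PBUFSIZE);
        if (error == 0) {
                /* chain is returned exclusively locked in *chainp */
                hammer2_chain_unlock(chain);
        }
        return (error);
}
#endif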
2405
2406 /*
2407  * Move the chain from its old parent to a new parent.  The chain must have
2408  * already been deleted, disconnected, or never associated with a parent.
2409  * The chain is reassociated with the new parent and the deleted
2410  * flag will be cleared (no longer deleted).  The chain's modification state
2411  * is not altered.
2412  *
2413  * THE CALLER MUST HAVE ALREADY PROPERLY SEEKED (parent) TO THE INSERTION
2414  * POINT SANS ANY REQUIRED INDIRECT BLOCK CREATIONS DUE TO THE ARRAY BEING
2415  * FULL.  This typically means that the caller is creating the chain after
2416  * doing a hammer2_chain_lookup().
2417  *
2418  * A non-NULL bref is typically passed when key and keybits must be overridden.
2419  * Note that hammer2_cluster_duplicate() *ONLY* uses the key and keybits fields
2420  * from a passed-in bref and uses the old chain's bref for everything else.
2421  *
2422  * If (parent) is non-NULL then the new duplicated chain is inserted under
2423  * the parent.
2424  *
2425  * If (parent) is NULL then the newly duplicated chain is not inserted
2426  * anywhere, similar to if it had just been chain_alloc()'d (suitable for
2427  * passing into hammer2_chain_create() after this function returns).
2428  *
2429  * WARNING! This function calls create which means it can insert indirect
2430  *          blocks.  This can cause other unrelated chains in the parent to
2431  *          be moved to a newly inserted indirect block in addition to the
2432  *          specific chain.
2433  */
2434 void
2435 hammer2_chain_rename(hammer2_trans_t *trans, hammer2_blockref_t *bref,
2436                      hammer2_chain_t **parentp, hammer2_chain_t *chain)
2437 {
2438         hammer2_mount_t *hmp;
2439         hammer2_chain_t *parent;
2440         size_t bytes;
2441
2442         /*
2443          * WARNING!  We should never resolve DATA to device buffers
2444          *           (XXX allow it if the caller did?), and since
2445          *           we currently do not have the logical buffer cache
2446          *           buffer in-hand to fix its cached physical offset
2447          *           we also force the modify code to not COW it. XXX
2448          */
2449         hmp = chain->hmp;
2450         KKASSERT(chain->parent == NULL);
2451
2452         /*
2453          * Now create a duplicate of the chain structure, associating
2454          * it with the same core, making it the same size, pointing it
2455          * to the same bref (the same media block).
2456          */
2457         if (bref == NULL)
2458                 bref = &chain->bref;
2459         bytes = (hammer2_off_t)1 <<
2460                 (int)(bref->data_off & HAMMER2_OFF_MASK_RADIX);
2461
2462         /*
2463          * If parent is not NULL the duplicated chain will be entered under
2464          * the parent and the UPDATE bit set to tell flush to update
2465          * the blockref.
2466          *
2467          * We must setflush(parent) to ensure that it recurses through to
2468          * chain.  setflush(chain) might not work because ONFLUSH is possibly
2469          * already set in the chain (so it won't recurse up to set it in the
2470          * parent).
2471          *
2472  * Having both chains locked is extremely important for atomicity.
2473          */
2474         if (parentp && (parent = *parentp) != NULL) {
2475                 KKASSERT(ccms_thread_lock_owned(&parent->core.cst));
2476                 KKASSERT(parent->refs > 0);
2477
2478                 hammer2_chain_create(trans, parentp, &chain, chain->pmp,
2479                                      bref->key, bref->keybits, bref->type,
2480                                      chain->bytes);
2481                 KKASSERT(chain->flags & HAMMER2_CHAIN_UPDATE);
2482                 hammer2_chain_setflush(trans, *parentp);
2483         }
2484 }
2485
2486 /*
2487  * Helper function for deleting chains.
2488  *
2489  * The chain is removed from the live view (the RBTREE) as well as the parent's
2490  * blockmap.  Both chain and its parent must be locked.
2491  */
2492 static void
2493 _hammer2_chain_delete_helper(hammer2_trans_t *trans,
2494                              hammer2_chain_t *parent, hammer2_chain_t *chain)
2495 {
2496         hammer2_mount_t *hmp;
2497
2498         KKASSERT((chain->flags & HAMMER2_CHAIN_DELETED) == 0);
2499         hmp = chain->hmp;
2500
2501         if (chain->flags & HAMMER2_CHAIN_BMAPPED) {
2502                 /*
2503                  * Chain is blockmapped, so there must be a parent.
2504                  * Atomically remove the chain from the parent and remove
2505                  * the blockmap entry.
2506                  */
2507                 hammer2_blockref_t *base;
2508                 int count;
2509
2510                 KKASSERT(parent != NULL);
2511                 KKASSERT((parent->flags & HAMMER2_CHAIN_INITIAL) == 0);
2512                 hammer2_chain_modify(trans, parent,
2513                                      HAMMER2_MODIFY_OPTDATA);
2514
2515                 /*
2516                  * Calculate blockmap pointer
2517                  */
2518                 KKASSERT(chain->flags & HAMMER2_CHAIN_ONRBTREE);
2519                 spin_lock(&parent->core.cst.spin);
2520
2521                 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELETED);
2522                 atomic_add_int(&parent->core.live_count, -1);
2523                 ++parent->core.generation;
2524                 RB_REMOVE(hammer2_chain_tree, &parent->core.rbtree, chain);
2525                 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
2526                 --parent->core.chain_count;
2527                 chain->parent = NULL;
2528
2529                 switch(parent->bref.type) {
2530                 case HAMMER2_BREF_TYPE_INODE:
2531                         /*
2532                          * Access the inode's block array.  However, there
2533                          * is no block array if the inode is flagged
2534                          * DIRECTDATA.  The DIRECTDATA case typically only
2535                          * occurs when a hardlink has been shifted up the
2536                          * tree and the original inode gets replaced with
2537                          * an OBJTYPE_HARDLINK placeholder inode.
2538                          */
2539                         if (parent->data &&
2540                             (parent->data->ipdata.op_flags &
2541                              HAMMER2_OPFLAG_DIRECTDATA) == 0) {
2542                                 base =
2543                                    &parent->data->ipdata.u.blockset.blockref[0];
2544                         } else {
2545                                 base = NULL;
2546                         }
2547                         count = HAMMER2_SET_COUNT;
2548                         break;
2549                 case HAMMER2_BREF_TYPE_INDIRECT:
2550                 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2551                         if (parent->data)
2552                                 base = &parent->data->npdata[0];
2553                         else
2554                                 base = NULL;
2555                         count = parent->bytes / sizeof(hammer2_blockref_t);
2556                         break;
2557                 case HAMMER2_BREF_TYPE_VOLUME:
2558                         base = &hmp->voldata.sroot_blockset.blockref[0];
2559                         count = HAMMER2_SET_COUNT;
2560                         break;
2561                 case HAMMER2_BREF_TYPE_FREEMAP:
2562                         base = &parent->data->npdata[0];
2563                         count = HAMMER2_SET_COUNT;
2564                         break;
2565                 default:
2566                         base = NULL;
2567                         count = 0;
2568                         panic("_hammer2_chain_delete_helper: "
2569                               "unrecognized blockref type: %d",
2570                               parent->bref.type);
2571                 }
2572                 if (base) {
2573                         int cache_index = -1;
2574                         hammer2_base_delete(trans, parent, base, count,
2575                                             &cache_index, chain);
2576                 }
2577                 spin_unlock(&parent->core.cst.spin);
2578         } else if (chain->flags & HAMMER2_CHAIN_ONRBTREE) {
2579                 /*
2580                  * Chain is not blockmapped but a parent is present.
2581                  * Atomically remove the chain from the parent.  There is
2582                  * no blockmap entry to remove.
2583                  */
2584                 spin_lock(&parent->core.cst.spin);
2585                 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELETED);
2586                 atomic_add_int(&parent->core.live_count, -1);
2587                 ++parent->core.generation;
2588                 RB_REMOVE(hammer2_chain_tree, &parent->core.rbtree, chain);
2589                 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
2590                 --parent->core.chain_count;
2591                 chain->parent = NULL;
2592                 spin_unlock(&parent->core.cst.spin);
2593         } else {
2594                 /*
2595                  * Chain is not blockmapped and has no parent.  This
2596                  * is a degenerate case.
2597                  */
2598                 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELETED);
2599         }
2600 }
2601
2602 /*
2603  * Create an indirect block that covers one or more of the elements in the
2604  * current parent.  Either returns the existing parent with no change to
2605  * its lock/ref state, or returns the new indirect block locked and
2606  * referenced, leaving the original parent's lock/ref intact as well.
2607  *
2608  * If an error occurs, NULL is returned and *errorp is set to the error.
2609  *
2610  * The returned chain depends on where the specified key falls.
2611  *
2612  * The key/keybits for the indirect mode need only follow four rules:
2613  *
2614  * (1) That all elements underneath it fit within its key space and
2615  *
2616  * (2) That all elements outside it are outside its key space.
2617  *
2618  * (3) When creating the new indirect block any elements in the current
2619  *     parent that fit within the new indirect block's keyspace must be
2620  *     moved into the new indirect block.
2621  *
2622  * (4) The keyspace chosen for the inserted indirect block CAN cover a wider
2623  *     keyspace than the current parent, but lookup/iteration rules will
2624  *     ensure (and must ensure) that rule (2) for all parents leading up
2625  *     to the nearest inode or the root volume header is adhered to.  This
2626  *     is accomplished by always recursing through matching keyspaces in
2627  *     the hammer2_chain_lookup() and hammer2_chain_next() API.
2628  *
2629  * The current implementation calculates the current worst-case keyspace by
2630  * iterating the current parent and then divides it into two halves, choosing
2631  * whichever half has the most elements (not necessarily the half containing
2632  * the requested key).
2633  *
2634  * We can also opt to use the half with the least number of elements.  This
2635  * causes lower-numbered keys (aka logical file offsets) to recurse through
2636  * fewer indirect blocks and higher-numbered keys to recurse through more.
2637  * This also has the risk of not moving enough elements to the new indirect
2638  * block and being forced to create several indirect blocks before the element
2639  * can be inserted.
2640  *
2641  * Must be called with an exclusively locked parent.
2642  */
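/*
 * Example (illustrative): if the worst-case keyspace spanned by the
 * parent's current elements is keybits = 16 (a 2^16 key range), the
 * routine splits that range into two keybits = 15 halves and keys the
 * new indirect block to whichever half holds the most elements, moving
 * those elements under it.
 */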
2643 static int hammer2_chain_indkey_freemap(hammer2_chain_t *parent,
2644                                 hammer2_key_t *keyp, int keybits,
2645                                 hammer2_blockref_t *base, int count);
2646 static int hammer2_chain_indkey_normal(hammer2_chain_t *parent,
2647                                 hammer2_key_t *keyp, int keybits,
2648                                 hammer2_blockref_t *base, int count);
2649 static
2650 hammer2_chain_t *
2651 hammer2_chain_create_indirect(hammer2_trans_t *trans, hammer2_chain_t *parent,
2652                               hammer2_key_t create_key, int create_bits,
2653                               int for_type, int *errorp)
2654 {
2655         hammer2_mount_t *hmp;
2656         hammer2_blockref_t *base;
2657         hammer2_blockref_t *bref;
2658         hammer2_blockref_t bcopy;
2659         hammer2_chain_t *chain;
2660         hammer2_chain_t *ichain;
2661         hammer2_chain_t dummy;
2662         hammer2_key_t key = create_key;
2663         hammer2_key_t key_beg;
2664         hammer2_key_t key_end;
2665         hammer2_key_t key_next;
2666         int keybits = create_bits;
2667         int count;
2668         int nbytes;
2669         int cache_index;
2670         int loops;
2671         int reason;
2672         int generation;
2673         int maxloops = 300000;
2674
2675         /*
2676          * Calculate the base blockref pointer or NULL if the chain
2677          * is known to be empty.  We need to calculate the array count
2678          * for RB lookups either way.
2679          */
2680         hmp = parent->hmp;
2681         *errorp = 0;
2682         KKASSERT(ccms_thread_lock_owned(&parent->core.cst));
2683
2684         /*hammer2_chain_modify(trans, &parent, HAMMER2_MODIFY_OPTDATA);*/
2685         if (parent->flags & HAMMER2_CHAIN_INITIAL) {
2686                 base = NULL;
2687
2688                 switch(parent->bref.type) {
2689                 case HAMMER2_BREF_TYPE_INODE:
2690                         count = HAMMER2_SET_COUNT;
2691                         break;
2692                 case HAMMER2_BREF_TYPE_INDIRECT:
2693                 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2694                         count = parent->bytes / sizeof(hammer2_blockref_t);
2695                         break;
2696                 case HAMMER2_BREF_TYPE_VOLUME:
2697                         count = HAMMER2_SET_COUNT;
2698                         break;
2699                 case HAMMER2_BREF_TYPE_FREEMAP:
2700                         count = HAMMER2_SET_COUNT;
2701                         break;
2702                 default:
2703                         panic("hammer2_chain_create_indirect: "
2704                               "unrecognized blockref type: %d",
2705                               parent->bref.type);
2706                         count = 0;
2707                         break;
2708                 }
2709         } else {
2710                 switch(parent->bref.type) {
2711                 case HAMMER2_BREF_TYPE_INODE:
2712                         base = &parent->data->ipdata.u.blockset.blockref[0];
2713                         count = HAMMER2_SET_COUNT;
2714                         break;
2715                 case HAMMER2_BREF_TYPE_INDIRECT:
2716                 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2717                         base = &parent->data->npdata[0];
2718                         count = parent->bytes / sizeof(hammer2_blockref_t);
2719                         break;
2720                 case HAMMER2_BREF_TYPE_VOLUME:
2721                         base = &hmp->voldata.sroot_blockset.blockref[0];
2722                         count = HAMMER2_SET_COUNT;
2723                         break;
2724                 case HAMMER2_BREF_TYPE_FREEMAP:
2725                         base = &hmp->voldata.freemap_blockset.blockref[0];
2726                         count = HAMMER2_SET_COUNT;
2727                         break;
2728                 default:
2729                         panic("hammer2_chain_create_indirect: "
2730                               "unrecognized blockref type: %d",
2731                               parent->bref.type);
2732                         count = 0;
2733                         break;
2734                 }
2735         }
2736
2737         /*
2738          * dummy used in later chain allocation (no longer used for lookups).
2739          */
2740         bzero(&dummy, sizeof(dummy));
2741
2742         /*
2743          * When creating an indirect block for a freemap node or leaf
2744          * the key/keybits must be fitted to static radix levels because
2745          * particular radix levels use particular reserved blocks in the
2746          * related zone.
2747          *
2748          * This routine calculates the key/radix of the indirect block
2749          * we need to create, and whether it is on the high-side or the
2750          * low-side.
2751          */
2752         if (for_type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
2753             for_type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
2754                 keybits = hammer2_chain_indkey_freemap(parent, &key, keybits,
2755                                                        base, count);
2756         } else {
2757                 keybits = hammer2_chain_indkey_normal(parent, &key, keybits,
2758                                                       base, count);
2759         }
2760
2761         /*
2762          * Normalize the key for the radix being represented, keeping the
2763          * high bits and throwing away the low bits.
2764          */
2765         key &= ~(((hammer2_key_t)1 << keybits) - 1);
2766
2767         /*
2768          * How big should our new indirect block be?  It has to be at least
2769          * as large as its parent.
2770          */
2771         if (parent->bref.type == HAMMER2_BREF_TYPE_INODE)
2772                 nbytes = HAMMER2_IND_BYTES_MIN;
2773         else
2774                 nbytes = HAMMER2_IND_BYTES_MAX;
2775         if (nbytes < count * sizeof(hammer2_blockref_t))
2776                 nbytes = count * sizeof(hammer2_blockref_t);
2777
2778         /*
2779          * Ok, create our new indirect block
2780          */
2781         if (for_type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
2782             for_type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
2783                 dummy.bref.type = HAMMER2_BREF_TYPE_FREEMAP_NODE;
2784         } else {
2785                 dummy.bref.type = HAMMER2_BREF_TYPE_INDIRECT;
2786         }
2787         dummy.bref.key = key;
2788         dummy.bref.keybits = keybits;
2789         dummy.bref.data_off = hammer2_getradix(nbytes);
2790         dummy.bref.methods = parent->bref.methods;
2791
2792         ichain = hammer2_chain_alloc(hmp, parent->pmp, trans, &dummy.bref);
2793         atomic_set_int(&ichain->flags, HAMMER2_CHAIN_INITIAL);
2794         hammer2_chain_core_alloc(trans, ichain);
2795         hammer2_chain_lock(ichain, HAMMER2_RESOLVE_MAYBE);
2796         hammer2_chain_drop(ichain);     /* excess ref from alloc */
2797
2798         /*
2799          * We have to mark it modified to allocate its block, but use
2800          * OPTDATA to allow it to remain in the INITIAL state.  Otherwise
2801          * it won't be acted upon by the flush code.
2802          */
2803         hammer2_chain_modify(trans, ichain, HAMMER2_MODIFY_OPTDATA);
2804
2805         /*
2806          * Iterate the original parent and move the matching brefs into
2807          * the new indirect block.
2808          *
2809          * XXX handle flushes.
2810          */
2811         key_beg = 0;
2812         key_end = HAMMER2_KEY_MAX;
2813         cache_index = 0;
2814         spin_lock(&parent->core.cst.spin);
2815         loops = 0;
2816         reason = 0;
2817
2818         for (;;) {
2819                 if (++loops > 100000) {
2820                     spin_unlock(&parent->core.cst.spin);
2821                     panic("excessive loops r=%d p=%p base/count %p:%d %016jx\n",
2822                           reason, parent, base, count, key_next);
2823                 }
2824
2825                 /*
2826                  * NOTE: spinlock stays intact, returned chain (if not NULL)
2827                  *       is not referenced or locked which means that we
2828                  *       cannot safely check its flagged / deletion status
2829                  *       cannot safely check its flags / deletion status
2830                  */
2831                 chain = hammer2_combined_find(parent, base, count,
2832                                               &cache_index, &key_next,
2833                                               key_beg, key_end,
2834                                               &bref);
2835                 generation = parent->core.generation;
2836                 if (bref == NULL)
2837                         break;
2838                 key_next = bref->key + ((hammer2_key_t)1 << bref->keybits);
2839
2840                 /*
2841                  * Skip keys that are not within the key/radix of the new
2842                  * indirect block.  They stay in the parent.
2843                  */
2844                 if ((~(((hammer2_key_t)1 << keybits) - 1) &
2845                     (key ^ bref->key)) != 0) {
2846                         goto next_key_spinlocked;
2847                 }
2848
2849                 /*
2850                  * Load the new indirect block by acquiring the related
2851                  * chains (potentially from media as it might not be
2852                  * in-memory).  Then move it to the new parent (ichain)
2853                  * via hammer2_chain_delete() + hammer2_chain_rename().
2854                  *
2855                  * chain is referenced but not locked.  We must lock the
2856                  * chain to obtain its definitive DELETED state.
2857                  */
2858                 if (chain) {
2859                         /*
2860                          * Use chain already present in the RBTREE
2861                          */
2862                         hammer2_chain_ref(chain);
2863                         spin_unlock(&parent->core.cst.spin);
2864                         hammer2_chain_lock(chain, HAMMER2_RESOLVE_NEVER |
2865                                                   HAMMER2_RESOLVE_NOREF);
2866                 } else {
2867                         /*
2868                          * Get chain for blockref element.  _get returns NULL
2869                          * on insertion race.
2870                          */
2871                         bcopy = *bref;
2872                         spin_unlock(&parent->core.cst.spin);
2873                         chain = hammer2_chain_get(parent, generation, &bcopy);
2874                         if (chain == NULL) {
2875                                 reason = 1;
2876                                 spin_lock(&parent->core.cst.spin);
2877                                 continue;
2878                         }
2879                         if (bcmp(&bcopy, bref, sizeof(bcopy))) {
2880                                 kprintf("REASON 2\n");
2881                                 reason = 2;
2882                                 hammer2_chain_drop(chain);
2883                                 spin_lock(&parent->core.cst.spin);
2884                                 continue;
2885                         }
2886                         hammer2_chain_lock(chain, HAMMER2_RESOLVE_NEVER |
2887                                                   HAMMER2_RESOLVE_NOREF);
2888                 }
2889
2890                 /*
2891                  * This is always live so if the chain has been deleted
2892                  * we raced someone and we have to retry.
2893                  *
2894                  * NOTE: Lookups can race delete-duplicate because
2895                  *       delete-duplicate does not lock the parent's core
2896                  *       (they just use the spinlock on the core).  We must
2897                  *       check for races by comparing the DUPLICATED flag before
2898                  *       releasing the spinlock with the flag after locking the
2899                  *       chain.
2900                  *
2901                  *       (note reversed logic for this one)
2902                  */
2903                 if (chain->flags & HAMMER2_CHAIN_DELETED) {
2904                         hammer2_chain_unlock(chain);
2905                         goto next_key;
2906                 }
2907
2908                 /*
2909                  * Shift the chain to the indirect block.
2910                  *
2911                  * WARNING! Can cause held-over chains to require a refactor.
2912                  *          Fortunately we have none (our locked chains are
2913                  *          passed into and modified by the call).
2914                  */
2915                 hammer2_chain_delete(trans, parent, chain, 0);
2916                 hammer2_chain_rename(trans, NULL, &ichain, chain);
2917                 hammer2_chain_unlock(chain);
2918                 KKASSERT(parent->refs > 0);
2919                 chain = NULL;
2920 next_key:
2921                 spin_lock(&parent->core.cst.spin);
2922 next_key_spinlocked:
2923                 if (--maxloops == 0)
2924                         panic("hammer2_chain_create_indirect: maxloops");
2925                 reason = 4;
2926                 if (key_next == 0 || key_next > key_end)
2927                         break;
2928                 key_beg = key_next;
2929                 /* loop */
2930         }
2931         spin_unlock(&parent->core.cst.spin);
2932
2933         /*
2934          * Insert the new indirect block into the parent now that we've
2935          * cleared out some entries in the parent.  We calculated a good
2936          * insertion index in the loop above (ichain->index).
2937          *
2938          * We don't have to set UPDATE here because we mark ichain
2939          * modified down below (so the normal modified -> flush -> set-moved
2940          * sequence applies).
2941          *
2942          * The insertion shouldn't race as this is a completely new block
2943          * and the parent is locked.
2944          */
2945         KKASSERT((ichain->flags & HAMMER2_CHAIN_ONRBTREE) == 0);
2946         hammer2_chain_insert(parent, ichain,
2947                              HAMMER2_CHAIN_INSERT_SPIN |
2948                              HAMMER2_CHAIN_INSERT_LIVE,
2949                              0);
2950
2951         /*
2952          * Make sure flushes propagate after our manual insertion.
2953          */
2954         hammer2_chain_setflush(trans, ichain);
2955         hammer2_chain_setflush(trans, parent);
2956
2957         /*
2958          * Figure out what to return.
2959          */
2960         if (~(((hammer2_key_t)1 << keybits) - 1) &
2961                    (create_key ^ key)) {
2962                 /*
2963                  * Key being created is outside the key range,
2964                  * return the original parent.
2965                  */
2966                 hammer2_chain_unlock(ichain);
2967         } else {
2968                 /*
2969                  * Otherwise it's in the range; return the new parent.
2970                  * (leave both the new and old parent locked).
2971                  */
2972                 parent = ichain;
2973         }
2974
2975         return(parent);
2976 }
2977
2978 /*
2979  * Calculate the keybits and highside/lowside of the freemap node the
2980  * caller is creating.
2981  *
2982  * This routine will specify the next higher-level freemap key/radix
2983  * representing the lowest-ordered set.  By doing so, eventually all
2984  * low-ordered sets will be moved one level down.
2985  *
2986  * We have to be careful here because the freemap reserves a limited
2987  * number of blocks for a limited number of levels.  So we can't just
2988  * push indiscriminately.
2989  */
2990 static int
2991 hammer2_chain_indkey_freemap(hammer2_chain_t *parent, hammer2_key_t *keyp,
2992                              int keybits, hammer2_blockref_t *base, int count)
2993 {
2994         hammer2_chain_t *chain;
2995         hammer2_blockref_t *bref;
2996         hammer2_key_t key;
2997         hammer2_key_t key_beg;
2998         hammer2_key_t key_end;
2999         hammer2_key_t key_next;
3000         int cache_index;
3001         int locount;
3002         int hicount;
3003         int maxloops = 300000;
3004
3005         key = *keyp;
3006         locount = 0;
3007         hicount = 0;
3008         keybits = 64;
3009
3010         /*
3011          * Calculate the range of keys in the array being careful to skip
3012          * slots which are overridden with a deletion.
3013          */
3014         key_beg = 0;
3015         key_end = HAMMER2_KEY_MAX;
3016         cache_index = 0;
3017         spin_lock(&parent->core.cst.spin);
3018
3019         for (;;) {
3020                 if (--maxloops == 0) {
3021                         panic("indkey_freemap: maxloops %p %p:%d\n",
3022                               parent, base, count);
3023                 }
3024                 chain = hammer2_combined_find(parent, base, count,
3025                                               &cache_index, &key_next,
3026                                               key_beg, key_end,
3027                                               &bref);
3028
3029                 /*
3030                  * Exhausted search
3031                  */
3032                 if (bref == NULL)
3033                         break;
3034
3035                 /*
3036                  * Skip deleted chains.
3037                  */
3038                 if (chain && (chain->flags & HAMMER2_CHAIN_DELETED)) {
3039                         if (key_next == 0 || key_next > key_end)
3040                                 break;
3041                         key_beg = key_next;
3042                         continue;
3043                 }
3044
3045                 /*
3046                  * Use the full live (not deleted) element for the scan
3047                  * iteration.  HAMMER2 does not allow partial replacements.
3048                  *
3049                  * XXX should be built into hammer2_combined_find().
3050                  */
3051                 key_next = bref->key + ((hammer2_key_t)1 << bref->keybits);
3052
3053                 if (keybits > bref->keybits) {
3054                         key = bref->key;
3055                         keybits = bref->keybits;
3056                 } else if (keybits == bref->keybits && bref->key < key) {
3057                         key = bref->key;
3058                 }
3059                 if (key_next == 0)
3060                         break;
3061                 key_beg = key_next;
3062         }
3063         spin_unlock(&parent->core.cst.spin);
3064
3065         /*
3066          * Return the keybits for a higher-level FREEMAP_NODE covering
3067          * this node.
3068          */
3069         switch(keybits) {
3070         case HAMMER2_FREEMAP_LEVEL0_RADIX:
3071                 keybits = HAMMER2_FREEMAP_LEVEL1_RADIX;
3072                 break;
3073         case HAMMER2_FREEMAP_LEVEL1_RADIX:
3074                 keybits = HAMMER2_FREEMAP_LEVEL2_RADIX;
3075                 break;
3076         case HAMMER2_FREEMAP_LEVEL2_RADIX:
3077                 keybits = HAMMER2_FREEMAP_LEVEL3_RADIX;
3078                 break;
3079         case HAMMER2_FREEMAP_LEVEL3_RADIX:
3080                 keybits = HAMMER2_FREEMAP_LEVEL4_RADIX;
3081                 break;
3082         case HAMMER2_FREEMAP_LEVEL4_RADIX:
3083                 panic("hammer2_chain_indkey_freemap: level too high");
3084                 break;
3085         default:
3086                 panic("hammer2_chain_indkey_freemap: bad radix");
3087                 break;
3088         }
3089         *keyp = key;
3090
3091         return (keybits);
3092 }
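
/*
 * Illustrative sketch (dead code, never compiled): the level promotion
 * performed by hammer2_chain_indkey_freemap() above, reduced to a table
 * walk.  The radix values below are invented placeholders and are NOT
 * the real HAMMER2_FREEMAP_LEVEL*_RADIX constants; only the pattern
 * (promote the smallest radix seen to the next level up) is meaningful.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	/* hypothetical per-level radixes, leaf level first */
	static const int level_radix[] = { 10, 18, 26, 34, 42 };
	int seen = 18;		/* smallest radix found by the scan */
	int i;

	for (i = 0; i < 4; ++i) {
		if (seen == level_radix[i]) {
			printf("promote to radix %d\n", level_radix[i + 1]);
			return 0;
		}
	}
	printf("radix too high or unrecognized\n");
	return 1;
}
#endif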
3093
3094 /*
3095  * Calculate the keybits and highside/lowside of the indirect block the
3096  * caller is creating.
3097  */
3098 static int
3099 hammer2_chain_indkey_normal(hammer2_chain_t *parent, hammer2_key_t *keyp,
3100                             int keybits, hammer2_blockref_t *base, int count)
3101 {
3102         hammer2_blockref_t *bref;
3103         hammer2_chain_t *chain;
3104         hammer2_key_t key_beg;
3105         hammer2_key_t key_end;
3106         hammer2_key_t key_next;
3107         hammer2_key_t key;
3108         int nkeybits;
3109         int locount;
3110         int hicount;
3111         int cache_index;
3112         int maxloops = 300000;
3113
3114         key = *keyp;
3115         locount = 0;
3116         hicount = 0;
3117
3118         /*
3119          * Calculate the range of keys in the array being careful to skip
3120          * slots which are overridden with a deletion.  Once the scan
3121          * completes we will cut the key range in half and shift half the
3122          * range into the new indirect block.
3123          */
3124         key_beg = 0;
3125         key_end = HAMMER2_KEY_MAX;
3126         cache_index = 0;
3127         spin_lock(&parent->core.cst.spin);
3128
3129         for (;;) {
3130                 if (--maxloops == 0) {
3131                         panic("indkey_normal: maxloops %p %p:%d\n",
3132                               parent, base, count);
3133                 }
3134                 chain = hammer2_combined_find(parent, base, count,
3135                                               &cache_index, &key_next,
3136                                               key_beg, key_end,
3137                                               &bref);
3138
3139                 /*
3140                  * Exhausted search
3141                  */
3142                 if (bref == NULL)
3143                         break;
3144
3145                 /*
3146                  * NOTE: No need to check DUPLICATED here because we do
3147                  *       not release the spinlock.
3148                  */
3149                 if (chain && (chain->flags & HAMMER2_CHAIN_DELETED)) {
3150                         if (key_next == 0 || key_next > key_end)
3151                                 break;
3152                         key_beg = key_next;
3153                         continue;
3154                 }
3155
3156                 /*
3157                  * Use the full live (not deleted) element for the scan
3158                  * iteration.  HAMMER2 does not allow partial replacements.
3159                  *
3160                  * XXX should be built into hammer2_combined_find().
3161                  */
3162                 key_next = bref->key + ((hammer2_key_t)1 << bref->keybits);
3163
3164                 /*
3165                  * Expand our calculated key range (key, keybits) to fit
3166                  * the scanned key.  nkeybits represents the full range
3167                  * that we will later cut in half (two halves @ nkeybits - 1).
3168                  */
3169                 nkeybits = keybits;
3170                 if (nkeybits < bref->keybits) {
3171                         if (bref->keybits > 64) {
3172                                 kprintf("bad bref chain %p bref %p\n",
3173                                         chain, bref);
3174                                 Debugger("fubar");
3175                         }
3176                         nkeybits = bref->keybits;
3177                 }
3178                 while (nkeybits < 64 &&
3179                        (~(((hammer2_key_t)1 << nkeybits) - 1) &
3180                         (key ^ bref->key)) != 0) {
3181                         ++nkeybits;
3182                 }
3183
3184                 /*
3185                  * If the new key range is larger we have to determine
3186                  * which side of the new key range the existing keys fall
3187                  * under by checking the high bit, then collapsing the
3188                  * locount into the hicount or vice-versa.
3189                  */
3190                 if (keybits != nkeybits) {
3191                         if (((hammer2_key_t)1 << (nkeybits - 1)) & key) {
3192                                 hicount += locount;
3193                                 locount = 0;
3194                         } else {
3195                                 locount += hicount;
3196                                 hicount = 0;
3197                         }
3198                         keybits = nkeybits;
3199                 }
3200
3201                 /*
3202                  * The newly scanned key will be in the lower half or the
3203                  * upper half of the (new) key range.
3204                  */
3205                 if (((hammer2_key_t)1 << (nkeybits - 1)) & bref->key)
3206                         ++hicount;
3207                 else
3208                         ++locount;
3209
3210                 if (key_next == 0)
3211                         break;
3212                 key_beg = key_next;
3213         }
3214         spin_unlock(&parent->core.cst.spin);
3215         bref = NULL;    /* now invalid (safety) */
3216
3217         /*
3218          * Adjust keybits to represent half of the full range calculated
3219          * above (radix 63 max)
3220          */
3221         --keybits;
3222
3223         /*
3224          * Select whichever half contains the most elements.  Theoretically
3225          * we can select either side as long as it contains at least one
3226          * element (in order to ensure that a free slot is present to hold
3227          * the indirect block).
3228          */
3229         if (hammer2_indirect_optimize) {
3230                 /*
3231                  * Insert node for least number of keys, this will arrange
3232                  * the first few blocks of a large file or the first few
3233                  * inodes in a directory with fewer indirect blocks when
3234                  * created linearly.
3235                  */
3236                 if (hicount < locount && hicount != 0)
3237                         key |= (hammer2_key_t)1 << keybits;
3238                 else
3239                         key &= ~(hammer2_key_t)1 << keybits;
3240         } else {
3241                 /*
3242                  * Insert node for most number of keys, best for heavily
3243                  * fragmented files.
3244                  */
3245                 if (hicount > locount)
3246                         key |= (hammer2_key_t)1 << keybits;
3247                 else
3248                         key &= ~(hammer2_key_t)1 << keybits;
3249         }
3250         *keyp = key;
3251
3252         return (keybits);
3253 }
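
/*
 * Illustrative sketch (dead code, never compiled): a standalone
 * rendition of the half-selection heuristic used by
 * hammer2_chain_indkey_normal() above, with plain uint64_t keys.  The
 * element keys are invented; the point is only to show how the expanded
 * (key, keybits) range is cut in half and the fuller half is chosen.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* pretend the scan expanded the range to (key=0, keybits=8) */
	uint64_t key = 0;
	int keybits = 8;
	uint64_t elms[] = { 0x10, 0x20, 0x30, 0x90, 0xa0 };
	int locount = 0, hicount = 0;
	size_t i;

	for (i = 0; i < sizeof(elms) / sizeof(elms[0]); ++i) {
		if (elms[i] & ((uint64_t)1 << (keybits - 1)))
			++hicount;
		else
			++locount;
	}
	--keybits;			/* each half is one radix smaller */
	if (hicount > locount)		/* keep the fuller half */
		key |= (uint64_t)1 << keybits;
	else
		key &= ~((uint64_t)1 << keybits);
	printf("new indirect block: key=%#jx keybits=%d (lo=%d hi=%d)\n",
	       (uintmax_t)key, keybits, locount, hicount);
	return 0;
}
#endif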
3254
3255 /*
3256  * Sets CHAIN_DELETED and remove the chain's blockref from the parent if
3257  * it exists.
3258  *
3259  * Both parent and chain must be locked exclusively.
3260  *
3261  * This function will modify the parent if the blockref requires removal
3262  * from the parent's block table.
3263  *
3264  * This function is NOT recursive.  Any entity already pushed into the
3265  * chain (such as an inode) may still need visibility into its contents,
3266  * as well as the ability to read and modify those contents; for example,
3267  * an unlinked file which is still open.
3268  */
3269 void
3270 hammer2_chain_delete(hammer2_trans_t *trans, hammer2_chain_t *parent,
3271                      hammer2_chain_t *chain, int flags)
3272 {
3273         KKASSERT(ccms_thread_lock_owned(&chain->core.cst));
3274
3275         /*
3276          * Nothing to do if already marked.
3277          *
3278          * We need the spinlock on the core whose RBTREE contains chain
3279          * to protect against races.
3280          */
3281         if ((chain->flags & HAMMER2_CHAIN_DELETED) == 0) {
3282                 KKASSERT((chain->flags & HAMMER2_CHAIN_DELETED) == 0 &&
3283                          chain->parent == parent);
3284                 _hammer2_chain_delete_helper(trans, parent, chain);
3285         }
3286
3287         if (flags & HAMMER2_DELETE_PERMANENT) {
3288                 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DESTROY);
3289                 hammer2_flush(trans, chain);
3290         } else {
3291                 /* XXX might not be needed */
3292                 hammer2_chain_setflush(trans, chain);
3293         }
3294 }
3295
3296 /*
3297  * Returns the index of the nearest element in the blockref array >= elm.
3298  * Returns (count) if no element could be found.
3299  *
3300  * Sets *key_nextp to the next key for loop purposes but does not modify
3301  * it if the next key would be higher than the current value of *key_nextp.
3302  * Note that *key_nextp can overflow to 0, which should be tested by the
3303  * caller.
3304  *
3305  * (*cache_indexp) is a heuristic and can be any value without affecting
3306  * the result.
3307  *
3308  * WARNING!  Must be called with parent's spinlock held.  Spinlock remains
3309  *           held through the operation.
3310  */
3311 static int
3312 hammer2_base_find(hammer2_chain_t *parent,
3313                   hammer2_blockref_t *base, int count,
3314                   int *cache_indexp, hammer2_key_t *key_nextp,
3315                   hammer2_key_t key_beg, hammer2_key_t key_end)
3316 {
3317         hammer2_blockref_t *scan;
3318         hammer2_key_t scan_end;
3319         int i;
3320         int limit;
3321
3322         /*
3323          * Require that live chains already have their brefs counted
3324          * so we can optimize operations.
3325          */
3326         KKASSERT(parent->core.flags & HAMMER2_CORE_COUNTEDBREFS);
3327
3328         /*
3329          * Degenerate case
3330          */
3331         if (count == 0 || base == NULL)
3332                 return(count);
3333
3334         /*
3335          * Sequential optimization using *cache_indexp.  This is the most
3336          * likely scenario.
3337          *
3338          * We can avoid trailing empty entries on live chains; otherwise
3339          * we might have to check the whole block array.
3340          */
3341         i = *cache_indexp;
3342         cpu_ccfence();
3343         limit = parent->core.live_zero;
3344         if (i >= limit)
3345                 i = limit - 1;
3346         if (i < 0)
3347                 i = 0;
3348         KKASSERT(i < count);
3349
3350         /*
3351          * Search backwards
3352          */
3353         scan = &base[i];
3354         while (i > 0 && (scan->type == 0 || scan->key > key_beg)) {
3355                 --scan;
3356                 --i;
3357         }
3358         *cache_indexp = i;
3359
3360         /*
3361          * Search forwards, stop when we find a scan element which
3362          * encloses the key or until we know that there are no further
3363          * elements.
3364          */
3365         while (i < count) {
3366                 if (scan->type != 0) {
3367                         scan_end = scan->key +
3368                                    ((hammer2_key_t)1 << scan->keybits) - 1;
3369                         if (scan->key > key_beg || scan_end >= key_beg)
3370                                 break;
3371                 }
3372                 if (i >= limit)
3373                         return (count);
3374                 ++scan;
3375                 ++i;
3376         }
3377         if (i != count) {
3378                 *cache_indexp = i;
3379                 if (i >= limit) {
3380                         i = count;
3381                 } else {
3382                         scan_end = scan->key +
3383                                    ((hammer2_key_t)1 << scan->keybits);
3384                         if (scan_end && (*key_nextp > scan_end ||
3385                                          *key_nextp == 0)) {
3386                                 *key_nextp = scan_end;
3387                         }
3388                 }
3389         }
3390         return (i);
3391 }
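
/*
 * Illustrative sketch (dead code, never compiled): the cached-index scan
 * used by hammer2_base_find() above, on a standalone array of (key,
 * keybits) pairs where keybits == 0 marks an empty slot.  It backs the
 * hint up until it is at or below the search key, then advances until an
 * element's range reaches key_beg.  The live_zero optimization is
 * deliberately omitted and the element values are invented.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

struct elm { uint64_t key; int keybits; };

static int
base_find(struct elm *base, int count, int *hint, uint64_t key_beg)
{
	int i = *hint;

	if (i >= count)
		i = count - 1;
	if (i < 0)
		i = 0;
	while (i > 0 && (base[i].keybits == 0 || base[i].key > key_beg))
		--i;			/* back up toward the search key */
	while (i < count) {
		if (base[i].keybits) {
			uint64_t end = base[i].key +
			    (((uint64_t)1 << base[i].keybits) - 1);
			if (base[i].key > key_beg || end >= key_beg)
				break;	/* element at or beyond key_beg */
		}
		++i;
	}
	*hint = (i < count) ? i : count - 1;
	return i;			/* == count if nothing was found */
}

int
main(void)
{
	struct elm base[4] = {
		{ 0x0000, 8 }, { 0x0000, 0 }, { 0x0200, 8 }, { 0x0400, 8 }
	};
	int hint = 3;

	printf("%d\n", base_find(base, 4, &hint, 0x0180));	/* 2 */
	printf("%d\n", base_find(base, 4, &hint, 0x0050));	/* 0 */
	return 0;
}
#endif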
3392
3393 /*
3394  * Do a combined search and return the next match either from the blockref
3395  * array or from the in-memory chain.  Sets *bresp to the returned bref in
3396  * both cases, or sets it to NULL if the search was exhausted.  Only returns
3397  * a non-NULL chain if the search matched from the in-memory chain.
3398  *
3399  * When no in-memory chain has been found, a non-NULL bref returned
3400  * in *bresp points into the parent's on-media block array rather
3401  * than at an in-memory chain's bref.
3402  *
3403  * The returned chain is not locked or referenced.  Use the returned bref
3404  * to determine whether the search was exhausted.  Iterate if the base find
3405  * is chosen but matches a deleted chain.
3406  *
3407  * WARNING!  Must be called with parent's spinlock held.  Spinlock remains
3408  *           held through the operation.
3409  */
3410 static hammer2_chain_t *
3411 hammer2_combined_find(hammer2_chain_t *parent,
3412                       hammer2_blockref_t *base, int count,
3413                       int *cache_indexp, hammer2_key_t *key_nextp,
3414                       hammer2_key_t key_beg, hammer2_key_t key_end,
3415                       hammer2_blockref_t **bresp)
3416 {
3417         hammer2_blockref_t *bref;
3418         hammer2_chain_t *chain;
3419         int i;
3420
3421         /*
3422          * Lookup in block array and in rbtree.
3423          */
3424         *key_nextp = key_end + 1;
3425         i = hammer2_base_find(parent, base, count, cache_indexp,
3426                               key_nextp, key_beg, key_end);
3427         chain = hammer2_chain_find(parent, key_nextp, key_beg, key_end);
3428
3429         /*
3430          * Neither matched
3431          */
3432         if (i == count && chain == NULL) {
3433                 *bresp = NULL;
3434                 return(NULL);
3435         }
3436
3437         /*
3438          * Only chain matched.
3439          */
3440         if (i == count) {
3441                 bref = &chain->bref;
3442                 goto found;
3443         }
3444
3445         /*
3446          * Only blockref matched.
3447          */
3448         if (chain == NULL) {
3449                 bref = &base[i];
3450                 goto found;
3451         }
3452
3453         /*
3454          * Both in-memory and blockref matched, select the nearer element.
3455          *
3456          * If both are flush with the left-hand side or both are the
3457          * same distance away, select the chain.  In this situation the
3458          * chain must have been loaded from the matching blockmap.
3459          */
3460         if ((chain->bref.key <= key_beg && base[i].key <= key_beg) ||
3461             chain->bref.key == base[i].key) {
3462                 KKASSERT(chain->bref.key == base[i].key);
3463                 bref = &chain->bref;
3464                 goto found;
3465         }
3466
3467         /*
3468          * Select the nearer key
3469          */
3470         if (chain->bref.key < base[i].key) {
3471                 bref = &chain->bref;
3472         } else {
3473                 bref = &base[i];
3474                 chain = NULL;
3475         }
3476
3477         /*
3478          * If the bref is out of bounds we've exhausted our search.
3479          */
3480 found:
3481         if (bref->key > key_end) {
3482                 *bresp = NULL;
3483                 chain = NULL;
3484         } else {
3485                 *bresp = bref;
3486         }
3487         return(chain);
3488 }
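
/*
 * Illustrative sketch (dead code, never compiled): the selection rule
 * hammer2_combined_find() applies above when both the media blockref
 * array and the in-memory rbtree produce a candidate.  Keys are plain
 * uint64_t and the test values are invented; the in-memory candidate
 * wins ties because it shadows the blockmap entry it was loaded from.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

/* returns 1 when the in-memory candidate should be used, 0 for media */
static int
pick_in_memory(uint64_t chain_key, uint64_t media_key, uint64_t key_beg)
{
	if (chain_key <= key_beg && media_key <= key_beg)
		return 1;		/* both flush left: chain shadows media */
	if (chain_key == media_key)
		return 1;		/* same key: chain shadows media */
	return (chain_key < media_key);	/* otherwise take the nearer key */
}

int
main(void)
{
	printf("%d\n", pick_in_memory(0x1000, 0x1000, 0x0800)); /* tie: 1 */
	printf("%d\n", pick_in_memory(0x3000, 0x2000, 0x0800)); /* media: 0 */
	printf("%d\n", pick_in_memory(0x2000, 0x3000, 0x0800)); /* chain: 1 */
	return 0;
}
#endif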
3489
3490 /*
3491  * Locate the specified block array element and delete it.  The element
3492  * must exist.
3493  *
3494  * The spin lock on the related chain must be held.
3495  *
3496  * NOTE: live_count was adjusted when the chain was deleted, so it does not
3497  *       need to be adjusted when we commit the media change.
3498  */
3499 void
3500 hammer2_base_delete(hammer2_trans_t *trans, hammer2_chain_t *parent,
3501                     hammer2_blockref_t *base, int count,
3502                     int *cache_indexp, hammer2_chain_t *chain)
3503 {
3504         hammer2_blockref_t *elm = &chain->bref;
3505         hammer2_key_t key_next;
3506         int i;
3507
3508         /*
3509          * Delete element.  Expect the element to exist.
3510          *
3511          * XXX see caller, flush code not yet sophisticated enough to prevent
3512          *     re-flushes in some cases.
3513          */
3514         key_next = 0; /* max range */
3515         i = hammer2_base_find(parent, base, count, cache_indexp,
3516                               &key_next, elm->key, elm->key);
3517         if (i == count || base[i].type == 0 ||
3518             base[i].key != elm->key ||
3519             ((chain->flags & HAMMER2_CHAIN_BMAPUPD) == 0 &&
3520              base[i].keybits != elm->keybits)) {
3521                 spin_unlock(&parent->core.cst.spin);
3522                 panic("delete base %p element not found at %d/%d elm %p\n",
3523                       base, i, count, elm);
3524                 return;
3525         }
3526         bzero(&base[i], sizeof(*base));
3527
3528         /*
3529          * We can only optimize parent->core.live_zero for live chains.
3530          */
3531         if (parent->core.live_zero == i + 1) {
3532                 while (--i >= 0 && base[i].type == 0)
3533                         ;
3534                 parent->core.live_zero = i + 1;
3535         }
3536
3537         /*
3538          * Clear appropriate blockmap flags in chain.
3539          */
3540         atomic_clear_int(&chain->flags, HAMMER2_CHAIN_BMAPPED |
3541                                         HAMMER2_CHAIN_BMAPUPD);
3542 }
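
/*
 * Illustrative sketch (dead code, never compiled): the live_zero
 * trimming performed by hammer2_base_delete() above, on a standalone
 * array where a type of 0 marks an empty slot.  live_zero is the index
 * just past the last live element, so scans can stop early; the array
 * contents here are invented.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	int type[8] = { 1, 1, 0, 1, 0, 0, 0, 0 };
	int live_zero = 4;		/* slots >= 4 are known to be empty */
	int i = 3;			/* slot being deleted */

	type[i] = 0;			/* analogous to bzero(&base[i], ...) */
	if (live_zero == i + 1) {
		while (--i >= 0 && type[i] == 0)
			;
		live_zero = i + 1;
	}
	printf("live_zero trimmed to %d\n", live_zero);	/* prints 2 */
	return 0;
}
#endif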
3543
3544 /*
3545  * Insert the specified element.  The block array must not already have the
3546  * element and must have space available for the insertion.
3547  *
3548  * The spin lock on the related chain must be held.
3549  *
3550  * NOTE: live_count was adjusted when the chain was inserted, so it does
3551  *       not need to be adjusted when we commit the media change.
3552  */
3553 void
3554 hammer2_base_insert(hammer2_trans_t *trans __unused, hammer2_chain_t *parent,
3555                     hammer2_blockref_t *base, int count,
3556                     int *cache_indexp, hammer2_chain_t *chain)
3557 {
3558         hammer2_blockref_t *elm = &chain->bref;
3559         hammer2_key_t key_next;
3560         hammer2_key_t xkey;
3561         int i;
3562         int j;
3563         int k;
3564         int l;
3565         int u = 1;
3566
3567         /*
3568          * Insert new element.  Expect the element to not already exist
3569          * unless we are replacing it.
3570          *
3571          * XXX see caller, flush code not yet sophisticated enough to prevent
3572          *     re-flushes in some cases.
3573          */
3574         key_next = 0; /* max range */
3575         i = hammer2_base_find(parent, base, count, cache_indexp,
3576                               &key_next, elm->key, elm->key);
3577
3578         /*
3579          * Shortcut fill optimization: typical ordered insertion(s) may not
3580          * require a search.
3581          */
3582         KKASSERT(i >= 0 && i <= count);
3583
3584         /*
3585          * Set appropriate blockmap flags in chain.
3586          */
3587         atomic_set_int(&chain->flags, HAMMER2_CHAIN_BMAPPED);
3588
3589         /*
3590          * We can only optimize parent->core.live_zero for live chains.
3591          */
3592         if (i == count && parent->core.live_zero < count) {
3593                 i = parent->core.live_zero++;
3594                 base[i] = *elm;
3595                 return;
3596         }
3597
3598         xkey = elm->key + ((hammer2_key_t)1 << elm->keybits) - 1;
3599         if (i != count && (base[i].key < elm->key || xkey >= base[i].key)) {
3600                 spin_unlock(&parent->core.cst.spin);
3601                 panic("insert base %p overlapping elements at %d elm %p\n",
3602                       base, i, elm);
3603         }
3604
3605         /*
3606          * Try to find an empty slot before or after.
3607          */
3608         j = i;
3609         k = i;
3610         while (j > 0 || k < count) {
3611                 --j;
3612                 if (j >= 0 && base[j].type == 0) {
3613                         if (j == i - 1) {
3614                                 base[j] = *elm;
3615                         } else {
3616                                 bcopy(&base[j+1], &base[j],
3617                                       (i - j - 1) * sizeof(*base));
3618                                 base[i - 1] = *elm;
3619                         }
3620                         goto validate;
3621                 }
3622                 ++k;
3623                 if (k < count && base[k].type == 0) {
3624                         bcopy(&base[i], &base[i+1],
3625                               (k - i) * sizeof(hammer2_blockref_t));
3626                         base[i] = *elm;
3627
3628                         /*
3629                          * We can only update parent->core.live_zero for live
3630                          * chains.
3631                          */
3632                         if (parent->core.live_zero <= k)
3633                                 parent->core.live_zero = k + 1;
3634                         u = 2;
3635                         goto validate;
3636                 }
3637         }
3638         panic("hammer2_base_insert: no room!");
3639
3640         /*
3641          * Debugging
3642          */
3643 validate:
3644         key_next = 0;
3645         for (l = 0; l < count; ++l) {
3646                 if (base[l].type) {
3647                         key_next = base[l].key +
3648                                    ((hammer2_key_t)1 << base[l].keybits) - 1;
3649                         break;
3650                 }
3651         }
3652         while (++l < count) {
3653                 if (base[l].type) {
3654                         if (base[l].key <= key_next)
3655                                 panic("base_insert %d %d,%d,%d fail "
3656                                       "%p:%d", u, i, j, k, base, l);
3657                         key_next = base[l].key +
3658                                    ((hammer2_key_t)1 << base[l].keybits) - 1;
3659                 }
3660         }
3661
3662 }
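
/*
 * Illustrative sketch (dead code, never compiled): the empty-slot search
 * and shift performed by hammer2_base_insert() above, on a standalone
 * int array where 0 marks an empty slot.  The real code scans left and
 * right simultaneously; this sketch shows only the right-hand case,
 * shifting elements [i, k) up one slot to open base[i], and uses
 * memmove rather than bcopy.  All values are invented.
 */
#if 0
#include <stdio.h>
#include <string.h>

int
main(void)
{
	int base[8] = { 10, 20, 40, 50, 0, 0, 0, 0 };
	int elm = 30;
	int i = 2;			/* insertion index found by the search */
	int k;

	for (k = i; k < 8 && base[k] != 0; ++k)
		;			/* nearest empty slot at or after i */
	memmove(&base[i + 1], &base[i], (k - i) * sizeof(base[0]));
	base[i] = elm;
	for (k = 0; k < 8; ++k)
		printf("%d ", base[k]);	/* 10 20 30 40 50 0 0 0 */
	printf("\n");
	return 0;
}
#endif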
3663
3664 #if 0
3665
3666 /*
3667  * Sort the blockref array for the chain.  Used by the flush code to
3668  * sort the blockref[] array.
3669  *
3670  * The chain must be exclusively locked AND spin-locked.
3671  */
3672 typedef hammer2_blockref_t *hammer2_blockref_p;
3673
3674 static
3675 int
3676 hammer2_base_sort_callback(const void *v1, const void *v2)
3677 {
3678         hammer2_blockref_p bref1 = *(const hammer2_blockref_p *)v1;
3679         hammer2_blockref_p bref2 = *(const hammer2_blockref_p *)v2;
3680
3681         /*
3682          * Make sure empty elements are placed at the end of the array
3683          */
3684         if (bref1->type == 0) {
3685                 if (bref2->type == 0)
3686                         return(0);
3687                 return(1);
3688         } else if (bref2->type == 0) {
3689                 return(-1);
3690         }
3691
3692         /*
3693          * Sort by key
3694          */
3695         if (bref1->key < bref2->key)
3696                 return(-1);
3697         if (bref1->key > bref2->key)
3698                 return(1);
3699         return(0);
3700 }
3701
3702 void
3703 hammer2_base_sort(hammer2_chain_t *chain)
3704 {
3705         hammer2_blockref_t *base;
3706         int count;
3707
3708         switch(chain->bref.type) {
3709         case HAMMER2_BREF_TYPE_INODE:
3710                 /*
3711                  * Special shortcut for embedded data returns the inode
3712                  * itself.  Callers must detect this condition and access
3713                  * the embedded data (the strategy code does this for us).
3714                  *
3715                  * This is only applicable to regular files and softlinks.
3716                  */
3717                 if (chain->data->ipdata.op_flags & HAMMER2_OPFLAG_DIRECTDATA)
3718                         return;
3719                 base = &chain->data->ipdata.u.blockset.blockref[0];
3720                 count = HAMMER2_SET_COUNT;
3721                 break;
3722         case HAMMER2_BREF_TYPE_FREEMAP_NODE:
3723         case HAMMER2_BREF_TYPE_INDIRECT:
3724                 /*
3725                  * Optimize indirect blocks in the INITIAL state to avoid
3726                  * I/O.
3727                  */
3728                 KKASSERT((chain->flags & HAMMER2_CHAIN_INITIAL) == 0);
3729                 base = &chain->data->npdata[0];
3730                 count = chain->bytes / sizeof(hammer2_blockref_t);
3731                 break;
3732         case HAMMER2_BREF_TYPE_VOLUME:
3733                 base = &chain->hmp->voldata.sroot_blockset.blockref[0];
3734                 count = HAMMER2_SET_COUNT;
3735                 break;
3736         case HAMMER2_BREF_TYPE_FREEMAP:
3737                 base = &chain->hmp->voldata.freemap_blockset.blockref[0];
3738                 count = HAMMER2_SET_COUNT;
3739                 break;
3740         default:
3741                 panic("hammer2_base_sort: unrecognized blockref type: %d",
3742                       chain->bref.type);
3743                 base = NULL;    /* safety */
3744                 count = 0;      /* safety */
3745         }
3746         kqsort(base, count, sizeof(*base), hammer2_base_sort_callback);
3747 }
3748
3749 #endif
3750
3751 /*
3752  * Chain memory management
3753  */
3754 void
3755 hammer2_chain_wait(hammer2_chain_t *chain)
3756 {
3757         tsleep(chain, 0, "chnflw", 1);
3758 }