sys/vfs/hammer/hammer_object.c
1 /*
2  * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
3  * 
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  * 
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  * 
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34
35 #include "hammer.h"
36
37 static int hammer_mem_lookup(hammer_cursor_t cursor);
38 static void hammer_mem_first(hammer_cursor_t cursor);
39 static int hammer_frontend_trunc_callback(hammer_record_t record,
40                                 void *data __unused);
41 static int hammer_bulk_scan_callback(hammer_record_t record, void *data);
42 static int hammer_record_needs_overwrite_delete(hammer_record_t record);
43 static int hammer_delete_general(hammer_cursor_t cursor, hammer_inode_t ip,
44                                 hammer_btree_leaf_elm_t leaf);
45 static int hammer_cursor_localize_data(hammer_data_ondisk_t data,
46                                 hammer_btree_leaf_elm_t leaf);
47
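/*
 * Scan parameters for hammer_rec_trunc_cmp(), set up by
 * hammer_ip_frontend_trunc() when scanning for records to destroy.
 */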
48 struct rec_trunc_info {
49         u_int16_t       rec_type;
50         int64_t         trunc_off;
51 };
52
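/*
 * Scan state for hammer_ip_get_bulk().  'record' is the new bulk record
 * being checked and 'conflict' returns any overlapping in-memory record
 * located by the scan.
 */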
53 struct hammer_bulk_info {
54         hammer_record_t record;
55         hammer_record_t conflict;
56 };
57
58 /*
59  * Red-black tree support.  Comparison code for insertion.
60  */
61 static int
62 hammer_rec_rb_compare(hammer_record_t rec1, hammer_record_t rec2)
63 {
64         if (rec1->leaf.base.rec_type < rec2->leaf.base.rec_type)
65                 return(-1);
66         if (rec1->leaf.base.rec_type > rec2->leaf.base.rec_type)
67                 return(1);
68
69         if (rec1->leaf.base.key < rec2->leaf.base.key)
70                 return(-1);
71         if (rec1->leaf.base.key > rec2->leaf.base.key)
72                 return(1);
73
74         /*
75          * For search & insertion purposes records deleted by the
76          * frontend or deleted/committed by the backend are silently
77          * ignored.  Otherwise pipelined insertions will get messed
78          * up.
79          *
80          * rec1 is greater than rec2 if rec1 is marked deleted.
81          * rec1 is less than rec2 if rec2 is marked deleted.
82          *
83          * Multiple deleted records may be present; do not return 0
84          * if both are marked deleted.
85          */
86         if (rec1->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
87                            HAMMER_RECF_COMMITTED)) {
88                 return(1);
89         }
90         if (rec2->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
91                            HAMMER_RECF_COMMITTED)) {
92                 return(-1);
93         }
94
95         return(0);
96 }
97
98 /*
99  * Basic record comparison code similar to hammer_btree_cmp().
100  *
101  * obj_id is not compared and may not yet be assigned in the record.
102  */
103 static int
104 hammer_rec_cmp(hammer_base_elm_t elm, hammer_record_t rec)
105 {
106         if (elm->rec_type < rec->leaf.base.rec_type)
107                 return(-3);
108         if (elm->rec_type > rec->leaf.base.rec_type)
109                 return(3);
110
111         if (elm->key < rec->leaf.base.key)
112                 return(-2);
113         if (elm->key > rec->leaf.base.key)
114                 return(2);
115
116         /*
117          * Never match against an item deleted by the frontend
118          * or backend, or committed by the backend.
119          *
120          * elm is less than rec if rec is marked deleted.
121          */
122         if (rec->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
123                           HAMMER_RECF_COMMITTED)) {
124                 return(-1);
125         }
126         return(0);
127 }
128
129 /*
130  * Ranged scan to locate overlapping record(s).  This is used by
131  * hammer_ip_get_bulk() to locate an overlapping record.  We have
132  * to use a ranged scan because the keys for data records with the
133  * same file base offset can be different due to differing data_len's.
134  *
135  * NOTE: The base file offset of a data record is (key - data_len), not (key).
136  */
137 static int
138 hammer_rec_overlap_cmp(hammer_record_t rec, void *data)
139 {
140         struct hammer_bulk_info *info = data;
141         hammer_btree_leaf_elm_t leaf = &info->record->leaf;
142
143         if (rec->leaf.base.rec_type < leaf->base.rec_type)
144                 return(-3);
145         if (rec->leaf.base.rec_type > leaf->base.rec_type)
146                 return(3);
147
148         /*
149          * Overlap compare
150          */
151         if (leaf->base.rec_type == HAMMER_RECTYPE_DATA) {
152                 /* rec_beg >= leaf_end */
153                 if (rec->leaf.base.key - rec->leaf.data_len >= leaf->base.key)
154                         return(2);
155                 /* rec_end <= leaf_beg */
156                 if (rec->leaf.base.key <= leaf->base.key - leaf->data_len)
157                         return(-2);
158         } else {
159                 if (rec->leaf.base.key < leaf->base.key)
160                         return(-2);
161                 if (rec->leaf.base.key > leaf->base.key)
162                         return(2);
163         }
164
165         /*
166          * We have to return 0 at this point, even if DELETED_FE is set,
167          * because returning anything else will cause the scan to ignore
168          * one of the branches when we really want it to check both.
169          */
170         return(0);
171 }
172
173 /*
174  * RB_SCAN comparison code for hammer_mem_first().  The argument order
175  * is reversed so the comparison result has to be negated.  key_beg and
176  * key_end are both range-inclusive.
177  *
178  * Localized deletions are not cached in-memory.
179  */
180 static
181 int
182 hammer_rec_scan_cmp(hammer_record_t rec, void *data)
183 {
184         hammer_cursor_t cursor = data;
185         int r;
186
187         r = hammer_rec_cmp(&cursor->key_beg, rec);
188         if (r > 1)
189                 return(-1);
190         r = hammer_rec_cmp(&cursor->key_end, rec);
191         if (r < -1)
192                 return(1);
193         return(0);
194 }
195
196 /*
197  * This compare function is used when simply looking up key_beg.
198  */
199 static
200 int
201 hammer_rec_find_cmp(hammer_record_t rec, void *data)
202 {
203         hammer_cursor_t cursor = data;
204         int r;
205
206         r = hammer_rec_cmp(&cursor->key_beg, rec);
207         if (r > 1)
208                 return(-1);
209         if (r < -1)
210                 return(1);
211         return(0);
212 }
213
214 /*
215  * Locate blocks within the truncation range.  Partial blocks do not count.
216  */
217 static
218 int
219 hammer_rec_trunc_cmp(hammer_record_t rec, void *data)
220 {
221         struct rec_trunc_info *info = data;
222
223         if (rec->leaf.base.rec_type < info->rec_type)
224                 return(-1);
225         if (rec->leaf.base.rec_type > info->rec_type)
226                 return(1);
227
228         switch(rec->leaf.base.rec_type) {
229         case HAMMER_RECTYPE_DB:
230                 /*
231                  * DB record key is not beyond the truncation point, retain.
232                  */
233                 if (rec->leaf.base.key < info->trunc_off)
234                         return(-1);
235                 break;
236         case HAMMER_RECTYPE_DATA:
237                 /*
238                  * DATA record offset start is not beyond the truncation point,
239                  * retain.
240                  */
241                 if (rec->leaf.base.key - rec->leaf.data_len < info->trunc_off)
242                         return(-1);
243                 break;
244         default:
245                 panic("hammer_rec_trunc_cmp: unexpected record type");
246         }
247
248         /*
249          * The record start is >= the truncation point, return match,
250          * the record should be destroyed.
251          */
252         return(0);
253 }
254
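/*
 * Generate the RB-Tree support functions for the per-inode in-memory
 * record tree using the insertion comparison routine above.
 */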
255 RB_GENERATE(hammer_rec_rb_tree, hammer_record, rb_node, hammer_rec_rb_compare);
256
257 /*
258  * Allocate a record for the caller to finish filling in.  The record is
259  * returned referenced.
260  */
261 hammer_record_t
262 hammer_alloc_mem_record(hammer_inode_t ip, int data_len)
263 {
264         hammer_record_t record;
265         hammer_mount_t hmp;
266
267         hmp = ip->hmp;
268         ++hammer_count_records;
269         record = kmalloc(sizeof(*record), hmp->m_misc,
270                          M_WAITOK | M_ZERO | M_USE_RESERVE);
271         record->flush_state = HAMMER_FST_IDLE;
272         record->ip = ip;
273         record->leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
274         record->leaf.data_len = data_len;
275         hammer_ref(&record->lock);
276
277         if (data_len) {
278                 record->data = kmalloc(data_len, hmp->m_misc, M_WAITOK | M_ZERO);
279                 record->flags |= HAMMER_RECF_ALLOCDATA;
280                 ++hammer_count_record_datas;
281         }
282
283         return (record);
284 }
285
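/*
 * Wait for a record to leave the HAMMER_FST_FLUSH state.  The wakeup is
 * issued by hammer_flush_record_done() when the backend finishes with
 * the record.
 */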
286 void
287 hammer_wait_mem_record_ident(hammer_record_t record, const char *ident)
288 {
289         while (record->flush_state == HAMMER_FST_FLUSH) {
290                 record->flags |= HAMMER_RECF_WANTED;
291                 tsleep(record, 0, ident, 0);
292         }
293 }
294
295 /*
296  * Called from the backend, hammer_inode.c, after a record has been
297  * flushed to disk.  The record has been exclusively locked by the
298  * caller and interlocked with BE.
299  *
300  * We clean up the state, unlock, and release the record (the record
301  * was referenced by the fact that it was in the HAMMER_FST_FLUSH state).
302  */
303 void
304 hammer_flush_record_done(hammer_record_t record, int error)
305 {
306         hammer_inode_t target_ip;
307
308         KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
309         KKASSERT(record->flags & HAMMER_RECF_INTERLOCK_BE);
310
311         /*
312          * If an error occurred, the backend was unable to sync the
313          * record to its media.  Leave the record intact.
314          */
315         if (error) {
316                 hammer_critical_error(record->ip->hmp, record->ip, error,
317                                       "while flushing record");
318         }
319
320         --record->flush_group->refs;
321         record->flush_group = NULL;
322
323         /*
324          * Adjust the flush state and dependency based on success or
325          * failure.
326          */
327         if (record->flags & (HAMMER_RECF_DELETED_BE | HAMMER_RECF_COMMITTED)) {
328                 if ((target_ip = record->target_ip) != NULL) {
329                         TAILQ_REMOVE(&target_ip->target_list, record,
330                                      target_entry);
331                         record->target_ip = NULL;
332                         hammer_test_inode(target_ip);
333                 }
334                 record->flush_state = HAMMER_FST_IDLE;
335         } else {
336                 if (record->target_ip) {
337                         record->flush_state = HAMMER_FST_SETUP;
338                         hammer_test_inode(record->ip);
339                         hammer_test_inode(record->target_ip);
340                 } else {
341                         record->flush_state = HAMMER_FST_IDLE;
342                 }
343         }
344         record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
345
346         /*
347          * Cleanup
348          */
349         if (record->flags & HAMMER_RECF_WANTED) {
350                 record->flags &= ~HAMMER_RECF_WANTED;
351                 wakeup(record);
352         }
353         hammer_rel_mem_record(record);
354 }
355
356 /*
357  * Release a memory record.  Records marked for deletion are immediately
358  * removed from the RB-Tree but otherwise left intact until the last ref
359  * goes away.
360  */
361 void
362 hammer_rel_mem_record(struct hammer_record *record)
363 {
364         hammer_mount_t hmp;
365         hammer_reserve_t resv;
366         hammer_inode_t ip;
367         hammer_inode_t target_ip;
368         int diddrop;
369
370         hammer_rel(&record->lock);
371
372         if (hammer_norefs(&record->lock)) {
373                 /*
374                  * Upon release of the last reference wakeup any waiters.
375                  * The record structure may get destroyed so callers will
376                  * loop up and do a relookup.
377                  *
378                  * WARNING!  Record must be removed from RB-TREE before we
379                  * might possibly block.  hammer_test_inode() can block!
380                  */
381                 ip = record->ip;
382                 hmp = ip->hmp;
383
384                 /*
385                  * Upon release of the last reference a record marked deleted
386                  * by the front or backend, or committed by the backend,
387                  * is destroyed.
388                  */
389                 if (record->flags & (HAMMER_RECF_DELETED_FE |
390                                      HAMMER_RECF_DELETED_BE |
391                                      HAMMER_RECF_COMMITTED)) {
392                         KKASSERT(hammer_isactive(&ip->lock) > 0);
393                         KKASSERT(record->flush_state != HAMMER_FST_FLUSH);
394
395                         /*
396                          * target_ip may have zero refs, we have to ref it
397                          * to prevent it from being ripped out from under
398                          * us.
399                          */
400                         if ((target_ip = record->target_ip) != NULL) {
401                                 TAILQ_REMOVE(&target_ip->target_list,
402                                              record, target_entry);
403                                 record->target_ip = NULL;
404                                 hammer_ref(&target_ip->lock);
405                         }
406
407                         /*
408                          * Remove the record from the B-Tree
409                          */
410                         if (record->flags & HAMMER_RECF_ONRBTREE) {
411                                 RB_REMOVE(hammer_rec_rb_tree,
412                                           &record->ip->rec_tree,
413                                           record);
414                                 record->flags &= ~HAMMER_RECF_ONRBTREE;
415                                 KKASSERT(ip->rsv_recs > 0);
416                                 if (RB_EMPTY(&record->ip->rec_tree)) {
417                                         record->ip->flags &=
418                                                         ~HAMMER_INODE_XDIRTY;
419                                         record->ip->sync_flags &=
420                                                         ~HAMMER_INODE_XDIRTY;
421                                 }
422                                 diddrop = 1;
423                         } else {
424                                 diddrop = 0;
425                         }
426
427                         /*
428                          * We must wait for any direct-IO to complete before
429                          * we can destroy the record because the bio may
430                          * have a reference to it.
431                          */
432                         if (record->gflags &
433                            (HAMMER_RECG_DIRECT_IO | HAMMER_RECG_DIRECT_INVAL)) {
434                                 hammer_io_direct_wait(record);
435                         }
436
437                         /*
438                          * Account for the completion after the direct IO
439                          * has completed.
440                          */
441                         if (diddrop) {
442                                 --hmp->rsv_recs;
443                                 --ip->rsv_recs;
444                                 hmp->rsv_databytes -= record->leaf.data_len;
445
446                                 if (RB_EMPTY(&record->ip->rec_tree))
447                                         hammer_test_inode(record->ip);
448                                 if ((ip->flags & HAMMER_INODE_RECSW) &&
449                                     ip->rsv_recs <= hammer_limit_inode_recs/2) {
450                                         ip->flags &= ~HAMMER_INODE_RECSW;
451                                         wakeup(&ip->rsv_recs);
452                                 }
453                         }
454
455                         /*
456                          * Do this test after removing record from the B-Tree.
457                          */
458                         if (target_ip) {
459                                 hammer_test_inode(target_ip);
460                                 hammer_rel_inode(target_ip, 0);
461                         }
462
463                         if (record->flags & HAMMER_RECF_ALLOCDATA) {
464                                 --hammer_count_record_datas;
465                                 kfree(record->data, hmp->m_misc);
466                                 record->flags &= ~HAMMER_RECF_ALLOCDATA;
467                         }
468
469                         /*
470                          * Release the reservation.
471                          *
472                          * If the record was not committed we can theoretically
473                          * undo the reservation.  However, doing so might
474                          * create weird edge cases with the ordering of
475                          * direct writes because the related buffer cache
476                          * elements are per-vnode.  So we don't try.
477                          */
478                         if ((resv = record->resv) != NULL) {
479                                 /* XXX undo leaf.data_offset,leaf.data_len */
480                                 hammer_blockmap_reserve_complete(hmp, resv);
481                                 record->resv = NULL;
482                         }
483                         record->data = NULL;
484                         --hammer_count_records;
485                         kfree(record, hmp->m_misc);
486                 }
487         }
488 }
489
490 /*
491  * Record visibility depends on whether the record is being accessed by
492  * the backend or the frontend.  Backend tests ignore the frontend delete
493  * flag.  Frontend tests do NOT ignore the backend delete/commit flags and
494  * must also check for commit races.
495  *
496  * Return non-zero if the record is visible, zero if it isn't or if it is
497  * deleted.  Returns 0 if the record has been committed (unless the special
498  * delete-visibility flag is set).  A committed record must be located
499  * via the media B-Tree.  Returns non-zero if the record is good.
500  *
501  * If HAMMER_CURSOR_DELETE_VISIBILITY is set we allow deleted memory
502  * records to be returned.  This is so pending deletions are detected
503  * when using an iterator to locate an unused hash key, or when we need
504  * to locate historical records on-disk to destroy.
505  */
506 static __inline
507 int
508 hammer_ip_iterate_mem_good(hammer_cursor_t cursor, hammer_record_t record)
509 {
510         if (cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY)
511                 return(1);
512         if (cursor->flags & HAMMER_CURSOR_BACKEND) {
513                 if (record->flags & (HAMMER_RECF_DELETED_BE |
514                                      HAMMER_RECF_COMMITTED)) {
515                         return(0);
516                 }
517         } else {
518                 if (record->flags & (HAMMER_RECF_DELETED_FE |
519                                      HAMMER_RECF_DELETED_BE |
520                                      HAMMER_RECF_COMMITTED)) {
521                         return(0);
522                 }
523         }
524         return(1);
525 }
526
527 /*
528  * This callback is used as part of the RB_SCAN function for in-memory
529  * records.  We terminate it (return -1) as soon as we get a match.
530  *
531  * This routine is used by frontend code.
532  *
533  * The primary compare code does not account for ASOF lookups.  This
534  * code handles that case as well as a few others.
535  */
536 static
537 int
538 hammer_rec_scan_callback(hammer_record_t rec, void *data)
539 {
540         hammer_cursor_t cursor = data;
541
542         /*
543          * We terminate on success, so this should be NULL on entry.
544          */
545         KKASSERT(cursor->iprec == NULL);
546
547         /*
548          * Skip if the record was marked deleted or committed.
549          */
550         if (hammer_ip_iterate_mem_good(cursor, rec) == 0)
551                 return(0);
552
553         /*
554          * Skip if not visible due to our as-of TID
555          */
556         if (cursor->flags & HAMMER_CURSOR_ASOF) {
557                 if (cursor->asof < rec->leaf.base.create_tid)
558                         return(0);
559                 if (rec->leaf.base.delete_tid &&
560                     cursor->asof >= rec->leaf.base.delete_tid) {
561                         return(0);
562                 }
563         }
564
565         /*
566          * ref the record.  The record is protected from backend B-Tree
567          * interactions by virtue of the cursor's IP lock.
568          */
569         hammer_ref(&rec->lock);
570
571         /*
572          * The record may have been deleted or committed while we
573          * were blocked.  XXX remove?
574          */
575         if (hammer_ip_iterate_mem_good(cursor, rec) == 0) {
576                 hammer_rel_mem_record(rec);
577                 return(0);
578         }
579
580         /*
581          * Set the matching record and stop the scan.
582          */
583         cursor->iprec = rec;
584         return(-1);
585 }
586
587
588 /*
589  * Lookup an in-memory record given the key specified in the cursor.  Works
590  * just like hammer_btree_lookup() but operates on an inode's in-memory
591  * record list.
592  *
593  * The lookup must fail if the record is marked for deferred deletion.
594  *
595  * The API for mem/btree_lookup() does not mess with the ATE/EOF bits.
596  */
597 static
598 int
599 hammer_mem_lookup(hammer_cursor_t cursor)
600 {
601         KKASSERT(cursor->ip);
602         if (cursor->iprec) {
603                 hammer_rel_mem_record(cursor->iprec);
604                 cursor->iprec = NULL;
605         }
606         hammer_rec_rb_tree_RB_SCAN(&cursor->ip->rec_tree, hammer_rec_find_cmp,
607                                    hammer_rec_scan_callback, cursor);
608
609         return (cursor->iprec ? 0 : ENOENT);
610 }
611
612 /*
613  * hammer_mem_first() - locate the first in-memory record matching the
614  * cursor within the bounds of the key range.
615  *
616  * WARNING!  API is slightly different from btree_first().  hammer_mem_first()
617  * will set ATEMEM the same as MEMEOF, and does not return any error.
618  */
619 static
620 void
621 hammer_mem_first(hammer_cursor_t cursor)
622 {
623         hammer_inode_t ip;
624
625         ip = cursor->ip;
626         KKASSERT(ip != NULL);
627
628         if (cursor->iprec) {
629                 hammer_rel_mem_record(cursor->iprec);
630                 cursor->iprec = NULL;
631         }
632         hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_scan_cmp,
633                                    hammer_rec_scan_callback, cursor);
634
635         if (cursor->iprec)
636                 cursor->flags &= ~(HAMMER_CURSOR_MEMEOF | HAMMER_CURSOR_ATEMEM);
637         else
638                 cursor->flags |= HAMMER_CURSOR_MEMEOF | HAMMER_CURSOR_ATEMEM;
639 }
640
641 /************************************************************************
642  *                   HAMMER IN-MEMORY RECORD FUNCTIONS                  *
643  ************************************************************************
644  *
645  * These functions manipulate in-memory records.  Such records typically
646  * exist prior to being committed to disk or indexed via the on-disk B-Tree.
647  */
648
649 /*
650  * Add a directory entry (dip,ncp) which references inode (ip).
651  *
652  * Note that the low 32 bits of the namekey are set temporarily to create
653  * a unique in-memory record, and may be modified a second time when the
654  * record is synchronized to disk.  In particular, the low 32 bits cannot be
655  * all 0's when synching to disk, which is not handled here.
656  *
657  * NOTE: bytes does not include any terminating \0 on name, and name might
658  * not be terminated.
659  */
660 int
661 hammer_ip_add_directory(struct hammer_transaction *trans,
662                      struct hammer_inode *dip, const char *name, int bytes,
663                      struct hammer_inode *ip)
664 {
665         struct hammer_cursor cursor;
666         hammer_record_t record;
667         int error;
668         u_int32_t max_iterations;
669
670         KKASSERT(dip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY);
671
672         record = hammer_alloc_mem_record(dip, HAMMER_ENTRY_SIZE(bytes));
673
674         record->type = HAMMER_MEM_RECORD_ADD;
675         record->leaf.base.localization = dip->obj_localization +
676                                          hammer_dir_localization(dip);
677         record->leaf.base.obj_id = dip->obj_id;
678         record->leaf.base.key = hammer_directory_namekey(dip, name, bytes,
679                                                          &max_iterations);
680         record->leaf.base.rec_type = HAMMER_RECTYPE_DIRENTRY;
681         record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
682         record->data->entry.obj_id = ip->obj_id;
683         record->data->entry.localization = ip->obj_localization;
684         bcopy(name, record->data->entry.name, bytes);
685
686         ++ip->ino_data.nlinks;
687         ip->ino_data.ctime = trans->time;
688         hammer_modify_inode(trans, ip, HAMMER_INODE_DDIRTY);
689
690         /*
691          * Find an unused namekey.  Both the in-memory record tree and
692          * the B-Tree are checked.  We do not want historically deleted
693          * names to create a collision as our iteration space may be limited,
694          * and since create_tid wouldn't match anyway an ASOF search
695          * must be used to locate collisions.
696          *
697          * delete-visibility is set so pending deletions do not give us
698          * a false-negative on our ability to use an iterator.
699          *
700          * The iterator must not rollover the key.  Directory keys only
701          * use the positive key space.
702          */
703         hammer_init_cursor(trans, &cursor, &dip->cache[1], dip);
704         cursor.key_beg = record->leaf.base;
705         cursor.flags |= HAMMER_CURSOR_ASOF;
706         cursor.flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
707         cursor.asof = ip->obj_asof;
708
709         while (hammer_ip_lookup(&cursor) == 0) {
710                 ++record->leaf.base.key;
711                 KKASSERT(record->leaf.base.key > 0);
712                 cursor.key_beg.key = record->leaf.base.key;
713                 if (--max_iterations == 0) {
714                         hammer_rel_mem_record(record);
715                         error = ENOSPC;
716                         goto failed;
717                 }
718         }
719
720         /*
721          * The target inode and the directory entry are bound together.
722          */
723         record->target_ip = ip;
724         record->flush_state = HAMMER_FST_SETUP;
725         TAILQ_INSERT_TAIL(&ip->target_list, record, target_entry);
726
727         /*
728          * The inode now has a dependency and must be taken out of the idle
729          * state.  An inode not in an idle state is given an extra reference.
730          *
731          * When transitioning to a SETUP state, flag for an automatic reflush
732          * when the dependencies are disposed of if someone is waiting on
733          * the inode.
734          */
735         if (ip->flush_state == HAMMER_FST_IDLE) {
736                 hammer_ref(&ip->lock);
737                 ip->flush_state = HAMMER_FST_SETUP;
738                 if (ip->flags & HAMMER_INODE_FLUSHW)
739                         ip->flags |= HAMMER_INODE_REFLUSH;
740         }
741         error = hammer_mem_add(record);
742         if (error == 0) {
743                 dip->ino_data.mtime = trans->time;
744                 hammer_modify_inode(trans, dip, HAMMER_INODE_MTIME);
745         }
746 failed:
747         hammer_done_cursor(&cursor);
748         return(error);
749 }
750
751 /*
752  * Delete the directory entry and update the inode link count.  The
753  * cursor must be seeked to the directory entry record being deleted.
754  *
755  * The related inode should be share-locked by the caller.  The caller is
756  * on the frontend.  It could also be NULL indicating that the directory
757  * entry being removed has no related inode.
758  *
759  * This function can return EDEADLK requiring the caller to terminate
760  * the cursor, any locks, wait on the returned record, and retry.
761  */
762 int
763 hammer_ip_del_directory(struct hammer_transaction *trans,
764                      hammer_cursor_t cursor, struct hammer_inode *dip,
765                      struct hammer_inode *ip)
766 {
767         hammer_record_t record;
768         int error;
769
770         if (hammer_cursor_inmem(cursor)) {
771                 /*
772                  * In-memory (unsynchronized) records can simply be freed.
773                  *
774                  * Even though the HAMMER_RECF_DELETED_FE flag is ignored
775                  * by the backend, we must still avoid races against the
776                  * backend potentially syncing the record to the media.
777                  *
778                  * We cannot call hammer_ip_delete_record(), that routine may
779                  * only be called from the backend.
780                  */
781                 record = cursor->iprec;
782                 if (record->flags & (HAMMER_RECF_INTERLOCK_BE |
783                                      HAMMER_RECF_DELETED_BE |
784                                      HAMMER_RECF_COMMITTED)) {
785                         KKASSERT(cursor->deadlk_rec == NULL);
786                         hammer_ref(&record->lock);
787                         cursor->deadlk_rec = record;
788                         error = EDEADLK;
789                 } else {
790                         KKASSERT(record->type == HAMMER_MEM_RECORD_ADD);
791                         record->flags |= HAMMER_RECF_DELETED_FE;
792                         error = 0;
793                 }
794         } else {
795                 /*
796                  * If the record is on-disk we have to queue the deletion by
797                  * the record's key.  This also causes lookups to skip the
798                  * record (lookups for the purposes of finding an unused
799                  * directory key do not skip the record).
800                  */
801                 KKASSERT(dip->flags &
802                          (HAMMER_INODE_ONDISK | HAMMER_INODE_DONDISK));
803                 record = hammer_alloc_mem_record(dip, 0);
804                 record->type = HAMMER_MEM_RECORD_DEL;
805                 record->leaf.base = cursor->leaf->base;
806                 KKASSERT(dip->obj_id == record->leaf.base.obj_id);
807
808                 /*
809                  * ip may be NULL, indicating the deletion of a directory
810                  * entry which has no related inode.
811                  */
812                 record->target_ip = ip;
813                 if (ip) {
814                         record->flush_state = HAMMER_FST_SETUP;
815                         TAILQ_INSERT_TAIL(&ip->target_list, record,
816                                           target_entry);
817                 } else {
818                         record->flush_state = HAMMER_FST_IDLE;
819                 }
820
821                 /*
822                  * The inode now has a dependency and must be taken out of
823                  * the idle state.  An inode not in an idle state is given
824                  * an extra reference.
825                  *
826                  * When transitioning to a SETUP state, flag for an automatic
827                  * reflush when the dependencies are disposed of if someone
828                  * is waiting on the inode.
829                  */
830                 if (ip && ip->flush_state == HAMMER_FST_IDLE) {
831                         hammer_ref(&ip->lock);
832                         ip->flush_state = HAMMER_FST_SETUP;
833                         if (ip->flags & HAMMER_INODE_FLUSHW)
834                                 ip->flags |= HAMMER_INODE_REFLUSH;
835                 }
836
837                 error = hammer_mem_add(record);
838         }
839
840         /*
841          * One less link.  The file may still be open in the OS even after
842          * all links have gone away.
843          *
844          * We have to terminate the cursor before syncing the inode to
845          * avoid deadlocking against ourselves.  XXX this may no longer
846          * be true.
847          *
848          * If nlinks drops to zero and the vnode is inactive (or there is
849          * no vnode), call hammer_inode_unloadable_check() to zonk the
850          * inode.  If we don't do this here the inode will not be destroyed
851          * on-media until we unmount.
852          */
853         if (error == 0) {
854                 if (ip) {
855                         --ip->ino_data.nlinks;  /* do before we might block */
856                         ip->ino_data.ctime = trans->time;
857                 }
858                 dip->ino_data.mtime = trans->time;
859                 hammer_modify_inode(trans, dip, HAMMER_INODE_MTIME);
860                 if (ip) {
861                         hammer_modify_inode(trans, ip, HAMMER_INODE_DDIRTY);
862                         if (ip->ino_data.nlinks == 0 &&
863                             (ip->vp == NULL || (ip->vp->v_flag & VINACTIVE))) {
864                                 hammer_done_cursor(cursor);
865                                 hammer_inode_unloadable_check(ip, 1);
866                                 hammer_flush_inode(ip, 0);
867                         }
868                 }
869
870         }
871         return(error);
872 }
873
874 /*
875  * Add a record to an inode.
876  *
877  * The caller must allocate the record with hammer_alloc_mem_record(ip) and
878  * initialize the following additional fields:
879  *
880  * The related inode should be share-locked by the caller.  The caller is
881  * on the frontend.
882  *
883  * record->rec.entry.base.base.key
884  * record->rec.entry.base.base.rec_type
885  * record->rec.entry.base.base.data_len
886  * record->data         (a copy will be kmalloc'd if it cannot be embedded)
887  */
888 int
889 hammer_ip_add_record(struct hammer_transaction *trans, hammer_record_t record)
890 {
891         hammer_inode_t ip = record->ip;
892         int error;
893
894         KKASSERT(record->leaf.base.localization != 0);
895         record->leaf.base.obj_id = ip->obj_id;
896         record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
897         error = hammer_mem_add(record);
898         return(error);
899 }
900
901 /*
902  * Locate a pre-existing bulk record in memory.  The caller wishes to
903  * replace the record with a new one.  The existing record may have a
904  * different length (and thus a different key) so we have to use an
905  * overlap check function.
906  */
907 static hammer_record_t
908 hammer_ip_get_bulk(hammer_record_t record)
909 {
910         struct hammer_bulk_info info;
911         hammer_inode_t ip = record->ip;
912
913         info.record = record;
914         info.conflict = NULL;
915         hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_overlap_cmp,
916                                    hammer_bulk_scan_callback, &info);
917
918         return(info.conflict);  /* may be NULL */
919 }
920
921 /*
922  * Take records vetted by overlap_cmp.  The first non-deleted record
923  * (if any) stops the scan.
924  */
925 static int
926 hammer_bulk_scan_callback(hammer_record_t record, void *data)
927 {
928         struct hammer_bulk_info *info = data;
929
930         if (record->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
931                              HAMMER_RECF_COMMITTED)) {
932                 return(0);
933         }
934         hammer_ref(&record->lock);
935         info->conflict = record;
936         return(-1);                     /* stop scan */
937 }
938
939 /*
940  * Reserve blockmap space placemarked with an in-memory record.  
941  *
942  * This routine is called by the frontend in order to be able to directly
943  * flush a buffer cache buffer.  The frontend has locked the related buffer
944  * cache buffers and we should be able to manipulate any overlapping
945  * in-memory records.
946  *
947  * The caller is responsible for adding the returned record and deleting
948  * the returned conflicting record (if any), typically by calling
949  * hammer_ip_replace_bulk() (via hammer_io_direct_write()).
950  */
951 hammer_record_t
952 hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset, void *data, int bytes,
953                    int *errorp)
954 {
955         hammer_record_t record;
956         hammer_dedup_cache_t dcp;
957         hammer_crc_t crc;
958         int zone;
959
960         /*
961          * Create a record to cover the direct write.  The record cannot
962          * be added to the in-memory RB tree here as it might conflict
963          * with an existing memory record.  See hammer_io_direct_write().
964          *
965          * The backend is responsible for finalizing the space reserved in
966          * this record.
967          *
968          * XXX bytes not aligned, depend on the reservation code to
969          * align the reservation.
970          */
971         record = hammer_alloc_mem_record(ip, 0);
972         zone = (bytes >= HAMMER_BUFSIZE) ? HAMMER_ZONE_LARGE_DATA_INDEX :
973                                            HAMMER_ZONE_SMALL_DATA_INDEX;
974         if (bytes == 0)
975                 crc = 0;
976         else
977                 crc = crc32(data, bytes);
978
979         if (hammer_live_dedup == 0)
980                 goto nodedup;
981         if ((dcp = hammer_dedup_cache_lookup(ip->hmp, crc)) != NULL) {
982                 struct hammer_dedup_cache tmp = *dcp;
983
984                 record->resv = hammer_blockmap_reserve_dedup(ip->hmp, zone,
985                         bytes, tmp.data_offset, errorp);
986                 if (record->resv == NULL)
987                         goto nodedup;
988
989                 if (!hammer_dedup_validate(&tmp, zone, bytes, data)) {
990                         hammer_blockmap_reserve_complete(ip->hmp, record->resv);
991                         goto nodedup;
992                 }
993
994                 record->leaf.data_offset = tmp.data_offset;
995                 record->flags |= HAMMER_RECF_DEDUPED;
996         } else {
997 nodedup:
998                 record->resv = hammer_blockmap_reserve(ip->hmp, zone, bytes,
999                        &record->leaf.data_offset, errorp);
1000                 if (record->resv == NULL) {
1001                         kprintf("hammer_ip_add_bulk: reservation failed\n");
1002                         hammer_rel_mem_record(record);
1003                         return(NULL);
1004                 }
1005         }
1006
1007         record->type = HAMMER_MEM_RECORD_DATA;
1008         record->leaf.base.rec_type = HAMMER_RECTYPE_DATA;
1009         record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
1010         record->leaf.base.obj_id = ip->obj_id;
1011         record->leaf.base.key = file_offset + bytes;
1012         record->leaf.base.localization = ip->obj_localization +
1013                                          HAMMER_LOCALIZE_MISC;
1014         record->leaf.data_len = bytes;
1015         record->leaf.data_crc = crc;
1016         KKASSERT(*errorp == 0);
1017
1018         return(record);
1019 }
1020
1021 /*
1022  * Called by hammer_io_direct_write() prior to any possible completion
1023  * of the BIO to emplace the memory record associated with the I/O and
1024  * to replace any prior memory record which might still be active.
1025  *
1026  * Setting the FE deleted flag on the old record (if any) avoids any RB
1027  * tree insertion conflict, among other things.
1028  *
1029  * This has to be done prior to the caller completing any related buffer
1030  * cache I/O or a reinstantiation of the buffer may load data from the
1031  * old media location instead of the new media location.  The holding
1032  * of the locked buffer cache buffer serves to interlock the record
1033  * replacement operation.
1034  */
1035 void
1036 hammer_ip_replace_bulk(hammer_mount_t hmp, hammer_record_t record)
1037 {
1038         hammer_record_t conflict;
1039         int error __debugvar;
1040
1041         while ((conflict = hammer_ip_get_bulk(record)) != NULL) {
1042                 if ((conflict->flags & HAMMER_RECF_INTERLOCK_BE) == 0) {
1043                         conflict->flags |= HAMMER_RECF_DELETED_FE;
1044                         break;
1045                 }
1046                 conflict->flags |= HAMMER_RECF_WANTED;
1047                 tsleep(conflict, 0, "hmrrc3", 0);
1048                 hammer_rel_mem_record(conflict);
1049         }
1050         error = hammer_mem_add(record);
1051         if (conflict)
1052                 hammer_rel_mem_record(conflict);
1053         KKASSERT(error == 0);
1054 }
1055
1056 /*
1057  * Frontend truncation code.  Scan in-memory records only.  On-disk records
1058  * and records in a flushing state are handled by the backend.  The vnops
1059  * setattr code will handle the block containing the truncation point.
1060  *
1061  * Partial blocks are not deleted.
1062  *
1063  * This code is only called on regular files.
1064  */
1065 int
1066 hammer_ip_frontend_trunc(struct hammer_inode *ip, off_t file_size)
1067 {
1068         struct rec_trunc_info info;
1069
1070         switch(ip->ino_data.obj_type) {
1071         case HAMMER_OBJTYPE_REGFILE:
1072                 info.rec_type = HAMMER_RECTYPE_DATA;
1073                 break;
1074         case HAMMER_OBJTYPE_DBFILE:
1075                 info.rec_type = HAMMER_RECTYPE_DB;
1076                 break;
1077         default:
1078                 return(EINVAL);
1079         }
1080         info.trunc_off = file_size;
1081         hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_trunc_cmp,
1082                                    hammer_frontend_trunc_callback, &info);
1083         return(0);
1084 }
1085
1086 /*
1087  * Scan callback for frontend records to destroy during a truncation.
1088  * We must ensure that DELETED_FE is set on the record or the frontend
1089  * will get confused in future read() calls.
1090  *
1091  * NOTE: DELETED_FE cannot be set while the record interlock (BE) is held.
1092  *       In this rare case we must wait for the interlock to be cleared.
1093  *
1094  * NOTE: This function is only called on regular files.  There are further
1095  *       restrictions to the setting of DELETED_FE on directory records
1096  *       undergoing a flush due to sensitive inode link count calculations.
1097  */
1098 static int
1099 hammer_frontend_trunc_callback(hammer_record_t record, void *data __unused)
1100 {
1101         if (record->flags & HAMMER_RECF_DELETED_FE)
1102                 return(0);
1103 #if 0
1104         if (record->flush_state == HAMMER_FST_FLUSH)
1105                 return(0);
1106 #endif
1107         hammer_ref(&record->lock);
1108         while (record->flags & HAMMER_RECF_INTERLOCK_BE)
1109                 hammer_wait_mem_record_ident(record, "hmmtrr");
1110         record->flags |= HAMMER_RECF_DELETED_FE;
1111         hammer_rel_mem_record(record);
1112         return(0);
1113 }
1114
1115 /*
1116  * Return 1 if the caller must check for and delete existing records
1117  * before writing out a new data record.
1118  *
1119  * Return 0 if the caller can just insert the record into the B-Tree without
1120  * checking.
1121  */
1122 static int
1123 hammer_record_needs_overwrite_delete(hammer_record_t record)
1124 {
1125         hammer_inode_t ip = record->ip;
1126         int64_t file_offset;
1127         int r;
1128
1129         if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE)
1130                 file_offset = record->leaf.base.key;
1131         else
1132                 file_offset = record->leaf.base.key - record->leaf.data_len;
1133         r = (file_offset < ip->save_trunc_off);
1134         if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
1135                 if (ip->save_trunc_off <= record->leaf.base.key)
1136                         ip->save_trunc_off = record->leaf.base.key + 1;
1137         } else {
1138                 if (ip->save_trunc_off < record->leaf.base.key)
1139                         ip->save_trunc_off = record->leaf.base.key;
1140         }
1141         return(r);
1142 }
1143
1144 /*
1145  * Backend code.  Sync a record to the media.
1146  */
1147 int
1148 hammer_ip_sync_record_cursor(hammer_cursor_t cursor, hammer_record_t record)
1149 {
1150         hammer_transaction_t trans = cursor->trans;
1151         int64_t file_offset;
1152         int bytes;
1153         void *bdata;
1154         int error;
1155         int doprop;
1156
1157         KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
1158         KKASSERT(record->flags & HAMMER_RECF_INTERLOCK_BE);
1159         KKASSERT(record->leaf.base.localization != 0);
1160
1161         /*
1162          * Any direct-write related to the record must complete before we
1163          * can sync the record to the on-disk media.
1164          */
1165         if (record->gflags & (HAMMER_RECG_DIRECT_IO | HAMMER_RECG_DIRECT_INVAL))
1166                 hammer_io_direct_wait(record);
1167
1168         /*
1169          * If this is a bulk-data record placemarker there may be an existing
1170          * record on-disk, indicating a data overwrite.  If there is, the
1171          * on-disk record must be deleted before we can insert our new record.
1172          *
1173          * We've synthesized this record and do not know what the create_tid
1174          * on-disk is, nor how much data it represents.
1175          *
1176          * Keep in mind that (key) for data records is (base_offset + len),
1177          * not (base_offset).  Also, we only want to get rid of on-disk
1178          * records since we are trying to sync our in-memory record, call
1179          * records since we are trying to sync our in-memory record, so call
1180          * it skips in-memory records.
1181          *
1182          * It is ok for the lookup to return ENOENT.
1183          *
1184          * NOTE OPTIMIZATION: sync_trunc_off is used to determine if we have
1185          * to call hammer_ip_delete_range() or not.  This also means we must
1186          * update sync_trunc_off() as we write.
1187          */
1188         if (record->type == HAMMER_MEM_RECORD_DATA &&
1189             hammer_record_needs_overwrite_delete(record)) {
1190                 file_offset = record->leaf.base.key - record->leaf.data_len;
1191                 bytes = (record->leaf.data_len + HAMMER_BUFMASK) & 
1192                         ~HAMMER_BUFMASK;
1193                 KKASSERT((file_offset & HAMMER_BUFMASK) == 0);
1194                 error = hammer_ip_delete_range(
1195                                 cursor, record->ip,
1196                                 file_offset, file_offset + bytes - 1,
1197                                 1);
1198                 if (error && error != ENOENT)
1199                         goto done;
1200         }
1201
1202         /*
1203          * If this is a general record there may be an on-disk version
1204          * that must be deleted before we can insert the new record.
1205          */
1206         if (record->type == HAMMER_MEM_RECORD_GENERAL) {
1207                 error = hammer_delete_general(cursor, record->ip,
1208                                               &record->leaf);
1209                 if (error && error != ENOENT)
1210                         goto done;
1211         }
1212
1213         /*
1214          * Setup the cursor.
1215          */
1216         hammer_normalize_cursor(cursor);
1217         cursor->key_beg = record->leaf.base;
1218         cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1219         cursor->flags |= HAMMER_CURSOR_BACKEND;
1220         cursor->flags &= ~HAMMER_CURSOR_INSERT;
1221
1222         /*
1223          * Records can wind up on-media before the inode itself is on-media.
1224          * Flag the case.
1225          */
1226         record->ip->flags |= HAMMER_INODE_DONDISK;
1227
1228         /*
1229          * If we are deleting a directory entry an exact match must be
1230          * found on-disk.
1231          */
1232         if (record->type == HAMMER_MEM_RECORD_DEL) {
1233                 error = hammer_btree_lookup(cursor);
1234                 if (error == 0) {
1235                         KKASSERT(cursor->iprec == NULL);
1236                         error = hammer_ip_delete_record(cursor, record->ip,
1237                                                         trans->tid);
1238                         if (error == 0) {
1239                                 record->flags |= HAMMER_RECF_DELETED_BE |
1240                                                  HAMMER_RECF_COMMITTED;
1241                                 ++record->ip->rec_generation;
1242                         }
1243                 }
1244                 goto done;
1245         }
1246
1247         /*
1248          * We are inserting.
1249          *
1250          * Issue a lookup to position the cursor and locate the insertion
1251          * point.  The target key should not exist.  If we are creating a
1252          * directory entry we may have to iterate the low 32 bits of the
1253          * key to find an unused key.
1254          */
1255         hammer_sync_lock_sh(trans);
1256         cursor->flags |= HAMMER_CURSOR_INSERT;
1257         error = hammer_btree_lookup(cursor);
1258         if (hammer_debug_inode)
1259                 kprintf("DOINSERT LOOKUP %d\n", error);
1260         if (error == 0) {
1261                 kprintf("hammer_ip_sync_record: duplicate rec "
1262                         "at (%016llx)\n", (long long)record->leaf.base.key);
1263                 if (hammer_debug_critical)
1264                         Debugger("duplicate record1");
1265                 error = EIO;
1266         }
1267 #if 0
1268         if (record->type == HAMMER_MEM_RECORD_DATA)
1269                 kprintf("sync_record  %016llx ---------------- %016llx %d\n",
1270                         record->leaf.base.key - record->leaf.data_len,
1271                         record->leaf.data_offset, error);
1272 #endif
1273
1274         if (error != ENOENT)
1275                 goto done_unlock;
1276
1277         /*
1278          * Allocate the record and data.  The result buffers will be
1279          * marked as being modified and further calls to
1280          * hammer_modify_buffer() will result in unneeded UNDO records.
1281          *
1282          * Support zero-fill records (data == NULL and data_len != 0)
1283          */
1284         if (record->type == HAMMER_MEM_RECORD_DATA) {
1285                 /*
1286                  * The data portion of a bulk-data record has already been
1287                  * committed to disk, we need only adjust the layer2
1288                  * statistics in the same transaction as our B-Tree insert.
1289                  */
1290                 KKASSERT(record->leaf.data_offset != 0);
1291                 error = hammer_blockmap_finalize(trans,
1292                                                  record->resv,
1293                                                  record->leaf.data_offset,
1294                                                  record->leaf.data_len);
1295
1296                 if (hammer_live_dedup == 2 &&
1297                     (record->flags & HAMMER_RECF_DEDUPED) == 0) {
1298                         hammer_dedup_cache_add(record->ip, &record->leaf);
1299                 }
1300         } else if (record->data && record->leaf.data_len) {
1301                 /*
1302                  * Wholly cached record, with data.  Allocate the data.
1303                  */
1304                 bdata = hammer_alloc_data(trans, record->leaf.data_len,
1305                                           record->leaf.base.rec_type,
1306                                           &record->leaf.data_offset,
1307                                           &cursor->data_buffer,
1308                                           0, &error);
1309                 if (bdata == NULL)
1310                         goto done_unlock;
1311                 hammer_crc_set_leaf(record->data, &record->leaf);
1312                 hammer_modify_buffer(trans, cursor->data_buffer, NULL, 0);
1313                 bcopy(record->data, bdata, record->leaf.data_len);
1314                 hammer_modify_buffer_done(cursor->data_buffer);
1315         } else {
1316                 /*
1317                  * Wholly cached record, without data.
1318                  */
1319                 record->leaf.data_offset = 0;
1320                 record->leaf.data_crc = 0;
1321         }
1322
1323         error = hammer_btree_insert(cursor, &record->leaf, &doprop);
1324         if (hammer_debug_inode && error) {
1325                 kprintf("BTREE INSERT error %d @ %016llx:%d key %016llx\n",
1326                         error,
1327                         (long long)cursor->node->node_offset,
1328                         cursor->index,
1329                         (long long)record->leaf.base.key);
1330         }
1331
1332         /*
1333          * Our record is on-disk and we normally mark the in-memory version
1334          * as having been committed (and not BE-deleted).
1335          *
1336          * If the record represented a directory deletion but we had to
1337          * sync a valid directory entry to disk due to dependencies,
1338          * we must convert the record to a covering delete so the
1339          * frontend does not have visibility on the synced entry.
1340          *
1341          * WARNING: cursor's leaf pointer may have changed after do_propagation
1342          *          returns!
1343          */
1344         if (error == 0) {
1345                 if (doprop) {
1346                         hammer_btree_do_propagation(cursor,
1347                                                     record->ip->pfsm,
1348                                                     &record->leaf);
1349                 }
1350                 if (record->flags & HAMMER_RECF_CONVERT_DELETE) {
1351                         /*
1352                          * Must convert deleted directory entry add
1353                          * to a directory entry delete.
1354                          */
1355                         KKASSERT(record->type == HAMMER_MEM_RECORD_ADD);
1356                         record->flags &= ~HAMMER_RECF_DELETED_FE;
1357                         record->type = HAMMER_MEM_RECORD_DEL;
1358                         KKASSERT(record->ip->obj_id == record->leaf.base.obj_id);
1359                         KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
1360                         record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
1361                         KKASSERT((record->flags & (HAMMER_RECF_COMMITTED |
1362                                                  HAMMER_RECF_DELETED_BE)) == 0);
1363                         /* converted record is not yet committed */
1364                         /* hammer_flush_record_done takes care of the rest */
1365                 } else {
1366                         /*
1367                          * Everything went fine and we are now done with
1368                          * this record.
1369                          */
1370                         record->flags |= HAMMER_RECF_COMMITTED;
1371                         ++record->ip->rec_generation;
1372                 }
1373         } else {
1374                 if (record->leaf.data_offset) {
1375                         hammer_blockmap_free(trans, record->leaf.data_offset,
1376                                              record->leaf.data_len);
1377                 }
1378         }
1379 done_unlock:
1380         hammer_sync_unlock(trans);
1381 done:
1382         return(error);
1383 }
1384
1385 /*
1386  * Add the record to the inode's rec_tree.  The low 32 bits of a directory
1387  * entry's key are used to deal with hash collisions in the upper 32 bits.
1388  * A unique 64 bit key is generated in-memory and may be regenerated a
1389  * second time when the directory record is flushed to the on-disk B-Tree.
1390  *
1391  * A referenced record is passed to this function.  This function
1392  * eats the reference.  If an error occurs the record will be deleted.
1393  *
1394  * A copy of the temporary record->data pointer provided by the caller
1395  * will be made.
1396  */
1397 int
1398 hammer_mem_add(hammer_record_t record)
1399 {
1400         hammer_mount_t hmp = record->ip->hmp;
1401
1402         /*
1403          * Make a private copy of record->data
1404          */
1405         if (record->data)
1406                 KKASSERT(record->flags & HAMMER_RECF_ALLOCDATA);
1407
1408         /*
1409          * Insert into the RB tree.  A unique key should have already
1410          * been selected if this is a directory entry.
1411          */
1412         if (RB_INSERT(hammer_rec_rb_tree, &record->ip->rec_tree, record)) {
1413                 record->flags |= HAMMER_RECF_DELETED_FE;
1414                 hammer_rel_mem_record(record);
1415                 return (EEXIST);
1416         }
1417         ++hmp->count_newrecords;
1418         ++hmp->rsv_recs;
1419         ++record->ip->rsv_recs;
1420         record->ip->hmp->rsv_databytes += record->leaf.data_len;
1421         record->flags |= HAMMER_RECF_ONRBTREE;
1422         hammer_modify_inode(NULL, record->ip, HAMMER_INODE_XDIRTY);
1423         hammer_rel_mem_record(record);
1424         return(0);
1425 }
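
/*
 * Illustrative sketch (not part of the original code): a typical frontend
 * calling pattern for hammer_mem_add().  The construction helper named
 * below is an assumption for illustration only; the important points are
 * that the passed-in reference is always consumed and that EEXIST signals
 * an in-memory key collision.
 *
 *	record = hammer_alloc_mem_record(ip, data_len);	(assumed helper)
 *	... fill in record->leaf.base and record->data ...
 *	error = hammer_mem_add(record);			(reference is eaten)
 *	if (error == EEXIST)
 *		... regenerate the in-memory key and retry with a new record ...
 */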
1426
1427 /************************************************************************
1428  *                   HAMMER INODE MERGED-RECORD FUNCTIONS               *
1429  ************************************************************************
1430  *
1431  * These functions augment the B-Tree scanning functions in hammer_btree.c
1432  * by merging in-memory records with on-disk records.
1433  */
1434
1435 /*
1436  * Locate a particular record either in-memory or on-disk.
1437  *
1438  * NOTE: This is basically a standalone routine; hammer_ip_next() may
1439  * NOT be called to iterate results.
1440  */
1441 int
1442 hammer_ip_lookup(hammer_cursor_t cursor)
1443 {
1444         int error;
1445
1446         /*
1447          * If the element is in memory, return it without searching the
1448          * on-disk B-Tree.
1449          */
1450         KKASSERT(cursor->ip);
1451         error = hammer_mem_lookup(cursor);
1452         if (error == 0) {
1453                 cursor->leaf = &cursor->iprec->leaf;
1454                 return(error);
1455         }
1456         if (error != ENOENT)
1457                 return(error);
1458
1459         /*
1460          * If the inode has on-disk components search the on-disk B-Tree.
1461          */
1462         if ((cursor->ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) == 0)
1463                 return(error);
1464         error = hammer_btree_lookup(cursor);
1465         if (error == 0)
1466                 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
1467         return(error);
1468 }
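
/*
 * Illustrative sketch (not part of the original code): a point lookup of
 * a single record on an inode, modeled on the cursor setup used by the
 * callers later in this file.  rec_type and key are placeholders.
 *
 *	struct hammer_cursor cursor;
 *	int error;
 *
 *	hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
 *	cursor.key_beg.localization = ip->obj_localization +
 *				      HAMMER_LOCALIZE_MISC;
 *	cursor.key_beg.obj_id = ip->obj_id;
 *	cursor.key_beg.create_tid = 0;
 *	cursor.key_beg.delete_tid = 0;
 *	cursor.key_beg.obj_type = 0;
 *	cursor.key_beg.rec_type = rec_type;
 *	cursor.key_beg.key = key;
 *	cursor.asof = ip->obj_asof;
 *	cursor.flags |= HAMMER_CURSOR_ASOF;
 *
 *	error = hammer_ip_lookup(&cursor);
 *	if (error == 0)
 *		error = hammer_ip_resolve_data(&cursor);
 *	hammer_done_cursor(&cursor);
 */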
1469
1470 /*
1471  * Helper for hammer_ip_first()/hammer_ip_next()
1472  *
1473  * NOTE: Both ATEDISK and DISKEOF will be set the same.  This sets up
1474  * hammer_ip_first() for calling hammer_ip_next(), and sets up the re-seek
1475  * state if hammer_ip_next() needs to re-seek.
1476  */
1477 static __inline
1478 int
1479 _hammer_ip_seek_btree(hammer_cursor_t cursor)
1480 {
1481         hammer_inode_t ip = cursor->ip;
1482         int error;
1483
1484         if (ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) {
1485                 error = hammer_btree_lookup(cursor);
1486                 if (error == ENOENT || error == EDEADLK) {
1487                         if (hammer_debug_general & 0x2000) {
1488                                 kprintf("error %d node %p %016llx index %d\n",
1489                                         error, cursor->node,
1490                                         (long long)cursor->node->node_offset,
1491                                         cursor->index);
1492                         }
1493                         cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
1494                         error = hammer_btree_iterate(cursor);
1495                 }
1496                 if (error == 0) {
1497                         cursor->flags &= ~(HAMMER_CURSOR_DISKEOF |
1498                                            HAMMER_CURSOR_ATEDISK);
1499                 } else {
1500                         cursor->flags |= HAMMER_CURSOR_DISKEOF |
1501                                          HAMMER_CURSOR_ATEDISK;
1502                         if (error == ENOENT)
1503                                 error = 0;
1504                 }
1505         } else {
1506                 cursor->flags |= HAMMER_CURSOR_DISKEOF | HAMMER_CURSOR_ATEDISK;
1507                 error = 0;
1508         }
1509         return(error);
1510 }
1511
1512 /*
1513  * Helper for hammer_ip_next()
1514  *
1515  * The caller has determined that the media cursor is further along than the
1516  * memory cursor and must be reseeked after a generation number change.
1517  */
1518 static
1519 int
1520 _hammer_ip_reseek(hammer_cursor_t cursor)
1521 {
1522         struct hammer_base_elm save;
1523         hammer_btree_elm_t elm;
1524         int error __debugvar;
1525         int r;
1526         int again = 0;
1527
1528         /*
1529          * Do the re-seek.
1530          */
1531         kprintf("HAMMER: Debug: re-seeked during scan @ino=%016llx\n",
1532                 (long long)cursor->ip->obj_id);
1533         save = cursor->key_beg;
1534         cursor->key_beg = cursor->iprec->leaf.base;
1535         error = _hammer_ip_seek_btree(cursor);
1536         KKASSERT(error == 0);
1537         cursor->key_beg = save;
1538
1539         /*
1540          * If the memory record was previously returned to
1541          * the caller and the media record matches
1542          * (-1/+1: only create_tid differs), then iterate
1543          * the media record to avoid a double result.
1544          */
1545         if ((cursor->flags & HAMMER_CURSOR_ATEDISK) == 0 &&
1546             (cursor->flags & HAMMER_CURSOR_LASTWASMEM)) {
1547                 elm = &cursor->node->ondisk->elms[cursor->index];
1548                 r = hammer_btree_cmp(&elm->base,
1549                                      &cursor->iprec->leaf.base);
1550                 if (cursor->flags & HAMMER_CURSOR_ASOF) {
1551                         if (r >= -1 && r <= 1) {
1552                                 kprintf("HAMMER: Debug: iterated after "
1553                                         "re-seek (asof r=%d)\n", r);
1554                                 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1555                                 again = 1;
1556                         }
1557                 } else {
1558                         if (r == 0) {
1559                                 kprintf("HAMMER: Debug: iterated after "
1560                                         "re-seek\n");
1561                                 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1562                                 again = 1;
1563                         }
1564                 }
1565         }
1566         return(again);
1567 }
1568
1569 /*
1570  * Locate the first record within the cursor's key_beg/key_end range,
1571  * restricted to a particular inode.  0 is returned on success, ENOENT
1572  * if no records matched the requested range, or some other error.
1573  *
1574  * When 0 is returned hammer_ip_next() may be used to iterate additional
1575  * records within the requested range.
1576  *
1577  * This function can return EDEADLK, requiring the caller to terminate
1578  * the cursor and try again.
1579  */
1580
1581 int
1582 hammer_ip_first(hammer_cursor_t cursor)
1583 {
1584         hammer_inode_t ip __debugvar = cursor->ip;
1585         int error;
1586
1587         KKASSERT(ip != NULL);
1588
1589         /*
1590          * Clean up fields and setup for merged scan
1591          */
1592         cursor->flags &= ~HAMMER_CURSOR_RETEST;
1593
1594         /*
1595          * Search the in-memory record list (Red-Black tree).  Unlike the
1596          * B-Tree search, mem_first checks for records in the range.
1597          *
1598          * This function will setup both ATEMEM and MEMEOF properly for
1599          * the ip iteration.  ATEMEM will be set if MEMEOF is set.
1600          */
1601         hammer_mem_first(cursor);
1602
1603         /*
1604          * Detect generation changes during blockages, including
1605          * blockages which occur on the initial btree search.
1606          */
1607         cursor->rec_generation = cursor->ip->rec_generation;
1608
1609         /*
1610          * Initial search and result
1611          */
1612         error = _hammer_ip_seek_btree(cursor);
1613         if (error == 0)
1614                 error = hammer_ip_next(cursor);
1615
1616         return (error);
1617 }
1618
1619 /*
1620  * Retrieve the next record in a merged iteration within the bounds of the
1621  * cursor.  This call may be made multiple times after the cursor has been
1622  * initially searched with hammer_ip_first().
1623  *
1624  * There are numerous special cases in this code to deal with races between
1625  * in-memory records and on-media records.
1626  *
1627  * 0 is returned on success, ENOENT if no further records match the
1628  * requested range, or some other error code is returned.
1629  */
1630 int
1631 hammer_ip_next(hammer_cursor_t cursor)
1632 {
1633         hammer_btree_elm_t elm;
1634         hammer_record_t rec;
1635         hammer_record_t tmprec;
1636         int error;
1637         int r;
1638
1639 again:
1640         /*
1641          * Get the next on-disk record
1642          *
1643          * NOTE: If we deleted the last on-disk record we had scanned
1644          *       ATEDISK will be clear and RETEST will be set, forcing
1645          *       a call to iterate.  The fact that ATEDISK is clear causes
1646          *       iterate to re-test the 'current' element.  If ATEDISK is
1647          *       set, iterate will skip the 'current' element.
1648          */
1649         error = 0;
1650         if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
1651                 if (cursor->flags & (HAMMER_CURSOR_ATEDISK |
1652                                      HAMMER_CURSOR_RETEST)) {
1653                         error = hammer_btree_iterate(cursor);
1654                         cursor->flags &= ~HAMMER_CURSOR_RETEST;
1655                         if (error == 0) {
1656                                 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
1657                                 hammer_cache_node(&cursor->ip->cache[1],
1658                                                   cursor->node);
1659                         } else if (error == ENOENT) {
1660                                 cursor->flags |= HAMMER_CURSOR_DISKEOF |
1661                                                  HAMMER_CURSOR_ATEDISK;
1662                                 error = 0;
1663                         }
1664                 }
1665         }
1666
1667         /*
1668          * If the generation changed the backend has deleted or committed
1669          * one or more memory records since our last check.
1670          *
1671          * When this case occurs, if the disk cursor is > the current memory
1672          * record or the disk cursor is at EOF, we must re-seek the disk
1673          * cursor.  Since the disk cursor is ahead it cannot yet have been
1674          * eaten (if it is not at EOF anyway). (XXX data offset case?)
1675          *
1676          * NOTE: we are not doing a full check here.  That will be handled
1677          * later on.
1678          *
1679          * If we have exhausted all memory records we do not have to do any
1680          * further seeks.
1681          */
1682         while (cursor->rec_generation != cursor->ip->rec_generation &&
1683                error == 0
1684         ) {
1685                 kprintf("HAMMER: Debug: generation changed during scan @ino=%016llx\n", (long long)cursor->ip->obj_id);
1686                 cursor->rec_generation = cursor->ip->rec_generation;
1687                 if (cursor->flags & HAMMER_CURSOR_MEMEOF)
1688                         break;
1689                 if (cursor->flags & HAMMER_CURSOR_DISKEOF) {
1690                         r = 1;
1691                 } else {
1692                         KKASSERT((cursor->flags & HAMMER_CURSOR_ATEDISK) == 0);
1693                         elm = &cursor->node->ondisk->elms[cursor->index];
1694                         r = hammer_btree_cmp(&elm->base,
1695                                              &cursor->iprec->leaf.base);
1696                 }
1697
1698                 /*
1699                  * Do we re-seek the media cursor?
1700                  */
1701                 if (r > 0) {
1702                         if (_hammer_ip_reseek(cursor))
1703                                 goto again;
1704                 }
1705         }
1706
1707         /*
1708          * We can now safely get the next in-memory record.  We cannot
1709          * block here.
1710          *
1711          * hammer_rec_scan_cmp:  Is the record still in our general range,
1712          *                       (non-inclusive of snapshot exclusions)?
1713          * hammer_rec_scan_callback: Is the record in our snapshot?
1714          */
1715         tmprec = NULL;
1716         if ((cursor->flags & HAMMER_CURSOR_MEMEOF) == 0) {
1717                 /*
1718                  * If the current memory record was eaten then get the next
1719                  * one.  Stale records are skipped.
1720                  */
1721                 if (cursor->flags & HAMMER_CURSOR_ATEMEM) {
1722                         tmprec = cursor->iprec;
1723                         cursor->iprec = NULL;
1724                         rec = hammer_rec_rb_tree_RB_NEXT(tmprec);
1725                         while (rec) {
1726                                 if (hammer_rec_scan_cmp(rec, cursor) != 0)
1727                                         break;
1728                                 if (hammer_rec_scan_callback(rec, cursor) != 0)
1729                                         break;
1730                                 rec = hammer_rec_rb_tree_RB_NEXT(rec);
1731                         }
1732                         if (cursor->iprec) {
1733                                 KKASSERT(cursor->iprec == rec);
1734                                 cursor->flags &= ~HAMMER_CURSOR_ATEMEM;
1735                         } else {
1736                                 cursor->flags |= HAMMER_CURSOR_MEMEOF;
1737                         }
1738                         cursor->flags &= ~HAMMER_CURSOR_LASTWASMEM;
1739                 }
1740         }
1741
1742         /*
1743          * MEMORY RECORD VALIDITY TEST
1744          *
1745          * (We still can't block, which is why tmprec is being held so
1746          * long).
1747          *
1748          * If the memory record is no longer valid we skip it.  It may
1749          * have been deleted by the frontend.  If it was deleted or
1750          * committed by the backend the generation change re-seeked the
1751          * disk cursor and the record will be present there.
1752          */
1753         if (error == 0 && (cursor->flags & HAMMER_CURSOR_MEMEOF) == 0) {
1754                 KKASSERT(cursor->iprec);
1755                 KKASSERT((cursor->flags & HAMMER_CURSOR_ATEMEM) == 0);
1756                 if (!hammer_ip_iterate_mem_good(cursor, cursor->iprec)) {
1757                         cursor->flags |= HAMMER_CURSOR_ATEMEM;
1758                         if (tmprec)
1759                                 hammer_rel_mem_record(tmprec);
1760                         goto again;
1761                 }
1762         }
1763         if (tmprec)
1764                 hammer_rel_mem_record(tmprec);
1765
1766         /*
1767          * Extract either the disk or memory record depending on their
1768          * relative position.
1769          */
1770         error = 0;
1771         switch(cursor->flags & (HAMMER_CURSOR_ATEDISK | HAMMER_CURSOR_ATEMEM)) {
1772         case 0:
1773                 /*
1774                  * Both entries valid.   Compare the entries and nominally
1775                  * return the first one in the sort order.  Numerous cases
1776                  * require special attention, however.
1777                  */
1778                 elm = &cursor->node->ondisk->elms[cursor->index];
1779                 r = hammer_btree_cmp(&elm->base, &cursor->iprec->leaf.base);
1780
1781                 /*
1782                  * If the two entries differ only by their key (-2/2) or
1783                  * create_tid (-1/1), and are DATA records, we may have a
1784                  * nominal match.  We have to calculate the base file
1785                  * offset of the data.
1786                  */
1787                 if (r <= 2 && r >= -2 && r != 0 &&
1788                     cursor->ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE &&
1789                     cursor->iprec->type == HAMMER_MEM_RECORD_DATA) {
1790                         int64_t base1 = elm->leaf.base.key - elm->leaf.data_len;
1791                         int64_t base2 = cursor->iprec->leaf.base.key -
1792                                         cursor->iprec->leaf.data_len;
1793                         if (base1 == base2)
1794                                 r = 0;
1795                 }
1796
1797                 if (r < 0) {
1798                         error = hammer_btree_extract(cursor,
1799                                                      HAMMER_CURSOR_GET_LEAF);
1800                         cursor->flags |= HAMMER_CURSOR_ATEDISK;
1801                         cursor->flags &= ~HAMMER_CURSOR_LASTWASMEM;
1802                         break;
1803                 }
1804
1805                 /*
1806                  * If the entries match exactly the memory entry is either
1807                  * an on-disk directory entry deletion or a bulk data
1808                  * overwrite.  If it is a directory entry deletion we eat
1809                  * both entries.
1810                  *
1811                  * For the bulk-data overwrite case it is possible to have
1812                  * visibility into both, which simply means the syncer
1813                  * hasn't gotten around to doing the delete+insert sequence
1814                  * on the B-Tree.  Use the memory entry and throw away the
1815                  * on-disk entry.
1816                  *
1817                  * If the in-memory record is not either of these we
1818                  * probably caught the syncer while it was syncing it to
1819                  * the media.  Since we hold a shared lock on the cursor,
1820                  * the in-memory record had better be marked deleted at
1821                  * this point.
1822                  */
1823                 if (r == 0) {
1824                         if (cursor->iprec->type == HAMMER_MEM_RECORD_DEL) {
1825                                 if ((cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
1826                                         cursor->flags |= HAMMER_CURSOR_ATEDISK;
1827                                         cursor->flags |= HAMMER_CURSOR_ATEMEM;
1828                                         goto again;
1829                                 }
1830                         } else if (cursor->iprec->type == HAMMER_MEM_RECORD_DATA) {
1831                                 if ((cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
1832                                         cursor->flags |= HAMMER_CURSOR_ATEDISK;
1833                                 }
1834                                 /* fall through to memory entry */
1835                         } else {
1836                                 panic("hammer_ip_next: duplicate mem/b-tree entry %p %d %08x", cursor->iprec, cursor->iprec->type, cursor->iprec->flags);
1837                                 cursor->flags |= HAMMER_CURSOR_ATEMEM;
1838                                 goto again;
1839                         }
1840                 }
1841                 /* fall through to the memory entry */
1842         case HAMMER_CURSOR_ATEDISK:
1843                 /*
1844                  * Only the memory entry is valid.
1845                  */
1846                 cursor->leaf = &cursor->iprec->leaf;
1847                 cursor->flags |= HAMMER_CURSOR_ATEMEM;
1848                 cursor->flags |= HAMMER_CURSOR_LASTWASMEM;
1849
1850                 /*
1851                  * If the memory entry is an on-disk deletion we should have
1852                  * also have found a B-Tree record.  If the backend beat us
1853                  * to it, it would have interlocked the cursor and we should
1854                  * have seen the in-memory record marked DELETED_FE.
1855                  */
1856                 if (cursor->iprec->type == HAMMER_MEM_RECORD_DEL &&
1857                     (cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
1858                         panic("hammer_ip_next: del-on-disk with no b-tree entry iprec %p flags %08x", cursor->iprec, cursor->iprec->flags);
1859                 }
1860                 break;
1861         case HAMMER_CURSOR_ATEMEM:
1862                 /*
1863                  * Only the disk entry is valid
1864                  */
1865                 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
1866                 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1867                 cursor->flags &= ~HAMMER_CURSOR_LASTWASMEM;
1868                 break;
1869         default:
1870                 /*
1871                  * Neither entry is valid
1872                  *
1873                  * XXX error not set properly
1874                  */
1875                 cursor->flags &= ~HAMMER_CURSOR_LASTWASMEM;
1876                 cursor->leaf = NULL;
1877                 error = ENOENT;
1878                 break;
1879         }
1880         return(error);
1881 }
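
/*
 * Illustrative sketch (not part of the original code): the canonical
 * merged-iteration loop built on hammer_ip_first()/hammer_ip_next(),
 * modeled on the callers later in this file.  Cursor and key range setup
 * is elided; EDEADLK requires terminating the cursor and retrying.
 *
 *	error = hammer_ip_first(&cursor);
 *	while (error == 0) {
 *		... inspect cursor.leaf, call hammer_ip_resolve_data(&cursor)
 *		    if the record's data is needed ...
 *		error = hammer_ip_next(&cursor);
 *	}
 *	if (error == ENOENT)
 *		error = 0;
 *	else if (error == EDEADLK) {
 *		hammer_done_cursor(&cursor);
 *		... re-initialize the cursor and restart the scan ...
 *	}
 */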
1882
1883 /*
1884  * Resolve the cursor->data pointer for the current cursor position in
1885  * a merged iteration.
1886  */
1887 int
1888 hammer_ip_resolve_data(hammer_cursor_t cursor)
1889 {
1890         hammer_record_t record;
1891         int error;
1892
1893         if (hammer_cursor_inmem(cursor)) {
1894                 /*
1895                  * The data associated with an in-memory record is usually
1896                  * kmalloced, but reserve-ahead data records will have an
1897                  * on-disk reference.
1898                  *
1899                  * NOTE: Reserve-ahead data records must be handled in the
1900                  * context of the related high level buffer cache buffer
1901                  * to interlock against async writes.
1902                  */
1903                 record = cursor->iprec;
1904                 cursor->data = record->data;
1905                 error = 0;
1906                 if (cursor->data == NULL) {
1907                         KKASSERT(record->leaf.base.rec_type ==
1908                                  HAMMER_RECTYPE_DATA);
1909                         cursor->data = hammer_bread_ext(cursor->trans->hmp,
1910                                                     record->leaf.data_offset,
1911                                                     record->leaf.data_len,
1912                                                     &error,
1913                                                     &cursor->data_buffer);
1914                 }
1915         } else {
1916                 cursor->leaf = &cursor->node->ondisk->elms[cursor->index].leaf;
1917                 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA);
1918         }
1919         return(error);
1920 }
1921
1922 /*
1923  * Backend truncation / record replacement - delete records in range.
1924  *
1925  * Delete all records within the specified range for inode ip.  In-memory
1926  * records still associated with the frontend are ignored. 
1927  *
1928  * If truncating is non-zero in-memory records associated with the back-end
1929  * are ignored.  If truncating is > 1 we can return EWOULDBLOCK.
1930  *
1931  * NOTES:
1932  *
1933  *      * An unaligned range will cause new records to be added to cover
1934  *        the edge cases. (XXX not implemented yet).
1935  *
1936  *      * Replacement via reservations (see hammer_ip_sync_record_cursor())
1937  *        also does not deal with unaligned ranges.
1938  *
1939  *      * ran_end is inclusive (e.g. 0,1023 instead of 0,1024).
1940  *
1941  *      * Record keys for regular file data have to be special-cased since
1942  *        they indicate the end of the range (key = base + bytes).
1943  *
1944  *      * This function may be asked to delete ridiculously huge ranges, for
1945  *        example if someone truncates or removes a 1TB regular file.  We
1946  *        must be very careful on restarts and we may have to stop w/
1947  *        EWOULDBLOCK to avoid blowing out the buffer cache.
1948  */
1949 int
1950 hammer_ip_delete_range(hammer_cursor_t cursor, hammer_inode_t ip,
1951                        int64_t ran_beg, int64_t ran_end, int truncating)
1952 {
1953         hammer_transaction_t trans = cursor->trans;
1954         hammer_btree_leaf_elm_t leaf;
1955         int error;
1956         int64_t off;
1957         int64_t tmp64;
1958
1959 #if 0
1960         kprintf("delete_range %p %016llx-%016llx\n", ip, ran_beg, ran_end);
1961 #endif
1962
1963         KKASSERT(trans->type == HAMMER_TRANS_FLS);
1964 retry:
1965         hammer_normalize_cursor(cursor);
1966         cursor->key_beg.localization = ip->obj_localization +
1967                                        HAMMER_LOCALIZE_MISC;
1968         cursor->key_beg.obj_id = ip->obj_id;
1969         cursor->key_beg.create_tid = 0;
1970         cursor->key_beg.delete_tid = 0;
1971         cursor->key_beg.obj_type = 0;
1972
1973         if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
1974                 cursor->key_beg.key = ran_beg;
1975                 cursor->key_beg.rec_type = HAMMER_RECTYPE_DB;
1976         } else {
1977                 /*
1978                  * The key in the B-Tree is (base+bytes), so the first possible
1979                  * matching key is ran_beg + 1.
1980                  */
1981                 cursor->key_beg.key = ran_beg + 1;
1982                 cursor->key_beg.rec_type = HAMMER_RECTYPE_DATA;
1983         }
1984
1985         cursor->key_end = cursor->key_beg;
1986         if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
1987                 cursor->key_end.key = ran_end;
1988         } else {
1989                 tmp64 = ran_end + MAXPHYS + 1;  /* work around GCC-4 bug */
1990                 if (tmp64 < ran_end)
1991                         cursor->key_end.key = 0x7FFFFFFFFFFFFFFFLL;
1992                 else
1993                         cursor->key_end.key = ran_end + MAXPHYS + 1;
1994         }
1995
1996         cursor->asof = ip->obj_asof;
1997         cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1998         cursor->flags |= HAMMER_CURSOR_ASOF;
1999         cursor->flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
2000         cursor->flags |= HAMMER_CURSOR_BACKEND;
2001         cursor->flags |= HAMMER_CURSOR_END_INCLUSIVE;
2002
2003         error = hammer_ip_first(cursor);
2004
2005         /*
2006          * Iterate through matching records and mark them as deleted.
2007          */
2008         while (error == 0) {
2009                 leaf = cursor->leaf;
2010
2011                 KKASSERT(leaf->base.delete_tid == 0);
2012                 KKASSERT(leaf->base.obj_id == ip->obj_id);
2013
2014                 /*
2015                  * There may be overlap cases for regular file data.  Also
2016                  * remember the key for a regular file record is (base + len),
2017                  * NOT (base).
2018                  *
2019                  * Note that due to duplicates (mem & media) allowed by
2020                  * DELETE_VISIBILITY, off can wind up less than ran_beg.
2021                  */
2022                 if (leaf->base.rec_type == HAMMER_RECTYPE_DATA) {
2023                         off = leaf->base.key - leaf->data_len;
2024                         /*
2025                          * Check the left edge case.  We currently do not
2026                          * split existing records.
2027                          */
2028                         if (off < ran_beg && leaf->base.key > ran_beg) {
2029                                 panic("hammer left edge case %016llx %d",
2030                                         (long long)leaf->base.key,
2031                                         leaf->data_len);
2032                         }
2033
2034                         /*
2035                          * Check the right edge case.  Note that the
2036                          * record can be completely out of bounds, which
2037                          * terminates the search.
2038                          *
2039                          * base->key is exclusive of the right edge while
2040                          * ran_end is inclusive of the right edge.  The
2041                          * (key - data_len) left boundary is inclusive.
2042                          *
2043                          * XXX theory-check this test at some point, are
2044                          * we missing a + 1 somewhere?  Note that ran_end
2045                          * could overflow.
2046                          */
2047                         if (leaf->base.key - 1 > ran_end) {
2048                                 if (leaf->base.key - leaf->data_len > ran_end)
2049                                         break;
2050                                 panic("hammer right edge case");
2051                         }
2052                 } else {
2053                         off = leaf->base.key;
2054                 }
2055
2056                 /*
2057                  * Delete the record.  When truncating we do not delete
2058                  * in-memory (data) records because they represent data
2059                  * written after the truncation.
2060                  *
2061                  * This will also physically destroy the B-Tree entry and
2062                  * data if the retention policy dictates.  The function
2063                  * will set HAMMER_CURSOR_RETEST to cause hammer_ip_next()
2064                  * to retest the new 'current' element.
2065                  */
2066                 if (truncating == 0 || hammer_cursor_ondisk(cursor)) {
2067                         error = hammer_ip_delete_record(cursor, ip, trans->tid);
2068                         /*
2069                          * If we have built up too many meta-buffers we risk
2070                          * deadlocking the kernel and must stop.  This can
2071                          * occur when deleting ridiculously huge files.
2072                          * sync_trunc_off is updated so the next cycle does
2073                          * not re-iterate records we have already deleted.
2074                          *
2075                          * This is only done with formal truncations.
2076                          */
2077                         if (truncating > 1 && error == 0 &&
2078                             hammer_flusher_meta_limit(ip->hmp)) {
2079                                 ip->sync_trunc_off = off;
2080                                 error = EWOULDBLOCK;
2081                         }
2082                 }
2083                 if (error)
2084                         break;
2085                 ran_beg = off;  /* for restart */
2086                 error = hammer_ip_next(cursor);
2087         }
2088         if (cursor->node)
2089                 hammer_cache_node(&ip->cache[1], cursor->node);
2090
2091         if (error == EDEADLK) {
2092                 hammer_done_cursor(cursor);
2093                 error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
2094                 if (error == 0)
2095                         goto retry;
2096         }
2097         if (error == ENOENT)
2098                 error = 0;
2099         return(error);
2100 }
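
/*
 * Illustrative sketch (not part of the original code): how a backend
 * truncation might drive hammer_ip_delete_range() and handle the
 * EWOULDBLOCK throttle described above.  The caller-side bookkeeping is
 * an assumption for illustration.
 *
 *	error = hammer_ip_delete_range(&cursor, ip, trunc_off,
 *				       0x7FFFFFFFFFFFFFFFLL, 2);
 *	if (error == EWOULDBLOCK) {
 *		... ip->sync_trunc_off was advanced by the call; finish this
 *		    flush cycle and resume the truncation in the next one ...
 *	}
 */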
2101
2102 /*
2103  * This backend function deletes the specified record on-disk, similar to
2104  * delete_range but for a specific record.  Unlike the exact deletions
2105  * used when deleting a directory entry, this function uses an ASOF search
2106  * like delete_range.
2107  *
2108  * This function may be called with ip->obj_asof set for a slave snapshot,
2109  * so don't use ip->obj_asof here.  We only delete non-historical records.
2110  */
2111 static int
2112 hammer_delete_general(hammer_cursor_t cursor, hammer_inode_t ip,
2113                       hammer_btree_leaf_elm_t leaf)
2114 {
2115         hammer_transaction_t trans = cursor->trans;
2116         int error;
2117
2118         KKASSERT(trans->type == HAMMER_TRANS_FLS);
2119 retry:
2120         hammer_normalize_cursor(cursor);
2121         cursor->key_beg = leaf->base;
2122         cursor->asof = HAMMER_MAX_TID;
2123         cursor->flags &= ~HAMMER_CURSOR_INITMASK;
2124         cursor->flags |= HAMMER_CURSOR_ASOF;
2125         cursor->flags |= HAMMER_CURSOR_BACKEND;
2126         cursor->flags &= ~HAMMER_CURSOR_INSERT;
2127
2128         error = hammer_btree_lookup(cursor);
2129         if (error == 0) {
2130                 error = hammer_ip_delete_record(cursor, ip, trans->tid);
2131         }
2132         if (error == EDEADLK) {
2133                 hammer_done_cursor(cursor);
2134                 error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
2135                 if (error == 0)
2136                         goto retry;
2137         }
2138         return(error);
2139 }
2140
2141 /*
2142  * This function deletes remaining auxiliary records when an inode is
2143  * being deleted.  This function explicitly does not delete the
2144  * inode record, directory entry, data, or db records.  Those must be
2145  * properly disposed of prior to this call.
2146  */
2147 int
2148 hammer_ip_delete_clean(hammer_cursor_t cursor, hammer_inode_t ip, int *countp)
2149 {
2150         hammer_transaction_t trans = cursor->trans;
2151         hammer_btree_leaf_elm_t leaf __debugvar;
2152         int error;
2153
2154         KKASSERT(trans->type == HAMMER_TRANS_FLS);
2155 retry:
2156         hammer_normalize_cursor(cursor);
2157         cursor->key_beg.localization = ip->obj_localization +
2158                                        HAMMER_LOCALIZE_MISC;
2159         cursor->key_beg.obj_id = ip->obj_id;
2160         cursor->key_beg.create_tid = 0;
2161         cursor->key_beg.delete_tid = 0;
2162         cursor->key_beg.obj_type = 0;
2163         cursor->key_beg.rec_type = HAMMER_RECTYPE_CLEAN_START;
2164         cursor->key_beg.key = HAMMER_MIN_KEY;
2165
2166         cursor->key_end = cursor->key_beg;
2167         cursor->key_end.rec_type = HAMMER_RECTYPE_MAX;
2168         cursor->key_end.key = HAMMER_MAX_KEY;
2169
2170         cursor->asof = ip->obj_asof;
2171         cursor->flags &= ~HAMMER_CURSOR_INITMASK;
2172         cursor->flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
2173         cursor->flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
2174         cursor->flags |= HAMMER_CURSOR_BACKEND;
2175
2176         error = hammer_ip_first(cursor);
2177
2178         /*
2179          * Iterate through matching records and mark them as deleted.
2180          */
2181         while (error == 0) {
2182                 leaf = cursor->leaf;
2183
2184                 KKASSERT(leaf->base.delete_tid == 0);
2185
2186                 /*
2187                  * Mark the record and B-Tree entry as deleted.  This will
2188                  * also physically delete the B-Tree entry, record, and
2189                  * data if the retention policy dictates.  The function
2190                  * will set HAMMER_CURSOR_RETEST to cause hammer_ip_next()
2191                  * to retest the new 'current' element.
2192                  *
2193                  * Directory entries (and delete-on-disk directory entries)
2194                  * must be synced and cannot be deleted.
2195                  */
2196                 error = hammer_ip_delete_record(cursor, ip, trans->tid);
2197                 ++*countp;
2198                 if (error)
2199                         break;
2200                 error = hammer_ip_next(cursor);
2201         }
2202         if (cursor->node)
2203                 hammer_cache_node(&ip->cache[1], cursor->node);
2204         if (error == EDEADLK) {
2205                 hammer_done_cursor(cursor);
2206                 error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
2207                 if (error == 0)
2208                         goto retry;
2209         }
2210         if (error == ENOENT)
2211                 error = 0;
2212         return(error);
2213 }
2214
2215 /*
2216  * Delete the record at the current cursor.  On success the cursor will
2217  * be positioned appropriately for an iteration but may no longer be at
2218  * a leaf node.
2219  *
2220  * This routine is only called from the backend.
2221  *
2222  * NOTE: This can return EDEADLK, requiring the caller to terminate the
2223  * cursor and retry.
2224  */
2225 int
2226 hammer_ip_delete_record(hammer_cursor_t cursor, hammer_inode_t ip,
2227                         hammer_tid_t tid)
2228 {
2229         hammer_record_t iprec;
2230         int error;
2231
2232         KKASSERT(cursor->flags & HAMMER_CURSOR_BACKEND);
2233         KKASSERT(tid != 0);
2234
2235         /*
2236          * In-memory (unsynchronized) records can simply be freed.  This
2237          * only occurs in range iterations since all other records are
2238          * individually synchronized.  Thus there should be no confusion with
2239          * the interlock.
2240          *
2241          * An in-memory record may be deleted before being committed to disk,
2242          * but could have been accessed in the mean time.  The reservation
2243          * code will deal with the case.
2244          */
2245         if (hammer_cursor_inmem(cursor)) {
2246                 iprec = cursor->iprec;
2247                 KKASSERT((iprec->flags & HAMMER_RECF_INTERLOCK_BE) ==0);
2248                 iprec->flags |= HAMMER_RECF_DELETED_FE;
2249                 iprec->flags |= HAMMER_RECF_DELETED_BE;
2250                 KKASSERT(iprec->ip == ip);
2251                 ++ip->rec_generation;
2252                 return(0);
2253         }
2254
2255         /*
2256          * On-disk records are marked as deleted by updating their delete_tid.
2257          * This does not affect their position in the B-Tree (which is based
2258          * on their create_tid).
2259          *
2260          * Frontend B-Tree operations track inodes so we tell 
2261          * hammer_delete_at_cursor() not to.
2262          */
2263         error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
2264
2265         if (error == 0) {
2266                 error = hammer_delete_at_cursor(
2267                                 cursor,
2268                                 HAMMER_DELETE_ADJUST | hammer_nohistory(ip),
2269                                 cursor->trans->tid,
2270                                 cursor->trans->time32,
2271                                 0, NULL);
2272         }
2273         return(error);
2274 }
2275
2276 /*
2277  * Used to write a generic record w/optional data to the media b-tree
2278  * when no inode context is available.  Used by the mirroring and
2279  * snapshot code.
2280  *
2281  * Caller must set cursor->key_beg to leaf->base.  The cursor must be
2282  * flagged for backend operation and not flagged ASOF (since we are
2283  * doing an insertion).
2284  *
2285  * This function will acquire the appropriate sync lock and will set
2286  * the cursor insertion flag for the operation, do the btree lookup,
2287  * and the insertion, and clear the insertion flag and sync lock before
2288  * returning.  The cursor state will be such that the caller can continue
2289  * scanning (used by the mirroring code).
2290  *
2291  * mode: HAMMER_CREATE_MODE_UMIRROR     copyin data, check crc
2292  *       HAMMER_CREATE_MODE_SYS         bcopy data, generate crc
2293  *
2294  * NOTE: EDEADLK can be returned.  The caller must do deadlock handling and
2295  *                retry.
2296  *
2297  *       EALREADY can be returned if the record already exists (WARNING,
2298  *                because ASOF cannot be used no check is made for illegal
2299  *                duplicates).
2300  *
2301  * NOTE: Do not use this function for normal inode-related records as it
2302  *       goes directly to the media and is not integrated with
2303  *       in-memory records.
2304  */
2305 int
2306 hammer_create_at_cursor(hammer_cursor_t cursor, hammer_btree_leaf_elm_t leaf,
2307                         void *udata, int mode)
2308 {
2309         hammer_transaction_t trans;
2310         hammer_buffer_t data_buffer;
2311         hammer_off_t ndata_offset;
2312         hammer_tid_t high_tid;
2313         void *ndata;
2314         int error;
2315         int doprop;
2316
2317         trans = cursor->trans;
2318         data_buffer = NULL;
2319         ndata_offset = 0;
2320         doprop = 0;
2321
2322         KKASSERT((cursor->flags &
2323                   (HAMMER_CURSOR_BACKEND | HAMMER_CURSOR_ASOF)) ==
2324                   (HAMMER_CURSOR_BACKEND));
2325
2326         hammer_sync_lock_sh(trans);
2327
2328         if (leaf->data_len) {
2329                 ndata = hammer_alloc_data(trans, leaf->data_len,
2330                                           leaf->base.rec_type,
2331                                           &ndata_offset, &data_buffer,
2332                                           0, &error);
2333                 if (ndata == NULL) {
2334                         hammer_sync_unlock(trans);
2335                         return (error);
2336                 }
2337                 leaf->data_offset = ndata_offset;
2338                 hammer_modify_buffer(trans, data_buffer, NULL, 0);
2339
2340                 switch(mode) {
2341                 case HAMMER_CREATE_MODE_UMIRROR:
2342                         error = copyin(udata, ndata, leaf->data_len);
2343                         if (error == 0) {
2344                                 if (hammer_crc_test_leaf(ndata, leaf) == 0) {
2345                                         kprintf("data crc mismatch on pipe\n");
2346                                         error = EINVAL;
2347                                 } else {
2348                                         error = hammer_cursor_localize_data(
2349                                                         ndata, leaf);
2350                                 }
2351                         }
2352                         break;
2353                 case HAMMER_CREATE_MODE_SYS:
2354                         bcopy(udata, ndata, leaf->data_len);
2355                         error = 0;
2356                         hammer_crc_set_leaf(ndata, leaf);
2357                         break;
2358                 default:
2359                         panic("hammer: hammer_create_at_cursor: bad mode %d",
2360                                 mode);
2361                         break; /* NOT REACHED */
2362                 }
2363                 hammer_modify_buffer_done(data_buffer);
2364         } else {
2365                 leaf->data_offset = 0;
2366                 error = 0;
2367                 ndata = NULL;
2368         }
2369         if (error)
2370                 goto failed;
2371
2372         /*
2373          * Do the insertion.  This can fail with a EDEADLK or EALREADY
2374          */
2375         cursor->flags |= HAMMER_CURSOR_INSERT;
2376         error = hammer_btree_lookup(cursor);
2377         if (error != ENOENT) {
2378                 if (error == 0)
2379                         error = EALREADY;
2380                 goto failed;
2381         }
2382         error = hammer_btree_insert(cursor, leaf, &doprop);
2383
2384         /*
2385          * Cursor is left on current element, we want to skip it now.
2386          * (in case the caller is scanning)
2387          */
2388         cursor->flags |= HAMMER_CURSOR_ATEDISK;
2389         cursor->flags &= ~HAMMER_CURSOR_INSERT;
2390
2391         /*
2392          * If the insertion happens to be creating (and not just replacing)
2393          * an inode we have to track it.
2394          */
2395         if (error == 0 &&
2396             leaf->base.rec_type == HAMMER_RECTYPE_INODE &&
2397             leaf->base.delete_tid == 0) {
2398                 hammer_modify_volume_field(trans, trans->rootvol,
2399                                            vol0_stat_inodes);
2400                 ++trans->hmp->rootvol->ondisk->vol0_stat_inodes;
2401                 hammer_modify_volume_done(trans->rootvol);
2402         }
2403
2404         /*
2405          * vol0_next_tid must track the highest TID stored in the filesystem.
2406          * We do not need to generate undo for this update.
2407          */
2408         high_tid = leaf->base.create_tid;
2409         if (high_tid < leaf->base.delete_tid)
2410                 high_tid = leaf->base.delete_tid;
2411         if (trans->rootvol->ondisk->vol0_next_tid < high_tid) {
2412                 hammer_modify_volume(trans, trans->rootvol, NULL, 0);
2413                 trans->rootvol->ondisk->vol0_next_tid = high_tid;
2414                 hammer_modify_volume_done(trans->rootvol);
2415         }
2416
2417         /*
2418          * WARNING!  cursor's leaf pointer may have changed after
2419          *           do_propagation returns.
2420          */
2421         if (error == 0 && doprop)
2422                 hammer_btree_do_propagation(cursor, NULL, leaf);
2423
2424 failed:
2425         /*
2426          * Cleanup
2427          */
2428         if (error && leaf->data_offset) {
2429                 hammer_blockmap_free(trans, leaf->data_offset, leaf->data_len);
2430
2431         }
2432         hammer_sync_unlock(trans);
2433         if (data_buffer)
2434                 hammer_rel_buffer(data_buffer, 0);
2435         return (error);
2436 }
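
/*
 * Illustrative sketch (not part of the original code): how a mirroring-
 * style caller might feed a received leaf and user payload to
 * hammer_create_at_cursor().  Per the comments above, key_beg must match
 * leaf->base and the cursor must be a backend, non-ASOF cursor.  Treating
 * EALREADY as a skippable duplicate is an assumption for illustration.
 *
 *	cursor->key_beg = leaf->base;
 *	error = hammer_create_at_cursor(cursor, leaf, udata,
 *					HAMMER_CREATE_MODE_UMIRROR);
 *	if (error == EALREADY)
 *		error = 0;
 *	else if (error == EDEADLK)
 *		... terminate the cursor, re-seek, and retry ...
 */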
2437
2438 /*
2439  * Delete the B-Tree element at the current cursor and do any necessary
2440  * mirror propagation.
2441  *
2442  * The cursor must be properly positioned for an iteration on return but
2443  * may be pointing at an internal element.
2444  *
2445  * An element can be un-deleted by passing a delete_tid of 0 with
2446  * HAMMER_DELETE_ADJUST.
2447  */
2448 int
2449 hammer_delete_at_cursor(hammer_cursor_t cursor, int delete_flags,
2450                         hammer_tid_t delete_tid, u_int32_t delete_ts,
2451                         int track, int64_t *stat_bytes)
2452 {
2453         struct hammer_btree_leaf_elm save_leaf;
2454         hammer_transaction_t trans;
2455         hammer_btree_leaf_elm_t leaf;
2456         hammer_node_t node;
2457         hammer_btree_elm_t elm;
2458         hammer_off_t data_offset;
2459         int32_t data_len;
2460         int error;
2461         int icount;
2462         int doprop;
2463
2464         error = hammer_cursor_upgrade(cursor);
2465         if (error)
2466                 return(error);
2467
2468         trans = cursor->trans;
2469         node = cursor->node;
2470         elm = &node->ondisk->elms[cursor->index];
2471         leaf = &elm->leaf;
2472         KKASSERT(elm->base.btype == HAMMER_BTREE_TYPE_RECORD);
2473
2474         hammer_sync_lock_sh(trans);
2475         doprop = 0;
2476         icount = 0;
2477
2478         /*
2479          * Adjust the delete_tid.  Update the mirror_tid propagation field
2480          * as well.  delete_tid can be 0 (undelete -- used by mirroring).
2481          */
2482         if (delete_flags & HAMMER_DELETE_ADJUST) {
2483                 if (elm->base.rec_type == HAMMER_RECTYPE_INODE) {
2484                         if (elm->leaf.base.delete_tid == 0 && delete_tid)
2485                                 icount = -1;
2486                         if (elm->leaf.base.delete_tid && delete_tid == 0)
2487                                 icount = 1;
2488                 }
2489
2490                 hammer_modify_node(trans, node, elm, sizeof(*elm));
2491                 elm->leaf.base.delete_tid = delete_tid;
2492                 elm->leaf.delete_ts = delete_ts;
2493                 hammer_modify_node_done(node);
2494
2495                 if (elm->leaf.base.delete_tid > node->ondisk->mirror_tid) {
2496                         hammer_modify_node_field(trans, node, mirror_tid);
2497                         node->ondisk->mirror_tid = elm->leaf.base.delete_tid;
2498                         hammer_modify_node_done(node);
2499                         doprop = 1;
2500                         if (hammer_debug_general & 0x0002) {
2501                                 kprintf("delete_at_cursor: propagate %016llx"
2502                                         " @%016llx\n",
2503                                         (long long)elm->leaf.base.delete_tid,
2504                                         (long long)node->node_offset);
2505                         }
2506                 }
2507
2508                 /*
2509                  * Adjust for the iteration.  We have deleted the current
2510                  * element and want to clear ATEDISK so the iteration does
2511                  * not skip the element after, which now becomes the current
2512                  * element.  This element must be re-tested if doing an
2513                  * iteration, which is handled by the RETEST flag.
2514                  */
2515                 if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
2516                         cursor->flags |= HAMMER_CURSOR_RETEST;
2517                         cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
2518                 }
2519
2520                 /*
2521                  * An on-disk record cannot have the same delete_tid
2522                  * as its create_tid.  In a chain of record updates
2523                  * this could result in a duplicate record.
2524                  */
2525                 KKASSERT(elm->leaf.base.delete_tid !=
2526                          elm->leaf.base.create_tid);
2527         }
2528
2529         /*
2530          * Destroy the B-Tree element if asked (typically if a nohistory
2531          * file or mount, or when called by the pruning code).
2532          *
2533          * Adjust the ATEDISK flag to properly support iterations.
2534          */
2535         if (delete_flags & HAMMER_DELETE_DESTROY) {
2536                 data_offset = elm->leaf.data_offset;
2537                 data_len = elm->leaf.data_len;
2538                 if (doprop) {
2539                         save_leaf = elm->leaf;
2540                         leaf = &save_leaf;
2541                 }
2542                 if (elm->base.rec_type == HAMMER_RECTYPE_INODE &&
2543                     elm->leaf.base.delete_tid == 0) {
2544                         icount = -1;
2545                 }
2546
2547                 error = hammer_btree_delete(cursor);
2548                 if (error == 0) {
2549                         /*
2550                          * The deletion moves the next element (if any) to
2551                          * the current element position.  We must clear
2552                          * ATEDISK so this element is not skipped and we
2553                          * must set RETEST to force any iteration to re-test
2554                          * the element.
2555                          */
2556                         if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
2557                                 cursor->flags |= HAMMER_CURSOR_RETEST;
2558                                 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
2559                         }
2560                 }
2561                 if (error == 0) {
2562                         switch(data_offset & HAMMER_OFF_ZONE_MASK) {
2563                         case HAMMER_ZONE_LARGE_DATA:
2564                         case HAMMER_ZONE_SMALL_DATA:
2565                         case HAMMER_ZONE_META:
2566                                 hammer_blockmap_free(trans,
2567                                                      data_offset, data_len);
2568                                 break;
2569                         default:
2570                                 break;
2571                         }
2572                 }
2573         }
2574
2575         /*
2576          * Track inode count and next_tid.  This is used by the mirroring
2577          * and PFS code.  icount can be negative, zero, or positive.
2578          */
2579         if (error == 0 && track) {
2580                 if (icount) {
2581                         hammer_modify_volume_field(trans, trans->rootvol,
2582                                                    vol0_stat_inodes);
2583                         trans->rootvol->ondisk->vol0_stat_inodes += icount;
2584                         hammer_modify_volume_done(trans->rootvol);
2585                 }
2586                 if (trans->rootvol->ondisk->vol0_next_tid < delete_tid) {
2587                         hammer_modify_volume(trans, trans->rootvol, NULL, 0);
2588                         trans->rootvol->ondisk->vol0_next_tid = delete_tid;
2589                         hammer_modify_volume_done(trans->rootvol);
2590                 }
2591         }
2592
2593         /*
2594          * mirror_tid propagation occurs if the node's mirror_tid had to be
2595          * updated while adjusting the delete_tid.
2596          *
2597          * This occurs when deleting even in nohistory mode, but does not
2598          * occur when pruning an already-deleted node.
2599          *
2600          * cursor->ip is NULL when called from the pruning, mirroring,
2601          * and pfs code.  If non-NULL propagation will be conditionalized
2602          * on whether the PFS is in no-history mode or not.
2603          *
2604          * WARNING: cursor's leaf pointer may have changed after do_propagation
2605          *          returns!
2606          */
2607         if (doprop) {
2608                 if (cursor->ip)
2609                         hammer_btree_do_propagation(cursor, cursor->ip->pfsm, leaf);
2610                 else
2611                         hammer_btree_do_propagation(cursor, NULL, leaf);
2612         }
2613         hammer_sync_unlock(trans);
2614         return (error);
2615 }
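
/*
 * Illustrative sketch (not part of the original code): a destroy-style
 * call as a pruning or nohistory path might issue it, mirroring the
 * argument pattern used by hammer_ip_delete_record() above.  With
 * HAMMER_DELETE_DESTROY alone and track == 0, the tid/ts arguments are
 * not consulted.
 *
 *	error = hammer_delete_at_cursor(cursor,
 *					HAMMER_DELETE_DESTROY,
 *					cursor->trans->tid,
 *					cursor->trans->time32,
 *					0, NULL);
 */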
2616
2617 /*
2618  * Determine whether we can remove a directory.  This routine checks whether
2619  * a directory is empty or not and enforces flush connectivity.
2620  *
2621  * Flush connectivity requires that we block if the target directory is
2622  * currently flushing, otherwise it may not end up in the same flush group.
2623  *
2624  * Returns 0 on success, ENOTEMPTY or EDEADLK (or other errors) on failure.
2625  */
2626 int
2627 hammer_ip_check_directory_empty(hammer_transaction_t trans, hammer_inode_t ip)
2628 {
2629         struct hammer_cursor cursor;
2630         int error;
2631
2632         /*
2633          * Check directory empty
2634          */
2635         hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
2636
2637         cursor.key_beg.localization = ip->obj_localization +
2638                                       hammer_dir_localization(ip);
2639         cursor.key_beg.obj_id = ip->obj_id;
2640         cursor.key_beg.create_tid = 0;
2641         cursor.key_beg.delete_tid = 0;
2642         cursor.key_beg.obj_type = 0;
2643         cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE + 1;
2644         cursor.key_beg.key = HAMMER_MIN_KEY;
2645
2646         cursor.key_end = cursor.key_beg;
2647         cursor.key_end.rec_type = 0xFFFF;
2648         cursor.key_end.key = HAMMER_MAX_KEY;
2649
2650         cursor.asof = ip->obj_asof;
2651         cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
2652
2653         error = hammer_ip_first(&cursor);
2654         if (error == ENOENT)
2655                 error = 0;
2656         else if (error == 0)
2657                 error = ENOTEMPTY;
2658         hammer_done_cursor(&cursor);
2659         return(error);
2660 }
2661
2662 /*
2663  * Localize the data payload.  Directory entries may need their
2664  * localization adjusted.
2665  */
2666 static
2667 int
2668 hammer_cursor_localize_data(hammer_data_ondisk_t data,
2669                             hammer_btree_leaf_elm_t leaf)
2670 {
2671         u_int32_t localization;
2672
2673         if (leaf->base.rec_type == HAMMER_RECTYPE_DIRENTRY) {
2674                 localization = leaf->base.localization &
2675                                HAMMER_LOCALIZE_PSEUDOFS_MASK;
2676                 if (data->entry.localization != localization) {
2677                         data->entry.localization = localization;
2678                         hammer_crc_set_leaf(data, leaf);
2679                 }
2680         }
2681         return(0);
2682 }