sys/vfs/hammer: Remove prototype of nonexistent function
[dragonfly.git] / sys / vfs / hammer / hammer_object.c
1 /*
2  * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34
35 #include "hammer.h"
36
37 static int hammer_mem_lookup(hammer_cursor_t cursor);
38 static void hammer_mem_first(hammer_cursor_t cursor);
39 static int hammer_frontend_trunc_callback(hammer_record_t record,
40                                 void *data __unused);
41 static int hammer_bulk_scan_callback(hammer_record_t record, void *data);
42 static int hammer_record_needs_overwrite_delete(hammer_record_t record);
43 static int hammer_delete_general(hammer_cursor_t cursor, hammer_inode_t ip,
44                                 hammer_btree_leaf_elm_t leaf);
45 static int hammer_cursor_localize_data(hammer_data_ondisk_t data,
46                                 hammer_btree_leaf_elm_t leaf);
47
48 struct rec_trunc_info {
49         u_int16_t       rec_type;
50         int64_t         trunc_off;
51 };
52
53 struct hammer_bulk_info {
54         hammer_record_t record;
55         hammer_record_t conflict;
56 };
57
58 /*
59  * Red-black tree support.  Comparison code for insertion.
60  */
61 static int
62 hammer_rec_rb_compare(hammer_record_t rec1, hammer_record_t rec2)
63 {
64         if (rec1->leaf.base.rec_type < rec2->leaf.base.rec_type)
65                 return(-1);
66         if (rec1->leaf.base.rec_type > rec2->leaf.base.rec_type)
67                 return(1);
68
69         if (rec1->leaf.base.key < rec2->leaf.base.key)
70                 return(-1);
71         if (rec1->leaf.base.key > rec2->leaf.base.key)
72                 return(1);
73
74         /*
75          * For search & insertion purposes records deleted by the
76          * frontend or deleted/committed by the backend are silently
77          * ignored.  Otherwise pipelined insertions will get messed
78          * up.
79          *
80          * rec1 is greater than rec2 if rec1 is marked deleted.
81          * rec1 is less than rec2 if rec2 is marked deleted.
82          *
83          * Multiple deleted records may be present, do not return 0
84          * if both are marked deleted.
85          */
86         if (rec1->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
87                            HAMMER_RECF_COMMITTED)) {
88                 return(1);
89         }
90         if (rec2->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
91                            HAMMER_RECF_COMMITTED)) {
92                 return(-1);
93         }
94
95         return(0);
96 }
97
98 /*
99  * Basic record comparison code similar to hammer_btree_cmp().
100  *
101  * obj_id is not compared and may not yet be assigned in the record.
102  */
103 static int
104 hammer_rec_cmp(hammer_base_elm_t elm, hammer_record_t rec)
105 {
106         if (elm->rec_type < rec->leaf.base.rec_type)
107                 return(-3);
108         if (elm->rec_type > rec->leaf.base.rec_type)
109                 return(3);
110
111         if (elm->key < rec->leaf.base.key)
112                 return(-2);
113         if (elm->key > rec->leaf.base.key)
114                 return(2);
115
116         /*
117          * Never match against an item deleted by the frontend
118          * or backend, or committed by the backend.
119          *
120          * elm is less than rec if rec is marked deleted.
121          */
122         if (rec->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
123                           HAMMER_RECF_COMMITTED)) {
124                 return(-1);
125         }
126         return(0);
127 }
128
129 /*
130  * Ranged scan to locate overlapping record(s).  This is used by
131  * hammer_ip_get_bulk() to locate an overlapping record.  We have
132  * to use a ranged scan because the keys for data records with the
133  * same file base offset can be different due to differing data_len's.
134  *
135  * NOTE: The base file offset of a data record is (key - data_len), not (key).
136  */
137 static int
138 hammer_rec_overlap_cmp(hammer_record_t rec, void *data)
139 {
140         struct hammer_bulk_info *info = data;
141         hammer_btree_leaf_elm_t leaf = &info->record->leaf;
142
143         if (rec->leaf.base.rec_type < leaf->base.rec_type)
144                 return(-3);
145         if (rec->leaf.base.rec_type > leaf->base.rec_type)
146                 return(3);
147
148         /*
149          * Overlap compare
150          */
151         if (leaf->base.rec_type == HAMMER_RECTYPE_DATA) {
152                 /* rec_beg >= leaf_end */
153                 if (rec->leaf.base.key - rec->leaf.data_len >= leaf->base.key)
154                         return(2);
155                 /* rec_end <= leaf_beg */
156                 if (rec->leaf.base.key <= leaf->base.key - leaf->data_len)
157                         return(-2);
158         } else {
159                 if (rec->leaf.base.key < leaf->base.key)
160                         return(-2);
161                 if (rec->leaf.base.key > leaf->base.key)
162                         return(2);
163         }
164
165         /*
166          * We have to return 0 at this point, even if DELETED_FE is set,
167          * because returning anything else will cause the scan to ignore
168          * one of the branches when we really want it to check both.
169          */
170         return(0);
171 }
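/*
 * Worked example with illustrative offsets (not taken from real data):
 * a DATA record with key 0x18000 and data_len 0x8000 covers the file
 * range [0x10000, 0x18000).  Compared against a leaf with key 0x20000
 * and data_len 0x10000 (range [0x10000, 0x20000)) the ranges overlap
 * and the function returns 0.  Against a leaf covering [0x20000,
 * 0x30000) the record ends at or below leaf_beg and -2 is returned.
 */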
172
173 /*
174  * RB_SCAN comparison code for hammer_mem_first().  The argument order
175  * is reversed so the comparison result has to be negated.  key_beg and
176  * key_end are both range-inclusive.
177  *
178  * Localized deletions are not cached in-memory.
179  */
180 static
181 int
182 hammer_rec_scan_cmp(hammer_record_t rec, void *data)
183 {
184         hammer_cursor_t cursor = data;
185         int r;
186
187         r = hammer_rec_cmp(&cursor->key_beg, rec);
188         if (r > 1)
189                 return(-1);
190         r = hammer_rec_cmp(&cursor->key_end, rec);
191         if (r < -1)
192                 return(1);
193         return(0);
194 }
195
196 /*
197  * This compare function is used when simply looking up key_beg.
198  */
199 static
200 int
201 hammer_rec_find_cmp(hammer_record_t rec, void *data)
202 {
203         hammer_cursor_t cursor = data;
204         int r;
205
206         r = hammer_rec_cmp(&cursor->key_beg, rec);
207         if (r > 1)
208                 return(-1);
209         if (r < -1)
210                 return(1);
211         return(0);
212 }
213
214 /*
215  * Locate blocks within the truncation range.  Partial blocks do not count.
216  */
217 static
218 int
219 hammer_rec_trunc_cmp(hammer_record_t rec, void *data)
220 {
221         struct rec_trunc_info *info = data;
222
223         if (rec->leaf.base.rec_type < info->rec_type)
224                 return(-1);
225         if (rec->leaf.base.rec_type > info->rec_type)
226                 return(1);
227
228         switch(rec->leaf.base.rec_type) {
229         case HAMMER_RECTYPE_DB:
230                 /*
231                  * DB record key is not beyond the truncation point, retain.
232                  */
233                 if (rec->leaf.base.key < info->trunc_off)
234                         return(-1);
235                 break;
236         case HAMMER_RECTYPE_DATA:
237                 /*
238                  * DATA record offset start is not beyond the truncation point,
239                  * retain.
240                  */
241                 if (rec->leaf.base.key - rec->leaf.data_len < info->trunc_off)
242                         return(-1);
243                 break;
244         default:
245                 panic("hammer_rec_trunc_cmp: unexpected record type");
246         }
247
248         /*
249          * The record start is >= the truncation point, return match,
250          * the record should be destroyed.
251          */
252         return(0);
253 }
254
255 RB_GENERATE(hammer_rec_rb_tree, hammer_record, rb_node, hammer_rec_rb_compare);
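/*
 * The RB_GENERATE() above expands the tree operations used throughout
 * this file; the generated names follow the usual <treename>_RB_*
 * pattern, e.g.:
 *
 *	RB_INSERT(hammer_rec_rb_tree, ...)	- hammer_mem_add()
 *	RB_REMOVE(hammer_rec_rb_tree, ...)	- hammer_rel_mem_record()
 *	hammer_rec_rb_tree_RB_SCAN(...)		- hammer_mem_lookup(),
 *						  hammer_mem_first()
 */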
256
257 /*
258  * Allocate a record for the caller to finish filling in.  The record is
259  * returned referenced.  In order to manually set data, call this function
260  * with data_len=0 and then manually set record->leaf.data_len and
261  * record->data later.
262  */
263 hammer_record_t
264 hammer_alloc_mem_record(hammer_inode_t ip, int data_len)
265 {
266         hammer_record_t record;
267         hammer_mount_t hmp;
268
269         hmp = ip->hmp;
270         ++hammer_count_records;
271         record = kmalloc(sizeof(*record), hmp->m_misc,
272                          M_WAITOK | M_ZERO | M_USE_RESERVE);
273         record->flush_state = HAMMER_FST_IDLE;
274         record->ip = ip;
275         record->leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
276         record->leaf.data_len = data_len;
277         hammer_ref(&record->lock);
278
279         if (data_len) {
280                 record->data = kmalloc(data_len, hmp->m_misc, M_WAITOK | M_ZERO);
281                 record->flags |= HAMMER_RECF_ALLOCDATA;
282                 ++hammer_count_record_datas;
283         }
284
285         return (record);
286 }
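/*
 * Condensed usage sketch, based on hammer_ip_add_directory() below
 * (error handling and most field setup omitted):
 *
 *	record = hammer_alloc_mem_record(dip, HAMMER_ENTRY_SIZE(bytes));
 *	record->type = HAMMER_MEM_RECORD_ADD;
 *	record->leaf.base.rec_type = HAMMER_RECTYPE_DIRENTRY;
 *	... fill in the remaining leaf fields and record->data ...
 *	error = hammer_mem_add(record);		(eats the reference)
 */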
287
288 void
289 hammer_wait_mem_record_ident(hammer_record_t record, const char *ident)
290 {
291         while (record->flush_state == HAMMER_FST_FLUSH) {
292                 record->flags |= HAMMER_RECF_WANTED;
293                 tsleep(record, 0, ident, 0);
294         }
295 }
296
297 /*
298  * Called from the backend, hammer_inode.c, after a record has been
299  * flushed to disk.  The record has been exclusively locked by the
300  * caller and interlocked with BE.
301  *
302  * We clean up the state, unlock, and release the record (the record
303  * was referenced by the fact that it was in the HAMMER_FST_FLUSH state).
304  */
305 void
306 hammer_flush_record_done(hammer_record_t record, int error)
307 {
308         hammer_inode_t target_ip;
309
310         KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
311         KKASSERT(record->flags & HAMMER_RECF_INTERLOCK_BE);
312
313         /*
314          * If an error occurred, the backend was unable to sync the
315          * record to its media.  Leave the record intact.
316          */
317         if (error) {
318                 hammer_critical_error(record->ip->hmp, record->ip, error,
319                                       "while flushing record");
320         }
321
322         --record->flush_group->refs;
323         record->flush_group = NULL;
324
325         /*
326          * Adjust the flush state and dependency based on success or
327          * failure.
328          */
329         if (record->flags & (HAMMER_RECF_DELETED_BE | HAMMER_RECF_COMMITTED)) {
330                 if ((target_ip = record->target_ip) != NULL) {
331                         TAILQ_REMOVE(&target_ip->target_list, record,
332                                      target_entry);
333                         record->target_ip = NULL;
334                         hammer_test_inode(target_ip);
335                 }
336                 record->flush_state = HAMMER_FST_IDLE;
337         } else {
338                 if (record->target_ip) {
339                         record->flush_state = HAMMER_FST_SETUP;
340                         hammer_test_inode(record->ip);
341                         hammer_test_inode(record->target_ip);
342                 } else {
343                         record->flush_state = HAMMER_FST_IDLE;
344                 }
345         }
346         record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
347
348         /*
349          * Cleanup
350          */
351         if (record->flags & HAMMER_RECF_WANTED) {
352                 record->flags &= ~HAMMER_RECF_WANTED;
353                 wakeup(record);
354         }
355         hammer_rel_mem_record(record);
356 }
357
358 /*
359  * Release a memory record.  Records marked for deletion are immediately
360  * removed from the RB-Tree but otherwise left intact until the last ref
361  * goes away.
362  */
363 void
364 hammer_rel_mem_record(struct hammer_record *record)
365 {
366         hammer_mount_t hmp;
367         hammer_reserve_t resv;
368         hammer_inode_t ip;
369         hammer_inode_t target_ip;
370         int diddrop;
371
372         hammer_rel(&record->lock);
373
374         if (hammer_norefs(&record->lock)) {
375                 /*
376                  * Upon release of the last reference wakeup any waiters.
377                  * The record structure may get destroyed so callers will
378                  * loop up and do a relookup.
379                  *
380                  * WARNING!  Record must be removed from RB-TREE before we
381                  * might possibly block.  hammer_test_inode() can block!
382                  */
383                 ip = record->ip;
384                 hmp = ip->hmp;
385
386                 /*
387                  * Upon release of the last reference a record marked deleted
388                  * by the front or backend, or committed by the backend,
389                  * is destroyed.
390                  */
391                 if (record->flags & (HAMMER_RECF_DELETED_FE |
392                                      HAMMER_RECF_DELETED_BE |
393                                      HAMMER_RECF_COMMITTED)) {
394                         KKASSERT(hammer_isactive(&ip->lock) > 0);
395                         KKASSERT(record->flush_state != HAMMER_FST_FLUSH);
396
397                         /*
398                          * target_ip may have zero refs, we have to ref it
399                          * to prevent it from being ripped out from under
400                          * us.
401                          */
402                         if ((target_ip = record->target_ip) != NULL) {
403                                 TAILQ_REMOVE(&target_ip->target_list,
404                                              record, target_entry);
405                                 record->target_ip = NULL;
406                                 hammer_ref(&target_ip->lock);
407                         }
408
409                         /*
410                          * Remove the record from the RB-Tree
411                          */
412                         if (record->flags & HAMMER_RECF_ONRBTREE) {
413                                 RB_REMOVE(hammer_rec_rb_tree,
414                                           &record->ip->rec_tree,
415                                           record);
416                                 record->flags &= ~HAMMER_RECF_ONRBTREE;
417                                 KKASSERT(ip->rsv_recs > 0);
418                                 if (RB_EMPTY(&record->ip->rec_tree)) {
419                                         record->ip->flags &=
420                                                         ~HAMMER_INODE_XDIRTY;
421                                         record->ip->sync_flags &=
422                                                         ~HAMMER_INODE_XDIRTY;
423                                 }
424                                 diddrop = 1;
425                         } else {
426                                 diddrop = 0;
427                         }
428
429                         /*
430                          * We must wait for any direct-IO to complete before
431                          * we can destroy the record because the bio may
432                          * have a reference to it.
433                          */
434                         if (record->gflags &
435                            (HAMMER_RECG_DIRECT_IO | HAMMER_RECG_DIRECT_INVAL)) {
436                                 hammer_io_direct_wait(record);
437                         }
438
439                         /*
440                          * Account for the completion after the direct IO
441                          * has completed.
442                          */
443                         if (diddrop) {
444                                 --hmp->rsv_recs;
445                                 --ip->rsv_recs;
446                                 hmp->rsv_databytes -= record->leaf.data_len;
447
448                                 if (RB_EMPTY(&record->ip->rec_tree))
449                                         hammer_test_inode(record->ip);
450                                 if ((ip->flags & HAMMER_INODE_RECSW) &&
451                                     ip->rsv_recs <= hammer_limit_inode_recs/2) {
452                                         ip->flags &= ~HAMMER_INODE_RECSW;
453                                         wakeup(&ip->rsv_recs);
454                                 }
455                         }
456
457                         /*
458                          * Do this test after removing record from the RB-Tree.
459                          */
460                         if (target_ip) {
461                                 hammer_test_inode(target_ip);
462                                 hammer_rel_inode(target_ip, 0);
463                         }
464
465                         if (record->flags & HAMMER_RECF_ALLOCDATA) {
466                                 --hammer_count_record_datas;
467                                 kfree(record->data, hmp->m_misc);
468                                 record->flags &= ~HAMMER_RECF_ALLOCDATA;
469                         }
470
471                         /*
472                          * Release the reservation.
473                          *
474                          * If the record was not committed we can theoretically
475                          * undo the reservation.  However, doing so might
476                          * create weird edge cases with the ordering of
477                          * direct writes because the related buffer cache
478                          * elements are per-vnode.  So we don't try.
479                          */
480                         if ((resv = record->resv) != NULL) {
481                                 /* XXX undo leaf.data_offset,leaf.data_len */
482                                 hammer_blockmap_reserve_complete(hmp, resv);
483                                 record->resv = NULL;
484                         }
485                         record->data = NULL;
486                         --hammer_count_records;
487                         kfree(record, hmp->m_misc);
488                 }
489         }
490 }
491
492 /*
493  * Record visibility depends on whether the record is being accessed by
494  * the backend or the frontend.  Backend tests ignore the frontend delete
495  * flag.  Frontend tests do NOT ignore the backend delete/commit flags and
496  * must also check for commit races.
497  *
498  * Return non-zero if the record is visible, zero if it isn't or if it is
499  * deleted.  Returns 0 if the record has been committed (unless the special
500  * delete-visibility flag is set).  A committed record must be located
501  * via the media B-Tree.  Returns non-zero if the record is good.
502  *
503  * If HAMMER_CURSOR_DELETE_VISIBILITY is set we allow deleted memory
504  * records to be returned.  This is so pending deletions are detected
505  * when using an iterator to locate an unused hash key, or when we need
506  * to locate historical records on-disk to destroy.
507  */
508 static __inline
509 int
510 hammer_ip_iterate_mem_good(hammer_cursor_t cursor, hammer_record_t record)
511 {
512         if (cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY)
513                 return(1);
514         if (cursor->flags & HAMMER_CURSOR_BACKEND) {
515                 if (record->flags & (HAMMER_RECF_DELETED_BE |
516                                      HAMMER_RECF_COMMITTED)) {
517                         return(0);
518                 }
519         } else {
520                 if (record->flags & (HAMMER_RECF_DELETED_FE |
521                                      HAMMER_RECF_DELETED_BE |
522                                      HAMMER_RECF_COMMITTED)) {
523                         return(0);
524                 }
525         }
526         return(1);
527 }
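/*
 * Visibility summary derived from the tests above:
 *
 *	record flags				frontend	backend
 *	------------------------------		--------	--------
 *	(none of the below)			visible		visible
 *	DELETED_FE only				hidden		visible
 *	DELETED_BE and/or COMMITTED		hidden		hidden
 *	(cursor has DELETE_VISIBILITY)		visible		visible
 */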
528
529 /*
530  * This callback is used as part of the RB_SCAN function for in-memory
531  * records.  We terminate it (return -1) as soon as we get a match.
532  *
533  * This routine is used by frontend code.
534  *
535  * The primary compare code does not account for ASOF lookups.  This
536  * code handles that case as well as a few others.
537  */
538 static
539 int
540 hammer_rec_scan_callback(hammer_record_t rec, void *data)
541 {
542         hammer_cursor_t cursor = data;
543
544         /*
545          * We terminate on success, so this should be NULL on entry.
546          */
547         KKASSERT(cursor->iprec == NULL);
548
549         /*
550          * Skip if the record was marked deleted or committed.
551          */
552         if (hammer_ip_iterate_mem_good(cursor, rec) == 0)
553                 return(0);
554
555         /*
556          * Skip if not visible due to our as-of TID
557          */
558         if (cursor->flags & HAMMER_CURSOR_ASOF) {
559                 if (cursor->asof < rec->leaf.base.create_tid)
560                         return(0);
561                 if (rec->leaf.base.delete_tid &&
562                     cursor->asof >= rec->leaf.base.delete_tid) {
563                         return(0);
564                 }
565         }
566
567         /*
568          * ref the record.  The record is protected from backend B-Tree
569          * interactions by virtue of the cursor's IP lock.
570          */
571         hammer_ref(&rec->lock);
572
573         /*
574          * The record may have been deleted or committed while we
575          * were blocked.  XXX remove?
576          */
577         if (hammer_ip_iterate_mem_good(cursor, rec) == 0) {
578                 hammer_rel_mem_record(rec);
579                 return(0);
580         }
581
582         /*
583          * Set the matching record and stop the scan.
584          */
585         cursor->iprec = rec;
586         return(-1);
587 }
588
589
590 /*
591  * Lookup an in-memory record given the key specified in the cursor.  Works
592  * just like hammer_btree_lookup() but operates on an inode's in-memory
593  * record list.
594  *
595  * The lookup must fail if the record is marked for deferred deletion.
596  *
597  * The API for mem/btree_lookup() does not mess with the ATE/EOF bits.
598  */
599 static
600 int
601 hammer_mem_lookup(hammer_cursor_t cursor)
602 {
603         KKASSERT(cursor->ip);
604         if (cursor->iprec) {
605                 hammer_rel_mem_record(cursor->iprec);
606                 cursor->iprec = NULL;
607         }
608         hammer_rec_rb_tree_RB_SCAN(&cursor->ip->rec_tree, hammer_rec_find_cmp,
609                                    hammer_rec_scan_callback, cursor);
610
611         return (cursor->iprec ? 0 : ENOENT);
612 }
613
614 /*
615  * hammer_mem_first() - locate the first in-memory record matching the
616  * cursor within the bounds of the key range.
617  *
618  * WARNING!  API is slightly different from btree_first().  hammer_mem_first()
619  * will set ATEMEM the same as MEMEOF, and does not return any error.
620  */
621 static
622 void
623 hammer_mem_first(hammer_cursor_t cursor)
624 {
625         hammer_inode_t ip;
626
627         ip = cursor->ip;
628         KKASSERT(ip != NULL);
629
630         if (cursor->iprec) {
631                 hammer_rel_mem_record(cursor->iprec);
632                 cursor->iprec = NULL;
633         }
634         hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_scan_cmp,
635                                    hammer_rec_scan_callback, cursor);
636
637         if (cursor->iprec)
638                 cursor->flags &= ~(HAMMER_CURSOR_MEMEOF | HAMMER_CURSOR_ATEMEM);
639         else
640                 cursor->flags |= HAMMER_CURSOR_MEMEOF | HAMMER_CURSOR_ATEMEM;
641 }
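/*
 * Caller-side sketch (condensed; the real merged memory/media iteration
 * is implemented by hammer_ip_first() and hammer_ip_next()):
 *
 *	hammer_mem_first(cursor);
 *	if (cursor->flags & HAMMER_CURSOR_MEMEOF) {
 *		... no in-memory candidate, consult only the B-Tree ...
 *	}
 */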
642
643 /************************************************************************
644  *                   HAMMER IN-MEMORY RECORD FUNCTIONS                  *
645  ************************************************************************
646  *
647  * These functions manipulate in-memory records.  Such records typically
648  * exist prior to being committed to disk or indexed via the on-disk B-Tree.
649  */
650
651 /*
652  * Add a directory entry (dip,ncp) which references inode (ip).
653  *
654  * Note that the low 32 bits of the namekey are set temporarily to create
655  * a unique in-memory record, and may be modified a second time when the
656  * record is synchronized to disk.  In particular, the low 32 bits cannot be
657  * all 0's when synching to disk, which is not handled here.
658  *
659  * NOTE: bytes does not include any terminating \0 on name, and name might
660  * not be terminated.
661  */
662 int
663 hammer_ip_add_directory(struct hammer_transaction *trans,
664                      struct hammer_inode *dip, const char *name, int bytes,
665                      struct hammer_inode *ip)
666 {
667         struct hammer_cursor cursor;
668         hammer_record_t record;
669         int error;
670         u_int32_t max_iterations;
671
672         KKASSERT(dip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY);
673
674         record = hammer_alloc_mem_record(dip, HAMMER_ENTRY_SIZE(bytes));
675
676         record->type = HAMMER_MEM_RECORD_ADD;
677         record->leaf.base.localization = dip->obj_localization +
678                                          hammer_dir_localization(dip);
679         record->leaf.base.obj_id = dip->obj_id;
680         record->leaf.base.key = hammer_directory_namekey(dip, name, bytes,
681                                                          &max_iterations);
682         record->leaf.base.rec_type = HAMMER_RECTYPE_DIRENTRY;
683         record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
684         record->data->entry.obj_id = ip->obj_id;
685         record->data->entry.localization = ip->obj_localization;
686         bcopy(name, record->data->entry.name, bytes);
687
688         ++ip->ino_data.nlinks;
689         ip->ino_data.ctime = trans->time;
690         hammer_modify_inode(trans, ip, HAMMER_INODE_DDIRTY);
691
692         /*
693          * Find an unused namekey.  Both the in-memory record tree and
694          * the B-Tree are checked.  We do not want historically deleted
695          * names to create a collision as our iteration space may be limited,
696          * and since create_tid wouldn't match anyway an ASOF search
697          * must be used to locate collisions.
698          *
699          * delete-visibility is set so pending deletions do not give us
700          * a false-negative on our ability to use an iterator.
701          *
702          * The iterator must not rollover the key.  Directory keys only
703          * use the positive key space.
704          */
705         hammer_init_cursor(trans, &cursor, &dip->cache[1], dip);
706         cursor.key_beg = record->leaf.base;
707         cursor.flags |= HAMMER_CURSOR_ASOF;
708         cursor.flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
709         cursor.asof = ip->obj_asof;
710
711         while (hammer_ip_lookup(&cursor) == 0) {
712                 ++record->leaf.base.key;
713                 KKASSERT(record->leaf.base.key > 0);
714                 cursor.key_beg.key = record->leaf.base.key;
715                 if (--max_iterations == 0) {
716                         hammer_rel_mem_record(record);
717                         error = ENOSPC;
718                         goto failed;
719                 }
720         }
721
722         /*
723          * The target inode and the directory entry are bound together.
724          */
725         record->target_ip = ip;
726         record->flush_state = HAMMER_FST_SETUP;
727         TAILQ_INSERT_TAIL(&ip->target_list, record, target_entry);
728
729         /*
730          * The inode now has a dependency and must be taken out of the idle
731          * state.  An inode not in an idle state is given an extra reference.
732          *
733          * When transitioning to the SETUP state, flag the inode for an
734          * automatic reflush (once its dependencies are disposed of) if
735          * someone is waiting on the inode.
736          */
737         if (ip->flush_state == HAMMER_FST_IDLE) {
738                 hammer_ref(&ip->lock);
739                 ip->flush_state = HAMMER_FST_SETUP;
740                 if (ip->flags & HAMMER_INODE_FLUSHW)
741                         ip->flags |= HAMMER_INODE_REFLUSH;
742         }
743         error = hammer_mem_add(record);
744         if (error == 0) {
745                 dip->ino_data.mtime = trans->time;
746                 hammer_modify_inode(trans, dip, HAMMER_INODE_MTIME);
747         }
748 failed:
749         hammer_done_cursor(&cursor);
750         return(error);
751 }
752
753 /*
754  * Delete the directory entry and update the inode link count.  The
755  * cursor must be seeked to the directory entry record being deleted.
756  *
757  * The related inode should be share-locked by the caller.  The caller is
758  * on the frontend.  It could also be NULL indicating that the directory
759  * entry being removed has no related inode.
760  *
761  * This function can return EDEADLK requiring the caller to terminate
762  * the cursor, any locks, wait on the returned record, and retry.
763  */
764 int
765 hammer_ip_del_directory(struct hammer_transaction *trans,
766                      hammer_cursor_t cursor, struct hammer_inode *dip,
767                      struct hammer_inode *ip)
768 {
769         hammer_record_t record;
770         int error;
771
772         if (hammer_cursor_inmem(cursor)) {
773                 /*
774                  * In-memory (unsynchronized) records can simply be freed.
775                  *
776                  * Even though the HAMMER_RECF_DELETED_FE flag is ignored
777                  * by the backend, we must still avoid races against the
778                  * backend potentially syncing the record to the media.
779                  *
780                  * We cannot call hammer_ip_delete_record(), that routine may
781                  * only be called from the backend.
782                  */
783                 record = cursor->iprec;
784                 if (record->flags & (HAMMER_RECF_INTERLOCK_BE |
785                                      HAMMER_RECF_DELETED_BE |
786                                      HAMMER_RECF_COMMITTED)) {
787                         KKASSERT(cursor->deadlk_rec == NULL);
788                         hammer_ref(&record->lock);
789                         cursor->deadlk_rec = record;
790                         error = EDEADLK;
791                 } else {
792                         KKASSERT(record->type == HAMMER_MEM_RECORD_ADD);
793                         record->flags |= HAMMER_RECF_DELETED_FE;
794                         error = 0;
795                 }
796         } else {
797                 /*
798                  * If the record is on-disk we have to queue the deletion by
799                  * the record's key.  This also causes lookups to skip the
800                  * record (lookups for the purposes of finding an unused
801                  * directory key do not skip the record).
802                  */
803                 KKASSERT(dip->flags &
804                          (HAMMER_INODE_ONDISK | HAMMER_INODE_DONDISK));
805                 record = hammer_alloc_mem_record(dip, 0);
806                 record->type = HAMMER_MEM_RECORD_DEL;
807                 record->leaf.base = cursor->leaf->base;
808                 KKASSERT(dip->obj_id == record->leaf.base.obj_id);
809
810                 /*
811                  * ip may be NULL, indicating the deletion of a directory
812                  * entry which has no related inode.
813                  */
814                 record->target_ip = ip;
815                 if (ip) {
816                         record->flush_state = HAMMER_FST_SETUP;
817                         TAILQ_INSERT_TAIL(&ip->target_list, record,
818                                           target_entry);
819                 } else {
820                         record->flush_state = HAMMER_FST_IDLE;
821                 }
822
823                 /*
824                  * The inode now has a dependency and must be taken out of
825                  * the idle state.  An inode not in an idle state is given
826                  * an extra reference.
827                  *
828                  * When transitioning to the SETUP state, flag the inode
829                  * for an automatic reflush (once its dependencies are
830                  * disposed of) if someone is waiting on the inode.
831                  */
832                 if (ip && ip->flush_state == HAMMER_FST_IDLE) {
833                         hammer_ref(&ip->lock);
834                         ip->flush_state = HAMMER_FST_SETUP;
835                         if (ip->flags & HAMMER_INODE_FLUSHW)
836                                 ip->flags |= HAMMER_INODE_REFLUSH;
837                 }
838
839                 error = hammer_mem_add(record);
840         }
841
842         /*
843          * One less link.  The file may still be open in the OS even after
844          * all links have gone away.
845          *
846          * We have to terminate the cursor before syncing the inode to
847          * avoid deadlocking against ourselves.  XXX this may no longer
848          * be true.
849          *
850          * If nlinks drops to zero and the vnode is inactive (or there is
851          * no vnode), call hammer_inode_unloadable_check() to zonk the
852          * inode.  If we don't do this here the inode will not be destroyed
853          * on-media until we unmount.
854          */
855         if (error == 0) {
856                 if (ip) {
857                         --ip->ino_data.nlinks;  /* do before we might block */
858                         ip->ino_data.ctime = trans->time;
859                 }
860                 dip->ino_data.mtime = trans->time;
861                 hammer_modify_inode(trans, dip, HAMMER_INODE_MTIME);
862                 if (ip) {
863                         hammer_modify_inode(trans, ip, HAMMER_INODE_DDIRTY);
864                         if (ip->ino_data.nlinks == 0 &&
865                             (ip->vp == NULL || (ip->vp->v_flag & VINACTIVE))) {
866                                 hammer_done_cursor(cursor);
867                                 hammer_inode_unloadable_check(ip, 1);
868                                 hammer_flush_inode(ip, 0);
869                         }
870                 }
871
872         }
873         return(error);
874 }
875
876 /*
877  * Add a record to an inode.
878  *
879  * The caller must allocate the record with hammer_alloc_mem_record(ip,len) and
880  * initialize the following additional fields that are not initialized by these
881  * functions.
882  *
883  * The related inode should be share-locked by the caller.  The caller is
884  * on the frontend.
885  *
886  * record->leaf.base.key
887  * record->leaf.base.rec_type
888  * record->leaf.base.localization
889  */
890 int
891 hammer_ip_add_record(struct hammer_transaction *trans, hammer_record_t record)
892 {
893         hammer_inode_t ip = record->ip;
894         int error;
895
896         KKASSERT(record->leaf.base.localization != 0);
897         record->leaf.base.obj_id = ip->obj_id;
898         record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
899         error = hammer_mem_add(record);
900         return(error);
901 }
902
903 /*
904  * Locate a pre-existing bulk record in memory.  The caller wishes to
905  * replace the record with a new one.  The existing record may have a
906  * different length (and thus a different key) so we have to use an
907  * overlap check function.
908  */
909 static hammer_record_t
910 hammer_ip_get_bulk(hammer_record_t record)
911 {
912         struct hammer_bulk_info info;
913         hammer_inode_t ip = record->ip;
914
915         info.record = record;
916         info.conflict = NULL;
917         hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_overlap_cmp,
918                                    hammer_bulk_scan_callback, &info);
919
920         return(info.conflict);  /* may be NULL */
921 }
922
923 /*
924  * Take records vetted by overlap_cmp.  The first non-deleted record
925  * (if any) stops the scan.
926  */
927 static int
928 hammer_bulk_scan_callback(hammer_record_t record, void *data)
929 {
930         struct hammer_bulk_info *info = data;
931
932         if (record->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
933                              HAMMER_RECF_COMMITTED)) {
934                 return(0);
935         }
936         hammer_ref(&record->lock);
937         info->conflict = record;
938         return(-1);                     /* stop scan */
939 }
940
941 /*
942  * Reserve blockmap space placemarked with an in-memory record.
943  *
944  * This routine is called by the frontend in order to be able to directly
945  * flush a buffer cache buffer.  The frontend has locked the related buffer
946  * cache buffers and we should be able to manipulate any overlapping
947  * in-memory records.
948  *
949  * The caller is responsible for adding the returned record and deleting
950  * the returned conflicting record (if any), typically by calling
951  * hammer_ip_replace_bulk() (via hammer_io_direct_write()).
952  */
953 hammer_record_t
954 hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset, void *data, int bytes,
955                    int *errorp)
956 {
957         hammer_record_t record;
958         hammer_dedup_cache_t dcp;
959         hammer_crc_t crc;
960         int zone;
961
962         /*
963          * Create a record to cover the direct write.  The record cannot
964          * be added to the in-memory RB tree here as it might conflict
965          * with an existing memory record.  See hammer_io_direct_write().
966          *
967          * The backend is responsible for finalizing the space reserved in
968          * this record.
969          *
970          * XXX bytes not aligned, depend on the reservation code to
971          * align the reservation.
972          */
973         record = hammer_alloc_mem_record(ip, 0);
974         zone = (bytes >= HAMMER_BUFSIZE) ? HAMMER_ZONE_LARGE_DATA_INDEX :
975                                            HAMMER_ZONE_SMALL_DATA_INDEX;
976         if (bytes == 0)
977                 crc = 0;
978         else
979                 crc = crc32(data, bytes);
980
981         if (hammer_live_dedup == 0)
982                 goto nodedup;
983         if ((dcp = hammer_dedup_cache_lookup(ip->hmp, crc)) != NULL) {
984                 struct hammer_dedup_cache tmp = *dcp;
985
986                 record->resv = hammer_blockmap_reserve_dedup(ip->hmp, zone,
987                         bytes, tmp.data_offset, errorp);
988                 if (record->resv == NULL)
989                         goto nodedup;
990
991                 if (!hammer_dedup_validate(&tmp, zone, bytes, data)) {
992                         hammer_blockmap_reserve_complete(ip->hmp, record->resv);
993                         goto nodedup;
994                 }
995
996                 record->leaf.data_offset = tmp.data_offset;
997                 record->flags |= HAMMER_RECF_DEDUPED;
998         } else {
999 nodedup:
1000                 record->resv = hammer_blockmap_reserve(ip->hmp, zone, bytes,
1001                        &record->leaf.data_offset, errorp);
1002                 if (record->resv == NULL) {
1003                         kprintf("hammer_ip_add_bulk: reservation failed\n");
1004                         hammer_rel_mem_record(record);
1005                         return(NULL);
1006                 }
1007         }
1008
1009         record->type = HAMMER_MEM_RECORD_DATA;
1010         record->leaf.base.rec_type = HAMMER_RECTYPE_DATA;
1011         record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
1012         record->leaf.base.obj_id = ip->obj_id;
1013         record->leaf.base.key = file_offset + bytes;
1014         record->leaf.base.localization = ip->obj_localization +
1015                                          HAMMER_LOCALIZE_MISC;
1016         record->leaf.data_len = bytes;
1017         record->leaf.data_crc = crc;
1018         KKASSERT(*errorp == 0);
1019
1020         return(record);
1021 }
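/*
 * Condensed sketch of the bulk-write flow described above (the real
 * sequence lives in the strategy-write path and hammer_io_direct_write();
 * error handling omitted):
 *
 *	record = hammer_ip_add_bulk(ip, file_offset, data, bytes, &error);
 *	if (record == NULL)
 *		... fall back to the buffered write path ...
 *	hammer_ip_replace_bulk(ip->hmp, record);
 *	... issue the direct media write to record->leaf.data_offset ...
 */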
1022
1023 /*
1024  * Called by hammer_io_direct_write() prior to any possible completion
1025  * of the BIO to emplace the memory record associated with the I/O and
1026  * to replace any prior memory record which might still be active.
1027  *
1028  * Setting the FE deleted flag on the old record (if any) avoids any RB
1029  * tree insertion conflict, among other things.
1030  *
1031  * This has to be done prior to the caller completing any related buffer
1032  * cache I/O or a reinstantiation of the buffer may load data from the
1033  * old media location instead of the new media location.  The holding
1034  * of the locked buffer cache buffer serves to interlock the record
1035  * replacement operation.
1036  */
1037 void
1038 hammer_ip_replace_bulk(hammer_mount_t hmp, hammer_record_t record)
1039 {
1040         hammer_record_t conflict;
1041         int error __debugvar;
1042
1043         while ((conflict = hammer_ip_get_bulk(record)) != NULL) {
1044                 if ((conflict->flags & HAMMER_RECF_INTERLOCK_BE) == 0) {
1045                         conflict->flags |= HAMMER_RECF_DELETED_FE;
1046                         break;
1047                 }
1048                 conflict->flags |= HAMMER_RECF_WANTED;
1049                 tsleep(conflict, 0, "hmrrc3", 0);
1050                 hammer_rel_mem_record(conflict);
1051         }
1052         error = hammer_mem_add(record);
1053         if (conflict)
1054                 hammer_rel_mem_record(conflict);
1055         KKASSERT(error == 0);
1056 }
1057
1058 /*
1059  * Frontend truncation code.  Scan in-memory records only.  On-disk records
1060  * and records in a flushing state are handled by the backend.  The vnops
1061  * setattr code will handle the block containing the truncation point.
1062  *
1063  * Partial blocks are not deleted.
1064  *
1065  * This code is only called on regular files.
1066  */
1067 int
1068 hammer_ip_frontend_trunc(struct hammer_inode *ip, off_t file_size)
1069 {
1070         struct rec_trunc_info info;
1071
1072         switch(ip->ino_data.obj_type) {
1073         case HAMMER_OBJTYPE_REGFILE:
1074                 info.rec_type = HAMMER_RECTYPE_DATA;
1075                 break;
1076         case HAMMER_OBJTYPE_DBFILE:
1077                 info.rec_type = HAMMER_RECTYPE_DB;
1078                 break;
1079         default:
1080                 return(EINVAL);
1081         }
1082         info.trunc_off = file_size;
1083         hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_trunc_cmp,
1084                                    hammer_frontend_trunc_callback, &info);
1085         return(0);
1086 }
1087
1088 /*
1089  * Scan callback for frontend records to destroy during a truncation.
1090  * We must ensure that DELETED_FE is set on the record or the frontend
1091  * will get confused in future read() calls.
1092  *
1093  * NOTE: DELETED_FE cannot be set while the record interlock (BE) is held.
1094  *       In this rare case we must wait for the interlock to be cleared.
1095  *
1096  * NOTE: This function is only called on regular files.  There are further
1097  *       restrictions to the setting of DELETED_FE on directory records
1098  *       undergoing a flush due to sensitive inode link count calculations.
1099  */
1100 static int
1101 hammer_frontend_trunc_callback(hammer_record_t record, void *data __unused)
1102 {
1103         if (record->flags & HAMMER_RECF_DELETED_FE)
1104                 return(0);
1105 #if 0
1106         if (record->flush_state == HAMMER_FST_FLUSH)
1107                 return(0);
1108 #endif
1109         hammer_ref(&record->lock);
1110         while (record->flags & HAMMER_RECF_INTERLOCK_BE)
1111                 hammer_wait_mem_record_ident(record, "hmmtrr");
1112         record->flags |= HAMMER_RECF_DELETED_FE;
1113         hammer_rel_mem_record(record);
1114         return(0);
1115 }
1116
1117 /*
1118  * Return 1 if the caller must check for and delete existing records
1119  * before writing out a new data record.
1120  *
1121  * Return 0 if the caller can just insert the record into the B-Tree without
1122  * checking.
1123  */
1124 static int
1125 hammer_record_needs_overwrite_delete(hammer_record_t record)
1126 {
1127         hammer_inode_t ip = record->ip;
1128         int64_t file_offset;
1129         int r;
1130
1131         if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE)
1132                 file_offset = record->leaf.base.key;
1133         else
1134                 file_offset = record->leaf.base.key - record->leaf.data_len;
1135         r = (file_offset < ip->save_trunc_off);
1136         if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
1137                 if (ip->save_trunc_off <= record->leaf.base.key)
1138                         ip->save_trunc_off = record->leaf.base.key + 1;
1139         } else {
1140                 if (ip->save_trunc_off < record->leaf.base.key)
1141                         ip->save_trunc_off = record->leaf.base.key;
1142         }
1143         return(r);
1144 }
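/*
 * Worked example with illustrative offsets: with ip->save_trunc_off at
 * 0x30000, a DATA record with key 0x28000 and data_len 0x8000 starts at
 * file offset 0x20000 (< 0x30000), so 1 is returned and the caller must
 * check for and delete an overlapping on-disk record first.  A record
 * with key 0x40000 and data_len 0x10000 starts at 0x30000, returns 0,
 * and advances save_trunc_off to 0x40000 so later records in the flush
 * can skip the check.
 */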
1145
1146 /*
1147  * Backend code.  Sync a record to the media.
1148  */
1149 int
1150 hammer_ip_sync_record_cursor(hammer_cursor_t cursor, hammer_record_t record)
1151 {
1152         hammer_transaction_t trans = cursor->trans;
1153         int64_t file_offset;
1154         int bytes;
1155         void *bdata;
1156         int error;
1157         int doprop;
1158
1159         KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
1160         KKASSERT(record->flags & HAMMER_RECF_INTERLOCK_BE);
1161         KKASSERT(record->leaf.base.localization != 0);
1162
1163         /*
1164          * Any direct-write related to the record must complete before we
1165          * can sync the record to the on-disk media.
1166          */
1167         if (record->gflags & (HAMMER_RECG_DIRECT_IO | HAMMER_RECG_DIRECT_INVAL))
1168                 hammer_io_direct_wait(record);
1169
1170         /*
1171          * If this is a bulk-data record placemarker there may be an existing
1172          * record on-disk, indicating a data overwrite.  If there is the
1173          * on-disk record must be deleted before we can insert our new record.
1174          *
1175          * We've synthesized this record and do not know what the create_tid
1176          * on-disk is, nor how much data it represents.
1177          *
1178          * Keep in mind that (key) for data records is (base_offset + len),
1179          * not (base_offset).  Also, we only want to get rid of on-disk
1180  * records since we are trying to sync our in-memory record, so call
1181          * hammer_ip_delete_range() with truncating set to 1 to make sure
1182          * it skips in-memory records.
1183          *
1184          * It is ok for the lookup to return ENOENT.
1185          *
1186          * NOTE OPTIMIZATION: sync_trunc_off is used to determine if we have
1187          * to call hammer_ip_delete_range() or not.  This also means we must
1188          * update sync_trunc_off() as we write.
1189          */
1190         if (record->type == HAMMER_MEM_RECORD_DATA &&
1191             hammer_record_needs_overwrite_delete(record)) {
1192                 file_offset = record->leaf.base.key - record->leaf.data_len;
1193                 bytes = (record->leaf.data_len + HAMMER_BUFMASK) &
1194                         ~HAMMER_BUFMASK;
1195                 KKASSERT((file_offset & HAMMER_BUFMASK) == 0);
1196                 error = hammer_ip_delete_range(
1197                                 cursor, record->ip,
1198                                 file_offset, file_offset + bytes - 1,
1199                                 1);
1200                 if (error && error != ENOENT)
1201                         goto done;
1202         }
1203
1204         /*
1205          * If this is a general record there may be an on-disk version
1206          * that must be deleted before we can insert the new record.
1207          */
1208         if (record->type == HAMMER_MEM_RECORD_GENERAL) {
1209                 error = hammer_delete_general(cursor, record->ip, &record->leaf);
1210                 if (error && error != ENOENT)
1211                         goto done;
1212         }
1213
1214         /*
1215          * Setup the cursor.
1216          */
1217         hammer_normalize_cursor(cursor);
1218         cursor->key_beg = record->leaf.base;
1219         cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1220         cursor->flags |= HAMMER_CURSOR_BACKEND;
1221         cursor->flags &= ~HAMMER_CURSOR_INSERT;
1222
1223         /*
1224          * Records can wind up on-media before the inode itself is on-media.
1225          * Flag the case.
1226          */
1227         record->ip->flags |= HAMMER_INODE_DONDISK;
1228
1229         /*
1230          * If we are deleting a directory entry an exact match must be
1231          * found on-disk.
1232          */
1233         if (record->type == HAMMER_MEM_RECORD_DEL) {
1234                 error = hammer_btree_lookup(cursor);
1235                 if (error == 0) {
1236                         KKASSERT(cursor->iprec == NULL);
1237                         error = hammer_ip_delete_record(cursor, record->ip,
1238                                                         trans->tid);
1239                         if (error == 0) {
1240                                 record->flags |= HAMMER_RECF_DELETED_BE |
1241                                                  HAMMER_RECF_COMMITTED;
1242                                 ++record->ip->rec_generation;
1243                         }
1244                 }
1245                 goto done;
1246         }
1247
1248         /*
1249          * We are inserting.
1250          *
1251          * Issue a lookup to position the cursor and locate the insertion
1252          * point.  The target key should not exist.  If we are creating a
1253          * directory entry we may have to iterate the low 32 bits of the
1254          * key to find an unused key.
1255          */
1256         hammer_sync_lock_sh(trans);
1257         cursor->flags |= HAMMER_CURSOR_INSERT;
1258         error = hammer_btree_lookup(cursor);
1259         if (hammer_debug_inode)
1260                 kprintf("DOINSERT LOOKUP %d\n", error);
1261         if (error == 0) {
1262                 kprintf("hammer_ip_sync_record_cursor: duplicate rec "
1263                         "at (%016llx)\n", (long long)record->leaf.base.key);
1264                 if (hammer_debug_critical)
1265                         Debugger("duplicate record1");
1266                 error = EIO;
1267         }
1268 #if 0
1269         if (record->type == HAMMER_MEM_RECORD_DATA)
1270                 kprintf("sync_record  %016llx ---------------- %016llx %d\n",
1271                         record->leaf.base.key - record->leaf.data_len,
1272                         record->leaf.data_offset, error);
1273 #endif
1274
1275         if (error != ENOENT)
1276                 goto done_unlock;
1277
1278         /*
1279          * Allocate the record and data.  The result buffers will be
1280          * marked as being modified and further calls to
1281          * hammer_modify_buffer() will result in unneeded UNDO records.
1282          *
1283          * Support zero-fill records (data == NULL and data_len != 0)
1284          */
1285         if (record->type == HAMMER_MEM_RECORD_DATA) {
1286                 /*
1287                  * The data portion of a bulk-data record has already been
1288                  * committed to disk, we need only adjust the layer2
1289                  * statistics in the same transaction as our B-Tree insert.
1290                  */
1291                 KKASSERT(record->leaf.data_offset != 0);
1292                 error = hammer_blockmap_finalize(trans,
1293                                                  record->resv,
1294                                                  record->leaf.data_offset,
1295                                                  record->leaf.data_len);
1296
1297                 if (hammer_live_dedup == 2 &&
1298                     (record->flags & HAMMER_RECF_DEDUPED) == 0) {
1299                         hammer_dedup_cache_add(record->ip, &record->leaf);
1300                 }
1301         } else if (record->data && record->leaf.data_len) {
1302                 /*
1303                  * Wholly cached record, with data.  Allocate the data.
1304                  */
1305                 bdata = hammer_alloc_data(trans, record->leaf.data_len,
1306                                           record->leaf.base.rec_type,
1307                                           &record->leaf.data_offset,
1308                                           &cursor->data_buffer,
1309                                           0, &error);
1310                 if (bdata == NULL)
1311                         goto done_unlock;
1312                 hammer_crc_set_leaf(record->data, &record->leaf);
1313                 hammer_modify_buffer_noundo(trans, cursor->data_buffer);
1314                 bcopy(record->data, bdata, record->leaf.data_len);
1315                 hammer_modify_buffer_done(cursor->data_buffer);
1316         } else {
1317                 /*
1318                  * Wholly cached record, without data.
1319                  */
1320                 record->leaf.data_offset = 0;
1321                 record->leaf.data_crc = 0;
1322         }
1323
1324         error = hammer_btree_insert(cursor, &record->leaf, &doprop);
1325         if (hammer_debug_inode && error) {
1326                 kprintf("BTREE INSERT error %d @ %016llx:%d key %016llx\n",
1327                         error,
1328                         (long long)cursor->node->node_offset,
1329                         cursor->index,
1330                         (long long)record->leaf.base.key);
1331         }
1332
1333         /*
1334          * Our record is on-disk and we normally mark the in-memory version
1335          * as having been committed (and not BE-deleted).
1336          *
1337          * If the record represented a directory deletion but we had to
1338          * sync a valid directory entry to disk due to dependencies,
1339          * we must convert the record to a covering delete so the
1340          * frontend does not have visibility on the synced entry.
1341          *
1342          * WARNING: cursor's leaf pointer may have changed after do_propagation
1343          *          returns!
1344          */
1345         if (error == 0) {
1346                 if (doprop) {
1347                         hammer_btree_do_propagation(cursor,
1348                                                     record->ip->pfsm,
1349                                                     &record->leaf);
1350                 }
1351                 if (record->flags & HAMMER_RECF_CONVERT_DELETE) {
1352                         /*
1353                          * Must convert deleted directory entry add
1354                          * to a directory entry delete.
1355                          */
1356                         KKASSERT(record->type == HAMMER_MEM_RECORD_ADD);
1357                         record->flags &= ~HAMMER_RECF_DELETED_FE;
1358                         record->type = HAMMER_MEM_RECORD_DEL;
1359                         KKASSERT(record->ip->obj_id == record->leaf.base.obj_id);
1360                         KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
1361                         record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
1362                         KKASSERT((record->flags & (HAMMER_RECF_COMMITTED |
1363                                                  HAMMER_RECF_DELETED_BE)) == 0);
1364                         /* converted record is not yet committed */
1365                         /* hammer_flush_record_done takes care of the rest */
1366                 } else {
1367                         /*
1368                          * Everything went fine and we are now done with
1369                          * this record.
1370                          */
1371                         record->flags |= HAMMER_RECF_COMMITTED;
1372                         ++record->ip->rec_generation;
1373                 }
1374         } else {
1375                 if (record->leaf.data_offset) {
1376                         hammer_blockmap_free(trans, record->leaf.data_offset,
1377                                              record->leaf.data_len);
1378                 }
1379         }
1380 done_unlock:
1381         hammer_sync_unlock(trans);
1382 done:
1383         return(error);
1384 }
1385
1386 /*
1387  * Add the record to the inode's rec_tree.  The low 32 bits of a directory
1388  * entry's key are used to deal with hash collisions in the upper 32 bits.
1389  * A unique 64 bit key is generated in-memory and may be regenerated a
1390  * second time when the directory record is flushed to the on-disk B-Tree.
1391  *
1392  * A referenced record is passed to this function.  This function
1393  * eats the reference.  If an error occurs the record will be deleted.
1394  *
1395  * A copy of the temporary record->data pointer provided by the caller
1396  * will be made.
1397  */
1398 int
1399 hammer_mem_add(hammer_record_t record)
1400 {
1401         hammer_mount_t hmp = record->ip->hmp;
1402
1403         /*
1404          * Make a private copy of record->data
1405          */
1406         if (record->data)
1407                 KKASSERT(record->flags & HAMMER_RECF_ALLOCDATA);
1408
1409         /*
1410          * Insert into the RB tree.  A unique key should have already
1411          * been selected if this is a directory entry.
1412          */
1413         if (RB_INSERT(hammer_rec_rb_tree, &record->ip->rec_tree, record)) {
1414                 record->flags |= HAMMER_RECF_DELETED_FE;
1415                 hammer_rel_mem_record(record);
1416                 return (EEXIST);
1417         }
1418         ++hmp->count_newrecords;
1419         ++hmp->rsv_recs;
1420         ++record->ip->rsv_recs;
1421         record->ip->hmp->rsv_databytes += record->leaf.data_len;
1422         record->flags |= HAMMER_RECF_ONRBTREE;
1423         hammer_modify_inode(NULL, record->ip, HAMMER_INODE_XDIRTY);
1424         hammer_rel_mem_record(record);
1425         return(0);
1426 }
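
/*
 * Usage note (sketch): hammer_mem_add() always consumes the caller's
 * reference, so the record must not be touched after the call returns,
 * whether it succeeds or fails with EEXIST:
 *
 *      error = hammer_mem_add(record);
 *      if (error)
 *              ... the record has already been released ...
 */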
1427
1428 /************************************************************************
1429  *                   HAMMER INODE MERGED-RECORD FUNCTIONS               *
1430  ************************************************************************
1431  *
1432  * These functions augment the B-Tree scanning functions in hammer_btree.c
1433  * by merging in-memory records with on-disk records.
1434  */
1435
1436 /*
1437  * Locate a particular record either in-memory or on-disk.
1438  *
1439  * NOTE: This is basically a standalone routine; hammer_ip_next() may
1440  * NOT be called to iterate results.
1441  */
1442 int
1443 hammer_ip_lookup(hammer_cursor_t cursor)
1444 {
1445         int error;
1446
1447         /*
1448          * If the element is in-memory, return it without searching the
1449          * on-disk B-Tree.
1450          */
1451         KKASSERT(cursor->ip);
1452         error = hammer_mem_lookup(cursor);
1453         if (error == 0) {
1454                 cursor->leaf = &cursor->iprec->leaf;
1455                 return(error);
1456         }
1457         if (error != ENOENT)
1458                 return(error);
1459
1460         /*
1461          * If the inode has on-disk components search the on-disk B-Tree.
1462          */
1463         if ((cursor->ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) == 0)
1464                 return(error);
1465         error = hammer_btree_lookup(cursor);
1466         if (error == 0)
1467                 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
1468         return(error);
1469 }
1470
1471 /*
1472  * Helper for hammer_ip_first()/hammer_ip_next()
1473  *
1474  * NOTE: Both ATEDISK and DISKEOF will be set the same.  This sets up
1475  * hammer_ip_first() for calling hammer_ip_next(), and sets up the re-seek
1476  * state if hammer_ip_next() needs to re-seek.
1477  */
1478 static __inline
1479 int
1480 _hammer_ip_seek_btree(hammer_cursor_t cursor)
1481 {
1482         hammer_inode_t ip = cursor->ip;
1483         int error;
1484
1485         if (ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) {
1486                 error = hammer_btree_lookup(cursor);
1487                 if (error == ENOENT || error == EDEADLK) {
1488                         if (hammer_debug_general & 0x2000) {
1489                                 kprintf("error %d node %p %016llx index %d\n",
1490                                         error, cursor->node,
1491                                         (long long)cursor->node->node_offset,
1492                                         cursor->index);
1493                         }
1494                         cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
1495                         error = hammer_btree_iterate(cursor);
1496                 }
1497                 if (error == 0) {
1498                         cursor->flags &= ~(HAMMER_CURSOR_DISKEOF |
1499                                            HAMMER_CURSOR_ATEDISK);
1500                 } else {
1501                         cursor->flags |= HAMMER_CURSOR_DISKEOF |
1502                                          HAMMER_CURSOR_ATEDISK;
1503                         if (error == ENOENT)
1504                                 error = 0;
1505                 }
1506         } else {
1507                 cursor->flags |= HAMMER_CURSOR_DISKEOF | HAMMER_CURSOR_ATEDISK;
1508                 error = 0;
1509         }
1510         return(error);
1511 }
1512
1513 /*
1514  * Helper for hammer_ip_next()
1515  *
1516  * The caller has determined that the media cursor is further along than the
1517  * memory cursor and must be reseeked after a generation number change.
1518  */
1519 static
1520 int
1521 _hammer_ip_reseek(hammer_cursor_t cursor)
1522 {
1523         struct hammer_base_elm save;
1524         hammer_btree_elm_t elm;
1525         int error __debugvar;
1526         int r;
1527         int again = 0;
1528
1529         /*
1530          * Do the re-seek.
1531          */
1532         kprintf("HAMMER: Debug: re-seeked during scan @ino=%016llx\n",
1533                 (long long)cursor->ip->obj_id);
1534         save = cursor->key_beg;
1535         cursor->key_beg = cursor->iprec->leaf.base;
1536         error = _hammer_ip_seek_btree(cursor);
1537         KKASSERT(error == 0);
1538         cursor->key_beg = save;
1539
1540         /*
1541          * If the memory record was previously returned to
1542          * the caller and the media record matches
1543          * (-1/+1: only create_tid differs), then iterate
1544          * the media record to avoid a double result.
1545          */
1546         if ((cursor->flags & HAMMER_CURSOR_ATEDISK) == 0 &&
1547             (cursor->flags & HAMMER_CURSOR_LASTWASMEM)) {
1548                 elm = &cursor->node->ondisk->elms[cursor->index];
1549                 r = hammer_btree_cmp(&elm->base, &cursor->iprec->leaf.base);
1550                 if (cursor->flags & HAMMER_CURSOR_ASOF) {
1551                         if (r >= -1 && r <= 1) {
1552                                 kprintf("HAMMER: Debug: iterated after "
1553                                         "re-seek (asof r=%d)\n", r);
1554                                 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1555                                 again = 1;
1556                         }
1557                 } else {
1558                         if (r == 0) {
1559                                 kprintf("HAMMER: Debug: iterated after "
1560                                         "re-seek\n");
1561                                 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1562                                 again = 1;
1563                         }
1564                 }
1565         }
1566         return(again);
1567 }
1568
1569 /*
1570  * Locate the first record within the cursor's key_beg/key_end range,
1571  * restricted to a particular inode.  0 is returned on success, ENOENT
1572  * if no records matched the requested range, or some other error.
1573  *
1574  * When 0 is returned hammer_ip_next() may be used to iterate additional
1575  * records within the requested range.
1576  *
1577  * This function can return EDEADLK, requiring the caller to terminate
1578  * the cursor and try again.
1579  */
1580
1581 int
1582 hammer_ip_first(hammer_cursor_t cursor)
1583 {
1584         hammer_inode_t ip __debugvar = cursor->ip;
1585         int error;
1586
1587         KKASSERT(ip != NULL);
1588
1589         /*
1590          * Clean up fields and setup for merged scan
1591          */
1592         cursor->flags &= ~HAMMER_CURSOR_RETEST;
1593
1594         /*
1595          * Search the in-memory record list (Red-Black tree).  Unlike the
1596          * B-Tree search, mem_first checks for records in the range.
1597          *
1598          * This function will set up both ATEMEM and MEMEOF properly for
1599          * the ip iteration.  ATEMEM will be set if MEMEOF is set.
1600          */
1601         hammer_mem_first(cursor);
1602
1603         /*
1604          * Detect generation changes during blockages, including
1605          * blockages which occur on the initial btree search.
1606          */
1607         cursor->rec_generation = cursor->ip->rec_generation;
1608
1609         /*
1610          * Initial search and result
1611          */
1612         error = _hammer_ip_seek_btree(cursor);
1613         if (error == 0)
1614                 error = hammer_ip_next(cursor);
1615
1616         return (error);
1617 }
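
/*
 * A minimal sketch of the merged-scan pattern used by callers of
 * hammer_ip_first()/hammer_ip_next() (hammer_ip_delete_range() below is
 * a concrete example); EDEADLK handling is omitted here:
 *
 *      error = hammer_ip_first(cursor);
 *      while (error == 0) {
 *              leaf = cursor->leaf;
 *              ... process leaf ...
 *              error = hammer_ip_next(cursor);
 *      }
 *      if (error == ENOENT)
 *              error = 0;
 */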
1618
1619 /*
1620  * Retrieve the next record in a merged iteration within the bounds of the
1621  * cursor.  This call may be made multiple times after the cursor has been
1622  * initially searched with hammer_ip_first().
1623  *
1624  * There are numerous special cases in this code to deal with races between
1625  * in-memory records and on-media records.
1626  *
1627  * 0 is returned on success, ENOENT if no further records match the
1628  * requested range, or some other error code is returned.
1629  */
1630 int
1631 hammer_ip_next(hammer_cursor_t cursor)
1632 {
1633         hammer_btree_elm_t elm;
1634         hammer_record_t rec;
1635         hammer_record_t tmprec;
1636         int error;
1637         int r;
1638
1639 again:
1640         /*
1641          * Get the next on-disk record
1642          *
1643          * NOTE: If we deleted the last on-disk record we had scanned
1644          *       ATEDISK will be clear and RETEST will be set, forcing
1645          *       a call to iterate.  The fact that ATEDISK is clear causes
1646          *       iterate to re-test the 'current' element.  If ATEDISK is
1647          *       set, iterate will skip the 'current' element.
1648          */
1649         error = 0;
1650         if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
1651                 if (cursor->flags & (HAMMER_CURSOR_ATEDISK |
1652                                      HAMMER_CURSOR_RETEST)) {
1653                         error = hammer_btree_iterate(cursor);
1654                         cursor->flags &= ~HAMMER_CURSOR_RETEST;
1655                         if (error == 0) {
1656                                 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
1657                                 hammer_cache_node(&cursor->ip->cache[1],
1658                                                   cursor->node);
1659                         } else if (error == ENOENT) {
1660                                 cursor->flags |= HAMMER_CURSOR_DISKEOF |
1661                                                  HAMMER_CURSOR_ATEDISK;
1662                                 error = 0;
1663                         }
1664                 }
1665         }
1666
1667         /*
1668          * If the generation changed the backend has deleted or committed
1669          * one or more memory records since our last check.
1670          *
1671          * When this case occurs, if the disk cursor is > the current memory
1672          * record or is at EOF, we must re-seek the disk cursor.  Since the
1673          * cursor is ahead it cannot yet have been eaten (if it is not at
1674          * EOF anyway). (XXX data offset case?)
1675          *
1676          * NOTE: we are not doing a full check here.  That will be handled
1677          * later on.
1678          *
1679          * If we have exhausted all memory records we do not have to do any
1680          * further seeks.
1681          */
1682         while (cursor->rec_generation != cursor->ip->rec_generation &&
1683                error == 0) {
1684                 kprintf("HAMMER: Debug: generation changed during scan @ino=%016llx\n", (long long)cursor->ip->obj_id);
1685                 cursor->rec_generation = cursor->ip->rec_generation;
1686                 if (cursor->flags & HAMMER_CURSOR_MEMEOF)
1687                         break;
1688                 if (cursor->flags & HAMMER_CURSOR_DISKEOF) {
1689                         r = 1;
1690                 } else {
1691                         KKASSERT((cursor->flags & HAMMER_CURSOR_ATEDISK) == 0);
1692                         elm = &cursor->node->ondisk->elms[cursor->index];
1693                         r = hammer_btree_cmp(&elm->base,
1694                                              &cursor->iprec->leaf.base);
1695                 }
1696
1697                 /*
1698                  * Do we re-seek the media cursor?
1699                  */
1700                 if (r > 0) {
1701                         if (_hammer_ip_reseek(cursor))
1702                                 goto again;
1703                 }
1704         }
1705
1706         /*
1707          * We can now safely get the next in-memory record.  We cannot
1708          * block here.
1709          *
1710          * hammer_rec_scan_cmp:  Is the record still in our general range
1711          *                       (non-inclusive of snapshot exclusions)?
1712          * hammer_rec_scan_callback: Is the record in our snapshot?
1713          */
1714         tmprec = NULL;
1715         if ((cursor->flags & HAMMER_CURSOR_MEMEOF) == 0) {
1716                 /*
1717                  * If the current memory record was eaten then get the next
1718                  * one.  Stale records are skipped.
1719                  */
1720                 if (cursor->flags & HAMMER_CURSOR_ATEMEM) {
1721                         tmprec = cursor->iprec;
1722                         cursor->iprec = NULL;
1723                         rec = hammer_rec_rb_tree_RB_NEXT(tmprec);
1724                         while (rec) {
1725                                 if (hammer_rec_scan_cmp(rec, cursor) != 0)
1726                                         break;
1727                                 if (hammer_rec_scan_callback(rec, cursor) != 0)
1728                                         break;
1729                                 rec = hammer_rec_rb_tree_RB_NEXT(rec);
1730                         }
1731                         if (cursor->iprec) {
1732                                 KKASSERT(cursor->iprec == rec);
1733                                 cursor->flags &= ~HAMMER_CURSOR_ATEMEM;
1734                         } else {
1735                                 cursor->flags |= HAMMER_CURSOR_MEMEOF;
1736                         }
1737                         cursor->flags &= ~HAMMER_CURSOR_LASTWASMEM;
1738                 }
1739         }
1740
1741         /*
1742          * MEMORY RECORD VALIDITY TEST
1743          *
1744          * (We still can't block, which is why tmprec is being held so
1745          * long).
1746          *
1747          * If the memory record is no longer valid we skip it.  It may
1748          * have been deleted by the frontend.  If it was deleted or
1749          * committed by the backend the generation change re-seeked the
1750          * disk cursor and the record will be present there.
1751          */
1752         if (error == 0 && (cursor->flags & HAMMER_CURSOR_MEMEOF) == 0) {
1753                 KKASSERT(cursor->iprec);
1754                 KKASSERT((cursor->flags & HAMMER_CURSOR_ATEMEM) == 0);
1755                 if (!hammer_ip_iterate_mem_good(cursor, cursor->iprec)) {
1756                         cursor->flags |= HAMMER_CURSOR_ATEMEM;
1757                         if (tmprec)
1758                                 hammer_rel_mem_record(tmprec);
1759                         goto again;
1760                 }
1761         }
1762         if (tmprec)
1763                 hammer_rel_mem_record(tmprec);
1764
1765         /*
1766          * Extract either the disk or memory record depending on their
1767          * relative position.
1768          */
1769         error = 0;
1770         switch(cursor->flags & (HAMMER_CURSOR_ATEDISK | HAMMER_CURSOR_ATEMEM)) {
1771         case 0:
1772                 /*
1773                  * Both entries valid.   Compare the entries and nominally
1774                  * return the first one in the sort order.  Numerous cases
1775                  * require special attention, however.
1776                  */
1777                 elm = &cursor->node->ondisk->elms[cursor->index];
1778                 r = hammer_btree_cmp(&elm->base, &cursor->iprec->leaf.base);
1779
1780                 /*
1781                  * If the two entries differ only by their key (-2/2) or
1782                  * create_tid (-1/1), and are DATA records, we may have a
1783                  * nominal match.  We have to calculate the base file
1784                  * offset of the data.
1785                  */
1786                 if (r <= 2 && r >= -2 && r != 0 &&
1787                     cursor->ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE &&
1788                     cursor->iprec->type == HAMMER_MEM_RECORD_DATA) {
1789                         int64_t base1 = elm->leaf.base.key - elm->leaf.data_len;
1790                         int64_t base2 = cursor->iprec->leaf.base.key -
1791                                         cursor->iprec->leaf.data_len;
1792                         if (base1 == base2)
1793                                 r = 0;
1794                 }
1795
1796                 if (r < 0) {
1797                         error = hammer_btree_extract(cursor,
1798                                                      HAMMER_CURSOR_GET_LEAF);
1799                         cursor->flags |= HAMMER_CURSOR_ATEDISK;
1800                         cursor->flags &= ~HAMMER_CURSOR_LASTWASMEM;
1801                         break;
1802                 }
1803
1804                 /*
1805                  * If the entries match exactly the memory entry is either
1806                  * an on-disk directory entry deletion or a bulk data
1807                  * overwrite.  If it is a directory entry deletion we eat
1808                  * both entries.
1809                  *
1810                  * For the bulk-data overwrite case it is possible to have
1811                  * visibility into both, which simply means the syncer
1812                  * hasn't gotten around to doing the delete+insert sequence
1813                  * on the B-Tree.  Use the memory entry and throw away the
1814                  * on-disk entry.
1815                  *
1816                  * If the in-memory record is not either of these we
1817                  * probably caught the syncer while it was syncing it to
1818                  * the media.  Since we hold a shared lock on the cursor,
1819                  * the in-memory record had better be marked deleted at
1820                  * this point.
1821                  */
1822                 if (r == 0) {
1823                         if (cursor->iprec->type == HAMMER_MEM_RECORD_DEL) {
1824                                 if ((cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
1825                                         cursor->flags |= HAMMER_CURSOR_ATEDISK;
1826                                         cursor->flags |= HAMMER_CURSOR_ATEMEM;
1827                                         goto again;
1828                                 }
1829                         } else if (cursor->iprec->type == HAMMER_MEM_RECORD_DATA) {
1830                                 if ((cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
1831                                         cursor->flags |= HAMMER_CURSOR_ATEDISK;
1832                                 }
1833                                 /* fall through to memory entry */
1834                         } else {
1835                                 panic("hammer_ip_next: duplicate mem/b-tree entry %p %d %08x", cursor->iprec, cursor->iprec->type, cursor->iprec->flags);
1836                                 cursor->flags |= HAMMER_CURSOR_ATEMEM;
1837                                 goto again;
1838                         }
1839                 }
1840                 /* fall through to the memory entry */
1841         case HAMMER_CURSOR_ATEDISK:
1842                 /*
1843                  * Only the memory entry is valid.
1844                  */
1845                 cursor->leaf = &cursor->iprec->leaf;
1846                 cursor->flags |= HAMMER_CURSOR_ATEMEM;
1847                 cursor->flags |= HAMMER_CURSOR_LASTWASMEM;
1848
1849                 /*
1850                  * If the memory entry is an on-disk deletion we should also
1851                  * have found a B-Tree record.  If the backend beat us
1852                  * to it, it would have interlocked the cursor and we should
1853                  * have seen the in-memory record marked DELETED_FE.
1854                  */
1855                 if (cursor->iprec->type == HAMMER_MEM_RECORD_DEL &&
1856                     (cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
1857                         panic("hammer_ip_next: del-on-disk with no b-tree entry iprec %p flags %08x", cursor->iprec, cursor->iprec->flags);
1858                 }
1859                 break;
1860         case HAMMER_CURSOR_ATEMEM:
1861                 /*
1862                  * Only the disk entry is valid
1863                  */
1864                 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
1865                 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1866                 cursor->flags &= ~HAMMER_CURSOR_LASTWASMEM;
1867                 break;
1868         default:
1869                 /*
1870                  * Neither entry is valid
1871                  *
1872                  * XXX error not set properly
1873                  */
1874                 cursor->flags &= ~HAMMER_CURSOR_LASTWASMEM;
1875                 cursor->leaf = NULL;
1876                 error = ENOENT;
1877                 break;
1878         }
1879         return(error);
1880 }
1881
1882 /*
1883  * Resolve the cursor->data pointer for the current cursor position in
1884  * a merged iteration.
1885  */
1886 int
1887 hammer_ip_resolve_data(hammer_cursor_t cursor)
1888 {
1889         hammer_record_t record;
1890         int error;
1891
1892         if (hammer_cursor_inmem(cursor)) {
1893                 /*
1894                  * The data associated with an in-memory record is usually
1895                  * kmalloced, but reserve-ahead data records will have an
1896                  * on-disk reference.
1897                  *
1898                  * NOTE: Reserve-ahead data records must be handled in the
1899                  * context of the related high level buffer cache buffer
1900                  * to interlock against async writes.
1901                  */
1902                 record = cursor->iprec;
1903                 cursor->data = record->data;
1904                 error = 0;
1905                 if (cursor->data == NULL) {
1906                         KKASSERT(record->leaf.base.rec_type ==
1907                                  HAMMER_RECTYPE_DATA);
1908                         cursor->data = hammer_bread_ext(cursor->trans->hmp,
1909                                                     record->leaf.data_offset,
1910                                                     record->leaf.data_len,
1911                                                     &error,
1912                                                     &cursor->data_buffer);
1913                 }
1914         } else {
1915                 cursor->leaf = &cursor->node->ondisk->elms[cursor->index].leaf;
1916                 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA);
1917         }
1918         return(error);
1919 }
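
/*
 * Sketch of a standalone lookup (no iteration), assuming a cursor that
 * has been initialized against the inode (cursor->ip) and has had
 * cursor->key_beg filled in:
 *
 *      error = hammer_ip_lookup(cursor);
 *      if (error == 0)
 *              error = hammer_ip_resolve_data(cursor);
 *      if (error == 0)
 *              ... use cursor->leaf and cursor->data ...
 */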
1920
1921 /*
1922  * Backend truncation / record replacement - delete records in range.
1923  *
1924  * Delete all records within the specified range for inode ip.  In-memory
1925  * records still associated with the frontend are ignored.
1926  *
1927  * If truncating is non-zero, in-memory records associated with the back-end
1928  * are ignored.  If truncating is > 1 we can return EWOULDBLOCK.
1929  *
1930  * NOTES:
1931  *
1932  *      * An unaligned range will cause new records to be added to cover
1933  *        the edge cases. (XXX not implemented yet).
1934  *
1935  *      * Replacement via reservations (see hammer_ip_sync_record_cursor())
1936  *        also does not deal with unaligned ranges.
1937  *
1938  *      * ran_end is inclusive (e.g. 0,1023 instead of 0,1024).
1939  *
1940  *      * Record keys for regular file data have to be special-cased since
1941  *        they indicate the end of the range (key = base + bytes).
1942  *
1943  *      * This function may be asked to delete ridiculously huge ranges, for
1944  *        example if someone truncates or removes a 1TB regular file.  We
1945  *        must be very careful on restarts and we may have to stop w/
1946  *        EWOULDBLOCK to avoid blowing out the buffer cache.
1947  */
1948 int
1949 hammer_ip_delete_range(hammer_cursor_t cursor, hammer_inode_t ip,
1950                        int64_t ran_beg, int64_t ran_end, int truncating)
1951 {
1952         hammer_transaction_t trans = cursor->trans;
1953         hammer_btree_leaf_elm_t leaf;
1954         int error;
1955         int64_t off;
1956         int64_t tmp64;
1957
1958 #if 0
1959         kprintf("delete_range %p %016llx-%016llx\n", ip, ran_beg, ran_end);
1960 #endif
1961
1962         KKASSERT(trans->type == HAMMER_TRANS_FLS);
1963 retry:
1964         hammer_normalize_cursor(cursor);
1965         cursor->key_beg.localization = ip->obj_localization +
1966                                        HAMMER_LOCALIZE_MISC;
1967         cursor->key_beg.obj_id = ip->obj_id;
1968         cursor->key_beg.create_tid = 0;
1969         cursor->key_beg.delete_tid = 0;
1970         cursor->key_beg.obj_type = 0;
1971
1972         if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
1973                 cursor->key_beg.key = ran_beg;
1974                 cursor->key_beg.rec_type = HAMMER_RECTYPE_DB;
1975         } else {
1976                 /*
1977                  * The key in the B-Tree is (base+bytes), so the first possible
1978                  * matching key is ran_beg + 1.
1979                  */
1980                 cursor->key_beg.key = ran_beg + 1;
1981                 cursor->key_beg.rec_type = HAMMER_RECTYPE_DATA;
1982         }
1983
1984         cursor->key_end = cursor->key_beg;
1985         if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
1986                 cursor->key_end.key = ran_end;
1987         } else {
1988                 tmp64 = ran_end + MAXPHYS + 1;  /* work around GCC-4 bug */
1989                 if (tmp64 < ran_end)
1990                         cursor->key_end.key = 0x7FFFFFFFFFFFFFFFLL;
1991                 else
1992                         cursor->key_end.key = ran_end + MAXPHYS + 1;
1993         }
1994
1995         cursor->asof = ip->obj_asof;
1996         cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1997         cursor->flags |= HAMMER_CURSOR_ASOF;
1998         cursor->flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
1999         cursor->flags |= HAMMER_CURSOR_BACKEND;
2000         cursor->flags |= HAMMER_CURSOR_END_INCLUSIVE;
2001
2002         error = hammer_ip_first(cursor);
2003
2004         /*
2005          * Iterate through matching records and mark them as deleted.
2006          */
2007         while (error == 0) {
2008                 leaf = cursor->leaf;
2009
2010                 KKASSERT(leaf->base.delete_tid == 0);
2011                 KKASSERT(leaf->base.obj_id == ip->obj_id);
2012
2013                 /*
2014                  * There may be overlap cases for regular file data.  Also
2015                  * remember the key for a regular file record is (base + len),
2016                  * NOT (base).
2017                  *
2018                  * Note that due to duplicates (mem & media) allowed by
2019                  * DELETE_VISIBILITY, off can wind up less than ran_beg.
2020                  */
2021                 if (leaf->base.rec_type == HAMMER_RECTYPE_DATA) {
2022                         off = leaf->base.key - leaf->data_len;
2023                         /*
2024                          * Check the left edge case.  We currently do not
2025                          * split existing records.
2026                          */
2027                         if (off < ran_beg && leaf->base.key > ran_beg) {
2028                                 panic("hammer left edge case %016llx %d",
2029                                         (long long)leaf->base.key,
2030                                         leaf->data_len);
2031                         }
2032
2033                         /*
2034                          * Check the right edge case.  Note that the
2035                          * record can be completely out of bounds, which
2036                          * terminates the search.
2037                          *
2038                          * base->key is exclusive of the right edge while
2039                          * ran_end is inclusive of the right edge.  The
2040                          * (key - data_len) left boundary is inclusive.
2041                          *
2042                          * XXX theory-check this test at some point, are
2043                          * we missing a + 1 somewhere?  Note that ran_end
2044                          * could overflow.
2045                          */
2046                         if (leaf->base.key - 1 > ran_end) {
2047                                 if (leaf->base.key - leaf->data_len > ran_end)
2048                                         break;
2049                                 panic("hammer right edge case");
2050                         }
2051                 } else {
2052                         off = leaf->base.key;
2053                 }
2054
2055                 /*
2056                  * Delete the record.  When truncating we do not delete
2057                  * in-memory (data) records because they represent data
2058                  * written after the truncation.
2059                  *
2060                  * This will also physically destroy the B-Tree entry and
2061                  * data if the retention policy dictates.  The function
2062                  * will set HAMMER_CURSOR_RETEST to cause hammer_ip_next()
2063                  * to retest the new 'current' element.
2064                  */
2065                 if (truncating == 0 || hammer_cursor_ondisk(cursor)) {
2066                         error = hammer_ip_delete_record(cursor, ip, trans->tid);
2067                         /*
2068                          * If we have built up too many meta-buffers we risk
2069                          * deadlocking the kernel and must stop.  This can
2070                          * occur when deleting ridiculously huge files.
2071                          * sync_trunc_off is updated so the next cycle does
2072                          * not re-iterate records we have already deleted.
2073                          *
2074                          * This is only done with formal truncations.
2075                          */
2076                         if (truncating > 1 && error == 0 &&
2077                             hammer_flusher_meta_limit(ip->hmp)) {
2078                                 ip->sync_trunc_off = off;
2079                                 error = EWOULDBLOCK;
2080                         }
2081                 }
2082                 if (error)
2083                         break;
2084                 ran_beg = off;  /* for restart */
2085                 error = hammer_ip_next(cursor);
2086         }
2087         if (cursor->node)
2088                 hammer_cache_node(&ip->cache[1], cursor->node);
2089
2090         if (error == EDEADLK) {
2091                 hammer_done_cursor(cursor);
2092                 error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
2093                 if (error == 0)
2094                         goto retry;
2095         }
2096         if (error == ENOENT)
2097                 error = 0;
2098         return(error);
2099 }
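
/*
 * Sketch of how a hypothetical caller handles the EWOULDBLOCK case for a
 * formal truncation (truncating > 1).  ip->sync_trunc_off was updated
 * above, so a later pass can resume without re-scanning deleted records:
 *
 *      error = hammer_ip_delete_range(cursor, ip, ran_beg, ran_end, 2);
 *      if (error == EWOULDBLOCK) {
 *              ... let the flusher retire dirty meta-data, then restart
 *                  the truncation from ip->sync_trunc_off ...
 *      }
 */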
2100
2101 /*
2102  * This backend function deletes the specified record on-disk, similar to
2103  * delete_range but for a specific record.  Unlike the exact deletions
2104  * used when deleting a directory entry this function uses an ASOF search
2105  * like delete_range.
2106  *
2107  * This function may be called with ip->obj_asof set for a slave snapshot,
2108  * so don't use it.  We always delete non-historical records only.
2109  * so do not use obj_asof here.  We always delete non-historical records only.
2110 static int
2111 hammer_delete_general(hammer_cursor_t cursor, hammer_inode_t ip,
2112                       hammer_btree_leaf_elm_t leaf)
2113 {
2114         hammer_transaction_t trans = cursor->trans;
2115         int error;
2116
2117         KKASSERT(trans->type == HAMMER_TRANS_FLS);
2118 retry:
2119         hammer_normalize_cursor(cursor);
2120         cursor->key_beg = leaf->base;
2121         cursor->asof = HAMMER_MAX_TID;
2122         cursor->flags &= ~HAMMER_CURSOR_INITMASK;
2123         cursor->flags |= HAMMER_CURSOR_ASOF;
2124         cursor->flags |= HAMMER_CURSOR_BACKEND;
2125         cursor->flags &= ~HAMMER_CURSOR_INSERT;
2126
2127         error = hammer_btree_lookup(cursor);
2128         if (error == 0) {
2129                 error = hammer_ip_delete_record(cursor, ip, trans->tid);
2130         }
2131         if (error == EDEADLK) {
2132                 hammer_done_cursor(cursor);
2133                 error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
2134                 if (error == 0)
2135                         goto retry;
2136         }
2137         return(error);
2138 }
2139
2140 /*
2141  * This function deletes remaining auxiliary records when an inode is
2142  * being deleted.  This function explicitly does not delete the
2143  * inode record, directory entry, data, or db records.  Those must be
2144  * properly disposed of prior to this call.
2145  */
2146 int
2147 hammer_ip_delete_clean(hammer_cursor_t cursor, hammer_inode_t ip, int *countp)
2148 {
2149         hammer_transaction_t trans = cursor->trans;
2150         hammer_btree_leaf_elm_t leaf __debugvar;
2151         int error;
2152
2153         KKASSERT(trans->type == HAMMER_TRANS_FLS);
2154 retry:
2155         hammer_normalize_cursor(cursor);
2156         cursor->key_beg.localization = ip->obj_localization +
2157                                        HAMMER_LOCALIZE_MISC;
2158         cursor->key_beg.obj_id = ip->obj_id;
2159         cursor->key_beg.create_tid = 0;
2160         cursor->key_beg.delete_tid = 0;
2161         cursor->key_beg.obj_type = 0;
2162         cursor->key_beg.rec_type = HAMMER_RECTYPE_CLEAN_START;
2163         cursor->key_beg.key = HAMMER_MIN_KEY;
2164
2165         cursor->key_end = cursor->key_beg;
2166         cursor->key_end.rec_type = HAMMER_RECTYPE_MAX;
2167         cursor->key_end.key = HAMMER_MAX_KEY;
2168
2169         cursor->asof = ip->obj_asof;
2170         cursor->flags &= ~HAMMER_CURSOR_INITMASK;
2171         cursor->flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
2172         cursor->flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
2173         cursor->flags |= HAMMER_CURSOR_BACKEND;
2174
2175         error = hammer_ip_first(cursor);
2176
2177         /*
2178          * Iterate through matching records and mark them as deleted.
2179          */
2180         while (error == 0) {
2181                 leaf = cursor->leaf;
2182
2183                 KKASSERT(leaf->base.delete_tid == 0);
2184
2185                 /*
2186                  * Mark the record and B-Tree entry as deleted.  This will
2187                  * also physically delete the B-Tree entry, record, and
2188                  * data if the retention policy dictates.  The function
2189                  * will set HAMMER_CURSOR_RETEST to cause hammer_ip_next()
2190                  * to retest the new 'current' element.
2191                  *
2192                  * Directory entries (and delete-on-disk directory entries)
2193                  * must be synced and cannot be deleted.
2194                  */
2195                 error = hammer_ip_delete_record(cursor, ip, trans->tid);
2196                 ++*countp;
2197                 if (error)
2198                         break;
2199                 error = hammer_ip_next(cursor);
2200         }
2201         if (cursor->node)
2202                 hammer_cache_node(&ip->cache[1], cursor->node);
2203         if (error == EDEADLK) {
2204                 hammer_done_cursor(cursor);
2205                 error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
2206                 if (error == 0)
2207                         goto retry;
2208         }
2209         if (error == ENOENT)
2210                 error = 0;
2211         return(error);
2212 }
2213
2214 /*
2215  * Delete the record at the current cursor.  On success the cursor will
2216  * be positioned appropriately for an iteration but may no longer be at
2217  * a leaf node.
2218  *
2219  * This routine is only called from the backend.
2220  *
2221  * NOTE: This can return EDEADLK, requiring the caller to terminate the
2222  * cursor and retry.
2223  */
2224 int
2225 hammer_ip_delete_record(hammer_cursor_t cursor, hammer_inode_t ip,
2226                         hammer_tid_t tid)
2227 {
2228         hammer_record_t iprec;
2229         int error;
2230
2231         KKASSERT(cursor->flags & HAMMER_CURSOR_BACKEND);
2232         KKASSERT(tid != 0);
2233
2234         /*
2235          * In-memory (unsynchronized) records can simply be freed.  This
2236          * only occurs in range iterations since all other records are
2237          * individually synchronized.  Thus there should be no confusion with
2238          * the interlock.
2239          *
2240          * An in-memory record may be deleted before being committed to disk,
2241          * but could have been accessed in the meantime.  The reservation
2242          * code will deal with the case.
2243          */
2244         if (hammer_cursor_inmem(cursor)) {
2245                 iprec = cursor->iprec;
2246                 KKASSERT((iprec->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
2247                 iprec->flags |= HAMMER_RECF_DELETED_FE;
2248                 iprec->flags |= HAMMER_RECF_DELETED_BE;
2249                 KKASSERT(iprec->ip == ip);
2250                 ++ip->rec_generation;
2251                 return(0);
2252         }
2253
2254         /*
2255          * On-disk records are marked as deleted by updating their delete_tid.
2256          * This does not affect their position in the B-Tree (which is based
2257          * on their create_tid).
2258          *
2259          * Frontend B-Tree operations track inodes so we tell
2260          * hammer_delete_at_cursor() not to.
2261          */
2262         error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
2263
2264         if (error == 0) {
2265                 error = hammer_delete_at_cursor(
2266                                 cursor,
2267                                 HAMMER_DELETE_ADJUST | hammer_nohistory(ip),
2268                                 cursor->trans->tid,
2269                                 cursor->trans->time32,
2270                                 0, NULL);
2271         }
2272         return(error);
2273 }
2274
2275 /*
2276  * Used to write a generic record w/optional data to the media b-tree
2277  * when no inode context is available.  Used by the mirroring and
2278  * snapshot code.
2279  *
2280  * Caller must set cursor->key_beg to leaf->base.  The cursor must be
2281  * flagged for backend operation and not flagged ASOF (since we are
2282  * doing an insertion).
2283  *
2284  * This function will acquire the appropriate sync lock and will set
2285  * the cursor insertion flag for the operation, do the btree lookup,
2286  * and the insertion, and clear the insertion flag and sync lock before
2287  * returning.  The cursor state will be such that the caller can continue
2288  * scanning (used by the mirroring code).
2289  *
2290  * mode: HAMMER_CREATE_MODE_UMIRROR     copyin data, check crc
2291  *       HAMMER_CREATE_MODE_SYS         bcopy data, generate crc
2292  *
2293  * NOTE: EDEADLK can be returned.  The caller must do deadlock handling and
2294  *                retry.
2295  *
2296  *       EALREADY can be returned if the record already exists (WARNING:
2297  *                because ASOF cannot be used, no check is made for illegal
2298  *                duplicates).
2299  *
2300  * NOTE: Do not use the function for normal inode-related records as this
2301  *       function goes directly to the media and is not integrated with
2302  *       in-memory records.
2303  */
2304 int
2305 hammer_create_at_cursor(hammer_cursor_t cursor, hammer_btree_leaf_elm_t leaf,
2306                         void *udata, int mode)
2307 {
2308         hammer_transaction_t trans;
2309         hammer_buffer_t data_buffer;
2310         hammer_off_t ndata_offset;
2311         hammer_tid_t high_tid;
2312         void *ndata;
2313         int error;
2314         int doprop;
2315
2316         trans = cursor->trans;
2317         data_buffer = NULL;
2318         ndata_offset = 0;
2319         doprop = 0;
2320
2321         KKASSERT((cursor->flags &
2322                   (HAMMER_CURSOR_BACKEND | HAMMER_CURSOR_ASOF)) ==
2323                   (HAMMER_CURSOR_BACKEND));
2324
2325         hammer_sync_lock_sh(trans);
2326
2327         if (leaf->data_len) {
2328                 ndata = hammer_alloc_data(trans, leaf->data_len,
2329                                           leaf->base.rec_type,
2330                                           &ndata_offset, &data_buffer,
2331                                           0, &error);
2332                 if (ndata == NULL) {
2333                         hammer_sync_unlock(trans);
2334                         return (error);
2335                 }
2336                 leaf->data_offset = ndata_offset;
2337                 hammer_modify_buffer_noundo(trans, data_buffer);
2338
2339                 switch(mode) {
2340                 case HAMMER_CREATE_MODE_UMIRROR:
2341                         error = copyin(udata, ndata, leaf->data_len);
2342                         if (error == 0) {
2343                                 if (hammer_crc_test_leaf(ndata, leaf) == 0) {
2344                                         kprintf("data crc mismatch on pipe\n");
2345                                         error = EINVAL;
2346                                 } else {
2347                                         error = hammer_cursor_localize_data(
2348                                                         ndata, leaf);
2349                                 }
2350                         }
2351                         break;
2352                 case HAMMER_CREATE_MODE_SYS:
2353                         bcopy(udata, ndata, leaf->data_len);
2354                         error = 0;
2355                         hammer_crc_set_leaf(ndata, leaf);
2356                         break;
2357                 default:
2358                         panic("HAMMER: hammer_create_at_cursor: bad mode %d",
2359                                 mode);
2360                         break; /* NOT REACHED */
2361                 }
2362                 hammer_modify_buffer_done(data_buffer);
2363         } else {
2364                 leaf->data_offset = 0;
2365                 error = 0;
2366                 ndata = NULL;
2367         }
2368         if (error)
2369                 goto failed;
2370
2371         /*
2372          * Do the insertion.  This can fail with EDEADLK or EALREADY.
2373          */
2374         cursor->flags |= HAMMER_CURSOR_INSERT;
2375         error = hammer_btree_lookup(cursor);
2376         if (error != ENOENT) {
2377                 if (error == 0)
2378                         error = EALREADY;
2379                 goto failed;
2380         }
2381         error = hammer_btree_insert(cursor, leaf, &doprop);
2382
2383         /*
2384          * The cursor is left on the current element; we want to skip it
2385          * now (in case the caller is scanning).
2386          */
2387         cursor->flags |= HAMMER_CURSOR_ATEDISK;
2388         cursor->flags &= ~HAMMER_CURSOR_INSERT;
2389
2390         /*
2391          * If the insertion happens to be creating (and not just replacing)
2392          * an inode we have to track it.
2393          */
2394         if (error == 0 &&
2395             leaf->base.rec_type == HAMMER_RECTYPE_INODE &&
2396             leaf->base.delete_tid == 0) {
2397                 hammer_modify_volume_field(trans, trans->rootvol,
2398                                            vol0_stat_inodes);
2399                 ++trans->hmp->rootvol->ondisk->vol0_stat_inodes;
2400                 hammer_modify_volume_done(trans->rootvol);
2401         }
2402
2403         /*
2404          * vol0_next_tid must track the highest TID stored in the filesystem.
2405          * We do not need to generate undo for this update.
2406          */
2407         high_tid = leaf->base.create_tid;
2408         if (high_tid < leaf->base.delete_tid)
2409                 high_tid = leaf->base.delete_tid;
2410         if (trans->rootvol->ondisk->vol0_next_tid < high_tid) {
2411                 hammer_modify_volume_noundo(trans, trans->rootvol);
2412                 trans->rootvol->ondisk->vol0_next_tid = high_tid;
2413                 hammer_modify_volume_done(trans->rootvol);
2414         }
2415
2416         /*
2417          * WARNING!  cursor's leaf pointer may have changed after
2418          *           do_propagation returns.
2419          */
2420         if (error == 0 && doprop)
2421                 hammer_btree_do_propagation(cursor, NULL, leaf);
2422
2423 failed:
2424         /*
2425          * Cleanup
2426          */
2427         if (error && leaf->data_offset) {
2428                 hammer_blockmap_free(trans, leaf->data_offset, leaf->data_len);
2429
2430         }
2431         hammer_sync_unlock(trans);
2432         if (data_buffer)
2433                 hammer_rel_buffer(data_buffer, 0);
2434         return (error);
2435 }
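
/*
 * Minimal caller sketch based on the requirements documented above:
 * key_beg must mirror leaf->base and the cursor must be flagged for
 * backend operation without ASOF (variable names are illustrative):
 *
 *      cursor->key_beg = leaf->base;
 *      cursor->flags |= HAMMER_CURSOR_BACKEND;
 *      cursor->flags &= ~HAMMER_CURSOR_ASOF;
 *      error = hammer_create_at_cursor(cursor, leaf, data,
 *                                      HAMMER_CREATE_MODE_SYS);
 *      if (error == EDEADLK)
 *              ... terminate the cursor and retry ...
 *      else if (error == EALREADY)
 *              ... record already present ...
 */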
2436
2437 /*
2438  * Delete the B-Tree element at the current cursor and do any necessary
2439  * mirror propagation.
2440  *
2441  * The cursor must be properly positioned for an iteration on return but
2442  * may be pointing at an internal element.
2443  *
2444  * An element can be un-deleted by passing a delete_tid of 0 with
2445  * HAMMER_DELETE_ADJUST.
2446  *
2447  * This function will store the number of bytes deleted in *stat_bytes
2448  * if stat_bytes is not NULL.
2449  */
2450 int
2451 hammer_delete_at_cursor(hammer_cursor_t cursor, int delete_flags,
2452                         hammer_tid_t delete_tid, u_int32_t delete_ts,
2453                         int track, int64_t *stat_bytes)
2454 {
2455         struct hammer_btree_leaf_elm save_leaf;
2456         hammer_transaction_t trans;
2457         hammer_btree_leaf_elm_t leaf;
2458         hammer_node_t node;
2459         hammer_btree_elm_t elm;
2460         hammer_off_t data_offset;
2461         int32_t data_len;
2462         int64_t bytes;
2463         int ndelete;
2464         int error;
2465         int icount;
2466         int doprop;
2467
2468         error = hammer_cursor_upgrade(cursor);
2469         if (error)
2470                 return(error);
2471
2472         trans = cursor->trans;
2473         node = cursor->node;
2474         elm = &node->ondisk->elms[cursor->index];
2475         leaf = &elm->leaf;
2476         KKASSERT(elm->base.btype == HAMMER_BTREE_TYPE_RECORD);
2477
2478         hammer_sync_lock_sh(trans);
2479         bytes = 0;
2480         doprop = 0;
2481         icount = 0;
2482
2483         /*
2484          * Adjust the delete_tid.  Update the mirror_tid propagation field
2485          * as well.  delete_tid can be 0 (undelete -- used by mirroring).
2486          */
2487         if (delete_flags & HAMMER_DELETE_ADJUST) {
2488                 if (elm->base.rec_type == HAMMER_RECTYPE_INODE) {
2489                         if (elm->leaf.base.delete_tid == 0 && delete_tid)
2490                                 icount = -1;
2491                         if (elm->leaf.base.delete_tid && delete_tid == 0)
2492                                 icount = 1;
2493                 }
2494
2495                 hammer_modify_node(trans, node, elm, sizeof(*elm));
2496                 elm->leaf.base.delete_tid = delete_tid;
2497                 elm->leaf.delete_ts = delete_ts;
2498                 hammer_modify_node_done(node);
2499
2500                 if (elm->leaf.base.delete_tid > node->ondisk->mirror_tid) {
2501                         hammer_modify_node_field(trans, node, mirror_tid);
2502                         node->ondisk->mirror_tid = elm->leaf.base.delete_tid;
2503                         hammer_modify_node_done(node);
2504                         doprop = 1;
2505                         if (hammer_debug_general & 0x0002) {
2506                                 kprintf("delete_at_cursor: propagate %016llx"
2507                                         " @%016llx\n",
2508                                         (long long)elm->leaf.base.delete_tid,
2509                                         (long long)node->node_offset);
2510                         }
2511                 }
2512
2513                 /*
2514                  * Adjust for the iteration.  We have deleted the current
2515                  * element and want to clear ATEDISK so the iteration does
2516                  * not skip the element after, which now becomes the current
2517                  * element.  This element must be re-tested if doing an
2518                  * iteration, which is handled by the RETEST flag.
2519                  */
2520                 if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
2521                         cursor->flags |= HAMMER_CURSOR_RETEST;
2522                         cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
2523                 }
2524
2525                 /*
2526                  * An on-disk record cannot have the same delete_tid
2527                  * as its create_tid.  In a chain of record updates
2528                  * this could result in a duplicate record.
2529                  */
2530                 KKASSERT(elm->leaf.base.delete_tid !=
2531                          elm->leaf.base.create_tid);
2532         }
2533
2534         /*
2535          * Destroy the B-Tree element if asked (typically if a nohistory
2536          * file or mount, or when called by the pruning code).
2537          *
2538          * Adjust the ATEDISK flag to properly support iterations.
2539          */
2540         if (delete_flags & HAMMER_DELETE_DESTROY) {
2541                 data_offset = elm->leaf.data_offset;
2542                 data_len = elm->leaf.data_len;
2543                 if (doprop) {
2544                         save_leaf = elm->leaf;
2545                         leaf = &save_leaf;
2546                 }
2547                 if (elm->base.rec_type == HAMMER_RECTYPE_INODE &&
2548                     elm->leaf.base.delete_tid == 0) {
2549                         icount = -1;
2550                 }
2551
2552                 error = hammer_btree_delete(cursor, &ndelete);
2553                 if (error == 0) {
2554                         /*
2555                          * The deletion moves the next element (if any) to
2556                          * the current element position.  We must clear
2557                          * ATEDISK so this element is not skipped and we
2558                          * must set RETEST to force any iteration to re-test
2559                          * the element.
2560                          */
2561                         if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
2562                                 cursor->flags |= HAMMER_CURSOR_RETEST;
2563                                 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
2564                         }
2565                         bytes += (ndelete * sizeof(struct hammer_node_ondisk));
2566
2567                         switch(data_offset & HAMMER_OFF_ZONE_MASK) {
2568                         case HAMMER_ZONE_LARGE_DATA:
2569                         case HAMMER_ZONE_SMALL_DATA:
2570                         case HAMMER_ZONE_META:
2571                                 hammer_blockmap_free(trans,
2572                                                      data_offset, data_len);
2573                                 bytes += data_len;
2574                                 break;
2575                         default:
2576                                 break;
2577                         }
2578                 }
2579         }
2580
2581         /*
2582          * Track inode count and next_tid.  This is used by the mirroring
2583          * and PFS code.  icount can be negative, zero, or positive.
2584          */
2585         if (error == 0 && track) {
2586                 if (icount) {
2587                         hammer_modify_volume_field(trans, trans->rootvol,
2588                                                    vol0_stat_inodes);
2589                         trans->rootvol->ondisk->vol0_stat_inodes += icount;
2590                         hammer_modify_volume_done(trans->rootvol);
2591                 }
2592                 if (trans->rootvol->ondisk->vol0_next_tid < delete_tid) {
2593                         hammer_modify_volume_noundo(trans, trans->rootvol);
2594                         trans->rootvol->ondisk->vol0_next_tid = delete_tid;
2595                         hammer_modify_volume_done(trans->rootvol);
2596                 }
2597         }
2598
2599         /*
2600          * mirror_tid propagation occurs if the node's mirror_tid had to be
2601          * updated while adjusting the delete_tid.
2602          *
2603          * This occurs when deleting even in nohistory mode, but does not
2604          * occur when pruning an already-deleted node.
2605          *
2606          * cursor->ip is NULL when called from the pruning, mirroring,
2607          * and pfs code.  If non-NULL propagation will be conditionalized
2608          * on whether the PFS is in no-history mode or not.
2609          *
2610          * WARNING: cursor's leaf pointer may have changed after do_propagation
2611          *          returns!
2612          */
2613         if (doprop) {
2614                 if (cursor->ip)
2615                         hammer_btree_do_propagation(cursor, cursor->ip->pfsm, leaf);
2616                 else
2617                         hammer_btree_do_propagation(cursor, NULL, leaf);
2618         }
2619         if (stat_bytes)
2620                 *stat_bytes = bytes;
2621         hammer_sync_unlock(trans);
2622         return (error);
2623 }
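
/*
 * For reference, the record deletion path above (hammer_ip_delete_record())
 * invokes this as:
 *
 *      error = hammer_delete_at_cursor(cursor,
 *                      HAMMER_DELETE_ADJUST | hammer_nohistory(ip),
 *                      cursor->trans->tid, cursor->trans->time32,
 *                      0, NULL);
 *
 * A caller wanting to physically reclaim space can additionally pass
 * HAMMER_DELETE_DESTROY and a non-NULL stat_bytes pointer to obtain the
 * number of bytes released.
 */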
2624
2625 /*
2626  * Determine whether we can remove a directory.  This routine checks whether
2627  * a directory is empty or not and enforces flush connectivity.
2628  *
2629  * Flush connectivity requires that we block if the target directory is
2630  * currently flushing, otherwise it may not end up in the same flush group.
2631  *
2632  * Returns 0 on success, ENOTEMPTY or EDEADLK (or other errors) on failure.
2633  */
2634 int
2635 hammer_ip_check_directory_empty(hammer_transaction_t trans, hammer_inode_t ip)
2636 {
2637         struct hammer_cursor cursor;
2638         int error;
2639
2640         /*
2641          * Check directory empty
2642          */
2643         hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
2644
2645         cursor.key_beg.localization = ip->obj_localization +
2646                                       hammer_dir_localization(ip);
2647         cursor.key_beg.obj_id = ip->obj_id;
2648         cursor.key_beg.create_tid = 0;
2649         cursor.key_beg.delete_tid = 0;
2650         cursor.key_beg.obj_type = 0;
2651         cursor.key_beg.rec_type = HAMMER_RECTYPE_ENTRY_START;
2652         cursor.key_beg.key = HAMMER_MIN_KEY;
2653
2654         cursor.key_end = cursor.key_beg;
2655         cursor.key_end.rec_type = HAMMER_RECTYPE_MAX;
2656         cursor.key_end.key = HAMMER_MAX_KEY;
2657
2658         cursor.asof = ip->obj_asof;
2659         cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
2660
2661         error = hammer_ip_first(&cursor);
2662         if (error == ENOENT)
2663                 error = 0;
2664         else if (error == 0)
2665                 error = ENOTEMPTY;
2666         hammer_done_cursor(&cursor);
2667         return(error);
2668 }
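
/*
 * Typical use from a directory removal path (sketch):
 *
 *      error = hammer_ip_check_directory_empty(trans, ip);
 *      if (error)              /* ENOTEMPTY, EDEADLK, ... */
 *              return (error);
 *      ... proceed with the removal ...
 */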
2669
2670 /*
2671  * Localize the data payload.  Directory entries may need their
2672  * localization adjusted.
2673  */
2674 static
2675 int
2676 hammer_cursor_localize_data(hammer_data_ondisk_t data,
2677                             hammer_btree_leaf_elm_t leaf)
2678 {
2679         u_int32_t localization;
2680
2681         if (leaf->base.rec_type == HAMMER_RECTYPE_DIRENTRY) {
2682                 localization = leaf->base.localization &
2683                                HAMMER_LOCALIZE_PSEUDOFS_MASK;
2684                 if (data->entry.localization != localization) {
2685                         data->entry.localization = localization;
2686                         hammer_crc_set_leaf(data, leaf);
2687                 }
2688         }
2689         return(0);
2690 }