/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_object.c,v 1.82 2008/07/04 07:25:36 dillon Exp $
 */

#include "hammer.h"

static int hammer_mem_add(hammer_record_t record);
static int hammer_mem_lookup(hammer_cursor_t cursor);
static int hammer_mem_first(hammer_cursor_t cursor);
static int hammer_frontend_trunc_callback(hammer_record_t record,
                                void *data __unused);
static int hammer_record_needs_overwrite_delete(hammer_record_t record);
static int hammer_delete_general(hammer_cursor_t cursor, hammer_inode_t ip,
                      hammer_btree_leaf_elm_t leaf);

struct rec_trunc_info {
        u_int16_t       rec_type;
        int64_t         trunc_off;
};

/*
 * Red-black tree support.  Comparison code for insertion.
 */
static int
hammer_rec_rb_compare(hammer_record_t rec1, hammer_record_t rec2)
{
        if (rec1->leaf.base.rec_type < rec2->leaf.base.rec_type)
                return(-1);
        if (rec1->leaf.base.rec_type > rec2->leaf.base.rec_type)
                return(1);

        if (rec1->leaf.base.key < rec2->leaf.base.key)
                return(-1);
        if (rec1->leaf.base.key > rec2->leaf.base.key)
                return(1);

        /*
         * Never match against an item deleted by the front-end.
         *
         * rec1 is greater than rec2 if rec1 is marked deleted.
         * rec1 is less than rec2 if rec2 is marked deleted.
         *
         * Multiple deleted records may be present, do not return 0
         * if both are marked deleted.
         */
        if (rec1->flags & HAMMER_RECF_DELETED_FE)
                return(1);
        if (rec2->flags & HAMMER_RECF_DELETED_FE)
                return(-1);

        return(0);
}

/*
 * Basic record comparison code similar to hammer_btree_cmp().
 */
static int
hammer_rec_cmp(hammer_base_elm_t elm, hammer_record_t rec)
{
        if (elm->rec_type < rec->leaf.base.rec_type)
                return(-3);
        if (elm->rec_type > rec->leaf.base.rec_type)
                return(3);

        if (elm->key < rec->leaf.base.key)
                return(-2);
        if (elm->key > rec->leaf.base.key)
                return(2);

        /*
         * Never match against an item deleted by the front-end.
         * elm is less than rec if rec is marked deleted.
         */
        if (rec->flags & HAMMER_RECF_DELETED_FE)
                return(-1);
        return(0);
}

/*
 * Special LOOKUP_INFO to locate an overlapping record.  This is used by
 * the reservation code to implement small-block records (whose keys will
 * be different depending on data_len, when representing the same base
 * offset).
 *
 * NOTE: The base file offset of a data record is (key - data_len), not (key).
 */
static int
hammer_rec_overlap_compare(hammer_btree_leaf_elm_t leaf, hammer_record_t rec)
{
        if (leaf->base.rec_type < rec->leaf.base.rec_type)
                return(-3);
        if (leaf->base.rec_type > rec->leaf.base.rec_type)
                return(3);

        /*
         * Overlap compare
         */
        if (leaf->base.rec_type == HAMMER_RECTYPE_DATA) {
                /* leaf_end <= rec_beg */
                if (leaf->base.key <= rec->leaf.base.key - rec->leaf.data_len)
                        return(-2);
                /* leaf_beg >= rec_end */
                if (leaf->base.key - leaf->data_len >= rec->leaf.base.key)
                        return(2);
        } else {
                if (leaf->base.key < rec->leaf.base.key)
                        return(-2);
                if (leaf->base.key > rec->leaf.base.key)
                        return(2);
        }

        /*
         * Never match against an item deleted by the front-end.
         * leaf is less than rec if rec is marked deleted.
         *
         * We must still return the proper code for the scan to continue
         * along the correct branches.
         */
        if (rec->flags & HAMMER_RECF_DELETED_FE) {
                if (leaf->base.key < rec->leaf.base.key)
                        return(-2);
                if (leaf->base.key > rec->leaf.base.key)
                        return(2);
                return(-1);
        }
        return(0);
}
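
/*
 * Worked example of the overlap compare above (illustrative only, not
 * from the original sources): a DATA record covering file offsets
 * [0x0000, 0x8000) has data_len 0x8000 and key 0x8000 (base + len).  A
 * leaf with key 0x4000 and data_len 0x4000 covers [0x0000, 0x4000) and
 * overlaps, so the compare returns 0.  A leaf with key 0x18000 and
 * data_len 0x8000 covers [0x10000, 0x18000), begins at or after the
 * record's end, and returns 2.
 */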

/*
 * RB_SCAN comparison code for hammer_mem_first().  The argument order
 * is reversed so the comparison result has to be negated.  key_beg and
 * key_end are both range-inclusive.
 *
 * Localized deletions are not cached in-memory.
 */
static
int
hammer_rec_scan_cmp(hammer_record_t rec, void *data)
{
        hammer_cursor_t cursor = data;
        int r;

        r = hammer_rec_cmp(&cursor->key_beg, rec);
        if (r > 1)
                return(-1);
        r = hammer_rec_cmp(&cursor->key_end, rec);
        if (r < -1)
                return(1);
        return(0);
}

/*
 * This compare function is used when simply looking up key_beg.
 */
static
int
hammer_rec_find_cmp(hammer_record_t rec, void *data)
{
        hammer_cursor_t cursor = data;
        int r;

        r = hammer_rec_cmp(&cursor->key_beg, rec);
        if (r > 1)
                return(-1);
        if (r < -1)
                return(1);
        return(0);
}

/*
 * Locate blocks within the truncation range.  Partial blocks do not count.
 */
static
int
hammer_rec_trunc_cmp(hammer_record_t rec, void *data)
{
        struct rec_trunc_info *info = data;

        if (rec->leaf.base.rec_type < info->rec_type)
                return(-1);
        if (rec->leaf.base.rec_type > info->rec_type)
                return(1);

        switch(rec->leaf.base.rec_type) {
        case HAMMER_RECTYPE_DB:
                /*
                 * DB record key is not beyond the truncation point, retain.
                 */
                if (rec->leaf.base.key < info->trunc_off)
                        return(-1);
                break;
        case HAMMER_RECTYPE_DATA:
                /*
                 * DATA record offset start is not beyond the truncation point,
                 * retain.
                 */
                if (rec->leaf.base.key - rec->leaf.data_len < info->trunc_off)
                        return(-1);
                break;
        default:
                panic("hammer_rec_trunc_cmp: unexpected record type");
        }

        /*
         * The record start is >= the truncation point, return match,
         * the record should be destroyed.
         */
        return(0);
}
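
/*
 * Worked example of the truncation compare (illustrative only, not from
 * the original sources): with trunc_off = 0x6000, a DATA record covering
 * [0x4000, 0x8000) starts below the truncation point and is retained
 * (-1) even though it extends past it -- the partial block is handled by
 * the vnops setattr code, not here.  A record covering [0x8000, 0xc000)
 * starts at or beyond trunc_off and matches (0), marking it for
 * destruction.
 */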

RB_GENERATE(hammer_rec_rb_tree, hammer_record, rb_node, hammer_rec_rb_compare);
RB_GENERATE_XLOOKUP(hammer_rec_rb_tree, INFO, hammer_record, rb_node,
                    hammer_rec_overlap_compare, hammer_btree_leaf_elm_t);

/*
 * Allocate a record for the caller to finish filling in.  The record is
 * returned referenced.
 */
hammer_record_t
hammer_alloc_mem_record(hammer_inode_t ip, int data_len)
{
        hammer_record_t record;

        ++hammer_count_records;
        record = kmalloc(sizeof(*record), M_HAMMER,
                         M_WAITOK | M_ZERO | M_USE_RESERVE);
        record->flush_state = HAMMER_FST_IDLE;
        record->ip = ip;
        record->leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
        record->leaf.data_len = data_len;
        hammer_ref(&record->lock);

        if (data_len) {
                record->data = kmalloc(data_len, M_HAMMER, M_WAITOK | M_ZERO);
                record->flags |= HAMMER_RECF_ALLOCDATA;
                ++hammer_count_record_datas;
        }

        return (record);
}
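
/*
 * Minimal usage sketch (illustrative only; the variable names below are
 * assumptions, not code from this file): a frontend path allocates a
 * record, fills in the remaining base key fields and data, and hands it
 * to hammer_ip_add_record(), which eats the reference via
 * hammer_mem_add().
 */
#if 0
        record = hammer_alloc_mem_record(ip, data_len);
        record->leaf.base.localization = ip->obj_localization +
                                         HAMMER_LOCALIZE_MISC;
        record->leaf.base.rec_type = rec_type;  /* assumed caller value */
        record->leaf.base.key = key;            /* assumed caller value */
        bcopy(src, record->data, data_len);     /* data was kmalloc'd above */
        error = hammer_ip_add_record(trans, record);
#endif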

void
hammer_wait_mem_record_ident(hammer_record_t record, const char *ident)
{
        while (record->flush_state == HAMMER_FST_FLUSH) {
                record->flags |= HAMMER_RECF_WANTED;
                tsleep(record, 0, ident, 0);
        }
}

/*
 * Called from the backend, hammer_inode.c, after a record has been
 * flushed to disk.  The record has been exclusively locked by the
 * caller and interlocked with BE.
 *
 * We clean up the state, unlock, and release the record (the record
 * was referenced by the fact that it was in the HAMMER_FST_FLUSH state).
 */
void
hammer_flush_record_done(hammer_record_t record, int error)
{
        hammer_inode_t target_ip;

        KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
        KKASSERT(record->flags & HAMMER_RECF_INTERLOCK_BE);

        if (error) {
                /*
                 * An error occurred, the backend was unable to sync the
                 * record to its media.  Leave the record intact.
                 */
                Debugger("flush_record_done error");
        }

        if (record->flags & HAMMER_RECF_DELETED_BE) {
                if ((target_ip = record->target_ip) != NULL) {
                        TAILQ_REMOVE(&target_ip->target_list, record,
                                     target_entry);
                        record->target_ip = NULL;
                        hammer_test_inode(target_ip);
                }
                record->flush_state = HAMMER_FST_IDLE;
        } else {
                if (record->target_ip) {
                        record->flush_state = HAMMER_FST_SETUP;
                        hammer_test_inode(record->ip);
                        hammer_test_inode(record->target_ip);
                } else {
                        record->flush_state = HAMMER_FST_IDLE;
                }
        }
        record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
        if (record->flags & HAMMER_RECF_WANTED) {
                record->flags &= ~HAMMER_RECF_WANTED;
                wakeup(record);
        }
        hammer_rel_mem_record(record);
}

/*
 * Release a memory record.  Records marked for deletion are immediately
 * removed from the RB-Tree but otherwise left intact until the last ref
 * goes away.
 */
void
hammer_rel_mem_record(struct hammer_record *record)
{
        hammer_inode_t ip, target_ip;

        hammer_unref(&record->lock);

        if (record->lock.refs == 0) {
                /*
                 * Upon release of the last reference wakeup any waiters.
                 * The record structure may get destroyed so callers will
                 * loop up and do a relookup.
                 *
                 * WARNING!  Record must be removed from RB-TREE before we
                 * might possibly block.  hammer_test_inode() can block!
                 */
                ip = record->ip;

                /*
                 * Upon release of the last reference a record marked deleted
                 * is destroyed.
                 */
                if (record->flags & HAMMER_RECF_DELETED_FE) {
                        KKASSERT(ip->lock.refs > 0);
                        KKASSERT(record->flush_state != HAMMER_FST_FLUSH);

                        /*
                         * target_ip may have zero refs, we have to ref it
                         * to prevent it from being ripped out from under
                         * us.
                         */
                        if ((target_ip = record->target_ip) != NULL) {
                                TAILQ_REMOVE(&target_ip->target_list,
                                             record, target_entry);
                                record->target_ip = NULL;
                                hammer_ref(&target_ip->lock);
                        }

                        if (record->flags & HAMMER_RECF_ONRBTREE) {
                                RB_REMOVE(hammer_rec_rb_tree,
                                          &record->ip->rec_tree,
                                          record);
                                KKASSERT(ip->rsv_recs > 0);
                                --ip->hmp->rsv_recs;
                                --ip->rsv_recs;
                                ip->hmp->rsv_databytes -= record->leaf.data_len;
                                record->flags &= ~HAMMER_RECF_ONRBTREE;

                                if (RB_EMPTY(&record->ip->rec_tree)) {
                                        record->ip->flags &= ~HAMMER_INODE_XDIRTY;
                                        record->ip->sync_flags &= ~HAMMER_INODE_XDIRTY;
                                        hammer_test_inode(record->ip);
                                }
                        }

                        /*
                         * Do this test after removing record from the B-Tree.
                         */
                        if (target_ip) {
                                hammer_test_inode(target_ip);
                                hammer_rel_inode(target_ip, 0);
                        }

                        if (record->flags & HAMMER_RECF_ALLOCDATA) {
                                --hammer_count_record_datas;
                                kfree(record->data, M_HAMMER);
                                record->flags &= ~HAMMER_RECF_ALLOCDATA;
                        }
                        if (record->resv) {
                                hammer_blockmap_reserve_complete(ip->hmp,
                                                                 record->resv);
                                record->resv = NULL;
                        }
                        record->data = NULL;
                        --hammer_count_records;
                        kfree(record, M_HAMMER);
                }
        }
}

/*
 * Record visibility depends on whether the record is being accessed by
 * the backend or the frontend.
 *
 * Return non-zero if the record is visible, zero if it isn't or if it is
 * deleted.
 */
static __inline
int
hammer_ip_iterate_mem_good(hammer_cursor_t cursor, hammer_record_t record)
{
        if (cursor->flags & HAMMER_CURSOR_BACKEND) {
                if (record->flags & HAMMER_RECF_DELETED_BE)
                        return(0);
        } else {
                if (record->flags & HAMMER_RECF_DELETED_FE)
                        return(0);
        }
        return(1);
}

/*
 * This callback is used as part of the RB_SCAN function for in-memory
 * records.  We terminate it (return -1) as soon as we get a match.
 *
 * This routine is used by frontend code.
 *
 * The primary compare code does not account for ASOF lookups.  This
 * code handles that case as well as a few others.
 */
static
int
hammer_rec_scan_callback(hammer_record_t rec, void *data)
{
        hammer_cursor_t cursor = data;

        /*
         * We terminate on success, so this should be NULL on entry.
         */
        KKASSERT(cursor->iprec == NULL);

        /*
         * Skip if the record was marked deleted.
         */
        if (hammer_ip_iterate_mem_good(cursor, rec) == 0)
                return(0);

        /*
         * Skip if not visible due to our as-of TID
         */
        if (cursor->flags & HAMMER_CURSOR_ASOF) {
                if (cursor->asof < rec->leaf.base.create_tid)
                        return(0);
                if (rec->leaf.base.delete_tid &&
                    cursor->asof >= rec->leaf.base.delete_tid) {
                        return(0);
                }
        }

        /*
         * If the record is queued to the flusher we have to block until
         * it isn't.  Otherwise we may see duplication between our memory
         * cache and the media.
         */
        hammer_ref(&rec->lock);

#warning "This deadlocks"
#if 0
        if (rec->flush_state == HAMMER_FST_FLUSH)
                hammer_wait_mem_record(rec);
#endif

        /*
         * The record may have been deleted while we were blocked.
         */
        if (hammer_ip_iterate_mem_good(cursor, rec) == 0) {
                hammer_rel_mem_record(rec);
                return(0);
        }

        /*
         * Set the matching record and stop the scan.
         */
        cursor->iprec = rec;
        return(-1);
}

/*
 * Lookup an in-memory record given the key specified in the cursor.  Works
 * just like hammer_btree_lookup() but operates on an inode's in-memory
 * record list.
 *
 * The lookup must fail if the record is marked for deferred deletion.
 */
static
int
hammer_mem_lookup(hammer_cursor_t cursor)
{
        int error;

        KKASSERT(cursor->ip);
        if (cursor->iprec) {
                hammer_rel_mem_record(cursor->iprec);
                cursor->iprec = NULL;
        }
        hammer_rec_rb_tree_RB_SCAN(&cursor->ip->rec_tree, hammer_rec_find_cmp,
                                   hammer_rec_scan_callback, cursor);

        if (cursor->iprec == NULL)
                error = ENOENT;
        else
                error = 0;
        return(error);
}

/*
 * hammer_mem_first() - locate the first in-memory record matching the
 * cursor within the bounds of the key range.
 */
static
int
hammer_mem_first(hammer_cursor_t cursor)
{
        hammer_inode_t ip;

        ip = cursor->ip;
        KKASSERT(ip != NULL);

        if (cursor->iprec) {
                hammer_rel_mem_record(cursor->iprec);
                cursor->iprec = NULL;
        }

        hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_scan_cmp,
                                   hammer_rec_scan_callback, cursor);

        /*
         * Adjust scan.node and keep it linked into the RB-tree so we can
         * hold the cursor through third party modifications of the RB-tree.
         */
        if (cursor->iprec)
                return(0);
        return(ENOENT);
}

/************************************************************************
 *                   HAMMER IN-MEMORY RECORD FUNCTIONS                  *
 ************************************************************************
 *
 * These functions manipulate in-memory records.  Such records typically
 * exist prior to being committed to disk or indexed via the on-disk B-Tree.
 */

/*
 * Add a directory entry (dip,ncp) which references inode (ip).
 *
 * Note that the low 32 bits of the namekey are set temporarily to create
 * a unique in-memory record, and may be modified a second time when the
 * record is synchronized to disk.  In particular, the low 32 bits cannot be
 * all 0's when synching to disk, which is not handled here.
 *
 * NOTE: bytes does not include any terminating \0 on name, and name might
 * not be terminated.
 */
int
hammer_ip_add_directory(struct hammer_transaction *trans,
                     struct hammer_inode *dip, const char *name, int bytes,
                     struct hammer_inode *ip)
{
        struct hammer_cursor cursor;
        hammer_record_t record;
        int error;
        int count;
        u_int32_t iterator;

        record = hammer_alloc_mem_record(dip, HAMMER_ENTRY_SIZE(bytes));
        if (++trans->hmp->namekey_iterator == 0)
                ++trans->hmp->namekey_iterator;

        record->type = HAMMER_MEM_RECORD_ADD;
        record->leaf.base.localization = dip->obj_localization +
                                         HAMMER_LOCALIZE_MISC;
        record->leaf.base.obj_id = dip->obj_id;
        record->leaf.base.key = hammer_directory_namekey(name, bytes);
        record->leaf.base.key += trans->hmp->namekey_iterator;
        record->leaf.base.rec_type = HAMMER_RECTYPE_DIRENTRY;
        record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
        record->data->entry.obj_id = ip->obj_id;
        record->data->entry.localization = ip->obj_localization;
        bcopy(name, record->data->entry.name, bytes);

        ++ip->ino_data.nlinks;
        hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);

        /*
         * Find an unused namekey.  Both the in-memory record tree and
         * the B-Tree are checked.  Exact matches also match create_tid
         * so use an ASOF search to (mostly) ignore it.
         *
         * delete-visibility is set so pending deletions do not give us
         * a false-negative on our ability to use an iterator.
         */
        hammer_init_cursor(trans, &cursor, &dip->cache[1], dip);
        cursor.key_beg = record->leaf.base;
        cursor.flags |= HAMMER_CURSOR_ASOF;
        cursor.flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
        cursor.asof = ip->obj_asof;

        count = 0;
        while (hammer_ip_lookup(&cursor) == 0) {
                iterator = (u_int32_t)record->leaf.base.key + 1;
                if (iterator == 0)
                        iterator = 1;
                record->leaf.base.key &= ~0xFFFFFFFFLL;
                record->leaf.base.key |= iterator;
                cursor.key_beg.key = record->leaf.base.key;
                if (++count == 1000000000) {
                        hammer_rel_mem_record(record);
                        error = ENOSPC;
                        goto failed;
                }
        }

        /*
         * The target inode and the directory entry are bound together.
         */
        record->target_ip = ip;
        record->flush_state = HAMMER_FST_SETUP;
        TAILQ_INSERT_TAIL(&ip->target_list, record, target_entry);

        /*
         * The inode now has a dependency and must be taken out of the idle
         * state.  An inode not in an idle state is given an extra reference.
         */
        if (ip->flush_state == HAMMER_FST_IDLE) {
                hammer_ref(&ip->lock);
                ip->flush_state = HAMMER_FST_SETUP;
        }
        error = hammer_mem_add(record);
failed:
        hammer_done_cursor(&cursor);
        return(error);
}
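
/*
 * Worked example of the namekey iteration above (illustrative only, the
 * hash value is made up): the directory key carries the name hash in its
 * upper bits and an iterator in the low 32 bits.  If the initial key is
 * 0x1234567800000005 and that key collides with an existing entry, the
 * loop masks off the low 32 bits and bumps only the iterator portion,
 * e.g. to 0x1234567800000006, leaving the hash intact.
 */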

/*
 * Delete the directory entry and update the inode link count.  The
 * cursor must be seeked to the directory entry record being deleted.
 *
 * The related inode should be share-locked by the caller.  The caller is
 * on the frontend.
 *
 * This function can return EDEADLK requiring the caller to terminate
 * the cursor, any locks, wait on the returned record, and retry.
 */
int
hammer_ip_del_directory(struct hammer_transaction *trans,
                     hammer_cursor_t cursor, struct hammer_inode *dip,
                     struct hammer_inode *ip)
{
        hammer_record_t record;
        int error;

        if (hammer_cursor_inmem(cursor)) {
                /*
                 * In-memory (unsynchronized) records can simply be freed.
                 * Even though the HAMMER_RECF_DELETED_FE flag is ignored
                 * by the backend, we must still avoid races against the
                 * backend potentially syncing the record to the media.
                 *
                 * We cannot call hammer_ip_delete_record(), that routine may
                 * only be called from the backend.
                 */
                record = cursor->iprec;
                if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
                        KKASSERT(cursor->deadlk_rec == NULL);
                        hammer_ref(&record->lock);
                        cursor->deadlk_rec = record;
                        error = EDEADLK;
                } else {
                        KKASSERT(record->type == HAMMER_MEM_RECORD_ADD);
                        record->flags |= HAMMER_RECF_DELETED_FE;
                        error = 0;
                }
        } else {
                /*
                 * If the record is on-disk we have to queue the deletion by
                 * the record's key.  This also causes lookups to skip the
                 * record.
                 */
                KKASSERT(dip->flags &
                         (HAMMER_INODE_ONDISK | HAMMER_INODE_DONDISK));
                record = hammer_alloc_mem_record(dip, 0);
                record->type = HAMMER_MEM_RECORD_DEL;
                record->leaf.base = cursor->leaf->base;

                record->target_ip = ip;
                record->flush_state = HAMMER_FST_SETUP;
                TAILQ_INSERT_TAIL(&ip->target_list, record, target_entry);

                /*
                 * The inode now has a dependency and must be taken out of
                 * the idle state.  An inode not in an idle state is given
                 * an extra reference.
                 */
                if (ip->flush_state == HAMMER_FST_IDLE) {
                        hammer_ref(&ip->lock);
                        ip->flush_state = HAMMER_FST_SETUP;
                }

                error = hammer_mem_add(record);
        }

        /*
         * One less link.  The file may still be open in the OS even after
         * all links have gone away.
         *
         * We have to terminate the cursor before syncing the inode to
         * avoid deadlocking against ourselves.  XXX this may no longer
         * be true.
         *
         * If nlinks drops to zero and the vnode is inactive (or there is
         * no vnode), call hammer_inode_unloadable_check() to zonk the
         * inode.  If we don't do this here the inode will not be destroyed
         * on-media until we unmount.
         */
        if (error == 0) {
                --ip->ino_data.nlinks;
                hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
                if (ip->ino_data.nlinks == 0 &&
                    (ip->vp == NULL || (ip->vp->v_flag & VINACTIVE))) {
                        hammer_done_cursor(cursor);
                        hammer_inode_unloadable_check(ip, 1);
                        hammer_flush_inode(ip, 0);
                }
        }
        return(error);
}

/*
 * Add a record to an inode.
 *
 * The caller must allocate the record with hammer_alloc_mem_record(ip) and
 * initialize the following additional fields:
 *
 * record->rec.entry.base.base.key
 * record->rec.entry.base.base.rec_type
 * record->rec.entry.base.base.data_len
 * record->data         (a copy will be kmalloc'd if it cannot be embedded)
 *
 * The related inode should be share-locked by the caller.  The caller is
 * on the frontend.
 */
int
hammer_ip_add_record(struct hammer_transaction *trans, hammer_record_t record)
{
        hammer_inode_t ip = record->ip;
        int error;

        KKASSERT(record->leaf.base.localization != 0);
        record->leaf.base.obj_id = ip->obj_id;
        record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
        error = hammer_mem_add(record);
        return(error);
}

/*
 * Locate a bulk record in-memory.  Bulk records allow disk space to be
 * reserved so the front-end can flush large data writes without having
 * to queue the BIO to the flusher.  Only the related record gets queued
 * to the flusher.
 */
static hammer_record_t
hammer_ip_get_bulk(hammer_inode_t ip, off_t file_offset, int bytes)
{
        hammer_record_t record;
        struct hammer_btree_leaf_elm leaf;

        bzero(&leaf, sizeof(leaf));
        leaf.base.obj_id = ip->obj_id;
        leaf.base.key = file_offset + bytes;
        leaf.base.create_tid = 0;
        leaf.base.delete_tid = 0;
        leaf.base.rec_type = HAMMER_RECTYPE_DATA;
        leaf.base.obj_type = 0;                 /* unused */
        leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;     /* unused */
        leaf.base.localization = ip->obj_localization + HAMMER_LOCALIZE_MISC;
        leaf.data_len = bytes;

        record = hammer_rec_rb_tree_RB_LOOKUP_INFO(&ip->rec_tree, &leaf);
        if (record)
                hammer_ref(&record->lock);
        return(record);
}

/*
 * Reserve blockmap space placemarked with an in-memory record.
 *
 * This routine is called by the frontend in order to be able to directly
 * flush a buffer cache buffer.  The frontend has locked the related buffer
 * cache buffers and we should be able to manipulate any overlapping
 * in-memory records.
 */
hammer_record_t
hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset, void *data, int bytes,
                   int *errorp)
{
        hammer_record_t record;
        hammer_record_t conflict;
        int zone;
        int flags;

        /*
         * Deal with conflicting in-memory records.  We cannot have multiple
         * in-memory records for the same offset without seriously confusing
         * the backend, including but not limited to the backend issuing
         * delete-create-delete sequences and asserting on the delete_tid
         * being the same as the create_tid.
         *
         * If we encounter a record with the backend interlock set we cannot
         * immediately delete it without confusing the backend.
         */
        while ((conflict = hammer_ip_get_bulk(ip, file_offset, bytes)) != NULL) {
                if (conflict->flags & HAMMER_RECF_INTERLOCK_BE) {
                        conflict->flags |= HAMMER_RECF_WANTED;
                        tsleep(conflict, 0, "hmrrc3", 0);
                } else {
                        conflict->flags |= HAMMER_RECF_DELETED_FE;
                }
                hammer_rel_mem_record(conflict);
        }

        /*
         * Create a record to cover the direct write.  This is called with
         * the related BIO locked so there should be no possible conflict.
         *
         * The backend is responsible for finalizing the space reserved in
         * this record.
         *
         * XXX bytes not aligned, depend on the reservation code to
         * align the reservation.
         */
        record = hammer_alloc_mem_record(ip, 0);
        zone = (bytes >= HAMMER_BUFSIZE) ? HAMMER_ZONE_LARGE_DATA_INDEX :
                                           HAMMER_ZONE_SMALL_DATA_INDEX;
        record->resv = hammer_blockmap_reserve(ip->hmp, zone, bytes,
                                               &record->leaf.data_offset,
                                               errorp);
        if (record->resv == NULL) {
                kprintf("hammer_ip_add_bulk: reservation failed\n");
                hammer_rel_mem_record(record);
                return(NULL);
        }
        record->type = HAMMER_MEM_RECORD_DATA;
        record->leaf.base.rec_type = HAMMER_RECTYPE_DATA;
        record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
        record->leaf.base.obj_id = ip->obj_id;
        record->leaf.base.key = file_offset + bytes;
        record->leaf.base.localization = ip->obj_localization +
                                         HAMMER_LOCALIZE_MISC;
        record->leaf.data_len = bytes;
        hammer_crc_set_leaf(data, &record->leaf);
        flags = record->flags;

        hammer_ref(&record->lock);      /* mem_add eats a reference */
        *errorp = hammer_mem_add(record);
        if (*errorp) {
                conflict = hammer_ip_get_bulk(ip, file_offset, bytes);
                kprintf("hammer_ip_add_bulk: error %d conflict %p "
                        "file_offset %lld bytes %d\n",
                        *errorp, conflict, file_offset, bytes);
                if (conflict) {
                        kprintf("conflict %lld %d\n",
                                conflict->leaf.base.key,
                                conflict->leaf.data_len);
                        hammer_rel_mem_record(conflict);
                }
        }
        KKASSERT(*errorp == 0);
        conflict = hammer_ip_get_bulk(ip, file_offset, bytes);
        if (conflict != record) {
                kprintf("conflict mismatch %p %p %08x\n",
                        conflict, record, record->flags);
                if (conflict) {
                        kprintf("conflict mismatch %lld/%d %lld/%d\n",
                                conflict->leaf.base.key,
                                conflict->leaf.data_len,
                                record->leaf.base.key,
                                record->leaf.data_len);
                }
        }
        KKASSERT(conflict == record);
        hammer_rel_mem_record(conflict);

        return (record);
}
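
/*
 * Minimal usage sketch (illustrative only; the real caller is the
 * frontend write path and the names below are assumptions): the
 * frontend reserves space with hammer_ip_add_bulk() and issues the
 * direct write against record->leaf.data_offset itself; only the
 * placemarker record is queued to the flusher, which later finalizes
 * the reservation when it inserts record->leaf into the B-Tree.
 */
#if 0
        record = hammer_ip_add_bulk(ip, file_offset, data, bytes, &error);
        if (record == NULL)
                return (error);         /* reservation failed */
        /* ... direct-write the buffer to record->leaf.data_offset ... */
#endif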

/*
 * Frontend truncation code.  Scan in-memory records only.  On-disk records
 * and records in a flushing state are handled by the backend.  The vnops
 * setattr code will handle the block containing the truncation point.
 *
 * Partial blocks are not deleted.
 */
int
hammer_ip_frontend_trunc(struct hammer_inode *ip, off_t file_size)
{
        struct rec_trunc_info info;

        switch(ip->ino_data.obj_type) {
        case HAMMER_OBJTYPE_REGFILE:
                info.rec_type = HAMMER_RECTYPE_DATA;
                break;
        case HAMMER_OBJTYPE_DBFILE:
                info.rec_type = HAMMER_RECTYPE_DB;
                break;
        default:
                return(EINVAL);
        }
        info.trunc_off = file_size;
        hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_trunc_cmp,
                                   hammer_frontend_trunc_callback, &info);
        return(0);
}

static int
hammer_frontend_trunc_callback(hammer_record_t record, void *data __unused)
{
        if (record->flags & HAMMER_RECF_DELETED_FE)
                return(0);
        if (record->flush_state == HAMMER_FST_FLUSH)
                return(0);
        KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
        hammer_ref(&record->lock);
        record->flags |= HAMMER_RECF_DELETED_FE;
        hammer_rel_mem_record(record);
        return(0);
}

/*
 * Return 1 if the caller must check for and delete existing records
 * before writing out a new data record.
 *
 * Return 0 if the caller can just insert the record into the B-Tree without
 * checking.
 */
static int
hammer_record_needs_overwrite_delete(hammer_record_t record)
{
        hammer_inode_t ip = record->ip;
        int64_t file_offset;
        int r;

        if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE)
                file_offset = record->leaf.base.key;
        else
                file_offset = record->leaf.base.key - record->leaf.data_len;
        r = (file_offset < ip->save_trunc_off);
        if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
                if (ip->save_trunc_off <= record->leaf.base.key)
                        ip->save_trunc_off = record->leaf.base.key + 1;
        } else {
                if (ip->save_trunc_off < record->leaf.base.key)
                        ip->save_trunc_off = record->leaf.base.key;
        }
        return(r);
}
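
/*
 * Worked example (illustrative only, not from the original sources):
 * with save_trunc_off at 0x8000, syncing a DATA record covering
 * [0x4000, 0x8000) gives file_offset 0x4000 < 0x8000, so an overwrite
 * delete is required (return 1) and save_trunc_off stays 0x8000.  A
 * subsequent record covering [0x8000, 0xc000) starts at save_trunc_off,
 * returns 0, and advances save_trunc_off to 0xc000.
 */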

/*
 * Backend code.  Sync a record to the media.
 */
int
hammer_ip_sync_record_cursor(hammer_cursor_t cursor, hammer_record_t record)
{
        hammer_transaction_t trans = cursor->trans;
        int64_t file_offset;
        int bytes;
        void *bdata;
        int error;
        int doprop;

        KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
        KKASSERT(record->flags & HAMMER_RECF_INTERLOCK_BE);
        KKASSERT(record->leaf.base.localization != 0);

        /*
         * If this is a bulk-data record placemarker there may be an existing
         * record on-disk, indicating a data overwrite.  If there is, the
         * on-disk record must be deleted before we can insert our new record.
         *
         * We've synthesized this record and do not know what the create_tid
         * on-disk is, nor how much data it represents.
         *
         * Keep in mind that (key) for data records is (base_offset + len),
         * not (base_offset).  Also, we only want to get rid of on-disk
         * records since we are trying to sync our in-memory record; call
         * hammer_ip_delete_range() with truncating set to 1 to make sure
         * it skips in-memory records.
         *
         * It is ok for the lookup to return ENOENT.
         *
         * NOTE OPTIMIZATION: sync_trunc_off is used to determine if we have
         * to call hammer_ip_delete_range() or not.  This also means we must
         * update sync_trunc_off as we write.
         */
        if (record->type == HAMMER_MEM_RECORD_DATA &&
            hammer_record_needs_overwrite_delete(record)) {
                file_offset = record->leaf.base.key - record->leaf.data_len;
                bytes = (record->leaf.data_len + HAMMER_BUFMASK) &
                        ~HAMMER_BUFMASK;
                KKASSERT((file_offset & HAMMER_BUFMASK) == 0);
                error = hammer_ip_delete_range(
                                cursor, record->ip,
                                file_offset, file_offset + bytes - 1,
                                1);
                if (error && error != ENOENT)
                        goto done;
        }

        /*
         * If this is a general record there may be an on-disk version
         * that must be deleted before we can insert the new record.
         */
        if (record->type == HAMMER_MEM_RECORD_GENERAL) {
                error = hammer_delete_general(cursor, record->ip,
                                              &record->leaf);
                if (error && error != ENOENT)
                        goto done;
        }

        /*
         * Setup the cursor.
         */
        hammer_normalize_cursor(cursor);
        cursor->key_beg = record->leaf.base;
        cursor->flags &= ~HAMMER_CURSOR_INITMASK;
        cursor->flags |= HAMMER_CURSOR_BACKEND;
        cursor->flags &= ~HAMMER_CURSOR_INSERT;

        /*
         * Records can wind up on-media before the inode itself is on-media.
         * Flag the case.
         */
        record->ip->flags |= HAMMER_INODE_DONDISK;

        /*
         * If we are deleting a directory entry an exact match must be
         * found on-disk.
         */
        if (record->type == HAMMER_MEM_RECORD_DEL) {
                error = hammer_btree_lookup(cursor);
                if (error == 0) {
                        error = hammer_ip_delete_record(cursor, record->ip,
                                                        trans->tid);
                        if (error == 0) {
                                record->flags |= HAMMER_RECF_DELETED_FE;
                                record->flags |= HAMMER_RECF_DELETED_BE;
                        }
                }
                goto done;
        }

        /*
         * We are inserting.
         *
         * Issue a lookup to position the cursor and locate the cluster.  The
         * target key should not exist.  If we are creating a directory entry
         * we may have to iterate the low 32 bits of the key to find an unused
         * key.
         */
        cursor->flags |= HAMMER_CURSOR_INSERT;

        error = hammer_btree_lookup(cursor);
        if (hammer_debug_inode)
                kprintf("DOINSERT LOOKUP %d\n", error);
        if (error == 0) {
                kprintf("hammer_ip_sync_record: duplicate rec "
                        "at (%016llx)\n", record->leaf.base.key);
                Debugger("duplicate record1");
                error = EIO;
        }
#if 0
        if (record->type == HAMMER_MEM_RECORD_DATA)
                kprintf("sync_record  %016llx ---------------- %016llx %d\n",
                        record->leaf.base.key - record->leaf.data_len,
                        record->leaf.data_offset, error);
#endif

        if (error != ENOENT)
                goto done;

        /*
         * Allocate the record and data.  The result buffers will be
         * marked as being modified and further calls to
         * hammer_modify_buffer() will result in unneeded UNDO records.
         *
         * Support zero-fill records (data == NULL and data_len != 0)
         */
        if (record->type == HAMMER_MEM_RECORD_DATA) {
                /*
                 * The data portion of a bulk-data record has already been
                 * committed to disk, we need only adjust the layer2
                 * statistics in the same transaction as our B-Tree insert.
                 */
                KKASSERT(record->leaf.data_offset != 0);
                hammer_blockmap_finalize(trans, record->leaf.data_offset,
                                         record->leaf.data_len);
                error = 0;
        } else if (record->data && record->leaf.data_len) {
                /*
                 * Wholly cached record, with data.  Allocate the data.
                 */
                bdata = hammer_alloc_data(trans, record->leaf.data_len,
                                          record->leaf.base.rec_type,
                                          &record->leaf.data_offset,
                                          &cursor->data_buffer, &error);
                if (bdata == NULL)
                        goto done;
                hammer_crc_set_leaf(record->data, &record->leaf);
                hammer_modify_buffer(trans, cursor->data_buffer, NULL, 0);
                bcopy(record->data, bdata, record->leaf.data_len);
                hammer_modify_buffer_done(cursor->data_buffer);
        } else {
                /*
                 * Wholly cached record, without data.
                 */
                record->leaf.data_offset = 0;
                record->leaf.data_crc = 0;
        }

        error = hammer_btree_insert(cursor, &record->leaf, &doprop);
        if (hammer_debug_inode && error) {
                kprintf("BTREE INSERT error %d @ %016llx:%d key %016llx\n",
                        error, cursor->node->node_offset, cursor->index,
                        record->leaf.base.key);
        }

        /*
         * Our record is on-disk, normally mark the in-memory version as
         * deleted.  If the record represented a directory deletion but
         * we had to sync a valid directory entry to disk we must convert
         * the record to a covering delete so the frontend does not have
         * visibility on the synced entry.
         */
        if (error == 0) {
                if (doprop) {
                        hammer_btree_do_propagation(cursor, record->ip,
                                                    &record->leaf);
                }
                if (record->flags & HAMMER_RECF_CONVERT_DELETE) {
                        KKASSERT(record->type == HAMMER_MEM_RECORD_ADD);
                        record->flags &= ~HAMMER_RECF_DELETED_FE;
                        record->type = HAMMER_MEM_RECORD_DEL;
                        KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
                        record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
                        /* hammer_flush_record_done takes care of the rest */
                } else {
                        record->flags |= HAMMER_RECF_DELETED_FE;
                        record->flags |= HAMMER_RECF_DELETED_BE;
                }
        } else {
                if (record->leaf.data_offset) {
                        hammer_blockmap_free(trans, record->leaf.data_offset,
                                             record->leaf.data_len);
                }
        }

done:
        return(error);
}

/*
 * Add the record to the inode's rec_tree.  The low 32 bits of a directory
 * entry's key are used to deal with hash collisions in the upper 32 bits.
 * A unique 64 bit key is generated in-memory and may be regenerated a
 * second time when the directory record is flushed to the on-disk B-Tree.
 *
 * A referenced record is passed to this function.  This function
 * eats the reference.  If an error occurs the record will be deleted.
 *
 * A copy of the temporary record->data pointer provided by the caller
 * will be made.
 */
static
int
hammer_mem_add(hammer_record_t record)
{
        hammer_mount_t hmp = record->ip->hmp;

        /*
         * Make a private copy of record->data
         */
        if (record->data)
                KKASSERT(record->flags & HAMMER_RECF_ALLOCDATA);

        /*
         * Insert into the RB tree.  A unique key should have already
         * been selected if this is a directory entry.
         */
        if (RB_INSERT(hammer_rec_rb_tree, &record->ip->rec_tree, record)) {
                record->flags |= HAMMER_RECF_DELETED_FE;
                hammer_rel_mem_record(record);
                return (EEXIST);
        }
        ++hmp->count_newrecords;
        ++hmp->rsv_recs;
        ++record->ip->rsv_recs;
        record->ip->hmp->rsv_databytes += record->leaf.data_len;
        record->flags |= HAMMER_RECF_ONRBTREE;
        hammer_modify_inode(record->ip, HAMMER_INODE_XDIRTY);
        hammer_rel_mem_record(record);
        return(0);
}

/************************************************************************
 *                   HAMMER INODE MERGED-RECORD FUNCTIONS               *
 ************************************************************************
 *
 * These functions augment the B-Tree scanning functions in hammer_btree.c
 * by merging in-memory records with on-disk records.
 */

/*
 * Locate a particular record either in-memory or on-disk.
 *
 * NOTE: This is basically a standalone routine, hammer_ip_next() may
 * NOT be called to iterate results.
 */
int
hammer_ip_lookup(hammer_cursor_t cursor)
{
        int error;

        /*
         * If the element is in-memory return it without searching the
         * on-disk B-Tree.
         */
        KKASSERT(cursor->ip);
        error = hammer_mem_lookup(cursor);
        if (error == 0) {
                cursor->leaf = &cursor->iprec->leaf;
                return(error);
        }
        if (error != ENOENT)
                return(error);

        /*
         * If the inode has on-disk components search the on-disk B-Tree.
         */
        if ((cursor->ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) == 0)
                return(error);
        error = hammer_btree_lookup(cursor);
        if (error == 0)
                error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
        return(error);
}

/*
 * Locate the first record within the cursor's key_beg/key_end range,
 * restricted to a particular inode.  0 is returned on success, ENOENT
 * if no records matched the requested range, or some other error.
 *
 * When 0 is returned hammer_ip_next() may be used to iterate additional
 * records within the requested range.
 *
 * This function can return EDEADLK, requiring the caller to terminate
 * the cursor and try again.
 */
int
hammer_ip_first(hammer_cursor_t cursor)
{
        hammer_inode_t ip = cursor->ip;
        int error;

        KKASSERT(ip != NULL);

        /*
         * Clean up fields and setup for merged scan
         */
        cursor->flags &= ~HAMMER_CURSOR_DELBTREE;
        cursor->flags |= HAMMER_CURSOR_ATEDISK | HAMMER_CURSOR_ATEMEM;
        cursor->flags |= HAMMER_CURSOR_DISKEOF | HAMMER_CURSOR_MEMEOF;
        if (cursor->iprec) {
                hammer_rel_mem_record(cursor->iprec);
                cursor->iprec = NULL;
        }

        /*
         * Search the on-disk B-Tree.  hammer_btree_lookup() only does an
         * exact lookup so if we get ENOENT we have to call the iterate
         * function to validate the first record after the begin key.
         *
         * The ATEDISK flag is used by hammer_btree_iterate to determine
         * whether it must index forwards or not.  It is also used here
         * to select the next record from in-memory or on-disk.
         *
         * EDEADLK can only occur if the lookup hit an empty internal
         * element and couldn't delete it.  Since this could only occur
         * in-range, we can just iterate from the failure point.
         */
        if (ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) {
                error = hammer_btree_lookup(cursor);
                if (error == ENOENT || error == EDEADLK) {
                        cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
                        if (hammer_debug_general & 0x2000) {
                                kprintf("error %d node %p %016llx index %d\n",
                                        error, cursor->node,
                                        cursor->node->node_offset,
                                        cursor->index);
                        }
                        error = hammer_btree_iterate(cursor);
                }
                if (error && error != ENOENT)
                        return(error);
                if (error == 0) {
                        cursor->flags &= ~HAMMER_CURSOR_DISKEOF;
                        cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
                } else {
                        cursor->flags |= HAMMER_CURSOR_ATEDISK;
                }
        }

        /*
         * Search the in-memory record list (Red-Black tree).  Unlike the
         * B-Tree search, mem_first checks for records in the range.
         */
        error = hammer_mem_first(cursor);
        if (error && error != ENOENT)
                return(error);
        if (error == 0) {
                cursor->flags &= ~HAMMER_CURSOR_MEMEOF;
                cursor->flags &= ~HAMMER_CURSOR_ATEMEM;
                if (hammer_ip_iterate_mem_good(cursor, cursor->iprec) == 0)
                        cursor->flags |= HAMMER_CURSOR_ATEMEM;
        }

        /*
         * This will return the first matching record.
         */
        return(hammer_ip_next(cursor));
}
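
/*
 * Minimal usage sketch of a merged scan (illustrative only; assumes the
 * caller has initialized the cursor and set key_beg/key_end, and omits
 * EDEADLK retry handling):
 */
#if 0
        error = hammer_ip_first(&cursor);
        while (error == 0) {
                /* examine cursor.leaf (and extract data as needed) */
                error = hammer_ip_next(&cursor);
        }
        if (error == ENOENT)
                error = 0;      /* ran past the last matching record */
#endif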
1343
1344 /*
1345  * Retrieve the next record in a merged iteration within the bounds of the
1346  * cursor.  This call may be made multiple times after the cursor has been
1347  * initially searched with hammer_ip_first().
1348  *
1349  * 0 is returned on success, ENOENT if no further records match the
1350  * requested range, or some other error code is returned.
1351  */
1352 int
1353 hammer_ip_next(hammer_cursor_t cursor)
1354 {
1355         hammer_btree_elm_t elm;
1356         hammer_record_t rec, save;
1357         int error;
1358         int r;
1359
1360 next_btree:
1361         /*
1362          * Load the current on-disk and in-memory record.  If we ate any
1363          * records we have to get the next one. 
1364          *
1365          * If we deleted the last on-disk record we had scanned ATEDISK will
1366          * be clear and DELBTREE will be set, forcing a call to iterate. The
1367          * fact that ATEDISK is clear causes iterate to re-test the 'current'
1368          * element.  If ATEDISK is set, iterate will skip the 'current'
1369          * element.
1370          *
1371          * Get the next on-disk record
1372          */
1373         if (cursor->flags & (HAMMER_CURSOR_ATEDISK|HAMMER_CURSOR_DELBTREE)) {
1374                 if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
1375                         error = hammer_btree_iterate(cursor);
1376                         cursor->flags &= ~HAMMER_CURSOR_DELBTREE;
1377                         if (error == 0) {
1378                                 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
1379                                 hammer_cache_node(&cursor->ip->cache[1],
1380                                                   cursor->node);
1381                         } else {
1382                                 cursor->flags |= HAMMER_CURSOR_DISKEOF |
1383                                                  HAMMER_CURSOR_ATEDISK;
1384                         }
1385                 }
1386         }
1387
1388 next_memory:
1389         /*
1390          * Get the next in-memory record.  The record can be ripped out
1391          * of the RB tree so we maintain a scan_info structure to track
1392          * the next node.
1393          *
1394          * hammer_rec_scan_cmp:  Is the record still in our general range
1395          *                       (non-inclusive of snapshot exclusions)?
1396          * hammer_rec_scan_callback: Is the record in our snapshot?
1397          */
1398         if (cursor->flags & HAMMER_CURSOR_ATEMEM) {
1399                 if ((cursor->flags & HAMMER_CURSOR_MEMEOF) == 0) {
1400                         save = cursor->iprec;
1401                         cursor->iprec = NULL;
1402                         rec = save ? hammer_rec_rb_tree_RB_NEXT(save) : NULL;
1403                         while (rec) {
1404                                 if (hammer_rec_scan_cmp(rec, cursor) != 0)
1405                                         break;
1406                                 if (hammer_rec_scan_callback(rec, cursor) != 0)
1407                                         break;
1408                                 rec = hammer_rec_rb_tree_RB_NEXT(rec);
1409                         }
1410                         if (save)
1411                                 hammer_rel_mem_record(save);
1412                         if (cursor->iprec) {
1413                                 KKASSERT(cursor->iprec == rec);
1414                                 cursor->flags &= ~HAMMER_CURSOR_ATEMEM;
1415                         } else {
1416                                 cursor->flags |= HAMMER_CURSOR_MEMEOF;
1417                         }
1418                 }
1419         }
1420
1421         /*
1422          * The memory record may have become stale while being held in
1423          * cursor->iprec.  We are interlocked against the backend
1424          * with regard to B-Tree entries.
1425          */
1426         if ((cursor->flags & HAMMER_CURSOR_ATEMEM) == 0) {
1427                 if (hammer_ip_iterate_mem_good(cursor, cursor->iprec) == 0) {
1428                         cursor->flags |= HAMMER_CURSOR_ATEMEM;
1429                         goto next_memory;
1430                 }
1431         }
1432
1433         /*
1434          * Extract either the disk or memory record depending on their
1435          * relative position.
1436          */
1437         error = 0;
1438         switch(cursor->flags & (HAMMER_CURSOR_ATEDISK | HAMMER_CURSOR_ATEMEM)) {
1439         case 0:
1440                 /*
1441                  * Both entries valid.   Compare the entries and nominally
1442                  * return the first one in the sort order.  Numerous cases
1443                  * require special attention, however.
1444                  */
1445                 elm = &cursor->node->ondisk->elms[cursor->index];
1446                 r = hammer_btree_cmp(&elm->base, &cursor->iprec->leaf.base);
1447
1448                 /*
1449                  * If the two entries differ only by their key (-2/2) or
1450                  * create_tid (-1/1), and are DATA records, we may have a
1451                  * nominal match.  We have to calculate the base file
1452                  * offset of the data.
1453                  */
1454                 if (r <= 2 && r >= -2 && r != 0 &&
1455                     cursor->ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE &&
1456                     cursor->iprec->type == HAMMER_MEM_RECORD_DATA) {
1457                         int64_t base1 = elm->leaf.base.key - elm->leaf.data_len;
1458                         int64_t base2 = cursor->iprec->leaf.base.key -
1459                                         cursor->iprec->leaf.data_len;
1460                         if (base1 == base2)
1461                                 r = 0;
1462                 }
1463
1464                 if (r < 0) {
1465                         error = hammer_btree_extract(cursor,
1466                                                      HAMMER_CURSOR_GET_LEAF);
1467                         cursor->flags |= HAMMER_CURSOR_ATEDISK;
1468                         break;
1469                 }
1470
1471                 /*
1472                  * If the entries match exactly the memory entry is either
1473                  * an on-disk directory entry deletion or a bulk data
1474                  * overwrite.  If it is a directory entry deletion we eat
1475                  * both entries.
1476                  *
1477                  * For the bulk-data overwrite case it is possible to have
1478                  * visibility into both, which simply means the syncer
1479                  * hasn't gotten around to doing the delete+insert sequence
1480                  * on the B-Tree.  Use the memory entry and throw away the
1481                  * on-disk entry.
1482                  *
1483                  * If the in-memory record is neither of these we
1484                  * probably caught the syncer while it was syncing it to
1485                  * the media.  Since we hold a shared lock on the cursor,
1486                  * the in-memory record had better be marked deleted at
1487                  * this point.
1488                  */
1489                 if (r == 0) {
1490                         if (cursor->iprec->type == HAMMER_MEM_RECORD_DEL) {
1491                                 if ((cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
1492                                         cursor->flags |= HAMMER_CURSOR_ATEDISK;
1493                                         cursor->flags |= HAMMER_CURSOR_ATEMEM;
1494                                         goto next_btree;
1495                                 }
1496                         } else if (cursor->iprec->type == HAMMER_MEM_RECORD_DATA) {
1497                                 if ((cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
1498                                         cursor->flags |= HAMMER_CURSOR_ATEDISK;
1499                                 }
1500                                 /* fall through to memory entry */
1501                         } else {
1502                                 panic("hammer_ip_next: duplicate mem/b-tree entry");
1503                                 cursor->flags |= HAMMER_CURSOR_ATEMEM;
1504                                 goto next_memory;
1505                         }
1506                 }
1507                 /* fall through to the memory entry */
1508         case HAMMER_CURSOR_ATEDISK:
1509                 /*
1510                  * Only the memory entry is valid.
1511                  */
1512                 cursor->leaf = &cursor->iprec->leaf;
1513                 cursor->flags |= HAMMER_CURSOR_ATEMEM;
1514
1515                 /*
1516                  * If the memory entry is an on-disk deletion we should have
1517                  * If the memory entry is an on-disk deletion we should also
1518                  * have found a B-Tree record.  If the backend beat us to it,
1519                  * it would have interlocked the cursor and we would have seen
1520                  * the in-memory record marked DELETED_FE.
1521                 if (cursor->iprec->type == HAMMER_MEM_RECORD_DEL &&
1522                     (cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
1523                         panic("hammer_ip_next: del-on-disk with no b-tree entry");
1524                 }
1525                 break;
1526         case HAMMER_CURSOR_ATEMEM:
1527                 /*
1528                  * Only the disk entry is valid
1529                  */
1530                 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
1531                 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1532                 break;
1533         default:
1534                 /*
1535                  * Neither entry is valid
1536                  *
1537                  * XXX error not set properly
1538                  */
1539                 cursor->leaf = NULL;
1540                 error = ENOENT;
1541                 break;
1542         }
1543         return(error);
1544 }
1545
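/*
 * A minimal usage sketch: callers such as hammer_ip_delete_range() and
 * hammer_ip_delete_clean() below drive the merged iteration by positioning
 * the cursor with hammer_ip_first() and then looping on hammer_ip_next()
 * until it returns ENOENT:
 *
 *      error = hammer_ip_first(cursor);
 *      while (error == 0) {
 *              leaf = cursor->leaf;
 *              ...process the record...
 *              error = hammer_ip_next(cursor);
 *      }
 *      if (error == ENOENT)
 *              error = 0;
 */
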
1546 /*
1547  * Resolve the cursor->data pointer for the current cursor position in
1548  * a merged iteration.
1549  */
1550 int
1551 hammer_ip_resolve_data(hammer_cursor_t cursor)
1552 {
1553         hammer_record_t record;
1554         int error;
1555
1556         if (hammer_cursor_inmem(cursor)) {
1557                 /*
1558                  * The data associated with an in-memory record is usually
1559                  * kmalloced, but reserve-ahead data records will have an
1560                  * on-disk reference.
1561                  *
1562                  * NOTE: Reserve-ahead data records must be handled in the
1563                  * context of the related high level buffer cache buffer
1564                  * to interlock against async writes.
1565                  */
1566                 record = cursor->iprec;
1567                 cursor->data = record->data;
1568                 error = 0;
1569                 if (cursor->data == NULL) {
1570                         KKASSERT(record->leaf.base.rec_type ==
1571                                  HAMMER_RECTYPE_DATA);
1572                         cursor->data = hammer_bread_ext(cursor->trans->hmp,
1573                                                     record->leaf.data_offset,
1574                                                     record->leaf.data_len,
1575                                                     &error,
1576                                                     &cursor->data_buffer);
1577                 }
1578         } else {
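                /*
                 * On-disk record: point cursor->leaf at the B-Tree element
                 * and let hammer_btree_extract() resolve the data buffer.
                 */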
1579                 cursor->leaf = &cursor->node->ondisk->elms[cursor->index].leaf;
1580                 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA);
1581         }
1582         return(error);
1583 }
1584
1585 /*
1586  * Backend truncation / record replacement - delete records in range.
1587  *
1588  * Delete all records within the specified range for inode ip.  In-memory
1589  * records still associated with the frontend are ignored. 
1590  *
1591  * If truncating is non-zero, in-memory records associated with the back-end
1592  * are ignored.  If truncating is > 1 we can return EWOULDBLOCK.
1593  *
1594  * NOTES:
1595  *
1596  *      * An unaligned range will cause new records to be added to cover
1597  *        the edge cases. (XXX not implemented yet).
1598  *
1599  *      * Replacement via reservations (see hammer_ip_sync_record_cursor())
1600  *        also does not deal with unaligned ranges.
1601  *
1602  *      * ran_end is inclusive (e.g. 0,1023 instead of 0,1024).
1603  *
1604  *      * Record keys for regular file data have to be special-cased since
1605  *        they indicate the end of the range (key = base + bytes).
1606  *
1607  *      * This function may be asked to delete ridiculously huge ranges, for
1608  *        example if someone truncates or removes a 1TB regular file.  We
1609  *        must be very careful on restarts and we may have to stop w/
1610  *        EWOULDBLOCK to avoid blowing out the buffer cache.
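 *
 *      * Worked example (illustrative, using the key setup below): deleting
 *        bytes 0-1023 of a regular file scans keys [1, 1024 + MAXPHYS].
 *        A 1024 byte data record covering bytes 0-1023 is stored under
 *        key 1024 (base + bytes) and is therefore visited, while a record
 *        whose data starts at or beyond byte 1024 fails the right edge
 *        test and terminates the scan.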
1611  */
1612 int
1613 hammer_ip_delete_range(hammer_cursor_t cursor, hammer_inode_t ip,
1614                        int64_t ran_beg, int64_t ran_end, int truncating)
1615 {
1616         hammer_transaction_t trans = cursor->trans;
1617         hammer_btree_leaf_elm_t leaf;
1618         int error;
1619         int64_t off;
1620         int64_t tmp64;
1621
1622 #if 0
1623         kprintf("delete_range %p %016llx-%016llx\n", ip, ran_beg, ran_end);
1624 #endif
1625
1626         KKASSERT(trans->type == HAMMER_TRANS_FLS);
1627 retry:
1628         hammer_normalize_cursor(cursor);
1629         cursor->key_beg.localization = ip->obj_localization +
1630                                        HAMMER_LOCALIZE_MISC;
1631         cursor->key_beg.obj_id = ip->obj_id;
1632         cursor->key_beg.create_tid = 0;
1633         cursor->key_beg.delete_tid = 0;
1634         cursor->key_beg.obj_type = 0;
1635
1636         if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
1637                 cursor->key_beg.key = ran_beg;
1638                 cursor->key_beg.rec_type = HAMMER_RECTYPE_DB;
1639         } else {
1640                 /*
1641                  * The key in the B-Tree is (base+bytes), so the first possible
1642                  * matching key is ran_beg + 1.
1643                  */
1644                 cursor->key_beg.key = ran_beg + 1;
1645                 cursor->key_beg.rec_type = HAMMER_RECTYPE_DATA;
1646         }
1647
1648         cursor->key_end = cursor->key_beg;
1649         if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
1650                 cursor->key_end.key = ran_end;
1651         } else {
1652                 tmp64 = ran_end + MAXPHYS + 1;  /* work around GCC-4 bug */
1653                 if (tmp64 < ran_end)
1654                         cursor->key_end.key = 0x7FFFFFFFFFFFFFFFLL;
1655                 else
1656                         cursor->key_end.key = ran_end + MAXPHYS + 1;
1657         }
1658
1659         cursor->asof = ip->obj_asof;
1660         cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1661         cursor->flags |= HAMMER_CURSOR_ASOF;
1662         cursor->flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
1663         cursor->flags |= HAMMER_CURSOR_BACKEND;
1664         cursor->flags |= HAMMER_CURSOR_END_INCLUSIVE;
1665
1666         error = hammer_ip_first(cursor);
1667
1668         /*
1669          * Iterate through matching records and mark them as deleted.
1670          */
1671         while (error == 0) {
1672                 leaf = cursor->leaf;
1673
1674                 KKASSERT(leaf->base.delete_tid == 0);
1675
1676                 /*
1677                  * There may be overlap cases for regular file data.  Also
1678                  * remember the key for a regular file record is (base + len),
1679                  * NOT (base).
1680                  */
1681                 if (leaf->base.rec_type == HAMMER_RECTYPE_DATA) {
1682                         off = leaf->base.key - leaf->data_len;
1683                         /*
1684                          * Check the left edge case.  We currently do not
1685                          * split existing records.
1686                          */
1687                         if (off < ran_beg) {
1688                                 panic("hammer left edge case %016llx %d\n",
1689                                         leaf->base.key, leaf->data_len);
1690                         }
1691
1692                         /*
1693                          * Check the right edge case.  Note that the
1694                          * record can be completely out of bounds, which
1695                          * terminates the search.
1696                          *
1697                          * base->key is exclusive of the right edge while
1698                          * ran_end is inclusive of the right edge.  The
1699                          * (key - data_len) left boundary is inclusive.
1700                          *
1701                          * XXX theory-check this test at some point, are
1702                          * we missing a + 1 somewhere?  Note that ran_end
1703                          * could overflow.
1704                          */
1705                         if (leaf->base.key - 1 > ran_end) {
1706                                 if (leaf->base.key - leaf->data_len > ran_end)
1707                                         break;
1708                                 panic("hammer right edge case\n");
1709                         }
1710                 } else {
1711                         off = leaf->base.key;
1712                 }
1713
1714                 /*
1715                  * Delete the record.  When truncating we do not delete
1716                  * in-memory (data) records because they represent data
1717                  * written after the truncation.
1718                  *
1719                  * This will also physically destroy the B-Tree entry and
1720                  * data if the retention policy dictates.  The function
1721                  * will set HAMMER_CURSOR_DELBTREE which hammer_ip_next()
1722                  * uses to perform a fixup.
1723                  */
1724                 if (truncating == 0 || hammer_cursor_ondisk(cursor)) {
1725                         error = hammer_ip_delete_record(cursor, ip, trans->tid);
1726                         /*
1727                          * If we have built up too many meta-buffers we risk
1728                          * deadlocking the kernel and must stop.  This can
1729                          * occur when deleting ridiculously huge files.
1730                          * sync_trunc_off is updated so the next cycle does
1731                          * not re-iterate records we have already deleted.
1732                          *
1733                          * This is only done with formal truncations.
1734                          */
1735                         if (truncating > 1 && error == 0 &&
1736                             hammer_flusher_meta_limit(ip->hmp)) {
1737                                 ip->sync_trunc_off = off;
1738                                 error = EWOULDBLOCK;
1739                         }
1740                 }
1741                 if (error)
1742                         break;
1743                 ran_beg = off;  /* for restart */
1744                 error = hammer_ip_next(cursor);
1745         }
1746         if (cursor->node)
1747                 hammer_cache_node(&ip->cache[1], cursor->node);
1748
1749         if (error == EDEADLK) {
1750                 hammer_done_cursor(cursor);
1751                 error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
1752                 if (error == 0)
1753                         goto retry;
1754         }
1755         if (error == ENOENT)
1756                 error = 0;
1757         return(error);
1758 }
1759
1760 /*
1761  * This backend function deletes the specified record on-disk, similar to
1762  * delete_range but for a specific record.  Unlike the exact deletions
1763  * used when deleting a directory entry, this function uses an ASOF search
1764  * like delete_range.
1765  *
1766  * This function may be called with ip->obj_asof set for a slave snapshot,
1767  * so do not use ip->obj_asof; we always delete only non-historical records.
1768  */
1769 static int
1770 hammer_delete_general(hammer_cursor_t cursor, hammer_inode_t ip,
1771                       hammer_btree_leaf_elm_t leaf)
1772 {
1773         hammer_transaction_t trans = cursor->trans;
1774         int error;
1775
1776         KKASSERT(trans->type == HAMMER_TRANS_FLS);
1777 retry:
1778         hammer_normalize_cursor(cursor);
1779         cursor->key_beg = leaf->base;
1780         cursor->asof = HAMMER_MAX_TID;
1781         cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1782         cursor->flags |= HAMMER_CURSOR_ASOF;
1783         cursor->flags |= HAMMER_CURSOR_BACKEND;
1784         cursor->flags &= ~HAMMER_CURSOR_INSERT;
1785
1786         error = hammer_btree_lookup(cursor);
1787         if (error == 0) {
1788                 error = hammer_ip_delete_record(cursor, ip, trans->tid);
1789         }
1790         if (error == EDEADLK) {
1791                 hammer_done_cursor(cursor);
1792                 error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
1793                 if (error == 0)
1794                         goto retry;
1795         }
1796         return(error);
1797 }
1798
1799 /*
1800  * This function deletes remaining auxiliary records when an inode is
1801  * being deleted.  This function explicitly does not delete the
1802  * inode record, directory entry, data, or db records.  Those must be
1803  * properly disposed of prior to this call.
1804  */
1805 int
1806 hammer_ip_delete_clean(hammer_cursor_t cursor, hammer_inode_t ip, int *countp)
1807 {
1808         hammer_transaction_t trans = cursor->trans;
1809         hammer_btree_leaf_elm_t leaf;
1810         int error;
1811
1812         KKASSERT(trans->type == HAMMER_TRANS_FLS);
1813 retry:
1814         hammer_normalize_cursor(cursor);
1815         cursor->key_beg.localization = ip->obj_localization +
1816                                        HAMMER_LOCALIZE_MISC;
1817         cursor->key_beg.obj_id = ip->obj_id;
1818         cursor->key_beg.create_tid = 0;
1819         cursor->key_beg.delete_tid = 0;
1820         cursor->key_beg.obj_type = 0;
1821         cursor->key_beg.rec_type = HAMMER_RECTYPE_CLEAN_START;
1822         cursor->key_beg.key = HAMMER_MIN_KEY;
1823
1824         cursor->key_end = cursor->key_beg;
1825         cursor->key_end.rec_type = HAMMER_RECTYPE_MAX;
1826         cursor->key_end.key = HAMMER_MAX_KEY;
1827
1828         cursor->asof = ip->obj_asof;
1829         cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1830         cursor->flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
1831         cursor->flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
1832         cursor->flags |= HAMMER_CURSOR_BACKEND;
1833
1834         error = hammer_ip_first(cursor);
1835
1836         /*
1837          * Iterate through matching records and mark them as deleted.
1838          */
1839         while (error == 0) {
1840                 leaf = cursor->leaf;
1841
1842                 KKASSERT(leaf->base.delete_tid == 0);
1843
1844                 /*
1845                  * Mark the record and B-Tree entry as deleted.  This will
1846                  * also physically delete the B-Tree entry, record, and
1847                  * data if the retention policy dictates.  The function
1848                  * will set HAMMER_CURSOR_DELBTREE which hammer_ip_next()
1849                  * uses to perform a fixup.
1850                  *
1851                  * Directory entries (and delete-on-disk directory entries)
1852                  * must be synced and cannot be deleted.
1853                  */
1854                 error = hammer_ip_delete_record(cursor, ip, trans->tid);
1855                 ++*countp;
1856                 if (error)
1857                         break;
1858                 error = hammer_ip_next(cursor);
1859         }
1860         if (cursor->node)
1861                 hammer_cache_node(&ip->cache[1], cursor->node);
1862         if (error == EDEADLK) {
1863                 hammer_done_cursor(cursor);
1864                 error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
1865                 if (error == 0)
1866                         goto retry;
1867         }
1868         if (error == ENOENT)
1869                 error = 0;
1870         return(error);
1871 }
1872
1873 /*
1874  * Delete the record at the current cursor.  On success the cursor will
1875  * be positioned appropriately for an iteration but may no longer be at
1876  * a leaf node.
1877  *
1878  * This routine is only called from the backend.
1879  *
1880  * NOTE: This can return EDEADLK, requiring the caller to terminate the
1881  * cursor and retry.
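 *
 * The callers in this file handle EDEADLK by tearing the cursor down and
 * retrying, e.g.:
 *
 *      if (error == EDEADLK) {
 *              hammer_done_cursor(cursor);
 *              error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
 *              if (error == 0)
 *                      goto retry;
 *      }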
1882  */
1883 int
1884 hammer_ip_delete_record(hammer_cursor_t cursor, hammer_inode_t ip,
1885                         hammer_tid_t tid)
1886 {
1887         hammer_off_t zone2_offset;
1888         hammer_record_t iprec;
1889         hammer_btree_elm_t elm;
1890         hammer_mount_t hmp;
1891         int error;
1892
1893         KKASSERT(cursor->flags & HAMMER_CURSOR_BACKEND);
1894         KKASSERT(tid != 0);
1895         hmp = cursor->node->hmp;
1896
1897         /*
1898          * In-memory (unsynchronized) records can simply be freed.  This
1899          * only occurs in range iterations since all other records are
1900          * individually synchronized.  Thus there should be no confusion with
1901          * the interlock.
1902          *
1903          * An in-memory record may be deleted before being committed to disk,
1904  * but could have been accessed in the meantime.  The backing store
1905  * may never have been marked allocated and so hammer_blockmap_free() may
1906          * never get called on it.  Because of this we have to make sure that
1907          * we've gotten rid of any related hammer_buffer or buffer cache
1908          * buffer.
1909          */
1910         if (hammer_cursor_inmem(cursor)) {
1911                 iprec = cursor->iprec;
1912                 KKASSERT((iprec->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
1913                 iprec->flags |= HAMMER_RECF_DELETED_FE;
1914                 iprec->flags |= HAMMER_RECF_DELETED_BE;
1915
1916                 if (iprec->leaf.data_offset && iprec->leaf.data_len) {
1917                         zone2_offset = hammer_blockmap_lookup(hmp, iprec->leaf.data_offset, &error);
1918                         KKASSERT(error == 0);
1919                         hammer_del_buffers(hmp,
1920                                            iprec->leaf.data_offset,
1921                                            zone2_offset,
1922                                            iprec->leaf.data_len);
1923                 }
1924                 return(0);
1925         }
1926
1927         /*
1928          * On-disk records are marked as deleted by updating their delete_tid.
1929  * This does not affect their position in the B-Tree (which is based
1930          * on their create_tid).
1931          */
1932         error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
1933         elm = NULL;
1934
1935         if (error == 0) {
1936                 error = hammer_delete_at_cursor(
1937                                 cursor,
1938                                 HAMMER_DELETE_ADJUST | hammer_nohistory(ip),
1939                                 NULL);
1940         }
1941         return(error);
1942 }
1943
1944 /*
1945  * Delete the B-Tree element at the current cursor and do any necessary
1946  * mirror propagation.
1947  *
1948  * The cursor must be properly positioned for an iteration on return but
1949  * may be pointing at an internal element.
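 *
 * A brief gloss of delete_flags, as handled below: HAMMER_DELETE_ADJUST
 * marks the element deleted by setting its delete_tid/delete_ts (and may
 * trigger mirror_tid propagation), while HAMMER_DELETE_DESTROY physically
 * removes the B-Tree element and frees the associated data allocation when
 * it resides in a freeable zone.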
1950  */
1951 int
1952 hammer_delete_at_cursor(hammer_cursor_t cursor, int delete_flags,
1953                         int64_t *stat_bytes)
1954 {
1955         struct hammer_btree_leaf_elm save_leaf;
1956         hammer_btree_leaf_elm_t leaf;
1957         hammer_node_t node;
1958         hammer_btree_elm_t elm;
1959         hammer_off_t data_offset;
1960         int32_t data_len;
1961         u_int16_t rec_type;
1962         int error;
1963         int doprop;
1964
1965         error = hammer_cursor_upgrade(cursor);
1966         if (error)
1967                 return(error);
1968
1969         node = cursor->node;
1970         elm = &node->ondisk->elms[cursor->index];
1971         leaf = &elm->leaf;
1972         KKASSERT(elm->base.btype == HAMMER_BTREE_TYPE_RECORD);
1973
1974         /*
1975          * Adjust the delete_tid.  Update the mirror_tid propagation field
1976          * as well.
1977          */
1978         doprop = 0;
1979         if (delete_flags & HAMMER_DELETE_ADJUST) {
1980                 hammer_modify_node(cursor->trans, node, elm, sizeof(*elm));
1981                 elm->leaf.base.delete_tid = cursor->trans->tid;
1982                 elm->leaf.delete_ts = cursor->trans->time32;
1983                 hammer_modify_node_done(node);
1984
1985                 if (elm->leaf.base.delete_tid > node->ondisk->mirror_tid) {
1986                         hammer_modify_node_field(cursor->trans, node, mirror_tid);
1987                         node->ondisk->mirror_tid = elm->leaf.base.delete_tid;
1988                         hammer_modify_node_done(node);
1989                         doprop = 1;
1990                 }
1991
1992                 /*
1993                  * Adjust for the iteration.  We have deleted the current
1994                  * element and want to clear ATEDISK so the iteration does
1995                  * not skip the element after, which now becomes the current
1996                  * element.
1997                  */
1998                 if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
1999                         cursor->flags |= HAMMER_CURSOR_DELBTREE;
2000                         cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
2001                 }
2002
2003                 /*
2004                  * An on-disk record cannot have the same delete_tid
2005                  * as its create_tid.  In a chain of record updates
2006                  * this could result in a duplicate record.
2007                  */
2008                 KKASSERT(elm->leaf.base.delete_tid !=
2009                          elm->leaf.base.create_tid);
2010         }
2011
2012         /*
2013          * Destroy the B-Tree element if asked (typically if a nohistory
2014          * file or mount, or when called by the pruning code).
2015          *
2016          * Adjust the ATEDISK flag to properly support iterations.
2017          */
2018         if (delete_flags & HAMMER_DELETE_DESTROY) {
2019                 data_offset = elm->leaf.data_offset;
2020                 data_len = elm->leaf.data_len;
2021                 rec_type = elm->leaf.base.rec_type;
2022                 if (doprop) {
2023                         save_leaf = elm->leaf;
2024                         leaf = &save_leaf;
2025                 }
2026
2027                 error = hammer_btree_delete(cursor);
2028                 if (error == 0) {
2029                         /*
2030                          * This forces a fixup for the iteration because
2031                          * the cursor is now either sitting at the 'next'
2032                          * element or sitting at the end of a leaf.
2033                          */
2034                         if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
2035                                 cursor->flags |= HAMMER_CURSOR_DELBTREE;
2036                                 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
2037                         }
2038                 }
2039                 if (error == 0) {
2040                         switch(data_offset & HAMMER_OFF_ZONE_MASK) {
2041                         case HAMMER_ZONE_LARGE_DATA:
2042                         case HAMMER_ZONE_SMALL_DATA:
2043                         case HAMMER_ZONE_META:
2044                                 hammer_blockmap_free(cursor->trans,
2045                                                      data_offset, data_len);
2046                                 break;
2047                         default:
2048                                 break;
2049                         }
2050                 }
2051         }
2052
2053         /*
2054          * mirror_tid propagation occurs if the node's mirror_tid had to be
2055          * updated while adjusting the delete_tid.
2056          *
2057          * This occurs when deleting even in nohistory mode, but does not
2058          * occur when pruning an already-deleted node.
2059          */
2060         if (doprop) {
2061                 KKASSERT(cursor->ip != NULL);
2062                 hammer_btree_do_propagation(cursor, cursor->ip, leaf);
2063         }
2064         return (error);
2065 }
2066
2067 /*
2068  * Determine whether we can remove a directory.  This routine checks whether
2069  * a directory is empty or not and enforces flush connectivity.
2070  *
2071  * Flush connectivity requires that we block if the target directory is
2072  * currently flushing, otherwise it may not end up in the same flush group.
2073  *
2074  * Returns 0 on success, ENOTEMPTY or EDEADLK (or other errors) on failure.
2075  */
2076 int
2077 hammer_ip_check_directory_empty(hammer_transaction_t trans, hammer_inode_t ip)
2078 {
2079         struct hammer_cursor cursor;
2080         int error;
2081
2082         /*
2083          * Check directory empty
2084          */
2085         hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
2086
2087         cursor.key_beg.localization = ip->obj_localization +
2088                                       HAMMER_LOCALIZE_MISC;
2089         cursor.key_beg.obj_id = ip->obj_id;
2090         cursor.key_beg.create_tid = 0;
2091         cursor.key_beg.delete_tid = 0;
2092         cursor.key_beg.obj_type = 0;
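        /*
         * Scan all record types above the inode record itself; finding
         * any such record means the directory is not empty.
         */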
2093         cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE + 1;
2094         cursor.key_beg.key = HAMMER_MIN_KEY;
2095
2096         cursor.key_end = cursor.key_beg;
2097         cursor.key_end.rec_type = 0xFFFF;
2098         cursor.key_end.key = HAMMER_MAX_KEY;
2099
2100         cursor.asof = ip->obj_asof;
2101         cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
2102
2103         error = hammer_ip_first(&cursor);
2104         if (error == ENOENT)
2105                 error = 0;
2106         else if (error == 0)
2107                 error = ENOTEMPTY;
2108         hammer_done_cursor(&cursor);
2109         return(error);
2110 }
2111