HAMMER 33C/many: features and bug fixes.
1 /*
2  * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
3  * 
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  * 
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  * 
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  * 
34  * $DragonFly: src/sys/vfs/hammer/hammer_object.c,v 1.37 2008/03/20 06:08:40 dillon Exp $
35  */
36
37 #include "hammer.h"
38
39 static int hammer_mem_add(hammer_transaction_t trans, hammer_record_t record);
40 static int hammer_mem_lookup(hammer_cursor_t cursor, hammer_inode_t ip);
41 static int hammer_mem_first(hammer_cursor_t cursor, hammer_inode_t ip);
42
43 /*
44  * Red-black tree support.
45  */
46 static int
47 hammer_rec_rb_compare(hammer_record_t rec1, hammer_record_t rec2)
48 {
49         if (rec1->rec.base.base.rec_type < rec2->rec.base.base.rec_type)
50                 return(-1);
51         if (rec1->rec.base.base.rec_type > rec2->rec.base.base.rec_type)
52                 return(1);
53
54         if (rec1->rec.base.base.key < rec2->rec.base.base.key)
55                 return(-1);
56         if (rec1->rec.base.base.key > rec2->rec.base.base.key)
57                 return(1);
58
59         if (rec1->rec.base.base.create_tid == 0) {
60                 if (rec2->rec.base.base.create_tid == 0)
61                         return(0);
62                 return(1);
63         }
64         if (rec2->rec.base.base.create_tid == 0)
65                 return(-1);
66
67         if (rec1->rec.base.base.create_tid < rec2->rec.base.base.create_tid)
68                 return(-1);
69         if (rec1->rec.base.base.create_tid > rec2->rec.base.base.create_tid)
70                 return(1);
71         return(0);
72 }
73
74 static int
75 hammer_rec_compare(hammer_base_elm_t info, hammer_record_t rec)
76 {
77         if (info->rec_type < rec->rec.base.base.rec_type)
78                 return(-3);
79         if (info->rec_type > rec->rec.base.base.rec_type)
80                 return(3);
81
82         if (info->key < rec->rec.base.base.key)
83                 return(-2);
84         if (info->key > rec->rec.base.base.key)
85                 return(2);
86
87         if (info->create_tid == 0) {
88                 if (rec->rec.base.base.create_tid == 0)
89                         return(0);
90                 return(1);
91         }
92         if (rec->rec.base.base.create_tid == 0)
93                 return(-1);
94         if (info->create_tid < rec->rec.base.base.create_tid)
95                 return(-1);
96         if (info->create_tid > rec->rec.base.base.create_tid)
97                 return(1);
98         return(0);
99 }
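
/*
 * Note on hammer_rec_compare()'s return values: rec_type mismatches yield
 * +/-3, key mismatches +/-2, and create_tid differences only +/-1.  This
 * lets hammer_rec_scan_cmp() below treat |r| <= 1 as "still inside the
 * key range" and leave as-of visibility checks to the scan callback.
 */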
100
101 /*
102  * RB_SCAN comparison code for hammer_mem_first().  The argument order
103  * is reversed so the comparison result has to be negated.  key_beg and
104  * key_end are both range-inclusive.
105  *
106  * The creation timestamp can cause hammer_rec_compare() to return -1 or +1.
107  * These do not stop the scan.
108  *
109  * Localized deletions are not cached in-memory.
110  */
111 static
112 int
113 hammer_rec_scan_cmp(hammer_record_t rec, void *data)
114 {
115         hammer_cursor_t cursor = data;
116         int r;
117
118         r = hammer_rec_compare(&cursor->key_beg, rec);
119         if (r > 1)
120                 return(-1);
121         r = hammer_rec_compare(&cursor->key_end, rec);
122         if (r < -1)
123                 return(1);
124         return(0);
125 }
126
127 RB_GENERATE(hammer_rec_rb_tree, hammer_record, rb_node, hammer_rec_rb_compare);
128 RB_GENERATE_XLOOKUP(hammer_rec_rb_tree, INFO, hammer_record, rb_node,
129                     hammer_rec_compare, hammer_base_elm_t);
130
131 /*
132  * Allocate a record for the caller to finish filling in.  The record is
133  * returned referenced.
134  */
135 hammer_record_t
136 hammer_alloc_mem_record(hammer_inode_t ip)
137 {
138         hammer_record_t record;
139
140         ++hammer_count_records;
141         record = kmalloc(sizeof(*record), M_HAMMER, M_WAITOK|M_ZERO);
142         record->ip = ip;
143         record->rec.base.base.btype = HAMMER_BTREE_TYPE_RECORD;
144         hammer_ref(&record->lock);
145         return (record);
146 }
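
/*
 * A note on reference ownership: the record returned above carries a
 * single reference.  hammer_mem_add() (reached via hammer_ip_add_record()
 * or hammer_ip_add_directory()) eats that reference on both success and
 * failure.  To discard a record that will not be added, set
 * HAMMER_RECF_DELETED and call hammer_rel_mem_record().
 */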
147
148 /*
149  * Release a memory record.  Records marked for deletion are immediately
150  * removed from the RB-Tree but otherwise left intact until the last ref
151  * goes away.
152  */
153 void
154 hammer_rel_mem_record(struct hammer_record *record)
155 {
156         hammer_unref(&record->lock);
157
158         if (record->flags & HAMMER_RECF_DELETED) {
159                 if (record->flags & HAMMER_RECF_ONRBTREE) {
160                         RB_REMOVE(hammer_rec_rb_tree, &record->ip->rec_tree,
161                                   record);
162                         record->flags &= ~HAMMER_RECF_ONRBTREE;
163                 }
164                 if (record->lock.refs == 0) {
165                         if (record->flags & HAMMER_RECF_ALLOCDATA) {
166                                 --hammer_count_record_datas;
167                                 kfree(record->data, M_HAMMER);
168                                 record->flags &= ~HAMMER_RECF_ALLOCDATA;
169                         }
170                         record->data = NULL;
171                         --hammer_count_records;
172                         kfree(record, M_HAMMER);
173                         return;
174                 }
175         }
176
177         /*
178          * If someone wanted the record wake them up.
179          */
180         if (record->flags & HAMMER_RECF_WANTED) {
181                 record->flags &= ~HAMMER_RECF_WANTED;
182                 wakeup(record);
183         }
184 }
185
186 /*
187  * Lookup an in-memory record given the key specified in the cursor.  Works
188  * just like hammer_btree_lookup() but operates on an inode's in-memory
189  * record list.
190  *
191  * The lookup must fail if the record is marked for deferred deletion.
192  */
193 static
194 int
195 hammer_mem_lookup(hammer_cursor_t cursor, hammer_inode_t ip)
196 {
197         int error;
198
199         if (cursor->iprec) {
200                 hammer_rel_mem_record(cursor->iprec);
201                 cursor->iprec = NULL;
202         }
203         if (cursor->ip) {
204                 hammer_rec_rb_tree_scan_info_done(&cursor->scan,
205                                                   &cursor->ip->rec_tree);
206         }
207         cursor->ip = ip;
208         hammer_rec_rb_tree_scan_info_link(&cursor->scan, &ip->rec_tree);
209         cursor->scan.node = NULL;
210         cursor->iprec = hammer_rec_rb_tree_RB_LOOKUP_INFO(
211                                 &ip->rec_tree, &cursor->key_beg);
212         if (cursor->iprec == NULL) {
213                 error = ENOENT;
214         } else {
215                 hammer_ref(&cursor->iprec->lock);
216                 error = 0;
217         }
218         return(error);
219 }
220
221 /*
222  * hammer_mem_first() - locate the first in-memory record matching the
223  * cursor.
224  *
225  * The RB_SCAN function we use is designed as a callback.  We terminate it
226  * (return -1) as soon as we get a match.
227  */
228 static
229 int
230 hammer_rec_scan_callback(hammer_record_t rec, void *data)
231 {
232         hammer_cursor_t cursor = data;
233
234         /*
235          * We terminate on success, so this should be NULL on entry.
236          */
237         KKASSERT(cursor->iprec == NULL);
238
239         /*
240          * Skip if the record was marked deleted
241          */
242         if (rec->flags & HAMMER_RECF_DELETED)
243                 return(0);
244
245         /*
246          * Skip if not visible due to our as-of TID
247          */
248         if (cursor->flags & HAMMER_CURSOR_ASOF) {
249                 if (cursor->asof < rec->rec.base.base.create_tid)
250                         return(0);
251                 if (rec->rec.base.base.delete_tid &&
252                     cursor->asof >= rec->rec.base.base.delete_tid) {
253                         return(0);
254                 }
255         }
256
257         /*
258          * Block if currently being synchronized to disk, otherwise we
259          * may get a duplicate.  Wake up the syncer if it's stuck on
260          * the record.
261          */
262         hammer_ref(&rec->lock);
263         ++rec->blocked;
264         while (rec->flags & HAMMER_RECF_SYNCING) {
265                 rec->flags |= HAMMER_RECF_WANTED;
266                 tsleep(rec, 0, "hmrrc2", 0);
267         }
268         --rec->blocked;
269
270         /*
271          * The record may have been deleted while we were blocked.
272          */
273         if (rec->flags & HAMMER_RECF_DELETED) {
274                 hammer_rel_mem_record(rec);
275                 return(0);
276         }
277
278         /*
279          * Set the matching record and stop the scan.
280          */
281         cursor->iprec = rec;
282         return(-1);
283 }
284
285 static
286 int
287 hammer_mem_first(hammer_cursor_t cursor, hammer_inode_t ip)
288 {
289         if (cursor->iprec) {
290                 hammer_rel_mem_record(cursor->iprec);
291                 cursor->iprec = NULL;
292         }
293         if (cursor->ip) {
294                 hammer_rec_rb_tree_scan_info_done(&cursor->scan,
295                                                   &cursor->ip->rec_tree);
296         }
297         cursor->ip = ip;
298         hammer_rec_rb_tree_scan_info_link(&cursor->scan, &ip->rec_tree);
299
300         cursor->scan.node = NULL;
301         hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_scan_cmp,
302                                    hammer_rec_scan_callback, cursor);
303
304         /*
305          * Adjust scan.node and keep it linked into the RB-tree so we can
306          * hold the cursor through third party modifications of the RB-tree.
307          */
308         if (cursor->iprec) {
309                 cursor->scan.node = hammer_rec_rb_tree_RB_NEXT(cursor->iprec);
310                 return(0);
311         }
312         return(ENOENT);
313 }
314
315 void
316 hammer_mem_done(hammer_cursor_t cursor)
317 {
318         if (cursor->ip) {
319                 hammer_rec_rb_tree_scan_info_done(&cursor->scan,
320                                                   &cursor->ip->rec_tree);
321                 cursor->ip = NULL;
322         }
323         if (cursor->iprec) {
324                 hammer_rel_mem_record(cursor->iprec);
325                 cursor->iprec = NULL;
326         }
327 }
328
329 /************************************************************************
330  *                   HAMMER IN-MEMORY RECORD FUNCTIONS                  *
331  ************************************************************************
332  *
333  * These functions manipulate in-memory records.  Such records typically
334  * exist prior to being committed to disk or indexed via the on-disk B-Tree.
335  */
336
337 /*
338  * Add a directory entry (dip,ncp) which references inode (ip).
339  *
340  * Note that the low 32 bits of the namekey are set temporarily to create
341  * a unique in-memory record, and may be modified a second time when the
342  * record is synchronized to disk.  In particular, the low 32 bits cannot be
343  * all 0's when synching to disk, which is not handled here.
344  */
345 int
346 hammer_ip_add_directory(struct hammer_transaction *trans,
347                      struct hammer_inode *dip, struct namecache *ncp,
348                      struct hammer_inode *ip)
349 {
350         hammer_record_t record;
351         int error;
352         int bytes;
353
354         record = hammer_alloc_mem_record(dip);
355
356         bytes = ncp->nc_nlen;   /* NOTE: terminating \0 is NOT included */
357         if (++trans->hmp->namekey_iterator == 0)
358                 ++trans->hmp->namekey_iterator;
359
360         record->rec.entry.base.base.obj_id = dip->obj_id;
361         record->rec.entry.base.base.key =
362                 hammer_directory_namekey(ncp->nc_name, bytes);
363         record->rec.entry.base.base.key += trans->hmp->namekey_iterator;
364         record->rec.entry.base.base.create_tid = trans->tid;
365         record->rec.entry.base.base.rec_type = HAMMER_RECTYPE_DIRENTRY;
366         record->rec.entry.base.base.obj_type = ip->ino_rec.base.base.obj_type;
367         record->rec.entry.obj_id = ip->obj_id;
368         record->data = (void *)ncp->nc_name;
369         record->rec.entry.base.data_len = bytes;
370         ++ip->ino_rec.ino_nlinks;
371         hammer_modify_inode(trans, ip, HAMMER_INODE_RDIRTY);
372         /* NOTE: copies record->data */
373         error = hammer_mem_add(trans, record);
374         return(error);
375 }
376
377 /*
378  * Delete the directory entry and update the inode link count.  The
379  * cursor must be positioned at the directory entry record being deleted.
380  *
381  * NOTE: HAMMER_CURSOR_DELETE may not have been set.  XXX remove flag.
382  *
383  * This function can return EDEADLK requiring the caller to terminate
384  * the cursor and retry.
385  */
386 int
387 hammer_ip_del_directory(struct hammer_transaction *trans,
388                      hammer_cursor_t cursor, struct hammer_inode *dip,
389                      struct hammer_inode *ip)
390 {
391         int error;
392
393         error = hammer_ip_delete_record(cursor, trans->tid);
394
395         /*
396          * One less link.  The file may still be open in the OS even after
397          * all links have gone away so we only try to sync if the OS has
398          * no references and nlinks falls to 0.
399          *
400          * We have to terminate the cursor before syncing the inode to
401          * avoid deadlocking against ourselves.
402          *
403          * XXX we can't sync the inode here because the encompassing
404          * transaction might be a rename and might update the inode
405          * again with a new link.  That would force the delete_tid to be
406          * the same as the create_tid and cause a panic.
407          */
408         if (error == 0) {
409                 --ip->ino_rec.ino_nlinks;
410                 hammer_modify_inode(trans, ip, HAMMER_INODE_RDIRTY);
411                 if (ip->ino_rec.ino_nlinks == 0 &&
412                     (ip->vp == NULL || (ip->vp->v_flag & VINACTIVE))) {
413                         hammer_done_cursor(cursor);
414                         /*hammer_sync_inode(ip, MNT_NOWAIT, 1);*/
415                 }
416
417         }
418         return(error);
419 }
420
421 /*
422  * Add a record to an inode.
423  *
424  * The caller must allocate the record with hammer_alloc_mem_record(ip) and
425  * initialize the following additional fields:
426  *
427  * record->rec.entry.base.base.key
428  * record->rec.entry.base.base.rec_type
429  * record->rec.entry.base.data_len
430  * record->data         (a copy will be kmalloc'd if it cannot be embedded)
431  */
432 int
433 hammer_ip_add_record(struct hammer_transaction *trans, hammer_record_t record)
434 {
435         hammer_inode_t ip = record->ip;
436         int error;
437
438         record->rec.base.base.obj_id = ip->obj_id;
439         record->rec.base.base.create_tid = trans->tid;
440         record->rec.base.base.obj_type = ip->ino_rec.base.base.obj_type;
441
442         hammer_modify_inode(trans, ip, HAMMER_INODE_RDIRTY);
443         /* NOTE: copies record->data */
444         error = hammer_mem_add(trans, record);
445         return(error);
446 }
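
/*
 * Illustrative caller sketch for hammer_ip_add_record().  The names key,
 * rec_type, data, and bytes are placeholders supplied by the caller and
 * are not part of this file:
 *
 *	record = hammer_alloc_mem_record(ip);
 *	record->rec.base.base.key = key;
 *	record->rec.base.base.rec_type = rec_type;
 *	record->rec.base.data_len = bytes;
 *	record->data = data;
 *	error = hammer_ip_add_record(trans, record);
 *
 * hammer_mem_add() embeds or copies record->data, so the caller's buffer
 * only needs to remain valid across the call.
 */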
447
448 /*
449  * Sync data from a buffer cache buffer (typically) to the filesystem.  This
450  * is called via the strategy routine from a cached data source.  This code
451  * is responsible for actually writing a data record out to the disk.
452  *
453  * This can only occur non-historically (i.e. 'current' data only).
454  *
455  * The file offset must be HAMMER_BUFSIZE aligned but the data length
456  * can be truncated.  The record (currently) always represents a BUFSIZE
457  * swath of space whether the data is truncated or not.
458  */
459 int
460 hammer_ip_sync_data(hammer_transaction_t trans, hammer_inode_t ip,
461                        int64_t offset, void *data, int bytes)
462 {
463         struct hammer_cursor cursor;
464         hammer_record_ondisk_t rec;
465         union hammer_btree_elm elm;
466         hammer_off_t rec_offset;
467         void *bdata;
468         int error;
469
470         KKASSERT((offset & HAMMER_BUFMASK) == 0);
471 retry:
472         error = hammer_init_cursor(trans, &cursor, &ip->cache[0]);
473         if (error)
474                 return(error);
475         cursor.key_beg.obj_id = ip->obj_id;
476         cursor.key_beg.key = offset + bytes;
477         cursor.key_beg.create_tid = trans->tid;
478         cursor.key_beg.delete_tid = 0;
479         cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
480         cursor.asof = trans->tid;
481         cursor.flags |= HAMMER_CURSOR_INSERT;
482
483         /*
484          * Issue a lookup to position the cursor.
485          */
486         error = hammer_btree_lookup(&cursor);
487         if (error == 0) {
488                 kprintf("hammer_ip_sync_data: duplicate data at "
489                         "(%lld,%d) tid %016llx\n",
490                         offset, bytes, trans->tid);
491                 hammer_print_btree_elm(&cursor.node->ondisk->elms[cursor.index],
492                                        HAMMER_BTREE_TYPE_LEAF, cursor.index);
493                 panic("Duplicate data");
494                 error = EIO;
495         }
496         if (error != ENOENT)
497                 goto done;
498
499         /*
500          * Allocate record and data space.  HAMMER_RECTYPE_DATA records
501          * can cross buffer boundaries so we may have to split our bcopy.
502          */
503         rec = hammer_alloc_record(trans, &rec_offset, HAMMER_RECTYPE_DATA,
504                                   &cursor.record_buffer,
505                                   bytes, &bdata,
506                                   &cursor.data_buffer, &error);
507         if (rec == NULL)
508                 goto done;
509         if (hammer_debug_general & 0x1000)
510                 kprintf("OOB RECOR2 DATA REC %016llx DATA %016llx LEN=%d\n", rec_offset, rec->base.data_off, rec->base.data_len);
511
512         /*
513          * Fill everything in and insert our B-Tree node.
514          *
515          * NOTE: hammer_alloc_record() has already marked the related
516          * buffers as modified.  If we do it again we will generate
517          * unnecessary undo elements.
518          */
519         rec->base.base.btype = HAMMER_BTREE_TYPE_RECORD;
520         rec->base.base.obj_id = ip->obj_id;
521         rec->base.base.key = offset + bytes;
522         rec->base.base.create_tid = trans->tid;
523         rec->base.base.delete_tid = 0;
524         rec->base.base.rec_type = HAMMER_RECTYPE_DATA;
525         rec->base.data_crc = crc32(data, bytes);
526         KKASSERT(rec->base.data_len == bytes);
527
528         bcopy(data, bdata, bytes);
529
530         elm.leaf.base = rec->base.base;
531         elm.leaf.rec_offset = rec_offset;
532         elm.leaf.data_offset = rec->base.data_off;
533         elm.leaf.data_len = bytes;
534         elm.leaf.data_crc = rec->base.data_crc;
535
536         /*
537          * Data records can wind up on-disk before the inode itself is
538          * on-disk.  One must assume data records may be on-disk if either
539          * HAMMER_INODE_DONDISK or HAMMER_INODE_ONDISK is set
540          */
541         ip->flags |= HAMMER_INODE_DONDISK;
542
543         error = hammer_btree_insert(&cursor, &elm);
544         if (error == 0)
545                 goto done;
546
547         hammer_blockmap_free(trans, rec_offset, HAMMER_RECORD_SIZE);
548 done:
549         hammer_done_cursor(&cursor);
550         if (error == EDEADLK)
551                 goto retry;
552         return(error);
553 }
554
555 /*
556  * Sync an in-memory record to the disk.  This is typically called via fsync
557  * from a cached record source.  This code is responsible for actually
558  * writing a record out to the disk.
559  */
560 int
561 hammer_ip_sync_record(hammer_transaction_t trans, hammer_record_t record)
562 {
563         struct hammer_cursor cursor;
564         hammer_record_ondisk_t rec;
565         union hammer_btree_elm elm;
566         hammer_off_t rec_offset;
567         void *bdata;
568         int error;
569
570 retry:
571         /*
572          * If the record has been deleted or is being synchronized, stop.
573          * Interlock with the syncing flag.
574          */
575         if (record->flags & (HAMMER_RECF_DELETED | HAMMER_RECF_SYNCING))
576                 return(0);
577         record->flags |= HAMMER_RECF_SYNCING;
578
579         /*
580  * If someone other than us is referencing the record and not
581          * blocking waiting for us, we have to wait until they finish.
582          *
583          * It is possible the record got destroyed while we were blocked.
584          */
585         if (record->lock.refs > record->blocked + 1) {
586                 while (record->lock.refs > record->blocked + 1) {
587                         record->flags |= HAMMER_RECF_WANTED;
588                         tsleep(record, 0, "hmrrc1", 0);
589                 }
590                 if (record->flags & HAMMER_RECF_DELETED)
591                         return(0);
592         }
593
594         /*
595          * Get a cursor
596          */
597         error = hammer_init_cursor(trans, &cursor, &record->ip->cache[0]);
598         if (error)
599                 return(error);
600         cursor.key_beg = record->rec.base.base;
601         cursor.flags |= HAMMER_CURSOR_INSERT;
602
603         /*
604          * Issue a lookup to position the cursor and locate the cluster.  The
605          * target key should not exist.  If we are creating a directory entry
606          * we may have to iterate the low 32 bits of the key to find an unused
607          * key.
608          */
609         for (;;) {
610                 error = hammer_btree_lookup(&cursor);
611                 if (error)
612                         break;
613                 if (record->rec.base.base.rec_type != HAMMER_RECTYPE_DIRENTRY) {
614                         kprintf("hammer_ip_sync_record: duplicate rec "
615                                 "at (%016llx)\n", record->rec.base.base.key);
616                         Debugger("duplicate record1");
617                         error = EIO;
618                         break;
619                 }
620                 if (++trans->hmp->namekey_iterator == 0)
621                         ++trans->hmp->namekey_iterator;
622                 record->rec.base.base.key &= ~(0xFFFFFFFFLL);
623                 record->rec.base.base.key |= trans->hmp->namekey_iterator;
624                 cursor.key_beg.key = record->rec.base.base.key;
625         }
626         if (error != ENOENT)
627                 goto done;
628
629         /*
630          * Mark the record as undergoing synchronization.  Our cursor is
631          * holding a locked B-Tree node for the insertion which interlocks
632          * anyone trying to access this record.
633          *
634          * XXX There is still a race present related to iterations.  An
635          * iteration may process the record, a sync may occur, and then
636          * later process the B-Tree element for the same record.
637          *
638          * We do not try to synchronize a deleted record.
639          */
640         if (record->flags & HAMMER_RECF_DELETED) {
641                 error = 0;
642                 goto done;
643         }
644
645         /*
646          * Allocate the record and data.  hammer_alloc_record() already
647          * marks the result buffers as modified, so additional calls to
648          * hammer_modify_buffer() would only generate unneeded UNDO records.
649          *
650          * Support zero-fill records (data == NULL and data_len != 0)
651          */
652         if (record->data == NULL) {
653                 rec = hammer_alloc_record(trans, &rec_offset,
654                                           record->rec.base.base.rec_type,
655                                           &cursor.record_buffer,
656                                           0, &bdata,
657                                           NULL, &error);
658                 if (hammer_debug_general & 0x1000)
659                         kprintf("NULL RECORD DATA\n");
660         } else if (record->flags & HAMMER_RECF_INBAND) {
661                 rec = hammer_alloc_record(trans, &rec_offset,
662                                           record->rec.base.base.rec_type,
663                                           &cursor.record_buffer,
664                                           record->rec.base.data_len, &bdata,
665                                           NULL, &error);
666                 if (hammer_debug_general & 0x1000)
667                         kprintf("INBAND RECORD DATA %016llx DATA %016llx LEN=%d\n", rec_offset, rec->base.data_off, record->rec.base.data_len);
668         } else {
669                 rec = hammer_alloc_record(trans, &rec_offset,
670                                           record->rec.base.base.rec_type,
671                                           &cursor.record_buffer,
672                                           record->rec.base.data_len, &bdata,
673                                           &cursor.data_buffer, &error);
674                 if (hammer_debug_general & 0x1000)
675                         kprintf("OOB RECORD DATA REC %016llx DATA %016llx LEN=%d\n", rec_offset, rec->base.data_off, record->rec.base.data_len);
676         }
677
678         if (rec == NULL)
679                 goto done;
680
681         /*
682          * Fill in the remaining fields and insert our B-Tree node.
683          */
684         rec->base.base = record->rec.base.base;
685         bcopy(&record->rec.base + 1, &rec->base + 1,
686               HAMMER_RECORD_SIZE - sizeof(record->rec.base));
687
688         /*
689          * Copy the data and deal with zero-fill support.
690          */
691         if (record->data) {
692                 rec->base.data_crc = crc32(record->data, rec->base.data_len);
693                 bcopy(record->data, bdata, rec->base.data_len);
694         } else {
695                 rec->base.data_len = record->rec.base.data_len;
696         }
697
698         elm.leaf.base = record->rec.base.base;
699         elm.leaf.rec_offset = rec_offset;
700         elm.leaf.data_offset = rec->base.data_off;
701         elm.leaf.data_len = rec->base.data_len;
702         elm.leaf.data_crc = rec->base.data_crc;
703
704         error = hammer_btree_insert(&cursor, &elm);
705
706         /*
707          * Clean up on success, or fall through on error.
708          */
709         if (error == 0) {
710                 record->flags |= HAMMER_RECF_DELETED;
711                 goto done;
712         }
713
714         /*
715          * Try to unwind the allocation
716          */
717         hammer_blockmap_free(trans, rec_offset, HAMMER_RECORD_SIZE);
718 done:
719         record->flags &= ~HAMMER_RECF_SYNCING;
720         hammer_done_cursor(&cursor);
721         if (error == EDEADLK)
722                 goto retry;
723         return(error);
724 }
725
726 /*
727  * Add the record to the inode's rec_tree.  The low 32 bits of a directory
728  * entry's key are used to deal with hash collisions in the upper 32 bits.
729  * A unique 64 bit key is generated in-memory and may be regenerated a
730  * second time when the directory record is flushed to the on-disk B-Tree.
731  *
732  * A referenced record is passed to this function.  This function
733  * eats the reference.  If an error occurs the record will be deleted.
734  *
735  * A copy of the temporary record->data pointer provided by the caller
736  * will be made.
737  */
738 static
739 int
740 hammer_mem_add(struct hammer_transaction *trans, hammer_record_t record)
741 {
742         void *data;
743         int bytes;
744         int reclen;
745                 
746         /*
747          * Make a private copy of record->data
748          */
749         if (record->data) {
750                 /*
751                  * Try to embed the data in extra space in the record
752                  * union, otherwise allocate a copy.
753                  */
754                 bytes = record->rec.base.data_len;
755                 switch(record->rec.base.base.rec_type) {
756                 case HAMMER_RECTYPE_DIRENTRY:
757                         reclen = offsetof(struct hammer_entry_record, name[0]);
758                         break;
759                 case HAMMER_RECTYPE_DATA:
760                         reclen = offsetof(struct hammer_data_record, data[0]);
761                         break;
762                 default:
763                         reclen = sizeof(record->rec);
764                         break;
765                 }
766                 if (reclen + bytes <= HAMMER_RECORD_SIZE) {
767                         bcopy(record->data, (char *)&record->rec + reclen,
768                               bytes);
769                         record->data = (void *)((char *)&record->rec + reclen);
770                         record->flags |= HAMMER_RECF_INBAND;
771                 } else {
772                         ++hammer_count_record_datas;
773                         data = kmalloc(bytes, M_HAMMER, M_WAITOK);
774                         record->flags |= HAMMER_RECF_ALLOCDATA;
775                         bcopy(record->data, data, bytes);
776                         record->data = data;
777                 }
778         }
779
780         /*
781          * Insert into the RB tree, find an unused iterator if this is
782          * a directory entry.
783          */
784         while (RB_INSERT(hammer_rec_rb_tree, &record->ip->rec_tree, record)) {
785                 if (record->rec.base.base.rec_type != HAMMER_RECTYPE_DIRENTRY){
786                         record->flags |= HAMMER_RECF_DELETED;
787                         hammer_rel_mem_record(record);
788                         return (EEXIST);
789                 }
790                 if (++trans->hmp->namekey_iterator == 0)
791                         ++trans->hmp->namekey_iterator;
792                 record->rec.base.base.key &= ~(0xFFFFFFFFLL);
793                 record->rec.base.base.key |= trans->hmp->namekey_iterator;
794         }
795         record->flags |= HAMMER_RECF_ONRBTREE;
796         hammer_modify_inode(trans, record->ip, HAMMER_INODE_XDIRTY);
797         hammer_rel_mem_record(record);
798         return(0);
799 }
800
801 /************************************************************************
802  *                   HAMMER INODE MERGED-RECORD FUNCTIONS               *
803  ************************************************************************
804  *
805  * These functions augment the B-Tree scanning functions in hammer_btree.c
806  * by merging in-memory records with on-disk records.
807  */
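
/*
 * Sketch of a typical merged scan, modeled on the range-delete routines
 * further below.  Cursor setup (key_beg/key_end, asof, flag bits) is
 * caller-specific and only hinted at here:
 *
 * retry:
 *	hammer_init_cursor(trans, &cursor, &ip->cache[0]);
 *	... fill in cursor.key_beg, cursor.key_end, cursor.asof, flags ...
 *	error = hammer_ip_first(&cursor, ip);
 *	while (error == 0) {
 *		... use cursor.record, hammer_ip_resolve_data(&cursor) ...
 *		error = hammer_ip_next(&cursor);
 *	}
 *	hammer_done_cursor(&cursor);
 *	if (error == EDEADLK)
 *		goto retry;
 *	if (error == ENOENT)
 *		error = 0;
 */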
808
809 /*
810  * Locate a particular record either in-memory or on-disk.
811  *
812  * NOTE: This is basically a standalone routine; hammer_ip_next() may
813  * NOT be called to iterate results.
814  */
815 int
816 hammer_ip_lookup(hammer_cursor_t cursor, struct hammer_inode *ip)
817 {
818         int error;
819
820         /*
821          * If the element is in-memory return it without searching the
822          * on-disk B-Tree
823          */
824         error = hammer_mem_lookup(cursor, ip);
825         if (error == 0) {
826                 cursor->record = &cursor->iprec->rec;
827                 return(error);
828         }
829         if (error != ENOENT)
830                 return(error);
831
832         /*
833          * If the inode has on-disk components search the on-disk B-Tree.
834          */
835         if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) == 0)
836                 return(error);
837         error = hammer_btree_lookup(cursor);
838         if (error == 0)
839                 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_RECORD);
840         return(error);
841 }
842
843 /*
844  * Locate the first record within the cursor's key_beg/key_end range,
845  * restricted to a particular inode.  0 is returned on success, ENOENT
846  * if no records matched the requested range, or some other error.
847  *
848  * When 0 is returned hammer_ip_next() may be used to iterate additional
849  * records within the requested range.
850  *
851  * This function can return EDEADLK, requiring the caller to terminate
852  * the cursor and try again.
853  */
854 int
855 hammer_ip_first(hammer_cursor_t cursor, struct hammer_inode *ip)
856 {
857         int error;
858
859         /*
860          * Clean up fields and setup for merged scan
861          */
862         cursor->flags &= ~HAMMER_CURSOR_DELBTREE;
863         cursor->flags |= HAMMER_CURSOR_ATEDISK | HAMMER_CURSOR_ATEMEM;
864         cursor->flags |= HAMMER_CURSOR_DISKEOF | HAMMER_CURSOR_MEMEOF;
865         if (cursor->iprec) {
866                 hammer_rel_mem_record(cursor->iprec);
867                 cursor->iprec = NULL;
868         }
869
870         /*
871          * Search the on-disk B-Tree.  hammer_btree_lookup() only does an
872          * exact lookup so if we get ENOENT we have to call the iterate
873          * function to validate the first record after the begin key.
874          *
875          * The ATEDISK flag is used by hammer_btree_iterate to determine
876          * whether it must index forwards or not.  It is also used here
877          * to select the next record from in-memory or on-disk.
878          *
879          * EDEADLK can only occur if the lookup hit an empty internal
880          * element and couldn't delete it.  Since this could only occur
881          * in-range, we can just iterate from the failure point.
882          */
883         if (ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) {
884                 error = hammer_btree_lookup(cursor);
885                 if (error == ENOENT || error == EDEADLK) {
886                         cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
887                         if (hammer_debug_general & 0x2000)
888                                 kprintf("error %d node %p %016llx index %d\n", error, cursor->node, cursor->node->node_offset, cursor->index);
889                         error = hammer_btree_iterate(cursor);
890                 }
891                 if (error && error != ENOENT) 
892                         return(error);
893                 if (error == 0) {
894                         cursor->flags &= ~HAMMER_CURSOR_DISKEOF;
895                         cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
896                 } else {
897                         cursor->flags |= HAMMER_CURSOR_ATEDISK;
898                 }
899         }
900
901         /*
902          * Search the in-memory record list (Red-Black tree).  Unlike the
903          * B-Tree search, mem_first checks for records in the range.
904          */
905         error = hammer_mem_first(cursor, ip);
906         if (error && error != ENOENT)
907                 return(error);
908         if (error == 0) {
909                 cursor->flags &= ~HAMMER_CURSOR_MEMEOF;
910                 cursor->flags &= ~HAMMER_CURSOR_ATEMEM;
911         }
912
913         /*
914          * This will return the first matching record.
915          */
916         return(hammer_ip_next(cursor));
917 }
918
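/*
 * The merged iteration below is driven by four cursor flags:
 * HAMMER_CURSOR_DISKEOF and HAMMER_CURSOR_MEMEOF mark the B-Tree and
 * in-memory sides as exhausted, while HAMMER_CURSOR_ATEDISK and
 * HAMMER_CURSOR_ATEMEM mark the current element on a side as consumed,
 * forcing the next call to advance that side before comparing again.
 */
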
919 /*
920  * Retrieve the next record in a merged iteration within the bounds of the
921  * cursor.  This call may be made multiple times after the cursor has been
922  * initially searched with hammer_ip_first().
923  *
924  * 0 is returned on success, ENOENT if no further records match the
925  * requested range, or some other error code is returned.
926  */
927 int
928 hammer_ip_next(hammer_cursor_t cursor)
929 {
930         hammer_btree_elm_t elm;
931         hammer_record_t rec;
932         int error;
933         int r;
934
935         /*
936          * Load the current on-disk and in-memory record.  If we ate any
937          * records we have to get the next one. 
938          *
939          * If we deleted the last on-disk record we had scanned ATEDISK will
940          * be clear and DELBTREE will be set, forcing a call to iterate. The
941          * fact that ATEDISK is clear causes iterate to re-test the 'current'
942          * element.  If ATEDISK is set, iterate will skip the 'current'
943          * element.
944          *
945          * Get the next on-disk record
946          */
947         if (cursor->flags & (HAMMER_CURSOR_ATEDISK|HAMMER_CURSOR_DELBTREE)) {
948                 if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
949                         error = hammer_btree_iterate(cursor);
950                         cursor->flags &= ~HAMMER_CURSOR_DELBTREE;
951                         if (error == 0)
952                                 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
953                         else
954                                 cursor->flags |= HAMMER_CURSOR_DISKEOF |
955                                                  HAMMER_CURSOR_ATEDISK;
956                 }
957         }
958
959         /*
960          * Get the next in-memory record.  The record can be ripped out
961          * of the RB tree so we maintain a scan_info structure to track
962          * the next node.
963          *
964          * hammer_rec_scan_cmp:  Is the record still in our general range,
965          *                       (non-inclusive of snapshot exclusions)?
966          * hammer_rec_scan_callback: Is the record in our snapshot?
967          */
968         if (cursor->flags & HAMMER_CURSOR_ATEMEM) {
969                 if ((cursor->flags & HAMMER_CURSOR_MEMEOF) == 0) {
970                         if (cursor->iprec) {
971                                 hammer_rel_mem_record(cursor->iprec);
972                                 cursor->iprec = NULL;
973                         }
974                         rec = cursor->scan.node;        /* next node */
975                         while (rec) {
976                                 if (hammer_rec_scan_cmp(rec, cursor) != 0)
977                                         break;
978                                 if (hammer_rec_scan_callback(rec, cursor) != 0)
979                                         break;
980                                 rec = hammer_rec_rb_tree_RB_NEXT(rec);
981                         }
982                         if (cursor->iprec) {
983                                 KKASSERT(cursor->iprec == rec);
984                                 cursor->flags &= ~HAMMER_CURSOR_ATEMEM;
985                                 cursor->scan.node =
986                                         hammer_rec_rb_tree_RB_NEXT(rec);
987                         } else {
988                                 cursor->flags |= HAMMER_CURSOR_MEMEOF;
989                         }
990                 }
991         }
992
993         /*
994          * Extract either the disk or memory record depending on their
995          * relative position.
996          */
997         error = 0;
998         switch(cursor->flags & (HAMMER_CURSOR_ATEDISK | HAMMER_CURSOR_ATEMEM)) {
999         case 0:
1000                 /*
1001                  * Both entries valid
1002                  */
1003                 elm = &cursor->node->ondisk->elms[cursor->index];
1004                 r = hammer_btree_cmp(&elm->base, &cursor->iprec->rec.base.base);
1005                 if (r < 0) {
1006                         error = hammer_btree_extract(cursor,
1007                                                      HAMMER_CURSOR_GET_RECORD);
1008                         cursor->flags |= HAMMER_CURSOR_ATEDISK;
1009                         break;
1010                 }
1011                 /* fall through to the memory entry */
1012         case HAMMER_CURSOR_ATEDISK:
1013                 /*
1014                  * Only the memory entry is valid
1015                  */
1016                 cursor->record = &cursor->iprec->rec;
1017                 cursor->flags |= HAMMER_CURSOR_ATEMEM;
1018                 break;
1019         case HAMMER_CURSOR_ATEMEM:
1020                 /*
1021                  * Only the disk entry is valid
1022                  */
1023                 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_RECORD);
1024                 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1025                 break;
1026         default:
1027                 /*
1028                  * Neither entry is valid
1029                  *
1030                  * XXX error not set properly
1031                  */
1032                 cursor->record = NULL;
1033                 error = ENOENT;
1034                 break;
1035         }
1036         return(error);
1037 }
1038
1039 /*
1040  * Resolve the cursor->data pointer for the current cursor position in
1041  * a merged iteration.
1042  */
1043 int
1044 hammer_ip_resolve_data(hammer_cursor_t cursor)
1045 {
1046         int error;
1047
1048         if (cursor->iprec && cursor->record == &cursor->iprec->rec) {
1049                 cursor->data = cursor->iprec->data;
1050                 error = 0;
1051         } else {
1052                 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA);
1053         }
1054         return(error);
1055 }
1056
1057 int
1058 hammer_ip_resolve_record_and_data(hammer_cursor_t cursor)
1059 {
1060         int error;
1061
1062         if (cursor->iprec && cursor->record == &cursor->iprec->rec) {
1063                 cursor->data = cursor->iprec->data;
1064                 error = 0;
1065         } else {
1066                 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA |
1067                                                      HAMMER_CURSOR_GET_RECORD);
1068         }
1069         return(error);
1070 }
1071
1072 /*
1073  * Delete all records within the specified range for inode ip.
1074  *
1075  * NOTE: An unaligned range will cause new records to be added to cover
1076  * the edge cases. (XXX not implemented yet).
1077  *
1078  * NOTE: ran_end is inclusive (e.g. 0,1023 instead of 0,1024).
1079  *
1080  * NOTE: Record keys for regular file data have to be special-cased since
1081  * they indicate the end of the range (key = base + bytes).
1082  */
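/*
 * Example of the key math: a data record covering file offsets
 * [off, off+len) is keyed at off+len, so the scan below starts at
 * key ran_beg + 1 and recovers each record's base offset as
 * (key - data_len).
 */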
1083 int
1084 hammer_ip_delete_range(hammer_transaction_t trans, hammer_inode_t ip,
1085                        int64_t ran_beg, int64_t ran_end)
1086 {
1087         struct hammer_cursor cursor;
1088         hammer_record_ondisk_t rec;
1089         hammer_base_elm_t base;
1090         int error;
1091         int64_t off;
1092
1093 retry:
1094         hammer_init_cursor(trans, &cursor, &ip->cache[0]);
1095
1096         cursor.key_beg.obj_id = ip->obj_id;
1097         cursor.key_beg.create_tid = 0;
1098         cursor.key_beg.delete_tid = 0;
1099         cursor.key_beg.obj_type = 0;
1100         cursor.asof = ip->obj_asof;
1101         cursor.flags |= HAMMER_CURSOR_ASOF;
1102
1103         cursor.key_end = cursor.key_beg;
1104         if (ip->ino_rec.base.base.obj_type == HAMMER_OBJTYPE_DBFILE) {
1105                 cursor.key_beg.key = ran_beg;
1106                 cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
1107                 cursor.key_end.rec_type = HAMMER_RECTYPE_DB;
1108                 cursor.key_end.key = ran_end;
1109         } else {
1110                 /*
1111                  * The key in the B-Tree is (base+bytes), so the first possible
1112                  * matching key is ran_beg + 1.
1113                  */
1114                 int64_t tmp64;
1115
1116                 cursor.key_beg.key = ran_beg + 1;
1117                 cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
1118                 cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
1119
1120                 tmp64 = ran_end + MAXPHYS + 1;  /* work around GCC-4 bug */
1121                 if (tmp64 < ran_end)
1122                         cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
1123                 else
1124                         cursor.key_end.key = ran_end + MAXPHYS + 1;
1125         }
1126         cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
1127
1128         error = hammer_ip_first(&cursor, ip);
1129
1130         /*
1131          * Iterate through matching records and mark them as deleted.
1132          */
1133         while (error == 0) {
1134                 rec = cursor.record;
1135                 base = &rec->base.base;
1136
1137                 KKASSERT(base->delete_tid == 0);
1138
1139                 /*
1140                  * There may be overlap cases for regular file data.  Also
1141                  * remember the key for a regular file record is the offset
1142                  * just past the last byte of the record (base + len), NOT the
1143                  * base offset.
1144                  */
1145 #if 0
1146                 kprintf("delete_range rec_type %02x\n", base->rec_type);
1147 #endif
1148                 if (base->rec_type == HAMMER_RECTYPE_DATA) {
1149 #if 0
1150                         kprintf("delete_range loop key %016llx\n",
1151                                 base->key - rec->base.data_len);
1152 #endif
1153                         off = base->key - rec->base.data_len;
1154                         /*
1155                          * Check the left edge case.  We currently do not
1156                          * split existing records.
1157                          */
1158                         if (off < ran_beg) {
1159                                 panic("hammer left edge case %016llx %d\n",
1160                                         base->key, rec->base.data_len);
1161                         }
1162
1163                         /*
1164                          * Check the right edge case.  Note that the
1165                          * record can be completely out of bounds, which
1166                          * terminates the search.
1167                          *
1168                          * base->key is exclusive of the right edge while
1169                          * ran_end is inclusive of the right edge.  The
1170                          * (key - data_len) left boundary is inclusive.
1171                          *
1172                          * XXX theory-check this test at some point, are
1173                          * we missing a + 1 somewhere?  Note that ran_end
1174                          * could overflow.
1175                          */
1176                         if (base->key - 1 > ran_end) {
1177                                 if (base->key - rec->base.data_len > ran_end)
1178                                         break;
1179                                 panic("hammer right edge case\n");
1180                         }
1181                 }
1182
1183                 /*
1184                  * Mark the record and B-Tree entry as deleted.  This will
1185                  * also physically delete the B-Tree entry, record, and
1186                  * data if the retention policy dictates.  The function
1187                  * will set HAMMER_CURSOR_DELBTREE which hammer_ip_next()
1188                  * uses to perform a fixup.
1189                  */
1190                 error = hammer_ip_delete_record(&cursor, trans->tid);
1191                 if (error)
1192                         break;
1193                 error = hammer_ip_next(&cursor);
1194         }
1195         hammer_done_cursor(&cursor);
1196         if (error == EDEADLK)
1197                 goto retry;
1198         if (error == ENOENT)
1199                 error = 0;
1200         return(error);
1201 }
1202
1203 /*
1204  * Delete all records associated with an inode except the inode record
1205  * itself.
1206  */
1207 int
1208 hammer_ip_delete_range_all(hammer_transaction_t trans, hammer_inode_t ip)
1209 {
1210         struct hammer_cursor cursor;
1211         hammer_record_ondisk_t rec;
1212         hammer_base_elm_t base;
1213         int error;
1214
1215 retry:
1216         hammer_init_cursor(trans, &cursor, &ip->cache[0]);
1217
1218         cursor.key_beg.obj_id = ip->obj_id;
1219         cursor.key_beg.create_tid = 0;
1220         cursor.key_beg.delete_tid = 0;
1221         cursor.key_beg.obj_type = 0;
1222         cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE + 1;
1223         cursor.key_beg.key = HAMMER_MIN_KEY;
1224
1225         cursor.key_end = cursor.key_beg;
1226         cursor.key_end.rec_type = 0xFFFF;
1227         cursor.key_end.key = HAMMER_MAX_KEY;
1228
1229         cursor.asof = ip->obj_asof;
1230         cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
1231
1232         error = hammer_ip_first(&cursor, ip);
1233
1234         /*
1235          * Iterate through matching records and mark them as deleted.
1236          */
1237         while (error == 0) {
1238                 rec = cursor.record;
1239                 base = &rec->base.base;
1240
1241                 KKASSERT(base->delete_tid == 0);
1242
1243                 /*
1244                  * Mark the record and B-Tree entry as deleted.  This will
1245                  * also physically delete the B-Tree entry, record, and
1246                  * data if the retention policy dictates.  The function
1247                  * will set HAMMER_CURSOR_DELBTREE which hammer_ip_next()
1248                  * uses to perform a fixup.
1249                  */
1250                 error = hammer_ip_delete_record(&cursor, trans->tid);
1251                 if (error)
1252                         break;
1253                 error = hammer_ip_next(&cursor);
1254         }
1255         hammer_done_cursor(&cursor);
1256         if (error == EDEADLK)
1257                 goto retry;
1258         if (error == ENOENT)
1259                 error = 0;
1260         return(error);
1261 }
1262
1263 /*
1264  * Delete the record at the current cursor.  On success the cursor will
1265  * be positioned appropriately for an iteration but may no longer be at
1266  * a leaf node.
1267  *
1268  * NOTE: This can return EDEADLK, requiring the caller to terminate the
1269  * cursor and retry.
1270  */
1271 int
1272 hammer_ip_delete_record(hammer_cursor_t cursor, hammer_tid_t tid)
1273 {
1274         hammer_btree_elm_t elm;
1275         hammer_mount_t hmp;
1276         int error;
1277         int dodelete;
1278
1279         /*
1280          * In-memory (unsynchronized) records can simply be freed.
1281          */
1282         if (cursor->record == &cursor->iprec->rec) {
1283                 cursor->iprec->flags |= HAMMER_RECF_DELETED;
1284                 return(0);
1285         }
1286
1287         /*
1288          * On-disk records are marked as deleted by updating their delete_tid.
1289  * This does not affect their position in the B-Tree (which is based
1290          * on their create_tid).
1291          */
1292         error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_RECORD);
1293         elm = NULL;
1294         hmp = cursor->node->hmp;
1295
1296         dodelete = 0;
1297         if (error == 0) {
1298                 error = hammer_cursor_upgrade(cursor);
1299                 if (error == 0) {
1300                         elm = &cursor->node->ondisk->elms[cursor->index];
1301                         hammer_modify_node(cursor->trans, cursor->node,
1302                                            elm, sizeof(*elm));
1303                         elm->leaf.base.delete_tid = tid;
1304
1305                         /*
1306                          * An on-disk record cannot have the same delete_tid
1307                          * as its create_tid.  In a chain of record updates
1308                          * this could result in a duplicate record.
1309                          */
1310                         KKASSERT(elm->leaf.base.delete_tid != elm->leaf.base.create_tid);
1311                         hammer_modify_buffer(cursor->trans, cursor->record_buffer, &cursor->record->base.base.delete_tid, sizeof(hammer_tid_t));
1312                         cursor->record->base.base.delete_tid = tid;
1313                 }
1314         }
1315
1316         /*
1317          * If we were mounted with the nohistory option, we physically
1318          * delete the record.
1319          */
1320         if (hmp->hflags & HMNT_NOHISTORY)
1321                 dodelete = 1;
1322
1323         if (error == 0 && dodelete) {
1324                 error = hammer_delete_at_cursor(cursor, NULL);
1325                 if (error) {
1326                         panic("hammer_ip_delete_record: unable to physically delete the record!\n");
1327                         error = 0;
1328                 }
1329         }
1330         return(error);
1331 }
1332
1333 int
1334 hammer_delete_at_cursor(hammer_cursor_t cursor, int64_t *stat_bytes)
1335 {
1336         hammer_btree_elm_t elm;
1337         hammer_off_t rec_offset;
1338         hammer_off_t data_offset;
1339         int32_t data_len;
1340         u_int16_t rec_type;
1341         int error;
1342
1343         elm = &cursor->node->ondisk->elms[cursor->index];
1344         KKASSERT(elm->base.btype == HAMMER_BTREE_TYPE_RECORD);
1345
1346         rec_offset = elm->leaf.rec_offset;
1347         data_offset = elm->leaf.data_offset;
1348         data_len = elm->leaf.data_len;
1349         rec_type = elm->leaf.base.rec_type;
1350
1351         error = hammer_btree_delete(cursor);
1352         if (error == 0) {
1353                 /*
1354                  * This forces a fixup for the iteration because
1355                  * the cursor is now either sitting at the 'next'
1356                  * element or sitting at the end of a leaf.
1357                  */
1358                 if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
1359                         cursor->flags |= HAMMER_CURSOR_DELBTREE;
1360                         cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
1361                 }
1362         }
1363         if (error == 0) {
1364                 hammer_blockmap_free(cursor->trans, rec_offset,
1365                                      sizeof(union hammer_record_ondisk));
1366         }
1367         if (error == 0) {
1368                 switch(data_offset & HAMMER_OFF_ZONE_MASK) {
1369                 case HAMMER_ZONE_LARGE_DATA:
1370                 case HAMMER_ZONE_SMALL_DATA:
1371                         hammer_blockmap_free(cursor->trans,
1372                                              data_offset, data_len);
1373                         break;
1374                 default:
1375                         break;
1376                 }
1377         }
1378 #if 0
1379         kprintf("hammer_delete_at_cursor: %d:%d:%08x %08x/%d "
1380                 "(%d remain in cluster)\n",
1381                 cluster->volume->vol_no, cluster->clu_no,
1382                 rec_offset, data_offset, data_len,
1383                 cluster->ondisk->stat_records);
1384 #endif
1385         return (error);
1386 }
1387
1388 /*
1389  * Determine whether a directory is empty or not.  Returns 0 if the directory
1390  * is empty, ENOTEMPTY if it isn't, plus other possible errors.
1391  */
1392 int
1393 hammer_ip_check_directory_empty(hammer_transaction_t trans, hammer_inode_t ip)
1394 {
1395         struct hammer_cursor cursor;
1396         int error;
1397
1398         hammer_init_cursor(trans, &cursor, &ip->cache[0]);
1399
1400         cursor.key_beg.obj_id = ip->obj_id;
1401         cursor.key_beg.create_tid = 0;
1402         cursor.key_beg.delete_tid = 0;
1403         cursor.key_beg.obj_type = 0;
1404         cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE + 1;
1405         cursor.key_beg.key = HAMMER_MIN_KEY;
1406
1407         cursor.key_end = cursor.key_beg;
1408         cursor.key_end.rec_type = 0xFFFF;
1409         cursor.key_end.key = HAMMER_MAX_KEY;
1410
1411         cursor.asof = ip->obj_asof;
1412         cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
1413
1414         error = hammer_ip_first(&cursor, ip);
1415         if (error == ENOENT)
1416                 error = 0;
1417         else if (error == 0)
1418                 error = ENOTEMPTY;
1419         hammer_done_cursor(&cursor);
1420         return(error);
1421 }
1422