/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.15 2007/12/31 05:33:12 dillon Exp $
 */

#include "hammer.h"
#include <sys/buf.h>
#include <sys/buf2.h>

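/*
 * VOP_INACTIVE handler: the last reference to the vnode has been
 * released.  If the inode has no remaining links and the filesystem
 * is writable, sync it now so its resources can be reclaimed.
 */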
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);

	/*
	 * Degenerate case
	 */
	if (ip == NULL) {
		vrecycle(ap->a_vp);
		return(0);
	}

	/*
	 * If the inode no longer has any references we recover its
	 * in-memory resources immediately.
	 */
	if (ip->ino_rec.ino_nlinks == 0 &&
	    (ip->hmp->mp->mnt_flag & MNT_RDONLY) == 0) {
		hammer_sync_inode(ip, MNT_NOWAIT, 1);
	}
	return(0);
}

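/*
 * VOP_RECLAIM handler: the vnode is being destroyed.  Disassociate it
 * from the in-memory inode and drop the inode reference, asking for a
 * flush so the inode can be unloaded once it is no longer needed.
 */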
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
	struct hammer_inode *ip;
	struct vnode *vp;

	vp = ap->a_vp;

	/*
	 * Release the vnode association and ask that the inode be flushed.
	 */
	if ((ip = vp->v_data) != NULL) {
		vp->v_data = NULL;
		ip->vp = NULL;
		hammer_rel_inode(ip, 1);
	}
	return(0);
}

/*
 * Obtain a vnode for the specified inode number.  An exclusively locked
 * vnode is returned.
 */
int
hammer_vfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	struct hammer_inode *ip;
	int error;

	/*
	 * Get/allocate the hammer_inode structure.  The structure must be
	 * unlocked while we manipulate the related vnode to avoid a
	 * deadlock.
	 */
	ip = hammer_get_inode(hmp, ino, hmp->asof, &error);
	if (ip == NULL) {
		*vpp = NULL;
		return(error);
	}
	error = hammer_get_vnode(ip, LK_EXCLUSIVE, vpp);
	hammer_rel_inode(ip, 0);
	return (error);
}

/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 */
int
hammer_get_vnode(struct hammer_inode *ip, int lktype, struct vnode **vpp)
{
	struct vnode *vp;
	int error = 0;

	for (;;) {
		if ((vp = ip->vp) == NULL) {
			error = getnewvnode(VT_HAMMER, ip->hmp->mp, vpp, 0, 0);
			if (error)
				break;
			hammer_lock_ex(&ip->lock);
			if (ip->vp != NULL) {
				hammer_unlock(&ip->lock);
				vp->v_type = VBAD;
				vx_put(vp);
				continue;
			}
			hammer_ref(&ip->lock);
			vp = *vpp;
			ip->vp = vp;
			vp->v_type = hammer_get_vnode_type(
					ip->ino_rec.base.base.obj_type);

			switch(ip->ino_rec.base.base.obj_type) {
			case HAMMER_OBJTYPE_CDEV:
			case HAMMER_OBJTYPE_BDEV:
				vp->v_ops = &ip->hmp->mp->mnt_vn_spec_ops;
				addaliasu(vp, ip->ino_data.rmajor,
					  ip->ino_data.rminor);
				break;
			case HAMMER_OBJTYPE_FIFO:
				vp->v_ops = &ip->hmp->mp->mnt_vn_fifo_ops;
				break;
			default:
				break;
			}
			if (ip->obj_id == HAMMER_OBJID_ROOT)
				vp->v_flag |= VROOT;

			vp->v_data = (void *)ip;
			/* vnode locked by getnewvnode() */
			/* make related vnode dirty if inode dirty? */
			hammer_unlock(&ip->lock);
			if (vp->v_type == VREG)
				vinitvmio(vp, ip->ino_rec.ino_size);
			break;
		}

		/*
		 * loop if the vget fails (aka races), or if the vp
		 * no longer matches ip->vp.
		 */
		if (vget(vp, LK_EXCLUSIVE) == 0) {
			if (vp == ip->vp)
				break;
			vput(vp);
		}
	}
	*vpp = vp;
	return(error);
}

/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 */
struct hammer_inode *
hammer_get_inode(struct hammer_mount *hmp, u_int64_t obj_id, hammer_tid_t asof,
		 int *errorp)
{
	struct hammer_inode_info iinfo;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;

	/*
	 * Determine if we already have an inode cached.  If we do then
	 * we are golden.
	 */
	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
loop:
	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		hammer_ref(&ip->lock);
		*errorp = 0;
		return(ip);
	}

	ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	ip->obj_id = obj_id;
	ip->obj_asof = iinfo.obj_asof;
	ip->hmp = hmp;
	RB_INIT(&ip->rec_tree);

	/*
	 * Locate the on-disk inode.  There is no cached in-memory inode at
	 * this point, so search the HAMMER on-disk B-Tree for it.
	 */
	hammer_init_cursor_hmp(&cursor, hmp);
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.key = 0;
	cursor.key_beg.create_tid = iinfo.obj_asof;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor.key_beg.obj_type = 0;
	cursor.flags = HAMMER_CURSOR_GET_RECORD | HAMMER_CURSOR_GET_DATA;

	*errorp = hammer_btree_lookup(&cursor);

	/*
	 * On success the B-Tree lookup will hold the appropriate
	 * buffer cache buffers and provide a pointer to the requested
	 * information.  Copy the information to the in-memory inode.
	 */
	if (*errorp == 0) {
		ip->ino_rec = cursor.record->inode;
		ip->ino_data = cursor.data->inode;
	} else if (cursor.node) {
		hammer_cache_node(cursor.node, &ip->cache);
	}

	/*
	 * On success insert the newly loaded inode into the in-memory
	 * inode RB tree.  It is possible to race another lookup's
	 * insertion of the same inode, so deal with that condition too.
	 *
	 * The cursor's locked node interlocks against others creating and
	 * destroying ip while we were blocked.
	 */
	if (*errorp == 0) {
		hammer_ref(&ip->lock);
		if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
			hammer_uncache_node(&ip->cache);
			hammer_unref(&ip->lock);
			--hammer_count_inodes;
			kfree(ip, M_HAMMER);
			hammer_done_cursor(&cursor);
			goto loop;
		}
		ip->flags |= HAMMER_INODE_ONDISK;
	} else {
		--hammer_count_inodes;
		kfree(ip, M_HAMMER);
		ip = NULL;
	}
	hammer_done_cursor(&cursor);
	return (ip);
}

/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced but not locked.
 *
 * The inode is created in-memory and will be delay-synchronized to the
 * disk.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
		    struct ucred *cred, hammer_inode_t dip,
		    struct hammer_inode **ipp)
{
	hammer_mount_t hmp;
	hammer_inode_t ip;
	uid_t xuid;

	hmp = trans->hmp;
	ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	ip->obj_id = hammer_alloc_tid(trans);
	KKASSERT(ip->obj_id != 0);
	ip->obj_asof = hmp->asof;
	ip->hmp = hmp;
	ip->flags = HAMMER_INODE_DDIRTY | HAMMER_INODE_RDIRTY |
		    HAMMER_INODE_ITIMES;
	ip->last_tid = trans->tid;

	RB_INIT(&ip->rec_tree);

	ip->ino_rec.ino_atime = trans->tid;
	ip->ino_rec.ino_mtime = trans->tid;
	ip->ino_rec.ino_size = 0;
	ip->ino_rec.ino_nlinks = 0;
	/* XXX */
	ip->ino_rec.base.rec_id = hammer_alloc_recid(trans);
	KKASSERT(ip->ino_rec.base.rec_id != 0);
	ip->ino_rec.base.base.obj_id = ip->obj_id;
	ip->ino_rec.base.base.key = 0;
	ip->ino_rec.base.base.create_tid = trans->tid;
	ip->ino_rec.base.base.delete_tid = 0;
	ip->ino_rec.base.base.rec_type = HAMMER_RECTYPE_INODE;
	ip->ino_rec.base.base.obj_type = hammer_get_obj_type(vap->va_type);

	ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
	ip->ino_data.mode = vap->va_mode;
	ip->ino_data.ctime = trans->tid;
	ip->ino_data.parent_obj_id = (dip) ? dip->ino_rec.base.base.obj_id : 0;

	switch(ip->ino_rec.base.base.obj_type) {
	case HAMMER_OBJTYPE_CDEV:
	case HAMMER_OBJTYPE_BDEV:
		ip->ino_data.rmajor = vap->va_rmajor;
		ip->ino_data.rminor = vap->va_rminor;
		break;
	default:
		break;
	}

	/*
	 * Calculate default uid/gid and overwrite with information from
	 * the vap.
	 */
	xuid = hammer_to_unix_xid(&dip->ino_data.uid);
	ip->ino_data.gid = dip->ino_data.gid;
	xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode, xuid, cred,
				     &vap->va_mode);
	ip->ino_data.mode = vap->va_mode;

	if (vap->va_vaflags & VA_UID_UUID_VALID)
		ip->ino_data.uid = vap->va_uid_uuid;
	else if (vap->va_uid != (uid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.uid, xuid);
	if (vap->va_vaflags & VA_GID_UUID_VALID)
		ip->ino_data.gid = vap->va_gid_uuid;
	else if (vap->va_gid != (gid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);

	hammer_ref(&ip->lock);
	if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
		hammer_unref(&ip->lock);
		panic("hammer_create_inode: duplicate obj_id %llx", ip->obj_id);
	}
	*ipp = ip;
	return(0);
}

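/*
 * Write the in-memory inode record out to disk: delete any existing
 * on-disk copy (subject to the retention policy) and, unless the inode
 * has been deleted, lay down a fresh record carrying the current
 * inode record and data.
 */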
int
hammer_update_inode(hammer_inode_t ip)
{
	struct hammer_cursor cursor;
	struct hammer_cursor *spike = NULL;
	hammer_record_t record;
	int error;

	/*
	 * Locate the record on-disk and mark it as deleted.  Both the B-Tree
	 * node and the record must be marked deleted.  The record may or
	 * may not be physically deleted, depending on the retention policy.
	 *
	 * If the inode has already been deleted on-disk we have nothing
	 * to do.
	 *
	 * XXX Update the inode record and data in-place if the retention
	 * policy allows it.
	 */
retry:
	error = 0;

	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
	    HAMMER_INODE_ONDISK) {
		hammer_init_cursor_ip(&cursor, ip);
		cursor.key_beg.obj_id = ip->obj_id;
		cursor.key_beg.key = 0;
		cursor.key_beg.create_tid = ip->obj_asof;
		cursor.key_beg.delete_tid = 0;
		cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
		cursor.key_beg.obj_type = 0;
		cursor.flags = HAMMER_CURSOR_GET_RECORD;

		error = hammer_btree_lookup(&cursor);

		if (error == 0) {
			error = hammer_ip_delete_record(&cursor, ip->last_tid);
			if (error == 0)
				ip->flags |= HAMMER_INODE_DELONDISK;
		}
		hammer_cache_node(cursor.node, &ip->cache);
		hammer_done_cursor(&cursor);
	}

	/*
	 * Write out a new record if the in-memory inode is not marked
	 * as having been deleted.  Update our inode statistics if this
	 * is the first application of the inode on-disk.
	 *
	 * If the inode has been deleted permanently, HAMMER_INODE_DELONDISK
	 * will remain set and prevent further updates.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
		record = hammer_alloc_mem_record(ip);
		record->rec.inode = ip->ino_rec;
		record->rec.inode.base.base.create_tid = ip->last_tid;
		record->rec.inode.base.data_len = sizeof(ip->ino_data);
		record->data = (void *)&ip->ino_data;
		error = hammer_ip_sync_record(record, &spike);
		record->flags |= HAMMER_RECF_DELETED;
		hammer_rel_mem_record(record);
		if (error == ENOSPC) {
			error = hammer_spike(&spike);
			if (error == 0)
				goto retry;
		}
		KKASSERT(spike == NULL);
		if (error == 0) {
			ip->flags &= ~(HAMMER_INODE_RDIRTY|HAMMER_INODE_DDIRTY|
				       HAMMER_INODE_DELONDISK);
			if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
				hammer_modify_volume(ip->hmp->rootvol);
				++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				hammer_modify_volume_done(ip->hmp->rootvol);
				ip->flags |= HAMMER_INODE_ONDISK;
			}
		}
	}
	if (error == 0)
		ip->flags &= ~HAMMER_INODE_TID;
	return(error);
}

/*
 * Release a reference on an inode and unload it if told to flush.
 */
void
hammer_rel_inode(struct hammer_inode *ip, int flush)
{
	hammer_unref(&ip->lock);
	if (flush || ip->ino_rec.ino_nlinks == 0)
		ip->flags |= HAMMER_INODE_FLUSH;
	if (ip->lock.refs == 0 && (ip->flags & HAMMER_INODE_FLUSH))
		hammer_unload_inode(ip, NULL);
}

/*
 * Unload and destroy the specified inode.
 *
 * (called via RB_SCAN)
 */
int
hammer_unload_inode(struct hammer_inode *ip, void *data __unused)
{
	int error;

	KASSERT(ip->lock.refs == 0,
		("hammer_unload_inode: %d refs\n", ip->lock.refs));
	KKASSERT(ip->vp == NULL);
	hammer_ref(&ip->lock);

	error = hammer_sync_inode(ip, MNT_WAIT, 1);
	if (error)
		kprintf("hammer_sync_inode failed error %d\n", error);
	KKASSERT(RB_EMPTY(&ip->rec_tree));
	RB_REMOVE(hammer_ino_rb_tree, &ip->hmp->rb_inos_root, ip);

	hammer_uncache_node(&ip->cache);
	--hammer_count_inodes;
	kfree(ip, M_HAMMER);
	return(0);
}

/*
 * A transaction has modified an inode, requiring a new record and possibly
 * also data to be written out.
 *
 * last_tid is the TID to use for the disk sync.
 */
void
hammer_modify_inode(struct hammer_transaction *trans,
		    struct hammer_inode *ip, int flags)
{
	if ((flags & HAMMER_INODE_TID) && (ip->flags & HAMMER_INODE_TID) == 0) {
		ip->last_tid = trans->tid;
	}
	ip->flags |= flags;
}

/*
 * Sync any dirty buffers and records associated with an inode.  The
 * inode's last_tid field is used as the transaction id for the sync,
 * overriding any intermediate TIDs that were used for records.  Note
 * that the dirty buffer cache buffers do not have any knowledge of
 * the transaction id they were modified under.
 *
 * If we can't sync due to a cluster becoming full the spike structure
 * will be filled in and ENOSPC returned.  We must return -ENOSPC to
 * terminate the RB_SCAN.
 */
static int
hammer_sync_inode_callback(hammer_record_t rec, void *data)
{
	struct hammer_cursor **spike = data;
	int error;

	hammer_ref(&rec->lock);
	error = hammer_ip_sync_record(rec, spike);
	hammer_rel_mem_record(rec);

	if (error) {
		error = -error;
		if (error != -ENOSPC) {
			kprintf("hammer_sync_inode_callback: sync failed rec "
				"%p, error %d\n", rec, error);
		}
	}
	return(error);
}

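/*
 * Sync an inode to disk: flush its dirty buffer cache buffers, sync its
 * in-memory records, and finally bring the on-disk inode record up to
 * date.  If handle_delete is non-zero and the link count is zero the
 * inode's backing store is reclaimed as well.
 */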
/*
 * XXX error handling
 */
int
hammer_sync_inode(hammer_inode_t ip, int waitfor, int handle_delete)
{
	struct hammer_transaction trans;
	struct hammer_cursor *spike = NULL;
	int error;

	hammer_lock_ex(&ip->lock);
	hammer_start_transaction(&trans, ip->hmp);

	/*
	 * If the inode has been deleted (nlinks == 0), and the OS no longer
	 * has any references to it (handle_delete != 0), clean up in-memory
	 * data.
	 *
	 * NOTE: We do not set the RDIRTY flag when updating the delete_tid;
	 * setting HAMMER_INODE_DELETED takes care of it.
	 *
	 * NOTE: Because we may sync records within this new transaction,
	 * force the inode update later on to use our transaction id or
	 * the delete_tid of the inode may be less than the create_tid of
	 * the inode update.  XXX shouldn't happen but don't take the chance.
	 *
	 * NOTE: The call to hammer_ip_delete_range_all() cannot return
	 * ENOSPC so we can pass a NULL spike structure, because no partial
	 * data deletion can occur (yet).
	 */
	if (ip->ino_rec.ino_nlinks == 0 && handle_delete) {
		if (ip->vp)
			vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
		error = hammer_ip_delete_range_all(&trans, ip);
		KKASSERT(RB_EMPTY(&ip->rec_tree));
		ip->flags &= ~HAMMER_INODE_TID;
		ip->ino_rec.base.base.delete_tid = trans.tid;
		hammer_modify_inode(&trans, ip,
				    HAMMER_INODE_DELETED | HAMMER_INODE_TID);
		hammer_modify_volume(ip->hmp->rootvol);
		--ip->hmp->rootvol->ondisk->vol0_stat_inodes;
		hammer_modify_volume_done(ip->hmp->rootvol);
	}

	/*
	 * Sync the buffer cache
	 */
	if (ip->vp != NULL)
		error = vfsync(ip->vp, waitfor, 1, NULL, NULL);
	else
		error = 0;

	/*
	 * Now sync related records
	 */
	for (;;) {
		error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				hammer_sync_inode_callback, &spike);
		KKASSERT(error <= 0);
		if (error < 0)
			error = -error;
		if (error == ENOSPC) {
			error = hammer_spike(&spike);
			if (error == 0)
				continue;
		}
		break;
	}

	/*
	 * Now update the inode's on-disk inode-data and/or on-disk record.
	 */
	switch(ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK)) {
	case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
		/*
		 * If deleted and on-disk, don't set any additional flags;
		 * the delete flag takes care of things.
		 */
		break;
	case HAMMER_INODE_DELETED:
		/*
		 * Take care of the case where a deleted inode was never
		 * flushed to the disk in the first place.
		 */
		ip->flags &= ~(HAMMER_INODE_RDIRTY|HAMMER_INODE_DDIRTY);
		while (RB_ROOT(&ip->rec_tree)) {
			hammer_record_t rec = RB_ROOT(&ip->rec_tree);
			hammer_ref(&rec->lock);
			rec->flags |= HAMMER_RECF_DELETED;
			hammer_rel_mem_record(rec);
		}
		break;
	case HAMMER_INODE_ONDISK:
		/*
		 * If already on-disk, do not set any additional flags.
		 */
		break;
	default:
		/*
		 * If not on-disk and not deleted, set both dirty flags
		 * to force an initial record to be written.
		 */
		ip->flags |= HAMMER_INODE_RDIRTY | HAMMER_INODE_DDIRTY;
		break;
	}

	/*
	 * If RDIRTY or DDIRTY is set, write out a new record.  If the
	 * inode is already on-disk, the old record is marked as deleted.
	 */
	if (ip->flags & (HAMMER_INODE_RDIRTY | HAMMER_INODE_DDIRTY |
			 HAMMER_INODE_DELETED)) {
		error = hammer_update_inode(ip);
	}
	hammer_commit_transaction(&trans);
	hammer_unlock(&ip->lock);
	return(error);
}

/*
 * Access the filesystem buffer containing the cluster-relative byte
 * offset, validate the buffer type, load *bufferp and return a
 * pointer to the requested data.  The buffer is referenced and locked on
 * return.
 *
 * If buf_type is 0 the buffer is assumed to be a pure-data buffer and
 * no type or crc check is performed.
 *
 * If *bufferp is not NULL on entry it is assumed to contain a locked
 * and referenced buffer which will then be replaced.
 *
 * If the caller is holding another unrelated buffer locked it must be
 * passed in reorderbuf so we can properly order buffer locks.
 *
 * XXX add a flag for the buffer type and check the CRC here XXX
 */
void *
hammer_bread(hammer_cluster_t cluster, int32_t cloff,
	     u_int64_t buf_type, int *errorp,
	     struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t buf_no;
	int32_t buf_off;

	/*
	 * Load the correct filesystem buffer, replacing *bufferp.
	 */
	buf_no = cloff / HAMMER_BUFSIZE;
	buffer = *bufferp;
	if (buffer == NULL || buffer->cluster != cluster ||
	    buffer->buf_no != buf_no) {
		if (buffer) {
			/*hammer_unlock(&buffer->io.lock);*/
			hammer_rel_buffer(buffer, 0);
		}
		buffer = hammer_get_buffer(cluster, buf_no, 0, errorp);
		*bufferp = buffer;
		if (buffer == NULL)
			return(NULL);
		/*hammer_lock_ex(&buffer->io.lock);*/
	}

	/*
	 * Validate the buffer type
	 */
	buf_off = cloff & HAMMER_BUFMASK;
	if (buf_type) {
		if (buf_type != buffer->ondisk->head.buf_type) {
			kprintf("BUFFER HEAD TYPE MISMATCH %llx %llx\n",
				buf_type, buffer->ondisk->head.buf_type);
			*errorp = EIO;
			return(NULL);
		}
		if (buf_off < sizeof(buffer->ondisk->head)) {
			kprintf("BUFFER OFFSET TOO LOW %d\n", buf_off);
			*errorp = EIO;
			return(NULL);
		}
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	*errorp = 0;
	return((char *)buffer->ondisk + buf_off);
}

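/*
 * A rough usage sketch for hammer_bread().  The cluster and cloff values
 * and the surrounding caller are hypothetical; only hammer_bread() and
 * hammer_rel_buffer() are real, and a buf_type of 0 skips the type/CRC
 * check as described above:
 *
 *	struct hammer_buffer *buffer = NULL;
 *	void *data;
 *	int error;
 *
 *	data = hammer_bread(cluster, cloff, 0, &error, &buffer);
 *	if (data != NULL) {
 *		(use the data; a later hammer_bread() call against another
 *		 offset transparently releases and replaces *bufferp)
 *	}
 *	if (buffer)
 *		hammer_rel_buffer(buffer, 0);
 */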