/*
 * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This module handles low level logical file I/O (strategy) which backs
 * the logical buffer cache.
 *
 * [De]compression, zero-block, check codes, and buffer cache operations
 * for file data are handled here.
 *
 * Live dedup makes its home here as well.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/objcache.h>
#include <sys/event.h>

#include <vfs/fifofs/fifo.h>

#include "hammer2.h"
#include "hammer2_lz4.h"

#include "zlib/hammer2_zlib.h"
struct objcache *cache_buffer_read;
struct objcache *cache_buffer_write;
/*
 * Strategy code (async logical file buffer I/O from system)
 *
 * Except for the transaction init (which should normally not block),
 * we essentially run the strategy operation asynchronously via a XOP.
 *
 * WARNING! The XOP deals with buffer synchronization.  It is not synchronized
 *	    to the current cpu.
 *
 * XXX This isn't supposed to be able to deadlock against vfs_sync vfsync()
 *     calls but it has in the past when multiple flushes are queued.
 *
 * XXX We currently terminate the transaction once we get a quorum, otherwise
 *     the frontend can stall, but this can leave the remaining nodes with
 *     a potential flush conflict.  We need to delay flushes on those nodes
 *     until running transactions complete separately from the normal
 *     transaction sequencing.  FIXME TODO.
 */
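
/*
 * Illustrative sketch (not part of the original source): the general
 * frontend/backend XOP pattern used by the strategy code below.  The
 * frontend allocates and starts an XOP and returns immediately; per-node
 * backend threads feed results and race to complete the bio.  Names are
 * the ones used in this file.
 */
#if 0
static int
xop_strategy_frontend_sketch(hammer2_inode_t *ip, struct bio *bio)
{
	hammer2_xop_strategy_t *xop;

	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_STRATEGY);
	xop->finished = 0;
	xop->bio = bio;
	xop->lbase = bio->bio_offset;
	hammer2_mtx_init(&xop->lock, "h2bio");
	hammer2_xop_start(&xop->head, &hammer2_strategy_read_desc);

	return (0);	/* bio completes asynchronously via the backend */
}
#endif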
static int hammer2_strategy_read(struct vop_strategy_args *ap);
static int hammer2_strategy_write(struct vop_strategy_args *ap);
static void hammer2_strategy_read_completion(hammer2_chain_t *focus,
				const char *data, struct bio *bio);

static hammer2_off_t hammer2_dedup_lookup(hammer2_dev_t *hmp,
				char **datap, int pblksize);
int
hammer2_vop_strategy(struct vop_strategy_args *ap)
{
	struct buf *bp = ap->a_bio->bio_buf;
	int error;

	switch(bp->b_cmd) {
	case BUF_CMD_READ:
		error = hammer2_strategy_read(ap);
		++hammer2_iod_file_read;
		break;
	case BUF_CMD_WRITE:
		error = hammer2_strategy_write(ap);
		++hammer2_iod_file_write;
		break;
	default:
		bp->b_error = error = EINVAL;
		bp->b_flags |= B_ERROR;
		biodone(ap->a_bio);
		break;
	}
	return (error);
}
/*
 * Return the largest contiguous physical disk range for the logical
 * request, in bytes.
 *
 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
 *
 * Basically disabled, the logical buffer write thread has to deal with
 * buffers one-at-a-time.  Note that this should not prevent cluster_read()
 * from reading-ahead, it simply prevents it from trying to form a single
 * cluster buffer for the logical request.  H2 already uses 64KB buffers!
 */
int
hammer2_vop_bmap(struct vop_bmap_args *ap)
{
	*ap->a_doffsetp = NOOFFSET;
	if (ap->a_runp)
		*ap->a_runp = 0;
	if (ap->a_runb)
		*ap->a_runb = 0;
	return (EOPNOTSUPP);
}
/****************************************************************************
 *				READ SUPPORT				    *
 ****************************************************************************/
/*
 * Callback used in the read path when a block is compressed with LZ4.
 */
static
void
hammer2_decompress_LZ4_callback(const char *data, u_int bytes, struct bio *bio)
{
	struct buf *bp;
	char *compressed_buffer;
	int compressed_size;
	int result;

	bp = bio->bio_buf;

	/*
	 * if bio->bio_caller_info2.index &&
	 *	bio->bio_caller_info1.uvalue32 !=
	 *	crc32(bp->b_data, bp->b_bufsize) --- return error
	 */

	KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
	compressed_size = *(const int *)data;
	KKASSERT((uint32_t)compressed_size <= bytes - sizeof(int));

	compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
	result = LZ4_decompress_safe(__DECONST(char *, &data[sizeof(int)]),
				     compressed_buffer,
				     compressed_size,
				     bp->b_bufsize);
	if (result < 0) {
		kprintf("READ PATH: Error during decompression."
			"bio %016jx/%d\n",
			(intmax_t)bio->bio_offset, bytes);
		/* make sure it isn't random garbage */
		bzero(compressed_buffer, bp->b_bufsize);
	}
	KKASSERT(result <= bp->b_bufsize);
	bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
	if (result < bp->b_bufsize)
		bzero(bp->b_data + result, bp->b_bufsize - result);
	objcache_put(cache_buffer_read, compressed_buffer);
	bp->b_resid = 0;
	bp->b_flags |= B_AGE;
}
/*
 * Callback used in the read path when a block is compressed with ZLIB.
 * It is almost identical to the LZ4 callback, so in theory they can be
 * unified, but we didn't want to make changes in the bio structure for
 * that.
 */
static
void
hammer2_decompress_ZLIB_callback(const char *data, u_int bytes, struct bio *bio)
{
	struct buf *bp;
	char *compressed_buffer;
	z_stream strm_decompress;
	int result;
	int ret;

	bp = bio->bio_buf;

	KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
	strm_decompress.avail_in = 0;
	strm_decompress.next_in = Z_NULL;

	ret = inflateInit(&strm_decompress);

	if (ret != Z_OK)
		kprintf("HAMMER2 ZLIB: Fatal error in inflateInit.\n");

	compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
	strm_decompress.next_in = __DECONST(char *, data);

	/* XXX supply proper size, subset of device bp */
	strm_decompress.avail_in = bytes;
	strm_decompress.next_out = compressed_buffer;
	strm_decompress.avail_out = bp->b_bufsize;

	ret = inflate(&strm_decompress, Z_FINISH);
	if (ret != Z_STREAM_END) {
		kprintf("HAMMER2 ZLIB: Fatal error during decompression.\n");
		bzero(compressed_buffer, bp->b_bufsize);
	}
	bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
	result = bp->b_bufsize - strm_decompress.avail_out;
	if (result < bp->b_bufsize)
		bzero(bp->b_data + result, strm_decompress.avail_out);
	objcache_put(cache_buffer_read, compressed_buffer);
	ret = inflateEnd(&strm_decompress);

	bp->b_resid = 0;
	bp->b_flags |= B_AGE;
}
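
/*
 * Framing note (sketch, not original code): an LZ4 block cannot be decoded
 * without knowing its compressed length, so the write path stores a native
 * int length prefix which the LZ4 callback above consumes.  A zlib stream
 * self-terminates (Z_STREAM_END), so the ZLIB callback needs no prefix.
 * Round trip, distilled; out_buffer is a placeholder destination and 'data'
 * is assumed to point at the on-media block:
 */
#if 0
	char out_buffer[65536];			/* placeholder destination */
	int clen;

	clen = *(const int *)data;		/* length prefix */
	LZ4_decompress_safe(__DECONST(char *, &data[sizeof(int)]),
			    out_buffer, clen, sizeof(out_buffer));
#endif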
/*
 * Logical buffer I/O, async read.
 */
static
int
hammer2_strategy_read(struct vop_strategy_args *ap)
{
	hammer2_xop_strategy_t *xop;
	struct bio *bio;
	struct bio *nbio;
	hammer2_inode_t *ip;
	hammer2_key_t lbase;

	bio = ap->a_bio;
	nbio = push_bio(bio);
	ip = VTOI(ap->a_vp);

	lbase = bio->bio_offset;
	KKASSERT(((int)lbase & HAMMER2_PBUFMASK) == 0);

	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_STRATEGY);
	xop->finished = 0;
	xop->bio = bio;
	xop->lbase = lbase;
	hammer2_mtx_init(&xop->lock, "h2bior");
	hammer2_xop_start(&xop->head, &hammer2_strategy_read_desc);
	/* asynchronous completion */

	return(0);
}
/*
 * Per-node XOP (threaded), do a synchronous lookup of the chain and
 * its data.  The frontend is asynchronous, so we are also responsible
 * for racing to terminate the frontend.
 */
void
hammer2_xop_strategy_read(hammer2_xop_t *arg, void *scratch, int clindex)
{
	hammer2_xop_strategy_t *xop = &arg->xop_strategy;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_chain_t *focus;
	hammer2_key_t key_dummy;
	hammer2_key_t lbase;
	struct bio *bio;
	struct buf *bp;
	const char *data;
	int error;

	/*
	 * Note that we can race completion of the bio supplied by
	 * the front-end so we cannot access it until we determine
	 * that we are the ones finishing it up.
	 */
	lbase = xop->lbase;

	/*
	 * This is difficult to optimize.  The logical buffer might be
	 * partially dirty (contain dummy zero-fill pages), which would
	 * mess up our crc calculation if we were to try a direct read.
	 * So for now we always double-buffer through the underlying
	 * storage.
	 *
	 * If not for the above problem we could conditionalize on
	 * (1) 64KB buffer, (2) one chain (not multi-master) and
	 * (3) !hammer2_double_buffer, and issue a direct read into the
	 * logical buffer.
	 */
	parent = hammer2_inode_chain(xop->head.ip1, clindex,
				     HAMMER2_RESOLVE_ALWAYS |
				     HAMMER2_RESOLVE_SHARED);
	if (parent) {
		chain = hammer2_chain_lookup(&parent, &key_dummy,
					     lbase, lbase,
					     &error,
					     HAMMER2_LOOKUP_ALWAYS |
					     HAMMER2_LOOKUP_SHARED);
		if (chain)
			error = chain->error;
	} else {
		error = HAMMER2_ERROR_EIO;
		chain = NULL;
	}
	error = hammer2_xop_feed(&xop->head, chain, clindex, error);
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	chain = NULL;	/* safety */
	parent = NULL;	/* safety */

	/*
	 * Race to finish the frontend.  First-to-complete.  bio is only
	 * valid if we are determined to be the ones able to complete
	 * the operation.
	 */
	if (xop->finished)
		return;
	hammer2_mtx_ex(&xop->lock);
	if (xop->finished) {
		hammer2_mtx_unlock(&xop->lock);
		return;
	}
	bio = xop->bio;
	bp = bio->bio_buf;

	/*
	 * Async operation has not completed and we now own the lock.
	 * Determine if we can complete the operation by issuing the
	 * frontend collection non-blocking.
	 *
	 * H2 double-buffers the data, setting B_NOTMETA on the logical
	 * buffer hints to the OS that the logical buffer should not be
	 * swapcached (since the device buffer can be).
	 *
	 * Also note that even for compressed data we would rather the
	 * kernel cache/swapcache device buffers more and (decompressed)
	 * logical buffers less, since that will significantly improve
	 * the amount of end-user data that can be cached.
	 *
	 * NOTE: The chain->data for xop->head.cluster.focus will be
	 *	 synchronized to the current cpu by xop_collect(),
	 *	 but other chains in the cluster might not be.
	 */
	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT);

	switch(error) {
	case 0:
		xop->finished = 1;
		hammer2_mtx_unlock(&xop->lock);
		bp->b_flags |= B_NOTMETA;
		focus = xop->head.cluster.focus;
		data = hammer2_xop_gdata(&xop->head)->buf;
		hammer2_strategy_read_completion(focus, data, xop->bio);
		hammer2_xop_pdata(&xop->head);
		biodone(bio);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		break;
	case HAMMER2_ERROR_ENOENT:
		xop->finished = 1;
		hammer2_mtx_unlock(&xop->lock);
		bp->b_flags |= B_NOTMETA;
		bp->b_resid = 0;
		bp->b_error = 0;
		bzero(bp->b_data, bp->b_bcount);
		biodone(bio);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		break;
	case HAMMER2_ERROR_EINPROGRESS:
		hammer2_mtx_unlock(&xop->lock);
		break;
	default:
		kprintf("xop_strategy_read: error %08x loff=%016jx\n",
			error, bp->b_loffset);
		xop->finished = 1;
		hammer2_mtx_unlock(&xop->lock);
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
		biodone(bio);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		break;
	}
}
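
/*
 * Sketch (illustration only): the first-to-complete race used above,
 * distilled.  Whichever side takes xop->lock and finds xop->finished
 * still clear claims the bio; everyone else backs off.
 */
#if 0
	hammer2_mtx_ex(&xop->lock);
	if (xop->finished) {
		hammer2_mtx_unlock(&xop->lock);
		return;				/* someone else won */
	}
	if (hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT) !=
	    HAMMER2_ERROR_EINPROGRESS) {
		xop->finished = 1;		/* we own the bio now */
		hammer2_mtx_unlock(&xop->lock);
		biodone(xop->bio);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	} else {
		hammer2_mtx_unlock(&xop->lock);
	}
#endif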
static
void
hammer2_strategy_read_completion(hammer2_chain_t *focus, const char *data,
				 struct bio *bio)
{
	struct buf *bp = bio->bio_buf;

	if (focus->bref.type == HAMMER2_BREF_TYPE_INODE) {
		/*
		 * Copy from in-memory inode structure.
		 */
		bcopy(((const hammer2_inode_data_t *)data)->u.data,
		      bp->b_data, HAMMER2_EMBEDDED_BYTES);
		bzero(bp->b_data + HAMMER2_EMBEDDED_BYTES,
		      bp->b_bcount - HAMMER2_EMBEDDED_BYTES);
		bp->b_resid = 0;
		bp->b_error = 0;
	} else if (focus->bref.type == HAMMER2_BREF_TYPE_DATA) {
		/*
		 * Data is on-media, record for live dedup.  Release the
		 * chain (try to free it) when done.  The data is still
		 * cached by both the buffer cache in front and the
		 * block device behind us.  This leaves more room in the
		 * LRU chain cache for meta-data chains which we really
		 * do want to retain.
		 *
		 * NOTE: Deduplication cannot be safely recorded for
		 *	 records without a check code.
		 */
		hammer2_dedup_record(focus, NULL, data);
		atomic_set_int(&focus->flags, HAMMER2_CHAIN_RELEASE);

		/*
		 * Decompression and copy.
		 */
		switch (HAMMER2_DEC_COMP(focus->bref.methods)) {
		case HAMMER2_COMP_LZ4:
			hammer2_decompress_LZ4_callback(data, focus->bytes,
							bio);
			/* b_resid set by call */
			break;
		case HAMMER2_COMP_ZLIB:
			hammer2_decompress_ZLIB_callback(data, focus->bytes,
							 bio);
			/* b_resid set by call */
			break;
		case HAMMER2_COMP_NONE:
			KKASSERT(focus->bytes <= bp->b_bcount);
			bcopy(data, bp->b_data, focus->bytes);
			if (focus->bytes < bp->b_bcount) {
				bzero(bp->b_data + focus->bytes,
				      bp->b_bcount - focus->bytes);
			}
			bp->b_resid = 0;
			bp->b_error = 0;
			break;
		default:
			panic("hammer2_strategy_read: "
			      "unknown compression type");
		}
	} else {
		panic("hammer2_strategy_read: unknown bref type");
	}
}
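
/*
 * Sketch: bref.methods packs the compression and check algorithms into one
 * byte; HAMMER2_ENC_COMP()/HAMMER2_ENC_CHECK() encode and HAMMER2_DEC_COMP()/
 * HAMMER2_DEC_CHECK() recover them, as in the switch above.  (Illustration
 * only.)
 */
#if 0
	uint8_t methods = HAMMER2_ENC_COMP(HAMMER2_COMP_LZ4) |
			  HAMMER2_ENC_CHECK(HAMMER2_CHECK_XXHASH64);

	KKASSERT(HAMMER2_DEC_COMP(methods) == HAMMER2_COMP_LZ4);
	KKASSERT(HAMMER2_DEC_CHECK(methods) == HAMMER2_CHECK_XXHASH64);
#endif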
/****************************************************************************
 *				WRITE SUPPORT				    *
 ****************************************************************************/
/*
 * Functions for compression in threads,
 * from hammer2_vnops.c
 */
static void hammer2_write_file_core(char *data, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase, int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp);
static void hammer2_compress_and_write(char *data, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase, int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp,
				int comp_algo, int check_algo);
static void hammer2_zero_check_and_write(char *data, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase, int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp,
				int check_algo);
static int test_block_zeros(const char *buf, size_t bytes);
static void zero_write(char *data, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase,
				hammer2_tid_t mtid, int *errorp);
static void hammer2_write_bp(hammer2_chain_t *chain, char *data,
				int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp,
				int check_algo);
/*
 * Logical buffer I/O, async write.
 */
static
int
hammer2_strategy_write(struct vop_strategy_args *ap)
{
	hammer2_xop_strategy_t *xop;
	hammer2_pfs_t *pmp;
	struct bio *bio;
	hammer2_inode_t *ip;

	bio = ap->a_bio;
	ip = VTOI(ap->a_vp);
	pmp = ip->pmp;

	atomic_set_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
	hammer2_lwinprog_ref(pmp);
	hammer2_trans_assert_strategy(pmp);
	hammer2_trans_init(pmp, HAMMER2_TRANS_BUFCACHE);

	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING |
				    HAMMER2_XOP_STRATEGY);
	xop->finished = 0;
	xop->bio = bio;
	xop->lbase = bio->bio_offset;
	hammer2_mtx_init(&xop->lock, "h2biow");
	hammer2_xop_start(&xop->head, &hammer2_strategy_write_desc);
	/* asynchronous completion */

	hammer2_lwinprog_wait(pmp, hammer2_flush_pipe);

	return(0);
}
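
/*
 * Sketch (illustration only): the lwinprog counter above acts as a simple
 * write pipeline limiter.  The frontend takes a ref per submitted buffer
 * and sleeps once more than hammer2_flush_pipe buffers are in flight; the
 * backend drops the ref when the XOP retires.
 */
#if 0
	hammer2_lwinprog_ref(pmp);			/* submit */
	hammer2_xop_start(&xop->head, &hammer2_strategy_write_desc);
	hammer2_lwinprog_wait(pmp, hammer2_flush_pipe);	/* throttle */
	/* ...backend completion path eventually runs... */
	hammer2_lwinprog_drop(pmp);
#endif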
/*
 * Per-node XOP (threaded).  Write the logical buffer to the media.
 *
 * This is a bit problematic because there may be multiple targets and
 * any of them may be able to release the bp.  In addition, if our
 * particular target is offline we don't want to block the bp (and thus
 * the frontend).  To accomplish this we copy the data to the per-thread
 * scratch buffer.
 */
void
hammer2_xop_strategy_write(hammer2_xop_t *arg, void *scratch, int clindex)
{
	hammer2_xop_strategy_t *xop = &arg->xop_strategy;
	hammer2_chain_t *parent;
	hammer2_key_t lbase;
	hammer2_inode_t *ip;
	struct bio *bio;
	struct buf *bp;
	int error;
	int lblksize;
	int pblksize;
	hammer2_off_t bio_offset;
	char *bio_data;

	/*
	 * We can only access the bp/bio if the frontend has not yet
	 * completed.
	 */
	if (xop->finished)
		return;
	hammer2_mtx_sh(&xop->lock);
	if (xop->finished) {
		hammer2_mtx_unlock(&xop->lock);
		return;
	}

	bio = xop->bio;			/* ephemeral */
	bp = bio->bio_buf;		/* ephemeral */
	ip = xop->head.ip1;		/* retained by ref */
	bio_offset = bio->bio_offset;
	bio_data = scratch;

	/* hammer2_trans_init(parent->hmp->spmp, HAMMER2_TRANS_BUFCACHE); */

	lblksize = hammer2_calc_logical(ip, bio->bio_offset, &lbase, NULL);
	pblksize = hammer2_calc_physical(ip, lbase);

	KKASSERT(lblksize <= MAXPHYS);
	bcopy(bp->b_data, bio_data, lblksize);

	hammer2_mtx_unlock(&xop->lock);
	bp = NULL;	/* safety, illegal to access after unlock */
	bio = NULL;	/* safety, illegal to access after unlock */

	parent = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
	hammer2_write_file_core(bio_data, ip, &parent,
				lbase, IO_ASYNC, pblksize,
				xop->head.mtid, &error);
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
		parent = NULL;	/* safety */
	}
	hammer2_xop_feed(&xop->head, NULL, clindex, error);

	/*
	 * Try to complete the operation on behalf of the front-end.
	 */
	if (xop->finished)
		return;
	hammer2_mtx_ex(&xop->lock);
	if (xop->finished) {
		hammer2_mtx_unlock(&xop->lock);
		return;
	}

	/*
	 * Async operation has not completed and we now own the lock.
	 * Determine if we can complete the operation by issuing the
	 * frontend collection non-blocking.
	 *
	 * H2 double-buffers the data, setting B_NOTMETA on the logical
	 * buffer hints to the OS that the logical buffer should not be
	 * swapcached (since the device buffer can be).
	 */
	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT);

	if (error == HAMMER2_ERROR_EINPROGRESS) {
		hammer2_mtx_unlock(&xop->lock);
		return;
	}

	/*
	 * Async operation has completed.
	 */
	xop->finished = 1;
	hammer2_mtx_unlock(&xop->lock);

	bio = xop->bio;		/* now owned by us */
	bp = bio->bio_buf;	/* now owned by us */

	if (error == HAMMER2_ERROR_ENOENT || error == 0) {
		bp->b_flags |= B_NOTMETA;
		bp->b_resid = 0;
		bp->b_error = 0;
		biodone(bio);
	} else {
		kprintf("xop_strategy_write: error %d loff=%016jx\n",
			error, bp->b_loffset);
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
		biodone(bio);
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	hammer2_trans_assert_strategy(ip->pmp);
	hammer2_lwinprog_drop(ip->pmp);
	hammer2_trans_done(ip->pmp, HAMMER2_TRANS_BUFCACHE);
}
/*
 * Wait for pending I/O to complete
 */
void
hammer2_bioq_sync(hammer2_pfs_t *pmp)
{
	hammer2_lwinprog_wait(pmp, 0);
}
/*
 * Assign physical storage at (cparent, lbase), returning a suitable chain
 * and setting *errorp appropriately.
 *
 * If no error occurs, the returned chain will be in a modified state.
 *
 * If an error occurs, the returned chain may or may not be NULL.  If
 * not-null any chain->error (if not 0) will also be rolled up into *errorp.
 * So the caller only needs to test *errorp.
 *
 * cparent can wind up being anything.
 *
 * If datap is not NULL, *datap points to the real data we intend to write.
 * If we can dedup the storage location we set *datap to NULL to indicate
 * to the caller that a dedup occurred.
 *
 * NOTE: Special case for data embedded in inode.
 */
static
hammer2_chain_t *
hammer2_assign_physical(hammer2_inode_t *ip, hammer2_chain_t **parentp,
			hammer2_key_t lbase, int pblksize,
			hammer2_tid_t mtid, char **datap, int *errorp)
{
	hammer2_chain_t *chain;
	hammer2_key_t key_dummy;
	hammer2_off_t dedup_off;
	int pradix = hammer2_getradix(pblksize);

	/*
	 * Locate the chain associated with lbase, return a locked chain.
	 * However, do not instantiate any data reference (which utilizes a
	 * device buffer) because we will be using direct IO via the
	 * logical buffer cache buffer.
	 */
	KKASSERT(pblksize >= HAMMER2_ALLOC_MIN);

	chain = hammer2_chain_lookup(parentp, &key_dummy,
				     lbase, lbase,
				     errorp,
				     HAMMER2_LOOKUP_NODATA);

	/*
	 * The lookup code should not return a DELETED chain to us, unless
	 * it's a short-file embedded in the inode.  Then it is possible for
	 * the lookup to return a deleted inode.
	 */
	if (chain && (chain->flags & HAMMER2_CHAIN_DELETED) &&
	    chain->bref.type != HAMMER2_BREF_TYPE_INODE) {
		kprintf("assign physical deleted chain @ "
			"%016jx (%016jx.%02x) ip %016jx\n",
			lbase, chain->bref.data_off, chain->bref.type,
			(intmax_t)ip->meta.inum);
	}

	if (chain == NULL) {
		/*
		 * We found a hole, create a new chain entry.
		 *
		 * NOTE: DATA chains are created without device backing
		 *	 store (nor do we want any).
		 */
		dedup_off = hammer2_dedup_lookup((*parentp)->hmp, datap,
						 pblksize);
		*errorp |= hammer2_chain_create(parentp, &chain, NULL, ip->pmp,
				       HAMMER2_ENC_CHECK(ip->meta.check_algo) |
				       HAMMER2_ENC_COMP(HAMMER2_COMP_NONE),
					        lbase, HAMMER2_PBUFRADIX,
					        HAMMER2_BREF_TYPE_DATA,
					        pblksize, mtid,
					        dedup_off, 0);
		/*ip->delta_dcount += pblksize;*/
	} else if (chain->error == 0) {
		switch (chain->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			/*
			 * The data is embedded in the inode, which requires
			 * a bit of special handling.
			 */
			*errorp |= hammer2_chain_modify_ip(ip, chain, mtid, 0);
			break;
		case HAMMER2_BREF_TYPE_DATA:
			dedup_off = hammer2_dedup_lookup(chain->hmp, datap,
							 pblksize);
			if (chain->bytes != pblksize) {
				*errorp |= hammer2_chain_resize(chain,
						     mtid, dedup_off,
						     pradix,
						     HAMMER2_MODIFY_OPTDATA);
			}

			/*
			 * DATA buffers must be marked modified whether the
			 * data is in a logical buffer or not.  We also have
			 * to make this call to fixup the chain data pointers
			 * after resizing in case this is an encrypted or
			 * compressed buffer.
			 */
			*errorp |= hammer2_chain_modify(chain, mtid, dedup_off,
						        HAMMER2_MODIFY_OPTDATA);
			break;
		default:
			panic("hammer2_assign_physical: bad type");
			/* NOT REACHED */
			break;
		}
	} else {
		*errorp = chain->error;
	}
	atomic_set_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
	return (chain);
}
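
/*
 * Usage sketch (not original code): the *datap contract.  Callers pass
 * their write data via *datap; on a live-dedup hit the function points the
 * new chain at the existing media block and NULLs *datap so the caller
 * skips the media write and only sets bref.methods and the check code.
 */
#if 0
	char *bdata = data;

	chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
					mtid, &bdata, &error);
	if (bdata == NULL) {
		/* dedup hit: update methods + check code only */
	} else {
		/* no dedup: write 'data' to the assigned block */
	}
#endif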
/*
 * hammer2_write_file_core() - hammer2_write_thread() helper
 *
 * The core write function which determines which path to take
 * depending on compression settings.  We also have to locate the
 * related chains so we can calculate and set the check data for
 * the blockref.
 */
static
void
hammer2_write_file_core(char *data, hammer2_inode_t *ip,
			hammer2_chain_t **parentp,
			hammer2_key_t lbase, int ioflag, int pblksize,
			hammer2_tid_t mtid, int *errorp)
{
	hammer2_chain_t *chain;
	char *bdata;

	*errorp = 0;

	switch(HAMMER2_DEC_ALGO(ip->meta.comp_algo)) {
	case HAMMER2_COMP_NONE:
		/*
		 * We have to assign physical storage to the buffer
		 * we intend to dirty or write now to avoid deadlocks
		 * in the strategy code later.
		 *
		 * This can return NOOFFSET for inode-embedded data.
		 * The strategy code will take care of it in that case.
		 */
		bdata = data;
		chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
						mtid, &bdata, errorp);
		if (*errorp) {
			/* skip modifications */
		} else if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
			hammer2_inode_data_t *wipdata;

			wipdata = &chain->data->ipdata;
			KKASSERT(wipdata->meta.op_flags &
				 HAMMER2_OPFLAG_DIRECTDATA);
			bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
			++hammer2_iod_file_wembed;
		} else if (bdata == NULL) {
			/*
			 * Copy of data already present on-media.
			 */
			chain->bref.methods =
				HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
				HAMMER2_ENC_CHECK(ip->meta.check_algo);
			hammer2_chain_setcheck(chain, data);
		} else {
			hammer2_write_bp(chain, data, ioflag, pblksize,
					 mtid, errorp, ip->meta.check_algo);
		}
		if (chain) {
			hammer2_chain_unlock(chain);
			hammer2_chain_drop(chain);
		}
		break;
	case HAMMER2_COMP_AUTOZERO:
		/*
		 * Check for zero-fill only
		 */
		hammer2_zero_check_and_write(data, ip, parentp,
					     lbase, ioflag, pblksize,
					     mtid, errorp,
					     ip->meta.check_algo);
		break;
	case HAMMER2_COMP_LZ4:
	case HAMMER2_COMP_ZLIB:
	default:
		/*
		 * Check for zero-fill and attempt compression.
		 */
		hammer2_compress_and_write(data, ip, parentp,
					   lbase, ioflag, pblksize,
					   mtid, errorp,
					   ip->meta.comp_algo,
					   ip->meta.check_algo);
		break;
	}
}
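
/*
 * Sketch: ip->meta.comp_algo packs the algorithm and a zlib level;
 * HAMMER2_DEC_ALGO()/HAMMER2_DEC_LEVEL() unpack them as used above and in
 * hammer2_compress_and_write().  The ENC macros are assumed to come from
 * hammer2_disk.h.  (Illustration only.)
 */
#if 0
	int comp_algo = HAMMER2_ENC_ALGO(HAMMER2_COMP_ZLIB) |
			HAMMER2_ENC_LEVEL(6);

	KKASSERT(HAMMER2_DEC_ALGO(comp_algo) == HAMMER2_COMP_ZLIB);
	KKASSERT(HAMMER2_DEC_LEVEL(comp_algo) == 6);
#endif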
/*
 * Generic function that performs the compression in the compression
 * write path.  The compression algorithm is determined by the settings
 * obtained from the inode.
 */
static
void
hammer2_compress_and_write(char *data, hammer2_inode_t *ip,
	hammer2_chain_t **parentp,
	hammer2_key_t lbase, int ioflag, int pblksize,
	hammer2_tid_t mtid, int *errorp, int comp_algo, int check_algo)
{
	hammer2_chain_t *chain;
	int comp_size;
	int comp_block_size;
	char *comp_buffer;
	char *bdata;

	/*
	 * An all-zeros write creates a hole unless the check code
	 * is disabled.  When the check code is disabled all writes
	 * are done in-place, including any all-zeros writes.
	 *
	 * NOTE: A snapshot will still force a copy-on-write
	 *	 (see the HAMMER2_CHECK_NONE in hammer2_chain.c).
	 */
	if (check_algo != HAMMER2_CHECK_NONE &&
	    test_block_zeros(data, pblksize)) {
		zero_write(data, ip, parentp, lbase, mtid, errorp);
		return;
	}

	/*
	 * Compression requested.  Try to compress the block.  We store
	 * the data normally if we cannot sufficiently compress it.
	 *
	 * We have a heuristic to detect files which are mostly
	 * uncompressable and avoid the compression attempt in that
	 * case.  If the compression heuristic is turned off, we always
	 * try to compress.
	 */
	comp_size = 0;
	comp_buffer = NULL;

	KKASSERT(pblksize / 2 <= 32768);

	if (ip->comp_heuristic < 8 || (ip->comp_heuristic & 7) == 0 ||
	    hammer2_always_compress) {
		z_stream strm_compress;
		int comp_level;
		int ret;

		switch(HAMMER2_DEC_ALGO(comp_algo)) {
		case HAMMER2_COMP_LZ4:
			/*
			 * We need to prefix with the size, LZ4
			 * doesn't do it for us.  Add the related
			 * overhead.
			 *
			 * NOTE: The LZ4 code seems to assume at least an
			 *	 8-byte buffer size granularity and may
			 *	 overrun the buffer if given a 4-byte
			 *	 aligned size.
			 */
			comp_buffer = objcache_get(cache_buffer_write,
						   M_INTWAIT);
			comp_size = LZ4_compress_limitedOutput(
					data,
					&comp_buffer[sizeof(int)],
					pblksize,
					pblksize / 2 - sizeof(int64_t));
			*(int *)comp_buffer = comp_size;
			if (comp_size)
				comp_size += sizeof(int);
			break;
		case HAMMER2_COMP_ZLIB:
			comp_level = HAMMER2_DEC_LEVEL(comp_algo);
			if (comp_level == 0)
				comp_level = 6;	/* default zlib compression */
			else if (comp_level < 6)
				comp_level = 6;
			else if (comp_level > 9)
				comp_level = 9;
			ret = deflateInit(&strm_compress, comp_level);
			if (ret != Z_OK) {
				kprintf("HAMMER2 ZLIB: fatal error "
					"on deflateInit.\n");
			}

			comp_buffer = objcache_get(cache_buffer_write,
						   M_INTWAIT);
			strm_compress.next_in = data;
			strm_compress.avail_in = pblksize;
			strm_compress.next_out = comp_buffer;
			strm_compress.avail_out = pblksize / 2;
			ret = deflate(&strm_compress, Z_FINISH);
			if (ret == Z_STREAM_END) {
				comp_size = pblksize / 2 -
					    strm_compress.avail_out;
			} else {
				comp_size = 0;
			}
			ret = deflateEnd(&strm_compress);
			break;
		default:
			kprintf("Error: Unknown compression method.\n");
			kprintf("Comp_method = %d.\n", comp_algo);
			break;
		}
	}

	if (comp_size == 0) {
		/*
		 * compression failed or turned off
		 */
		comp_block_size = pblksize;	/* safety */
		if (++ip->comp_heuristic > 128)
			ip->comp_heuristic = 8;
	} else {
		/*
		 * compression succeeded
		 */
		ip->comp_heuristic = 0;
		if (comp_size <= 1024) {
			comp_block_size = 1024;
		} else if (comp_size <= 2048) {
			comp_block_size = 2048;
		} else if (comp_size <= 4096) {
			comp_block_size = 4096;
		} else if (comp_size <= 8192) {
			comp_block_size = 8192;
		} else if (comp_size <= 16384) {
			comp_block_size = 16384;
		} else if (comp_size <= 32768) {
			comp_block_size = 32768;
		} else {
			panic("hammer2: WRITE PATH: "
			      "Weird comp_size value.");
			/* NOT REACHED */
			comp_block_size = pblksize;
		}

		/*
		 * Must zero the remainder or dedup (which operates on a
		 * physical block basis) will not find matches.
		 */
		if (comp_size < comp_block_size) {
			bzero(comp_buffer + comp_size,
			      comp_block_size - comp_size);
		}
	}

	/*
	 * Assign physical storage, data will be set to NULL if a live-dedup
	 * was successful.
	 */
	bdata = comp_size ? comp_buffer : data;
	chain = hammer2_assign_physical(ip, parentp, lbase, comp_block_size,
					mtid, &bdata, errorp);

	if (*errorp) {
		goto done;
	}

	if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
		hammer2_inode_data_t *wipdata;

		*errorp = hammer2_chain_modify_ip(ip, chain, mtid, 0);
		if (*errorp == 0) {
			wipdata = &chain->data->ipdata;
			KKASSERT(wipdata->meta.op_flags &
				 HAMMER2_OPFLAG_DIRECTDATA);
			bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
			++hammer2_iod_file_wembed;
		}
	} else if (bdata == NULL) {
		/*
		 * Live deduplication, a copy of the data is already present
		 * on the media.
		 */
		if (comp_size) {
			chain->bref.methods =
				HAMMER2_ENC_COMP(comp_algo) +
				HAMMER2_ENC_CHECK(check_algo);
		} else {
			chain->bref.methods =
				HAMMER2_ENC_COMP(
					HAMMER2_COMP_NONE) +
				HAMMER2_ENC_CHECK(check_algo);
		}
		bdata = comp_size ? comp_buffer : data;
		hammer2_chain_setcheck(chain, bdata);
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
	} else {
		hammer2_io_t *dio;

		KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);

		switch(chain->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			panic("hammer2_write_bp: unexpected inode\n");
			break;
		case HAMMER2_BREF_TYPE_DATA:
			/*
			 * Optimize out the read-before-write
			 * if possible.
			 */
			*errorp = hammer2_io_newnz(chain->hmp,
						   chain->bref.type,
						   chain->bref.data_off,
						   chain->bytes, &dio);
			if (*errorp) {
				hammer2_io_brelse(&dio);
				kprintf("hammer2: WRITE PATH: "
					"dbp bread error\n");
				break;
			}
			bdata = hammer2_io_data(dio, chain->bref.data_off);

			/*
			 * When loading the block make sure we don't
			 * leave garbage after the compressed data.
			 */
			if (comp_size) {
				chain->bref.methods =
					HAMMER2_ENC_COMP(comp_algo) +
					HAMMER2_ENC_CHECK(check_algo);
				bcopy(comp_buffer, bdata, comp_size);
			} else {
				chain->bref.methods =
					HAMMER2_ENC_COMP(
						HAMMER2_COMP_NONE) +
					HAMMER2_ENC_CHECK(check_algo);
				bcopy(data, bdata, pblksize);
			}

			/*
			 * The flush code doesn't calculate check codes for
			 * file data (doing so can result in excessive I/O),
			 * so we do it here.
			 */
			hammer2_chain_setcheck(chain, bdata);

			/*
			 * Device buffer is now valid, chain is no longer in
			 * the initial state.
			 *
			 * (No blockref table worries with file data)
			 */
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
			hammer2_dedup_record(chain, dio, bdata);

			/* Now write the related bdp. */
			if (ioflag & IO_SYNC) {
				/*
				 * Synchronous I/O requested.
				 */
				hammer2_io_bwrite(&dio);
			/*
			} else if ((ioflag & IO_DIRECT) &&
				   loff + n == pblksize) {
				hammer2_io_bdwrite(&dio);
			*/
			} else if (ioflag & IO_ASYNC) {
				hammer2_io_bawrite(&dio);
			} else {
				hammer2_io_bdwrite(&dio);
			}
			break;
		default:
			panic("hammer2_write_bp: bad chain type %d\n",
			      chain->bref.type);
			/* NOT REACHED */
			break;
		}
	}
done:
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
	if (comp_buffer)
		objcache_put(cache_buffer_write, comp_buffer);
}
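
/*
 * Sketch (equivalent logic, illustration only): the size-class ladder
 * above simply rounds comp_size up to the next power of two between 1024
 * bytes and pblksize / 2 (at most 32768); larger results store the block
 * uncompressed.
 */
#if 0
	comp_block_size = 1024;
	while (comp_block_size < comp_size)
		comp_block_size <<= 1;
	KKASSERT(comp_block_size <= 32768);
#endif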
/*
 * Function that performs zero-checking and writing without compression;
 * it corresponds to the default zero-checking path.
 */
static
void
hammer2_zero_check_and_write(char *data, hammer2_inode_t *ip,
	hammer2_chain_t **parentp,
	hammer2_key_t lbase, int ioflag, int pblksize,
	hammer2_tid_t mtid, int *errorp,
	int check_algo)
{
	hammer2_chain_t *chain;
	char *bdata;

	if (check_algo != HAMMER2_CHECK_NONE &&
	    test_block_zeros(data, pblksize)) {
		/*
		 * An all-zeros write creates a hole unless the check code
		 * is disabled.  When the check code is disabled all writes
		 * are done in-place, including any all-zeros writes.
		 *
		 * NOTE: A snapshot will still force a copy-on-write
		 *	 (see the HAMMER2_CHECK_NONE in hammer2_chain.c).
		 */
		zero_write(data, ip, parentp, lbase, mtid, errorp);
	} else {
		bdata = data;
		chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
						mtid, &bdata, errorp);
		if (*errorp) {
			/* skip modifications */
		} else if (bdata) {
			hammer2_write_bp(chain, data, ioflag, pblksize,
					 mtid, errorp, check_algo);
		} else {
			/* dedup occurred */
			chain->bref.methods =
				HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
				HAMMER2_ENC_CHECK(check_algo);
			hammer2_chain_setcheck(chain, data);
		}
		if (chain) {
			hammer2_chain_unlock(chain);
			hammer2_chain_drop(chain);
		}
	}
}
/*
 * A function to test whether a block of data contains only zeros;
 * returns TRUE (non-zero) if the block is all zeros.
 */
static
int
test_block_zeros(const char *buf, size_t bytes)
{
	size_t i;

	for (i = 0; i < bytes; i += sizeof(long)) {
		if (*(const long *)(buf + i) != 0)
			return (0);
	}
	return (1);
}
/*
 * Function to "write" a block that contains only zeros.
 */
static
void
zero_write(char *data, hammer2_inode_t *ip,
	   hammer2_chain_t **parentp,
	   hammer2_key_t lbase, hammer2_tid_t mtid, int *errorp)
{
	hammer2_chain_t *chain;
	hammer2_key_t key_dummy;

	chain = hammer2_chain_lookup(parentp, &key_dummy,
				     lbase, lbase,
				     errorp,
				     HAMMER2_LOOKUP_NODATA);
	if (chain) {
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
			hammer2_inode_data_t *wipdata;

			if (*errorp == 0) {
				*errorp = hammer2_chain_modify_ip(ip, chain,
								  mtid, 0);
			}
			if (*errorp == 0) {
				wipdata = &chain->data->ipdata;
				KKASSERT(wipdata->meta.op_flags &
					 HAMMER2_OPFLAG_DIRECTDATA);
				bzero(wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
				++hammer2_iod_file_wembed;
			}
		} else {
			/* chain->error ok for deletion */
			hammer2_chain_delete(*parentp, chain,
					     mtid, HAMMER2_DELETE_PERMANENT);
			++hammer2_iod_file_wzero;
		}
		atomic_set_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	} else {
		++hammer2_iod_file_wzero;
	}
}
/*
 * Function to write the data as-is, without performing any sort of
 * compression.  This function is used in the no-compression path and
 * in the default zero-checking path.
 */
static
void
hammer2_write_bp(hammer2_chain_t *chain, char *data, int ioflag,
		 int pblksize,
		 hammer2_tid_t mtid, int *errorp, int check_algo)
{
	hammer2_inode_data_t *wipdata;
	hammer2_io_t *dio;
	char *bdata;
	int error;

	error = 0;	/* XXX TODO below */

	KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);

	switch(chain->bref.type) {
	case HAMMER2_BREF_TYPE_INODE:
		wipdata = &chain->data->ipdata;
		KKASSERT(wipdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA);
		bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
		++hammer2_iod_file_wembed;
		break;
	case HAMMER2_BREF_TYPE_DATA:
		error = hammer2_io_newnz(chain->hmp,
					 chain->bref.type,
					 chain->bref.data_off,
					 chain->bytes, &dio);
		if (error) {
			hammer2_io_bqrelse(&dio);
			kprintf("hammer2: WRITE PATH: "
				"dbp bread error\n");
			break;
		}
		bdata = hammer2_io_data(dio, chain->bref.data_off);

		chain->bref.methods = HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
				      HAMMER2_ENC_CHECK(check_algo);
		bcopy(data, bdata, chain->bytes);

		/*
		 * The flush code doesn't calculate check codes for
		 * file data (doing so can result in excessive I/O),
		 * so we do it here.
		 */
		hammer2_chain_setcheck(chain, bdata);

		/*
		 * Device buffer is now valid, chain is no longer in
		 * the initial state.
		 *
		 * (No blockref table worries with file data)
		 */
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
		hammer2_dedup_record(chain, dio, bdata);

		if (ioflag & IO_SYNC) {
			/*
			 * Synchronous I/O requested.
			 */
			hammer2_io_bwrite(&dio);
		/*
		} else if ((ioflag & IO_DIRECT) &&
			   loff + n == pblksize) {
			hammer2_io_bdwrite(&dio);
		*/
		} else if (ioflag & IO_ASYNC) {
			hammer2_io_bawrite(&dio);
		} else {
			hammer2_io_bdwrite(&dio);
		}
		break;
	default:
		panic("hammer2_write_bp: bad chain type %d\n",
		      chain->bref.type);
		/* NOT REACHED */
		break;
	}
	*errorp = error;
}
/*
 * LIVE DEDUP HEURISTICS
 *
 * Record media and crc information for possible dedup operation.  Note
 * that the dedup mask bits must also be set in the related DIO for a dedup
 * to be fully validated (which is handled in the freemap allocation code).
 *
 * WARNING! This code is SMP safe but the heuristic allows SMP collisions.
 *	    All fields must be loaded into locals and validated.
 *
 * WARNING! Should only be used for file data and directory entries,
 *	    hammer2_chain_modify() only checks for the dedup case on data
 *	    chains.  Also, dedup data can only be recorded for committed
 *	    chains (so NOT strategy writes which can undergo further
 *	    modification after the fact!).
 */
void
hammer2_dedup_record(hammer2_chain_t *chain, hammer2_io_t *dio,
		     const char *data)
{
	hammer2_dev_t *hmp;
	hammer2_dedup_t *dedup;
	uint64_t crc;
	uint64_t mask;
	int best = 0;
	int i;
	int dticks;

	/*
	 * We can only record a dedup if we have media data to test against.
	 * If dedup is not enabled, return early, which allows a chain to
	 * remain marked MODIFIED (which might have benefits in special
	 * situations, though typically it does not).
	 */
	if (hammer2_dedup_enable == 0)
		return;
	if (dio == NULL) {
		dio = chain->dio;
		if (dio == NULL)
			return;
	}

	hmp = chain->hmp;

	switch(HAMMER2_DEC_CHECK(chain->bref.methods)) {
	case HAMMER2_CHECK_ISCSI32:
		/*
		 * XXX use the built-in crc (the dedup lookup sequencing
		 * needs to be fixed so the check code is already present
		 * when dedup_lookup is called)
		 */
#if 0
		crc = (uint64_t)(uint32_t)chain->bref.check.iscsi32.value;
#endif
		crc = XXH64(data, chain->bytes, XXH_HAMMER2_SEED);
		break;
	case HAMMER2_CHECK_XXHASH64:
		crc = chain->bref.check.xxhash64.value;
		break;
	case HAMMER2_CHECK_SHA192:
		/*
		 * XXX use the built-in crc (the dedup lookup sequencing
		 * needs to be fixed so the check code is already present
		 * when dedup_lookup is called)
		 */
#if 0
		crc = ((uint64_t *)chain->bref.check.sha192.data)[0] ^
		      ((uint64_t *)chain->bref.check.sha192.data)[1] ^
		      ((uint64_t *)chain->bref.check.sha192.data)[2];
#endif
		crc = XXH64(data, chain->bytes, XXH_HAMMER2_SEED);
		break;
	default:
		/*
		 * Cannot dedup without a check code
		 *
		 * NOTE: In particular, CHECK_NONE allows a sector to be
		 *	 overwritten without copy-on-write, recording
		 *	 a dedup block for a CHECK_NONE object would be
		 *	 a disaster!
		 */
		return;
	}

	atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEDUPABLE);

	dedup = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)];
	for (i = 0; i < 4; ++i) {
		if (dedup[i].data_crc == crc) {
			best = i;
			break;
		}
		dticks = (int)(dedup[i].ticks - dedup[best].ticks);
		if (dticks < 0 || dticks > hz * 60 * 30)
			best = i;
	}
	dedup += best;
	if (hammer2_debug & 0x40000) {
		kprintf("REC %04x %016jx %016jx\n",
			(int)(dedup - hmp->heur_dedup),
			crc,
			chain->bref.data_off);
	}
	dedup->ticks = ticks;
	dedup->data_off = chain->bref.data_off;
	dedup->data_crc = crc;

	/*
	 * Set the valid bits for the dedup only after we know the data
	 * buffer has been updated.  The alloc bits were set (and the valid
	 * bits cleared) when the media was allocated.
	 *
	 * This is done in two stages because the bulkfree code can race
	 * the gap between allocation and data population.  Both masks must
	 * be set before a bcmp/dedup operation is able to use the block.
	 */
	mask = hammer2_dedup_mask(dio, chain->bref.data_off, chain->bytes);
	atomic_set_64(&dio->dedup_valid, mask);

#if 0
	/*
	 * XXX removed.  MODIFIED is an integral part of the flush code,
	 * let's not just clear it
	 */
	/*
	 * Once we record the dedup the chain must be marked clean to
	 * prevent reuse of the underlying block.  Remember that this
	 * write occurs when the buffer cache is flushed (i.e. on sync(),
	 * fsync(), filesystem periodic sync, or when the kernel needs to
	 * flush a buffer), and not whenever the user write()s.
	 */
	if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
		atomic_add_long(&hammer2_count_modified_chains, -1);
		if (chain->pmp)
			hammer2_pfs_memory_wakeup(chain->pmp);
	}
#endif
}
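
/*
 * Flow sketch (not original code): how the two dedup halves cooperate.
 * hammer2_assign_physical() consults hammer2_dedup_lookup() before
 * allocating new media; completed reads and flushed writes publish
 * candidates via hammer2_dedup_record().
 */
#if 0
	char *bdata = data;			/* proposed write data */
	hammer2_off_t off;

	off = hammer2_dedup_lookup(hmp, &bdata, pblksize);
	if (off) {
		/* reuse off; lookup NULLed bdata to signal the dedup */
	}
	/* ...after the media block is populated... */
	hammer2_dedup_record(chain, dio, data);
#endif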
static
hammer2_off_t
hammer2_dedup_lookup(hammer2_dev_t *hmp, char **datap, int pblksize)
{
	hammer2_dedup_t *dedup;
	hammer2_io_t *dio;
	hammer2_off_t off;
	uint64_t crc;
	uint64_t mask;
	char *data;
	char *dtmp;
	int i;

	if (hammer2_dedup_enable == 0)
		return 0;
	data = *datap;
	if (data == NULL)
		return 0;

	/*
	 * XXX use the built-in crc (the dedup lookup sequencing
	 * needs to be fixed so the check code is already present
	 * when dedup_lookup is called)
	 */
	crc = XXH64(data, pblksize, XXH_HAMMER2_SEED);
	dedup = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)];

	if (hammer2_debug & 0x40000) {
		kprintf("LOC %04x/4 %016jx\n",
			(int)(dedup - hmp->heur_dedup),
			crc);
	}

	for (i = 0; i < 4; ++i) {
		off = dedup[i].data_off;
		cpu_ccfence();
		if (dedup[i].data_crc != crc)
			continue;
		if ((1 << (int)(off & HAMMER2_OFF_MASK_RADIX)) != pblksize)
			continue;
		dio = hammer2_io_getquick(hmp, off, pblksize);
		if (dio) {
			dtmp = hammer2_io_data(dio, off),
			mask = hammer2_dedup_mask(dio, off, pblksize);
			if ((dio->dedup_alloc & mask) == mask &&
			    (dio->dedup_valid & mask) == mask &&
			    bcmp(data, dtmp, pblksize) == 0) {
				if (hammer2_debug & 0x40000) {
					kprintf("DEDUP SUCCESS %016jx\n",
						(intmax_t)off);
				}
				hammer2_io_putblk(&dio);
				*datap = NULL;
				dedup[i].ticks = ticks;	/* update use */
				atomic_add_long(&hammer2_iod_file_wdedup,
						pblksize);
				return off;		/* RETURN */
			}
			hammer2_io_putblk(&dio);
		}
	}
	return 0;
}
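
/*
 * Validation sketch (illustration only): a candidate from the heuristic
 * is only usable when the DIO shows the sub-block both allocated and fully
 * written, closing the race against bulkfree described above
 * hammer2_dedup_record():
 */
#if 0
	mask = hammer2_dedup_mask(dio, off, pblksize);
	if ((dio->dedup_alloc & mask) == mask &&
	    (dio->dedup_valid & mask) == mask &&
	    bcmp(data, dtmp, pblksize) == 0) {
		/* safe to point the new bref at 'off' */
	}
#endif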
/*
 * Poof.  Races are ok, if someone gets in and reuses a dedup offset
 * before or while we are clearing it they will also recover the freemap
 * entry (set it to fully allocated), so a bulkfree race can only set it
 * to a possibly-free state.
 *
 * XXX ok, well, not really sure races are ok but going to run with it
 * for the moment.
 */
void
hammer2_dedup_clear(hammer2_dev_t *hmp)
{
	int i;

	for (i = 0; i < HAMMER2_DEDUP_HEUR_SIZE; ++i) {
		hmp->heur_dedup[i].data_off = 0;
		hmp->heur_dedup[i].ticks = ticks - 1;
	}
}