/*
 * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This module handles low level logical file I/O (strategy) which backs
 * the logical buffer cache.
 *
 * [De]compression, zero-block detection, check codes, and buffer cache
 * operations for file data are handled here.
 *
 * Live dedup makes its home here as well.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/uio.h>
#include <sys/objcache.h>
#include <sys/event.h>
#include <sys/file.h>
#include <vfs/fifofs/fifo.h>

#include "hammer2.h"
#include "hammer2_lz4.h"

#include "zlib/hammer2_zlib.h"
struct objcache *cache_buffer_read;
struct objcache *cache_buffer_write;
/*
 * Strategy code (async logical file buffer I/O from system)
 *
 * WARNING: The strategy code cannot safely use hammer2 transactions
 *	    as this can deadlock against vfs_sync's vfsync() call
 *	    if multiple flushes are queued.  All H2 structures must
 *	    already be present and ready for the DIO.
 *
 *	    Reads can be initiated asynchronously, writes have to be
 *	    spooled to a separate thread for action to avoid deadlocks.
 */
static void hammer2_strategy_xop_read(hammer2_xop_t *arg, int clindex);
static void hammer2_strategy_xop_write(hammer2_xop_t *arg, int clindex);
static int hammer2_strategy_read(struct vop_strategy_args *ap);
static int hammer2_strategy_write(struct vop_strategy_args *ap);
static void hammer2_strategy_read_completion(hammer2_chain_t *chain,
				char *data, struct bio *bio);

static void hammer2_dedup_record(hammer2_chain_t *chain, char *data);
static hammer2_off_t hammer2_dedup_lookup(hammer2_dev_t *hmp,
				char **datap, int pblksize);
int h2timer[32];	/* debugging instrumentation */
int h2last;
int h2lid;

#define TIMER(which) do {				\
	if (h2last)					\
		h2timer[h2lid] += (int)(ticks - h2last);\
	h2last = ticks;					\
	h2lid = (which);				\
} while(0)
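/*
 * Example usage (illustrative only): bracket successive phases of a
 * strategy call so each phase's tick delta accumulates in its own
 * h2timer[] slot.  TIMER(n) charges the elapsed ticks to the previous
 * phase and begins phase n:
 *
 *	TIMER(0);
 *	...phase 0 work...
 *	TIMER(1);
 *	...phase 1 work...
 */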
int
hammer2_vop_strategy(struct vop_strategy_args *ap)
{
	struct bio *biop;
	struct buf *bp;
	int error;

	biop = ap->a_bio;
	bp = biop->bio_buf;

	switch(bp->b_cmd) {
	case BUF_CMD_READ:
		error = hammer2_strategy_read(ap);
		++hammer2_iod_file_read;
		break;
	case BUF_CMD_WRITE:
		error = hammer2_strategy_write(ap);
		++hammer2_iod_file_write;
		break;
	default:
		bp->b_error = error = EINVAL;
		bp->b_flags |= B_ERROR;
		biodone(biop);
		break;
	}
	return (error);
}
/*
 * Return the largest contiguous physical disk range for the logical
 * request.
 *
 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
 *
 * Basically disabled, the logical buffer write thread has to deal with
 * buffers one-at-a-time.  Note that this should not prevent cluster_read()
 * from reading-ahead, it simply prevents it from trying to form a single
 * cluster buffer for the logical request.  H2 already uses 64KB buffers!
 */
int
hammer2_vop_bmap(struct vop_bmap_args *ap)
{
	*ap->a_doffsetp = NOOFFSET;
	if (ap->a_runp)
		*ap->a_runp = 0;
	if (ap->a_runb)
		*ap->a_runb = 0;
	return (EOPNOTSUPP);
}
/****************************************************************************
 *				READ SUPPORT				    *
 ****************************************************************************/
/*
 * Callback used in read path in case that a block is compressed with LZ4.
 */
static
void
hammer2_decompress_LZ4_callback(const char *data, u_int bytes, struct bio *bio)
{
	struct buf *bp;
	char *compressed_buffer;
	int compressed_size;
	int result;

	bp = bio->bio_buf;

#if 0
	if bio->bio_caller_info2.index &&
	      bio->bio_caller_info1.uvalue32 !=
	      crc32(bp->b_data, bp->b_bufsize) --- return error
#endif

	KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
	compressed_size = *(const int *)data;
	KKASSERT(compressed_size <= bytes - sizeof(int));

	compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
	result = LZ4_decompress_safe(__DECONST(char *, &data[sizeof(int)]),
				     compressed_buffer,
				     compressed_size,
				     bp->b_bufsize);
	if (result < 0) {
		kprintf("READ PATH: Error during decompression."
			"bio %016jx/%d\n",
			(intmax_t)bio->bio_offset, bytes);
		/* make sure it isn't random garbage */
		bzero(compressed_buffer, bp->b_bufsize);
	}
	KKASSERT(result <= bp->b_bufsize);
	bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
	if (result < bp->b_bufsize)
		bzero(bp->b_data + result, bp->b_bufsize - result);
	objcache_put(cache_buffer_read, compressed_buffer);
	bp->b_resid = 0;
	bp->b_flags |= B_AGE;
}
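/*
 * On-media layout assumed by the callback above (sketch; mirrors the
 * compression side in hammer2_compress_and_write() below): a native
 * int size prefix followed by the LZ4 payload, zero-filled out to the
 * physical block size so dedup can match whole physical blocks.
 *
 *	+---------------+----------------------+---------------------+
 *	| int comp_size | LZ4 compressed bytes | zero fill to pblock |
 *	+---------------+----------------------+---------------------+
 */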
/*
 * Callback used in read path in case that a block is compressed with ZLIB.
 * It is almost identical to LZ4 callback, so in theory they can be unified,
 * but we didn't want to make changes in bio structure for that.
 */
static
void
hammer2_decompress_ZLIB_callback(const char *data, u_int bytes, struct bio *bio)
{
	struct buf *bp;
	char *compressed_buffer;
	z_stream strm_decompress;
	int result;
	int ret;

	bp = bio->bio_buf;

	KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
	strm_decompress.avail_in = 0;
	strm_decompress.next_in = Z_NULL;

	ret = inflateInit(&strm_decompress);

	if (ret != Z_OK)
		kprintf("HAMMER2 ZLIB: Fatal error in inflateInit.\n");

	compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
	strm_decompress.next_in = __DECONST(char *, data);

	/* XXX supply proper size, subset of device bp */
	strm_decompress.avail_in = bytes;
	strm_decompress.next_out = compressed_buffer;
	strm_decompress.avail_out = bp->b_bufsize;

	ret = inflate(&strm_decompress, Z_FINISH);
	if (ret != Z_STREAM_END) {
		kprintf("HAMMER2 ZLIB: Fatal error during decompression.\n");
		bzero(compressed_buffer, bp->b_bufsize);
	}
	bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
	result = bp->b_bufsize - strm_decompress.avail_out;
	if (result < bp->b_bufsize)
		bzero(bp->b_data + result, strm_decompress.avail_out);
	objcache_put(cache_buffer_read, compressed_buffer);
	ret = inflateEnd(&strm_decompress);

	bp->b_resid = 0;
	bp->b_flags |= B_AGE;
}
/*
 * Logical buffer I/O, async read.
 */
static
int
hammer2_strategy_read(struct vop_strategy_args *ap)
{
	hammer2_xop_strategy_t *xop;
	struct buf *bp;
	struct bio *bio;
	struct bio *nbio;
	hammer2_inode_t *ip;
	hammer2_key_t lbase;

	bio = ap->a_bio;
	bp = bio->bio_buf;
	ip = VTOI(ap->a_vp);
	nbio = push_bio(bio);

	lbase = bio->bio_offset;
	KKASSERT(((int)lbase & HAMMER2_PBUFMASK) == 0);

	if (bp->b_bio1.bio_flags & BIO_SYNC) {
		xop = hammer2_xop_alloc(ip, 0);
	} else {
		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_ITERATOR);
	}
	xop->finished = 0;
	xop->bio = bio;
	xop->lbase = lbase;
	hammer2_mtx_init(&xop->lock, "h2bior");
	hammer2_xop_start(&xop->head, hammer2_strategy_xop_read);
	/* asynchronous completion */

	return(0);
}
/*
 * Per-node XOP (threaded), do a synchronous lookup of the chain and
 * its data.  The frontend is asynchronous, so we are also responsible
 * for racing to terminate the frontend.
 */
static
void
hammer2_strategy_xop_read(hammer2_xop_t *arg, int clindex)
{
	hammer2_xop_strategy_t *xop = &arg->xop_strategy;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t key_dummy;
	hammer2_key_t lbase;
	struct bio *bio;
	struct buf *bp;
	int cache_index = -1;
	int error;

	lbase = xop->lbase;
	bio = xop->bio;
	bp = bio->bio_buf;

	/*
	 * This is difficult to optimize.  The logical buffer might be
	 * partially dirty (contain dummy zero-fill pages), which would
	 * mess up our crc calculation if we were to try a direct read.
	 * So for now we always double-buffer through the underlying
	 * storage.
	 *
	 * If not for the above problem we could conditionalize on
	 * (1) 64KB buffer, (2) one chain (not multi-master) and
	 * (3) !hammer2_double_buffer, and issue a direct read into the
	 * logical buffer.
	 */
	parent = hammer2_inode_chain(xop->head.ip1, clindex,
				     HAMMER2_RESOLVE_ALWAYS |
				     HAMMER2_RESOLVE_SHARED);
	if (parent) {
		chain = hammer2_chain_lookup(&parent, &key_dummy,
					     lbase, lbase,
					     &cache_index,
					     HAMMER2_LOOKUP_ALWAYS |
					     HAMMER2_LOOKUP_SHARED);
		error = chain ? chain->error : 0;
	} else {
		error = EIO;
		chain = NULL;
	}
	error = hammer2_xop_feed(&xop->head, chain, clindex, error);
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	chain = NULL;	/* safety */
	parent = NULL;	/* safety */

	/*
	 * Race to finish the frontend
	 */
	if (xop->finished)
		return;
	hammer2_mtx_ex(&xop->lock);
	if (xop->finished) {
		hammer2_mtx_unlock(&xop->lock);
		return;
	}

	/*
	 * Async operation has not completed and we now own the lock.
	 * Determine if we can complete the operation by issuing the
	 * frontend collection non-blocking.
	 */
	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT);

	switch(error) {
	case 0:
		xop->finished = 1;
		hammer2_mtx_unlock(&xop->lock);
		chain = xop->head.cluster.focus;
		hammer2_strategy_read_completion(chain, (char *)chain->data,
						 xop->bio);
		biodone(bio);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		break;
	case ENOENT:
		xop->finished = 1;
		hammer2_mtx_unlock(&xop->lock);
		bp->b_resid = 0;
		bp->b_error = 0;
		bzero(bp->b_data, bp->b_bcount);
		biodone(bio);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		break;
	case EINPROGRESS:
		hammer2_mtx_unlock(&xop->lock);
		break;
	default:
		xop->finished = 1;
		hammer2_mtx_unlock(&xop->lock);
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
		biodone(bio);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		break;
	}
}
static
void
hammer2_strategy_read_completion(hammer2_chain_t *chain, char *data,
				 struct bio *bio)
{
	struct buf *bp = bio->bio_buf;

	if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
		/*
		 * Data is embedded in the inode (copy from inode).
		 */
		bcopy(((hammer2_inode_data_t *)data)->u.data,
		      bp->b_data, HAMMER2_EMBEDDED_BYTES);
		bzero(bp->b_data + HAMMER2_EMBEDDED_BYTES,
		      bp->b_bcount - HAMMER2_EMBEDDED_BYTES);
		bp->b_resid = 0;
		bp->b_error = 0;
	} else if (chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
		/*
		 * Data is on-media, record for live dedup.
		 */
		hammer2_dedup_record(chain, data);

		/*
		 * Decompression and copy.
		 */
		switch (HAMMER2_DEC_COMP(chain->bref.methods)) {
		case HAMMER2_COMP_LZ4:
			hammer2_decompress_LZ4_callback(data, chain->bytes,
							bio);
			break;
		case HAMMER2_COMP_ZLIB:
			hammer2_decompress_ZLIB_callback(data, chain->bytes,
							 bio);
			break;
		case HAMMER2_COMP_NONE:
			KKASSERT(chain->bytes <= bp->b_bcount);
			bcopy(data, bp->b_data, chain->bytes);
			if (chain->bytes < bp->b_bcount) {
				bzero(bp->b_data + chain->bytes,
				      bp->b_bcount - chain->bytes);
			}
			bp->b_flags |= B_NOTMETA;
			bp->b_resid = 0;
			bp->b_error = 0;
			break;
		default:
			panic("hammer2_strategy_read: "
			      "unknown compression type");
		}
	} else {
		panic("hammer2_strategy_read: unknown bref type");
	}
}
/****************************************************************************
 *				WRITE SUPPORT				    *
 ****************************************************************************/
/*
 * Functions for compression in threads,
 * from hammer2_vnops.c
 */
static void hammer2_write_file_core(struct buf *bp, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase, int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp);
static void hammer2_compress_and_write(struct buf *bp, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase, int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp,
				int comp_algo, int check_algo);
static void hammer2_zero_check_and_write(struct buf *bp, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase, int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp,
				int check_algo);
static int test_block_zeros(const char *buf, size_t bytes);
static void zero_write(struct buf *bp, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase,
				hammer2_tid_t mtid, int *errorp);
static void hammer2_write_bp(hammer2_chain_t *chain, struct buf *bp,
				int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp,
				int check_algo);
static
int
hammer2_strategy_write(struct vop_strategy_args *ap)
{
	hammer2_xop_strategy_t *xop;
	hammer2_pfs_t *pmp;
	struct bio *bio;
	struct buf *bp;
	hammer2_inode_t *ip;

	bio = ap->a_bio;
	bp = bio->bio_buf;
	ip = VTOI(ap->a_vp);
	pmp = ip->pmp;

	hammer2_lwinprog_ref(pmp);
	hammer2_trans_assert_strategy(pmp);

	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
	xop->finished = 0;
	xop->bio = bio;
	xop->lbase = bio->bio_offset;
	hammer2_mtx_init(&xop->lock, "h2biow");
	hammer2_xop_start(&xop->head, hammer2_strategy_xop_write);
	/* asynchronous completion */

	hammer2_lwinprog_wait(pmp, hammer2_flush_pipe);

	return(0);
}
/*
 * Per-node XOP (threaded).  Write the logical buffer to the media.
 */
static
void
hammer2_strategy_xop_write(hammer2_xop_t *arg, int clindex)
{
	hammer2_xop_strategy_t *xop = &arg->xop_strategy;
	hammer2_chain_t *parent;
	hammer2_key_t lbase;
	hammer2_inode_t *ip;
	struct bio *bio;
	struct buf *bp;
	int error;
	int lblksize;
	int pblksize;

	lbase = xop->lbase;
	bio = xop->bio;
	bp = bio->bio_buf;
	ip = xop->head.ip1;

	/* hammer2_trans_init(parent->hmp->spmp, HAMMER2_TRANS_BUFCACHE); */

	lblksize = hammer2_calc_logical(ip, bio->bio_offset, &lbase, NULL);
	pblksize = hammer2_calc_physical(ip, lbase);
	parent = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
	hammer2_write_file_core(bp, ip, &parent,
				lbase, IO_ASYNC, pblksize,
				xop->head.mtid, &error);
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
		parent = NULL;	/* safety */
	}
	error = hammer2_xop_feed(&xop->head, NULL, clindex, error);

	/*
	 * Race to finish the frontend
	 */
	if (xop->finished)
		return;
	hammer2_mtx_ex(&xop->lock);
	if (xop->finished) {
		hammer2_mtx_unlock(&xop->lock);
		return;
	}

	/*
	 * Async operation has not completed and we now own the lock.
	 * Determine if we can complete the operation by issuing the
	 * frontend collection non-blocking.
	 */
	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT);

	switch(error) {
	case ENOENT:
	case 0:
		xop->finished = 1;
		hammer2_mtx_unlock(&xop->lock);
		bp->b_resid = 0;
		bp->b_error = 0;
		biodone(bio);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		hammer2_lwinprog_drop(ip->pmp);
		break;
	case EINPROGRESS:
		hammer2_mtx_unlock(&xop->lock);
		break;
	default:
		xop->finished = 1;
		hammer2_mtx_unlock(&xop->lock);
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
		biodone(bio);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		hammer2_lwinprog_drop(ip->pmp);
		break;
	}
}
/*
 * Wait for pending I/O to complete
 */
void
hammer2_bioq_sync(hammer2_pfs_t *pmp)
{
	hammer2_lwinprog_wait(pmp, 0);
}
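/*
 * Sketch of the low-water in-progress throttle used by the write path
 * (illustrative only): the frontend refs the counter before starting
 * the XOP and sleeps until the backlog drops below hammer2_flush_pipe;
 * the backend drops the ref when the write completes.  With a pipe
 * depth of 0, as above, the wait simply drains all pending writes.
 *
 *	frontend:	hammer2_lwinprog_ref(pmp);
 *			hammer2_xop_start(&xop->head, ...);
 *			hammer2_lwinprog_wait(pmp, hammer2_flush_pipe);
 *	backend:	hammer2_lwinprog_drop(pmp);
 */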
/*
 * Create a new cluster at (cparent, lbase) and assign physical storage,
 * returning a cluster suitable for I/O.  The cluster will be in a modified
 * state.
 *
 * cparent can wind up being anything.
 *
 * If datap is not NULL, *datap points to the real data we intend to write.
 * If we can dedup the storage location we set *datap to NULL to indicate
 * to the caller that a dedup occurred.
 *
 * NOTE: Special case for data embedded in inode.
 */
static
hammer2_chain_t *
hammer2_assign_physical(hammer2_inode_t *ip, hammer2_chain_t **parentp,
			hammer2_key_t lbase, int pblksize,
			hammer2_tid_t mtid, char **datap, int *errorp)
{
	hammer2_chain_t *chain;
	hammer2_key_t key_dummy;
	hammer2_off_t dedup_off;
	int pradix = hammer2_getradix(pblksize);
	int cache_index = -1;

	/*
	 * Locate the chain associated with lbase, return a locked chain.
	 * However, do not instantiate any data reference (which utilizes a
	 * device buffer) because we will be using direct IO via the
	 * logical buffer cache buffer.
	 */
	KKASSERT(pblksize >= HAMMER2_ALLOC_MIN);

	chain = hammer2_chain_lookup(parentp, &key_dummy,
				     lbase, lbase,
				     &cache_index,
				     HAMMER2_LOOKUP_NODATA);
	if (chain == NULL) {
		/*
		 * We found a hole, create a new chain entry.
		 *
		 * NOTE: DATA chains are created without device backing
		 *	 store (nor do we want any).
		 */
		dedup_off = hammer2_dedup_lookup((*parentp)->hmp, datap,
						 pblksize);
		*errorp = hammer2_chain_create(parentp, &chain, ip->pmp,
					       lbase, HAMMER2_PBUFRADIX,
					       HAMMER2_BREF_TYPE_DATA,
					       pblksize, mtid,
					       dedup_off, 0);
		if (chain == NULL)
			panic("hammer2_chain_create: par=%p error=%d\n",
			      *parentp, *errorp);
		/*ip->delta_dcount += pblksize;*/
	} else {
		switch (chain->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			/*
			 * The data is embedded in the inode, which requires
			 * a bit different handling.  The chain is simply
			 * marked modified here; the caller copies the
			 * embedded data in.
			 */
			hammer2_chain_modify_ip(ip, chain, mtid, 0);
			break;
		case HAMMER2_BREF_TYPE_DATA:
			dedup_off = hammer2_dedup_lookup(chain->hmp, datap,
							 pblksize);
			if (chain->bytes != pblksize) {
				hammer2_chain_resize(ip, *parentp, chain,
						     mtid, dedup_off,
						     pradix,
						     HAMMER2_MODIFY_OPTDATA);
			}

			/*
			 * DATA buffers must be marked modified whether the
			 * data is in a logical buffer or not.  We also have
			 * to make this call to fixup the chain data pointers
			 * after resizing in case this is an encrypted or
			 * compressed buffer.
			 */
			hammer2_chain_modify(chain, mtid, dedup_off,
					     HAMMER2_MODIFY_OPTDATA);
			break;
		default:
			panic("hammer2_assign_physical: bad type");
			/* NOT REACHED */
			break;
		}
	}
	return (chain);
}
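/*
 * Illustrative use of the *datap dedup contract (sketch only; mirrors
 * the callers below): pass a pointer to the bytes you intend to write.
 * If it comes back NULL the block deduped against existing media and
 * only the check code still has to be set on the chain.
 *
 *	char *data = bp->b_data;
 *	chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
 *					mtid, &data, &error);
 *	if (data == NULL)
 *		hammer2_chain_setcheck(chain, bp->b_data);
 *	else
 *		hammer2_write_bp(chain, bp, ioflag, pblksize, ...);
 */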
/*
 * hammer2_write_file_core() - hammer2_write_thread() helper
 *
 * The core write function which determines which path to take
 * depending on compression settings.  We also have to locate the
 * related chains so we can calculate and set the check data for
 * the blocks involved.
 */
static
void
hammer2_write_file_core(struct buf *bp, hammer2_inode_t *ip,
			hammer2_chain_t **parentp,
			hammer2_key_t lbase, int ioflag, int pblksize,
			hammer2_tid_t mtid, int *errorp)
{
	hammer2_chain_t *chain;
	char *data = bp->b_data;

	switch(HAMMER2_DEC_ALGO(ip->meta.comp_algo)) {
	case HAMMER2_COMP_NONE:
		/*
		 * We have to assign physical storage to the buffer
		 * we intend to dirty or write now to avoid deadlocks
		 * in the strategy code later.
		 *
		 * This can return NOOFFSET for inode-embedded data.
		 * The strategy code will take care of it in that case.
		 */
		chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
						mtid, &data, errorp);
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
			hammer2_inode_data_t *wipdata;

			wipdata = &chain->data->ipdata;
			KKASSERT(wipdata->meta.op_flags &
				 HAMMER2_OPFLAG_DIRECTDATA);
			KKASSERT(bp->b_loffset == 0);
			bcopy(bp->b_data, wipdata->u.data,
			      HAMMER2_EMBEDDED_BYTES);
			++hammer2_iod_file_wembed;
		} else if (data == NULL) {
			/*
			 * Copy of data already present on-media.
			 */
			chain->bref.methods =
				HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
				HAMMER2_ENC_CHECK(ip->meta.check_algo);
			hammer2_chain_setcheck(chain, bp->b_data);
		} else {
			hammer2_write_bp(chain, bp, ioflag, pblksize,
					 mtid, errorp, ip->meta.check_algo);
		}
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		break;
	case HAMMER2_COMP_AUTOZERO:
		/*
		 * Check for zero-fill only
		 */
		hammer2_zero_check_and_write(bp, ip, parentp,
					     lbase, ioflag, pblksize,
					     mtid, errorp,
					     ip->meta.check_algo);
		break;
	case HAMMER2_COMP_LZ4:
	case HAMMER2_COMP_ZLIB:
	default:
		/*
		 * Check for zero-fill and attempt compression.
		 */
		hammer2_compress_and_write(bp, ip, parentp,
					   lbase, ioflag, pblksize,
					   mtid, errorp,
					   ip->meta.comp_algo,
					   ip->meta.check_algo);
		break;
	}
}
/*
 * Generic function that will perform the compression in compression
 * write path.  The compression algorithm is determined by the settings
 * obtained from the inode.
 */
static
void
hammer2_compress_and_write(struct buf *bp, hammer2_inode_t *ip,
	hammer2_chain_t **parentp,
	hammer2_key_t lbase, int ioflag, int pblksize,
	hammer2_tid_t mtid, int *errorp, int comp_algo, int check_algo)
{
	hammer2_chain_t *chain;
	int comp_size;
	int comp_block_size;
	char *comp_buffer;
	char *data;

	if (test_block_zeros(bp->b_data, pblksize)) {
		zero_write(bp, ip, parentp, lbase, mtid, errorp);
		return;
	}

	comp_size = 0;
	comp_buffer = NULL;

	KKASSERT(pblksize / 2 <= 32768);

	if (ip->comp_heuristic < 8 || (ip->comp_heuristic & 7) == 0) {
		z_stream strm_compress;
		int comp_level;
		int ret;

		switch(HAMMER2_DEC_ALGO(comp_algo)) {
		case HAMMER2_COMP_LZ4:
			comp_buffer = objcache_get(cache_buffer_write,
						   M_INTWAIT);
			comp_size = LZ4_compress_limitedOutput(
					bp->b_data,
					&comp_buffer[sizeof(int)],
					pblksize,
					pblksize / 2 - sizeof(int));
			/*
			 * We need to prefix with the size, LZ4
			 * doesn't do it for us.  Add the related
			 * overhead.
			 */
			*(int *)comp_buffer = comp_size;
			if (comp_size)
				comp_size += sizeof(int);
			break;
		case HAMMER2_COMP_ZLIB:
			comp_level = HAMMER2_DEC_LEVEL(comp_algo);
			if (comp_level == 0)
				comp_level = 6;	/* default zlib compression */
			else if (comp_level < 6)
				comp_level = 6;
			else if (comp_level > 9)
				comp_level = 9;
			ret = deflateInit(&strm_compress, comp_level);
			if (ret != Z_OK) {
				kprintf("HAMMER2 ZLIB: fatal error "
					"on deflateInit.\n");
			}

			comp_buffer = objcache_get(cache_buffer_write,
						   M_INTWAIT);
			strm_compress.next_in = bp->b_data;
			strm_compress.avail_in = pblksize;
			strm_compress.next_out = comp_buffer;
			strm_compress.avail_out = pblksize / 2;
			ret = deflate(&strm_compress, Z_FINISH);
			if (ret == Z_STREAM_END) {
				comp_size = pblksize / 2 -
					    strm_compress.avail_out;
			} else {
				comp_size = 0;
			}
			ret = deflateEnd(&strm_compress);
			break;
		default:
			kprintf("Error: Unknown compression method.\n");
			kprintf("Comp_method = %d.\n", comp_algo);
			break;
		}
	}

	if (comp_size == 0) {
		/*
		 * compression failed or turned off
		 */
		comp_block_size = pblksize;	/* safety */
		if (++ip->comp_heuristic > 128)
			ip->comp_heuristic = 8;
	} else {
		/*
		 * compression succeeded
		 */
		ip->comp_heuristic = 0;
		if (comp_size <= 1024) {
			comp_block_size = 1024;
		} else if (comp_size <= 2048) {
			comp_block_size = 2048;
		} else if (comp_size <= 4096) {
			comp_block_size = 4096;
		} else if (comp_size <= 8192) {
			comp_block_size = 8192;
		} else if (comp_size <= 16384) {
			comp_block_size = 16384;
		} else if (comp_size <= 32768) {
			comp_block_size = 32768;
		} else {
			panic("hammer2: WRITE PATH: "
			      "Weird comp_size value.");
			/* NOT REACHED */
			comp_block_size = pblksize;
		}

		/*
		 * Must zero the remainder or dedup (which operates on a
		 * physical block basis) will not find matches.
		 */
		if (comp_size < comp_block_size) {
			bzero(comp_buffer + comp_size,
			      comp_block_size - comp_size);
		}
	}

	/*
	 * Assign physical storage, data will be set to NULL if a live-dedup
	 * was successful.
	 */
	data = comp_size ? comp_buffer : bp->b_data;
	chain = hammer2_assign_physical(ip, parentp, lbase, comp_block_size,
					mtid, &data, errorp);

	if (*errorp) {
		kprintf("WRITE PATH: An error occurred while "
			"assigning physical space.\n");
		KKASSERT(chain == NULL);
		goto done;
	}

	if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
		hammer2_inode_data_t *wipdata;

		hammer2_chain_modify_ip(ip, chain, mtid, 0);
		wipdata = &chain->data->ipdata;
		KKASSERT(wipdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA);
		KKASSERT(bp->b_loffset == 0);
		bcopy(bp->b_data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
		++hammer2_iod_file_wembed;
	} else if (data == NULL) {
		/*
		 * Live deduplication, a copy of the data is already present
		 * on-media.
		 */
		char *bdata;

		if (comp_size) {
			chain->bref.methods =
				HAMMER2_ENC_COMP(comp_algo) +
				HAMMER2_ENC_CHECK(check_algo);
		} else {
			chain->bref.methods =
				HAMMER2_ENC_COMP(
					HAMMER2_COMP_NONE) +
				HAMMER2_ENC_CHECK(check_algo);
		}
		bdata = comp_size ? comp_buffer : bp->b_data;
		hammer2_chain_setcheck(chain, bdata);
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
	} else {
		hammer2_io_t *dio;
		char *bdata;

		KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);

		switch(chain->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			panic("hammer2_write_bp: unexpected inode\n");
			break;
		case HAMMER2_BREF_TYPE_DATA:
			/*
			 * Optimize out the read-before-write
			 * if possible.
			 */
			*errorp = hammer2_io_newnz(chain->hmp,
						   chain->bref.type,
						   chain->bref.data_off,
						   chain->bytes,
						   &dio);
			if (*errorp) {
				hammer2_io_brelse(&dio);
				kprintf("hammer2: WRITE PATH: "
					"dbp bread error\n");
				break;
			}
			bdata = hammer2_io_data(dio, chain->bref.data_off);

			/*
			 * When loading the block make sure we don't
			 * leave garbage after the compressed data.
			 */
			if (comp_size) {
				chain->bref.methods =
					HAMMER2_ENC_COMP(comp_algo) +
					HAMMER2_ENC_CHECK(check_algo);
				bcopy(comp_buffer, bdata, comp_size);
			} else {
				chain->bref.methods =
					HAMMER2_ENC_COMP(
						HAMMER2_COMP_NONE) +
					HAMMER2_ENC_CHECK(check_algo);
				bcopy(bp->b_data, bdata, pblksize);
			}

			/*
			 * The flush code doesn't calculate check codes for
			 * file data (doing so can result in excessive I/O),
			 * so we do it here.
			 *
			 * Record for dedup only after the DIO's buffer cache
			 * buffer has been updated.
			 */
			hammer2_chain_setcheck(chain, bdata);
			hammer2_dedup_record(chain, bdata);

			/*
			 * Device buffer is now valid, chain is no longer in
			 * the initial state.
			 *
			 * (No blockref table worries with file data)
			 */
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);

			/* Now write the related bdp. */
			if (ioflag & IO_SYNC) {
				/*
				 * Synchronous I/O requested.
				 */
				hammer2_io_bwrite(&dio);
			/*
			} else if ((ioflag & IO_DIRECT) &&
				   loff + n == pblksize) {
				hammer2_io_bdwrite(&dio);
			*/
			} else if (ioflag & IO_ASYNC) {
				hammer2_io_bawrite(&dio);
			} else {
				hammer2_io_bdwrite(&dio);
			}
			break;
		default:
			panic("hammer2_write_bp: bad chain type %d\n",
			      chain->bref.type);
			/* NOT REACHED */
			break;
		}
	}
done:
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
	if (comp_buffer)
		objcache_put(cache_buffer_write, comp_buffer);
}
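/*
 * Note on ip->comp_heuristic (summary of the logic above): values 0-7
 * attempt compression on every block.  Each failure bumps the counter
 * (capped at 128), after which only every 8th block, where
 * (comp_heuristic & 7) == 0, is test-compressed; a success resets the
 * counter to 0.  This cheaply skips incompressible files while still
 * probing them occasionally.
 */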
/*
 * Function that performs zero-checking and writing without compression,
 * it corresponds to the default zero-checking path.
 */
static
void
hammer2_zero_check_and_write(struct buf *bp, hammer2_inode_t *ip,
	hammer2_chain_t **parentp,
	hammer2_key_t lbase, int ioflag, int pblksize,
	hammer2_tid_t mtid, int *errorp,
	int check_algo)
{
	hammer2_chain_t *chain;
	char *data = bp->b_data;

	if (test_block_zeros(bp->b_data, pblksize)) {
		zero_write(bp, ip, parentp, lbase, mtid, errorp);
	} else {
		chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
						mtid, &data, errorp);
		if (data) {
			hammer2_write_bp(chain, bp, ioflag, pblksize,
					 mtid, errorp, check_algo);
		} /* else dedup occurred */
		if (chain) {
			hammer2_chain_unlock(chain);
			hammer2_chain_drop(chain);
		}
	}
}
/*
 * A function to test whether a block of data contains only zeros,
 * returns TRUE (non-zero) if the block is all zeros.
 */
static
int
test_block_zeros(const char *buf, size_t bytes)
{
	size_t i;

	for (i = 0; i < bytes; i += sizeof(long)) {
		if (*(const long *)(buf + i) != 0)
			return (0);
	}
	return (1);
}
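/*
 * Example (illustrative): both write paths call this before assigning
 * storage, so an all-zero logical buffer never consumes media:
 *
 *	if (test_block_zeros(bp->b_data, pblksize)) {
 *		zero_write(bp, ip, parentp, lbase, mtid, errorp);
 *		return;
 *	}
 */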
/*
 * Function to "write" a block that contains only zeros.
 */
static
void
zero_write(struct buf *bp, hammer2_inode_t *ip,
	   hammer2_chain_t **parentp,
	   hammer2_key_t lbase, hammer2_tid_t mtid, int *errorp __unused)
{
	hammer2_chain_t *chain;
	hammer2_key_t key_dummy;
	int cache_index = -1;

	chain = hammer2_chain_lookup(parentp, &key_dummy,
				     lbase, lbase,
				     &cache_index,
				     HAMMER2_LOOKUP_NODATA);
	if (chain) {
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
			hammer2_inode_data_t *wipdata;

			hammer2_chain_modify_ip(ip, chain, mtid, 0);
			wipdata = &chain->data->ipdata;
			KKASSERT(wipdata->meta.op_flags &
				 HAMMER2_OPFLAG_DIRECTDATA);
			KKASSERT(bp->b_loffset == 0);
			bzero(wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
			++hammer2_iod_file_wembed;
		} else {
			hammer2_chain_delete(*parentp, chain,
					     mtid, HAMMER2_DELETE_PERMANENT);
			++hammer2_iod_file_wzero;
		}
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	} else {
		++hammer2_iod_file_wzero;
	}
}
/*
 * Function to write the data as it is, without performing any sort of
 * compression.  This function is used in the non-compression path and
 * in the default zero-checking path.
 */
static
void
hammer2_write_bp(hammer2_chain_t *chain, struct buf *bp, int ioflag,
		 int pblksize,
		 hammer2_tid_t mtid, int *errorp, int check_algo)
{
	hammer2_inode_data_t *wipdata;
	hammer2_io_t *dio;
	char *bdata;
	int error;

	error = 0;	/* XXX TODO below */

	KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);

	switch(chain->bref.type) {
	case HAMMER2_BREF_TYPE_INODE:
		wipdata = &chain->data->ipdata;
		KKASSERT(wipdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA);
		KKASSERT(bp->b_loffset == 0);
		bcopy(bp->b_data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
		error = 0;
		++hammer2_iod_file_wembed;
		break;
	case HAMMER2_BREF_TYPE_DATA:
		error = hammer2_io_newnz(chain->hmp,
					 chain->bref.type,
					 chain->bref.data_off,
					 chain->bytes, &dio);
		if (error) {
			hammer2_io_bqrelse(&dio);
			kprintf("hammer2: WRITE PATH: "
				"dbp bread error\n");
			break;
		}
		bdata = hammer2_io_data(dio, chain->bref.data_off);

		chain->bref.methods = HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
				      HAMMER2_ENC_CHECK(check_algo);
		bcopy(bp->b_data, bdata, chain->bytes);

		/*
		 * The flush code doesn't calculate check codes for
		 * file data (doing so can result in excessive I/O),
		 * so we do it here.
		 *
		 * Record for dedup only after the DIO's buffer cache
		 * buffer has been updated.
		 */
		hammer2_chain_setcheck(chain, bdata);
		hammer2_dedup_record(chain, bdata);

		/*
		 * Device buffer is now valid, chain is no longer in
		 * the initial state.
		 *
		 * (No blockref table worries with file data)
		 */
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);

		if (ioflag & IO_SYNC) {
			/*
			 * Synchronous I/O requested.
			 */
			hammer2_io_bwrite(&dio);
		/*
		} else if ((ioflag & IO_DIRECT) &&
			   loff + n == pblksize) {
			hammer2_io_bdwrite(&dio);
		*/
		} else if (ioflag & IO_ASYNC) {
			hammer2_io_bawrite(&dio);
		} else {
			hammer2_io_bdwrite(&dio);
		}
		break;
	default:
		panic("hammer2_write_bp: bad chain type %d\n",
		      chain->bref.type);
		/* NOT REACHED */
		break;
	}
	KKASSERT(error == 0);	/* XXX TODO */
	*errorp = error;
}
/*
 * LIVE DEDUP HEURISTIC
 *
 * WARNING! This code is SMP safe but the heuristic allows SMP collisions.
 *	    All fields must be loaded into locals and validated.
 */
static
void
hammer2_dedup_record(hammer2_chain_t *chain, char *data)
{
	hammer2_dev_t *hmp;
	hammer2_dedup_t *dedup;
	uint64_t crc;
	int best = 0;
	int i;
	int dticks;

	hmp = chain->hmp;

	switch(HAMMER2_DEC_CHECK(chain->bref.methods)) {
	case HAMMER2_CHECK_ISCSI32:
		/*
		 * XXX use the built-in crc (the dedup lookup sequencing
		 * needs to be fixed so the check code is already present
		 * when dedup_lookup is called)
		 */
#if 0
		crc = (uint64_t)(uint32_t)chain->bref.check.iscsi32.value;
#endif
		crc = XXH64(data, chain->bytes, XXH_HAMMER2_SEED);
		break;
	case HAMMER2_CHECK_XXHASH64:
		crc = chain->bref.check.xxhash64.value;
		break;
	case HAMMER2_CHECK_SHA192:
		/*
		 * XXX use the built-in crc (the dedup lookup sequencing
		 * needs to be fixed so the check code is already present
		 * when dedup_lookup is called)
		 */
#if 0
		crc = ((uint64_t *)chain->bref.check.sha192.data)[0] ^
		      ((uint64_t *)chain->bref.check.sha192.data)[1] ^
		      ((uint64_t *)chain->bref.check.sha192.data)[2];
#endif
		crc = XXH64(data, chain->bytes, XXH_HAMMER2_SEED);
		break;
	default:
		/*
		 * Cannot dedup without a check code
		 */
		return;
	}
	dedup = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)];
	for (i = 0; i < 4; ++i) {
		if (dedup[i].data_crc == crc) {
			best = i;
			break;
		}
		dticks = (int)(dedup[i].ticks - dedup[best].ticks);
		if (dticks < 0 || dticks > hz * 60 * 30)
			best = i;
	}
	dedup += best;
	if (hammer2_debug & 0x40000) {
		kprintf("REC %04x %016jx %016jx\n",
			(int)(dedup - hmp->heur_dedup),
			(intmax_t)crc,
			(intmax_t)chain->bref.data_off);
	}
	dedup->ticks = ticks;
	dedup->data_off = chain->bref.data_off;
	dedup->data_crc = crc;
	atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEDUP);
}
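/*
 * Sketch of the heuristic table geometry (illustrative): the low bits
 * of the 64-bit check code select a 4-entry set; within the set, a
 * matching data_crc wins, otherwise the stalest entry (by ticks) is
 * replaced:
 *
 *	set  = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)];
 *	slot = &set[best];		(0 <= best < 4)
 */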
static
hammer2_off_t
hammer2_dedup_lookup(hammer2_dev_t *hmp, char **datap, int pblksize)
{
	hammer2_dedup_t *dedup;
	hammer2_io_t *dio;
	hammer2_off_t off;
	uint64_t crc;
	char *data;
	int i;

	data = *datap;
	if (data == NULL)
		return(0);

	/*
	 * XXX use the built-in crc (the dedup lookup sequencing
	 * needs to be fixed so the check code is already present
	 * when dedup_lookup is called)
	 */
	crc = XXH64(data, pblksize, XXH_HAMMER2_SEED);
	dedup = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)];

	if (hammer2_debug & 0x40000) {
		kprintf("LOC %04x/4 %016jx\n",
			(int)(dedup - hmp->heur_dedup),
			(intmax_t)crc);
	}

	for (i = 0; i < 4; ++i) {
		off = dedup[i].data_off;
		cpu_ccfence();
		if (dedup[i].data_crc != crc)
			continue;
		if ((1 << (int)(off & HAMMER2_OFF_MASK_RADIX)) != pblksize)
			continue;
		dio = hammer2_io_getquick(hmp, off, pblksize);
		if (dio &&
		    bcmp(data, hammer2_io_data(dio, off), pblksize) == 0) {
			if (hammer2_debug & 0x40000) {
				kprintf("DEDUP SUCCESS %016jx\n",
					(intmax_t)off);
			}
			hammer2_io_putblk(&dio);
			*datap = NULL;
			dedup[i].ticks = ticks;	/* update use */
			++hammer2_iod_file_wdedup;
			return off;		/* RETURN */
		}
		if (dio)
			hammer2_io_putblk(&dio);
	}
	return 0;
}
/*
 * Poof.  Races are ok, if someone gets in and reuses a dedup offset
 * before or while we are clearing it they will also recover the freemap
 * entry (set it to fully allocated), so a bulkfree race can only set it
 * to a possibly-free state.
 *
 * XXX ok, well, not really sure races are ok but going to run with it
 *     for the moment.
 */
void
hammer2_dedup_clear(hammer2_dev_t *hmp)
{
	int i;

	for (i = 0; i < HAMMER2_DEDUP_HEUR_SIZE; ++i) {
		hmp->heur_dedup[i].data_off = 0;
		hmp->heur_dedup[i].ticks = ticks - 1;
	}
}