/*
 * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This module handles low level logical file I/O (strategy) which backs
 * the logical buffer cache.
 *
 * [De]compression, zero-block, check codes, and buffer cache operations
 * for file data are handled here.
 *
 * Live dedup makes its home here as well.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/objcache.h>
#include <sys/event.h>

#include <vfs/fifofs/fifo.h>

#include "hammer2.h"
#include "hammer2_lz4.h"

#include "zlib/hammer2_zlib.h"

struct objcache *cache_buffer_read;
struct objcache *cache_buffer_write;

/*
 * Strategy code (async logical file buffer I/O from system)
 *
 * Except for the transaction init (which should normally not block),
 * we essentially run the strategy operation asynchronously via a XOP.
 *
 * WARNING! The XOP deals with buffer synchronization.  It is not synchronized
 *          to the current cpu.
 *
 * XXX This isn't supposed to be able to deadlock against vfs_sync vfsync()
 *     calls but it has in the past when multiple flushes are queued.
 *
 * XXX We currently terminate the transaction once we get a quorum, otherwise
 *     the frontend can stall, but this can leave the remaining nodes with
 *     a potential flush conflict.  We need to delay flushes on those nodes
 *     until running transactions complete separately from the normal
 *     transaction sequencing.  FIXME TODO.
 */
static int hammer2_strategy_read(struct vop_strategy_args *ap);
static int hammer2_strategy_write(struct vop_strategy_args *ap);
static void hammer2_strategy_read_completion(hammer2_chain_t *focus,
                                const char *data, struct bio *bio);

static hammer2_off_t hammer2_dedup_lookup(hammer2_dev_t *hmp,
                                char **datap, int pblksize);

int
hammer2_vop_strategy(struct vop_strategy_args *ap)
{
        struct bio *biop;
        struct buf *bp;
        int error;

        biop = ap->a_bio;
        bp = biop->bio_buf;

        switch(bp->b_cmd) {
        case BUF_CMD_READ:
                error = hammer2_strategy_read(ap);
                ++hammer2_iod_file_read;
                break;
        case BUF_CMD_WRITE:
                error = hammer2_strategy_write(ap);
                ++hammer2_iod_file_write;
                break;
        default:
                bp->b_error = error = EINVAL;
                bp->b_flags |= B_ERROR;
                biodone(biop);
                break;
        }
        return (error);
}

/*
 * Return the largest contiguous physical disk range for the logical
 * request, in bytes.
 *
 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
 *
 * Basically disabled, the logical buffer write thread has to deal with
 * buffers one-at-a-time.  Note that this should not prevent cluster_read()
 * from reading-ahead, it simply prevents it from trying to form a single
 * cluster buffer for the logical request.  H2 already uses 64KB buffers!
 */
int
hammer2_vop_bmap(struct vop_bmap_args *ap)
{
        *ap->a_doffsetp = NOOFFSET;
        if (ap->a_runp)
                *ap->a_runp = 0;
        if (ap->a_runb)
                *ap->a_runb = 0;
        return (EOPNOTSUPP);
}
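
/*
 * NOTE: Setting *ap->a_doffsetp to NOOFFSET tells the kernel that no
 *       logical->device offset translation is available, so the buffer
 *       cache pushes logical buffers through the strategy code above
 *       one at a time instead of clustering the device I/O itself.
 */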

/****************************************************************************
 *                              READ SUPPORT                                *
 ****************************************************************************/

/*
 * Callback used in read path in case that a block is compressed with LZ4.
 */
static
void
hammer2_decompress_LZ4_callback(const char *data, u_int bytes, struct bio *bio)
{
        struct buf *bp;
        char *compressed_buffer;
        int compressed_size;
        int result;

        bp = bio->bio_buf;

#if 0
        if bio->bio_caller_info2.index &&
              bio->bio_caller_info1.uvalue32 !=
              crc32(bp->b_data, bp->b_bufsize) --- return error
#endif

        KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
        compressed_size = *(const int *)data;
        KKASSERT((uint32_t)compressed_size <= bytes - sizeof(int));
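
        /*
         * On-media layout of an LZ4-compressed block (sketch):
         *
         *      [ int compressed_size | compressed_size bytes of LZ4
         *        payload | zero-fill to the physical block size ]
         *
         * e.g. a 64KB logical buffer that compresses to 9000 bytes is
         * stored as a 4-byte size prefix plus 9000 bytes of payload in
         * a 16KB physical block (see the power-of-2 rounding in
         * hammer2_compress_and_write() below).
         */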

        compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
        result = LZ4_decompress_safe(__DECONST(char *, &data[sizeof(int)]),
                                     compressed_buffer,
                                     compressed_size,
                                     bp->b_bufsize);
        if (result < 0) {
                kprintf("READ PATH: Error during decompression."
                        "bio %016jx/%d\n",
                        (intmax_t)bio->bio_offset, bytes);
                /* make sure it isn't random garbage */
                bzero(compressed_buffer, bp->b_bufsize);
        }
        KKASSERT(result <= bp->b_bufsize);
        bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
        if (result < bp->b_bufsize)
                bzero(bp->b_data + result, bp->b_bufsize - result);
        objcache_put(cache_buffer_read, compressed_buffer);
        bp->b_resid = 0;
        bp->b_flags |= B_AGE;
}
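
/*
 * NOTE: B_AGE above hints that the decompressed logical buffer may be
 *       reused early.  H2 double-buffers file data, so the compressed
 *       copy remains cached at the device buffer layer; aging logical
 *       buffers out quickly leaves more memory for (denser) on-media
 *       data (see the cache/swapcache notes in the read path below).
 */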

/*
 * Callback used in read path in case that a block is compressed with ZLIB.
 * It is almost identical to LZ4 callback, so in theory they can be unified,
 * but we didn't want to make changes in bio structure for that.
 */
static
void
hammer2_decompress_ZLIB_callback(const char *data, u_int bytes, struct bio *bio)
{
        struct buf *bp;
        char *compressed_buffer;
        z_stream strm_decompress;
        int result;
        int ret;

        bp = bio->bio_buf;

        KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
        strm_decompress.avail_in = 0;
        strm_decompress.next_in = Z_NULL;

        ret = inflateInit(&strm_decompress);

        if (ret != Z_OK)
                kprintf("HAMMER2 ZLIB: Fatal error in inflateInit.\n");

        compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
        strm_decompress.next_in = __DECONST(char *, data);

        /* XXX supply proper size, subset of device bp */
        strm_decompress.avail_in = bytes;
        strm_decompress.next_out = compressed_buffer;
        strm_decompress.avail_out = bp->b_bufsize;

        ret = inflate(&strm_decompress, Z_FINISH);
        if (ret != Z_STREAM_END) {
                kprintf("HAMMER2 ZLIB: Fatal error during decompression.\n");
                bzero(compressed_buffer, bp->b_bufsize);
        }
        bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
        result = bp->b_bufsize - strm_decompress.avail_out;
        if (result < bp->b_bufsize)
                bzero(bp->b_data + result, strm_decompress.avail_out);
        objcache_put(cache_buffer_read, compressed_buffer);
        ret = inflateEnd(&strm_decompress);

        bp->b_resid = 0;
        bp->b_flags |= B_AGE;
}

/*
 * Logical buffer I/O, async read.
 */
static
int
hammer2_strategy_read(struct vop_strategy_args *ap)
{
        hammer2_xop_strategy_t *xop;
        struct bio *bio;
        hammer2_inode_t *ip;
        hammer2_key_t lbase;

        bio = ap->a_bio;
        ip = VTOI(ap->a_vp);

        lbase = bio->bio_offset;
        KKASSERT(((int)lbase & HAMMER2_PBUFMASK) == 0);

        xop = hammer2_xop_alloc(ip, HAMMER2_XOP_STRATEGY);
        xop->finished = 0;
        xop->bio = bio;
        xop->lbase = lbase;
        hammer2_mtx_init(&xop->lock, "h2bior");
        hammer2_xop_start(&xop->head, &hammer2_strategy_read_desc);
        /* asynchronous completion */

        return(0);
}
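
/*
 * NOTE: hammer2_strategy_read() above returns without waiting.  The
 *       bio is completed by whichever racer gets there first: a
 *       per-node XOP backend thread below issues a non-blocking
 *       collect and, if it succeeds, finishes the frontend's bio
 *       itself.
 */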

/*
 * Per-node XOP (threaded), do a synchronous lookup of the chain and
 * its data.  The frontend is asynchronous, so we are also responsible
 * for racing to terminate the frontend.
 */
void
hammer2_xop_strategy_read(hammer2_xop_t *arg, void *scratch, int clindex)
{
        hammer2_xop_strategy_t *xop = &arg->xop_strategy;
        hammer2_chain_t *parent;
        hammer2_chain_t *chain;
        hammer2_chain_t *focus;
        hammer2_key_t key_dummy;
        hammer2_key_t lbase;
        struct bio *bio;
        struct buf *bp;
        const char *data;
        int error;

        /*
         * Note that we can race completion of the bio supplied by
         * the front-end so we cannot access it until we determine
         * that we are the ones finishing it up.
         */
        lbase = xop->lbase;

        /*
         * This is difficult to optimize.  The logical buffer might be
         * partially dirty (contain dummy zero-fill pages), which would
         * mess up our crc calculation if we were to try a direct read.
         * So for now we always double-buffer through the underlying
         * storage.
         *
         * If not for the above problem we could conditionalize on
         * (1) 64KB buffer, (2) one chain (not multi-master) and
         * (3) !hammer2_double_buffer, and issue a direct read into the
         * logical buffer.
         */
        parent = hammer2_inode_chain(xop->head.ip1, clindex,
                                     HAMMER2_RESOLVE_ALWAYS |
                                     HAMMER2_RESOLVE_SHARED);

        chain = hammer2_chain_lookup(&parent, &key_dummy,
                                     lbase, lbase,
                                     &error,
                                     HAMMER2_LOOKUP_ALWAYS |
                                     HAMMER2_LOOKUP_SHARED);
        if (chain)
                error = chain->error;
        else
                error = HAMMER2_ERROR_EIO;

        error = hammer2_xop_feed(&xop->head, chain, clindex, error);
        if (chain) {
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
        }
        if (parent) {
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);
        }
        chain = NULL;   /* safety */
        parent = NULL;  /* safety */

        /*
         * Race to finish the frontend.  First-to-complete.  bio is only
         * valid if we are determined to be the ones able to complete
         * the operation.
         */
        if (xop->finished)
                return;
        hammer2_mtx_ex(&xop->lock);
        if (xop->finished) {
                hammer2_mtx_unlock(&xop->lock);
                return;
        }

        bio = xop->bio;
        bp = bio->bio_buf;

        /*
         * Async operation has not completed and we now own the lock.
         * Determine if we can complete the operation by issuing the
         * frontend collection non-blocking.
         *
         * H2 double-buffers the data, setting B_NOTMETA on the logical
         * buffer hints to the OS that the logical buffer should not be
         * swapcached (since the device buffer can be).
         *
         * Also note that even for compressed data we would rather the
         * kernel cache/swapcache device buffers more and (decompressed)
         * logical buffers less, since that will significantly improve
         * the amount of end-user data that can be cached.
         *
         * NOTE: The chain->data for xop->head.cluster.focus will be
         *       synchronized to the current cpu by xop_collect(),
         *       but other chains in the cluster might not be.
         */
        error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT);

        switch(error) {
        case 0:
                xop->finished = 1;
                hammer2_mtx_unlock(&xop->lock);
                bp->b_flags |= B_NOTMETA;
                focus = xop->head.cluster.focus;
                data = hammer2_xop_gdata(&xop->head)->buf;
                hammer2_strategy_read_completion(focus, data, xop->bio);
                hammer2_xop_pdata(&xop->head);
                biodone(bio);
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
                break;
        case HAMMER2_ERROR_ENOENT:
                xop->finished = 1;
                hammer2_mtx_unlock(&xop->lock);
                bp->b_flags |= B_NOTMETA;
                bp->b_resid = 0;
                bp->b_error = 0;
                bzero(bp->b_data, bp->b_bcount);
                biodone(bio);
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
                break;
        case HAMMER2_ERROR_EINPROGRESS:
                hammer2_mtx_unlock(&xop->lock);
                break;
        default:
                kprintf("xop_strategy_read: error %08x loff=%016jx\n",
                        error, bp->b_loffset);
                xop->finished = 1;
                hammer2_mtx_unlock(&xop->lock);
                bp->b_flags |= B_ERROR;
                bp->b_error = EIO;
                biodone(bio);
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
                break;
        }
}

static
void
hammer2_strategy_read_completion(hammer2_chain_t *focus, const char *data,
                                 struct bio *bio)
{
        struct buf *bp = bio->bio_buf;

        if (focus->bref.type == HAMMER2_BREF_TYPE_INODE) {
                /*
                 * Copy from in-memory inode structure.
                 */
                bcopy(((const hammer2_inode_data_t *)data)->u.data,
                      bp->b_data, HAMMER2_EMBEDDED_BYTES);
                bzero(bp->b_data + HAMMER2_EMBEDDED_BYTES,
                      bp->b_bcount - HAMMER2_EMBEDDED_BYTES);
                bp->b_resid = 0;
                bp->b_error = 0;
        } else if (focus->bref.type == HAMMER2_BREF_TYPE_DATA) {
                /*
                 * Data is on-media, record for live dedup.  Release the
                 * chain (try to free it) when done.  The data is still
                 * cached by both the buffer cache in front and the
                 * block device behind us.  This leaves more room in the
                 * LRU chain cache for meta-data chains which we really
                 * want to cache.
                 *
                 * NOTE: Deduplication cannot be safely recorded for
                 *       records without a check code.
                 */
                hammer2_dedup_record(focus, NULL, data);
                atomic_set_int(&focus->flags, HAMMER2_CHAIN_RELEASE);

                /*
                 * Decompression and copy.
                 */
                switch (HAMMER2_DEC_COMP(focus->bref.methods)) {
                case HAMMER2_COMP_LZ4:
                        hammer2_decompress_LZ4_callback(data, focus->bytes,
                                                        bio);
                        /* b_resid set by call */
                        break;
                case HAMMER2_COMP_ZLIB:
                        hammer2_decompress_ZLIB_callback(data, focus->bytes,
                                                         bio);
                        /* b_resid set by call */
                        break;
                case HAMMER2_COMP_NONE:
                        KKASSERT(focus->bytes <= bp->b_bcount);
                        bcopy(data, bp->b_data, focus->bytes);
                        if (focus->bytes < bp->b_bcount) {
                                bzero(bp->b_data + focus->bytes,
                                      bp->b_bcount - focus->bytes);
                        }
                        bp->b_resid = 0;
                        bp->b_error = 0;
                        break;
                default:
                        panic("hammer2_strategy_read_completion: "
                              "unknown compression type");
                }
        } else {
                panic("hammer2_strategy_read_completion: unknown bref type");
        }
}

/****************************************************************************
 *                              WRITE SUPPORT                               *
 ****************************************************************************/

/*
 * Functions for compression in threads,
 * from hammer2_vnops.c
 */
static void hammer2_write_file_core(char *data, hammer2_inode_t *ip,
                                hammer2_chain_t **parentp,
                                hammer2_key_t lbase, int ioflag, int pblksize,
                                hammer2_tid_t mtid, int *errorp);
static void hammer2_compress_and_write(char *data, hammer2_inode_t *ip,
                                hammer2_chain_t **parentp,
                                hammer2_key_t lbase, int ioflag, int pblksize,
                                hammer2_tid_t mtid, int *errorp,
                                int comp_algo, int check_algo);
static void hammer2_zero_check_and_write(char *data, hammer2_inode_t *ip,
                                hammer2_chain_t **parentp,
                                hammer2_key_t lbase, int ioflag, int pblksize,
                                hammer2_tid_t mtid, int *errorp,
                                int check_algo);
static int test_block_zeros(const char *buf, size_t bytes);
static void zero_write(char *data, hammer2_inode_t *ip,
                                hammer2_chain_t **parentp,
                                hammer2_key_t lbase,
                                hammer2_tid_t mtid, int *errorp);
static void hammer2_write_bp(hammer2_chain_t *chain, char *data,
                                int ioflag, int pblksize,
                                hammer2_tid_t mtid, int *errorp,
                                int check_algo);

static
int
hammer2_strategy_write(struct vop_strategy_args *ap)
{
        hammer2_xop_strategy_t *xop;
        hammer2_pfs_t *pmp;
        struct bio *bio;
        hammer2_inode_t *ip;

        bio = ap->a_bio;
        ip = VTOI(ap->a_vp);
        pmp = ip->pmp;

        atomic_set_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
        hammer2_lwinprog_ref(pmp);
        hammer2_trans_assert_strategy(pmp);
        hammer2_trans_init(pmp, HAMMER2_TRANS_BUFCACHE);

        xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING |
                                    HAMMER2_XOP_STRATEGY);
        xop->finished = 0;
        xop->bio = bio;
        xop->lbase = bio->bio_offset;
        hammer2_mtx_init(&xop->lock, "h2biow");
        hammer2_xop_start(&xop->head, &hammer2_strategy_write_desc);
        /* asynchronous completion */

        hammer2_lwinprog_wait(pmp, hammer2_flush_pipe);

        return(0);
}
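
/*
 * NOTE: hammer2_flush_pipe above bounds the number of logical write
 *       buffers allowed in-progress.  The lwinprog_ref()/wait() pair
 *       stalls the frontend once the pipe fills, providing
 *       back-pressure against the asynchronous backend threads.
 */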

/*
 * Per-node XOP (threaded).  Write the logical buffer to the media.
 *
 * This is a bit problematic because there may be multiple targets and
 * any of them may be able to release the bp.  In addition, if our
 * particular target is offline we don't want to block the bp (and thus
 * the frontend).  To accomplish this we copy the data to the per-thread
 * scratch buffer.
 */
void
hammer2_xop_strategy_write(hammer2_xop_t *arg, void *scratch, int clindex)
{
        hammer2_xop_strategy_t *xop = &arg->xop_strategy;
        hammer2_chain_t *parent;
        hammer2_key_t lbase;
        hammer2_inode_t *ip;
        struct bio *bio;
        struct buf *bp;
        int error;
        int lblksize;
        int pblksize;
        hammer2_off_t bio_offset;
        char *bio_data;

        /*
         * We can only access the bp/bio if the frontend has not yet
         * completed.
         */
        if (xop->finished)
                return;
        hammer2_mtx_sh(&xop->lock);
        if (xop->finished) {
                hammer2_mtx_unlock(&xop->lock);
                return;
        }

        bio = xop->bio;                 /* ephemeral */
        bp = bio->bio_buf;              /* ephemeral */
        ip = xop->head.ip1;             /* retained by ref */
        bio_offset = bio->bio_offset;
        bio_data = scratch;

        /* hammer2_trans_init(parent->hmp->spmp, HAMMER2_TRANS_BUFCACHE); */

        lblksize = hammer2_calc_logical(ip, bio->bio_offset, &lbase, NULL);
        pblksize = hammer2_calc_physical(ip, lbase);
        KKASSERT(lblksize <= MAXPHYS);
        bcopy(bp->b_data, bio_data, lblksize);

        hammer2_mtx_unlock(&xop->lock);
        bp = NULL;      /* safety, illegal to access after unlock */
        bio = NULL;     /* safety, illegal to access after unlock */

        /*
         * Actual operation
         */
        parent = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
        hammer2_write_file_core(bio_data, ip, &parent,
                                lbase, IO_ASYNC, pblksize,
                                xop->head.mtid, &error);
        if (parent) {
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);
                parent = NULL;  /* safety */
        }
        hammer2_xop_feed(&xop->head, NULL, clindex, error);
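
        /*
         * NOTE: The feed above passes a NULL chain; it only reports this
         *       node's error status to the frontend collector so the
         *       quorum logic (see the XXX at the top of this file) can
         *       resolve the operation.
         */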

        /*
         * Try to complete the operation on behalf of the front-end.
         */
        if (xop->finished)
                return;
        hammer2_mtx_ex(&xop->lock);
        if (xop->finished) {
                hammer2_mtx_unlock(&xop->lock);
                return;
        }

        /*
         * Async operation has not completed and we now own the lock.
         * Determine if we can complete the operation by issuing the
         * frontend collection non-blocking.
         *
         * H2 double-buffers the data, setting B_NOTMETA on the logical
         * buffer hints to the OS that the logical buffer should not be
         * swapcached (since the device buffer can be).
         */
        error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT);

        if (error == HAMMER2_ERROR_EINPROGRESS) {
                hammer2_mtx_unlock(&xop->lock);
                return;
        }

        /*
         * Async operation has completed.
         */
        xop->finished = 1;
        hammer2_mtx_unlock(&xop->lock);

        bio = xop->bio;         /* now owned by us */
        bp = bio->bio_buf;      /* now owned by us */

        if (error == HAMMER2_ERROR_ENOENT || error == 0) {
                bp->b_flags |= B_NOTMETA;
                bp->b_resid = 0;
                bp->b_error = 0;
                biodone(bio);
        } else {
                kprintf("xop_strategy_write: error %d loff=%016jx\n",
                        error, bp->b_loffset);
                bp->b_flags |= B_ERROR;
                bp->b_error = EIO;
                biodone(bio);
        }
        hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
        hammer2_trans_assert_strategy(ip->pmp);
        hammer2_lwinprog_drop(ip->pmp);
        hammer2_trans_done(ip->pmp, HAMMER2_TRANS_BUFCACHE);
}

/*
 * Wait for pending I/O to complete
 */
void
hammer2_bioq_sync(hammer2_pfs_t *pmp)
{
        hammer2_lwinprog_wait(pmp, 0);
}

/*
 * Assign physical storage at (cparent, lbase), returning a suitable chain
 * and setting *errorp appropriately.
 *
 * If no error occurs, the returned chain will be in a modified state.
 *
 * If an error occurs, the returned chain may or may not be NULL.  If
 * not-null any chain->error (if not 0) will also be rolled up into *errorp.
 * So the caller only needs to test *errorp.
 *
 * cparent can wind up being anything.
 *
 * If datap is not NULL, *datap points to the real data we intend to write.
 * If we can dedup the storage location we set *datap to NULL to indicate
 * to the caller that a dedup occurred.
 *
 * NOTE: Special case for data embedded in inode.
 */
static
hammer2_chain_t *
hammer2_assign_physical(hammer2_inode_t *ip, hammer2_chain_t **parentp,
                        hammer2_key_t lbase, int pblksize,
                        hammer2_tid_t mtid, char **datap, int *errorp)
{
        hammer2_chain_t *chain;
        hammer2_key_t key_dummy;
        hammer2_off_t dedup_off;
        int pradix = hammer2_getradix(pblksize);

        /*
         * Locate the chain associated with lbase, return a locked chain.
         * However, do not instantiate any data reference (which utilizes a
         * device buffer) because we will be using direct IO via the
         * logical buffer cache buffer.
         */
        KKASSERT(pblksize >= HAMMER2_ALLOC_MIN);

        chain = hammer2_chain_lookup(parentp, &key_dummy,
                                     lbase, lbase,
                                     errorp,
                                     HAMMER2_LOOKUP_NODATA);

        /*
         * The lookup code should not return a DELETED chain to us, unless
         * it's a short-file embedded in the inode.  Then it is possible for
         * the lookup to return a deleted inode.
         */
        if (chain && (chain->flags & HAMMER2_CHAIN_DELETED) &&
            chain->bref.type != HAMMER2_BREF_TYPE_INODE) {
                kprintf("assign physical deleted chain @ "
                        "%016jx (%016jx.%02x) ip %016jx\n",
                        lbase, chain->bref.data_off, chain->bref.type,
                        (intmax_t)ip->meta.inum);
        }

        if (chain == NULL) {
                /*
                 * We found a hole, create a new chain entry.
                 *
                 * NOTE: DATA chains are created without device backing
                 *       store (nor do we want any).
                 */
                dedup_off = hammer2_dedup_lookup((*parentp)->hmp, datap,
                                                 pblksize);
                *errorp |= hammer2_chain_create(parentp, &chain, NULL, ip->pmp,
                               HAMMER2_ENC_CHECK(ip->meta.check_algo) |
                               HAMMER2_ENC_COMP(HAMMER2_COMP_NONE),
                               lbase, HAMMER2_PBUFRADIX,
                               HAMMER2_BREF_TYPE_DATA,
                               pblksize, mtid,
                               dedup_off, 0);
                /*ip->delta_dcount += pblksize;*/
        } else if (chain->error == 0) {
                switch (chain->bref.type) {
                case HAMMER2_BREF_TYPE_INODE:
                        /*
                         * The data is embedded in the inode, which requires
                         * a bit more finesse.
                         */
                        *errorp |= hammer2_chain_modify_ip(ip, chain, mtid, 0);
                        break;
                case HAMMER2_BREF_TYPE_DATA:
                        dedup_off = hammer2_dedup_lookup(chain->hmp, datap,
                                                         pblksize);
                        if (chain->bytes != pblksize) {
                                *errorp |= hammer2_chain_resize(chain,
                                                mtid, dedup_off,
                                                pradix,
                                                HAMMER2_MODIFY_OPTDATA);
                                if (*errorp)
                                        break;
                        }

                        /*
                         * DATA buffers must be marked modified whether the
                         * data is in a logical buffer or not.  We also have
                         * to make this call to fixup the chain data pointers
                         * after resizing in case this is an encrypted or
                         * compressed buffer.
                         */
                        *errorp |= hammer2_chain_modify(chain, mtid, dedup_off,
                                                        HAMMER2_MODIFY_OPTDATA);
                        break;
                default:
                        panic("hammer2_assign_physical: bad type");
                        /* NOT REACHED */
                        break;
                }
        } else {
                *errorp = chain->error;
        }
        atomic_set_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
        return chain;
}

/*
 * hammer2_write_file_core()
 *
 * The core write function which determines which path to take
 * depending on compression settings.  We also have to locate the
 * related chains so we can calculate and set the check data for
 * the blockref.
 */
static
void
hammer2_write_file_core(char *data, hammer2_inode_t *ip,
                        hammer2_chain_t **parentp,
                        hammer2_key_t lbase, int ioflag, int pblksize,
                        hammer2_tid_t mtid, int *errorp)
{
        hammer2_chain_t *chain;
        char *bdata;

        *errorp = 0;

        switch(HAMMER2_DEC_ALGO(ip->meta.comp_algo)) {
        case HAMMER2_COMP_NONE:
                /*
                 * We have to assign physical storage to the buffer
                 * we intend to dirty or write now to avoid deadlocks
                 * in the strategy code later.
                 *
                 * This can return NOOFFSET for inode-embedded data.
                 * The strategy code will take care of it in that case.
                 */
                bdata = data;
                chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
                                                mtid, &bdata, errorp);
                if (*errorp) {
                        /* skip modifications */
                } else if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
                        hammer2_inode_data_t *wipdata;

                        wipdata = &chain->data->ipdata;
                        KKASSERT(wipdata->meta.op_flags &
                                 HAMMER2_OPFLAG_DIRECTDATA);
                        bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
                        ++hammer2_iod_file_wembed;
                } else if (bdata == NULL) {
                        /*
                         * Copy of data already present on-media.
                         */
                        chain->bref.methods =
                                HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
                                HAMMER2_ENC_CHECK(ip->meta.check_algo);
                        hammer2_chain_setcheck(chain, data);
                        atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
                } else {
                        hammer2_write_bp(chain, data, ioflag, pblksize,
                                         mtid, errorp, ip->meta.check_algo);
                }
                if (chain) {
                        hammer2_chain_unlock(chain);
                        hammer2_chain_drop(chain);
                }
                break;
        case HAMMER2_COMP_AUTOZERO:
                /*
                 * Check for zero-fill only
                 */
                hammer2_zero_check_and_write(data, ip, parentp,
                                             lbase, ioflag, pblksize,
                                             mtid, errorp,
                                             ip->meta.check_algo);
                break;
        case HAMMER2_COMP_LZ4:
        case HAMMER2_COMP_ZLIB:
        default:
                /*
                 * Check for zero-fill and attempt compression.
                 */
                hammer2_compress_and_write(data, ip, parentp,
                                           lbase, ioflag, pblksize,
                                           mtid, errorp,
                                           ip->meta.comp_algo,
                                           ip->meta.check_algo);
                break;
        }
}

/*
 * Generic function that will perform the compression in compression
 * write path.  The compression algorithm is determined by the settings
 * obtained from inode.
 */
static
void
hammer2_compress_and_write(char *data, hammer2_inode_t *ip,
                           hammer2_chain_t **parentp,
                           hammer2_key_t lbase, int ioflag, int pblksize,
                           hammer2_tid_t mtid, int *errorp,
                           int comp_algo, int check_algo)
{
        hammer2_chain_t *chain;
        int comp_size;
        int comp_block_size;
        char *comp_buffer;
        char *bdata;

        /*
         * An all-zeros write creates a hole unless the check code
         * is disabled.  When the check code is disabled all writes
         * are done in-place, including any all-zeros writes.
         *
         * NOTE: A snapshot will still force a copy-on-write
         *       (see the HAMMER2_CHECK_NONE in hammer2_chain.c).
         */
        if (check_algo != HAMMER2_CHECK_NONE &&
            test_block_zeros(data, pblksize)) {
                zero_write(data, ip, parentp, lbase, mtid, errorp);
                return;
        }

        /*
         * Compression requested.  Try to compress the block.  We store
         * the data normally if we cannot sufficiently compress it.
         *
         * We have a heuristic to detect files which are mostly
         * uncompressable and avoid the compression attempt in that
         * case.  If the compression heuristic is turned off, we always
         * try to compress.
         */
        comp_size = 0;
        comp_buffer = NULL;

        KKASSERT(pblksize / 2 <= 32768);

        if (ip->comp_heuristic < 8 || (ip->comp_heuristic & 7) == 0 ||
            hammer2_always_compress) {
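                /*
                 * comp_heuristic encoding, as used above: values 0-7 mean
                 * recent writes compressed well, so always attempt
                 * compression.  After repeated failures the counter runs
                 * from 8 up to 128 and only every 8th block (low 3 bits
                 * == 0) is test-compressed, capping the CPU cost on
                 * mostly-uncompressable files.  A success resets the
                 * counter to 0 (see below).
                 */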
                z_stream strm_compress;
                int comp_level;
                int ret;

                switch(HAMMER2_DEC_ALGO(comp_algo)) {
                case HAMMER2_COMP_LZ4:
                        /*
                         * We need to prefix with the size, LZ4
                         * doesn't do it for us.  Add the related
                         * overhead.
                         *
                         * NOTE: The LZ4 code seems to assume at least an
                         *       8-byte buffer size granularity and may
                         *       overrun the buffer if given a 4-byte
                         *       aligned size.
                         */
                        comp_buffer = objcache_get(cache_buffer_write,
                                                   M_INTWAIT);
                        comp_size = LZ4_compress_limitedOutput(
                                        data,
                                        &comp_buffer[sizeof(int)],
                                        pblksize,
                                        pblksize / 2 - sizeof(int64_t));
                        *(int *)comp_buffer = comp_size;
                        if (comp_size)
                                comp_size += sizeof(int);
                        break;
                case HAMMER2_COMP_ZLIB:
                        comp_level = HAMMER2_DEC_LEVEL(comp_algo);
                        if (comp_level == 0)
                                comp_level = 6; /* default zlib compression */
                        else if (comp_level < 6)
                                comp_level = 6;
                        else if (comp_level > 9)
                                comp_level = 9;
                        ret = deflateInit(&strm_compress, comp_level);
                        if (ret != Z_OK) {
                                kprintf("HAMMER2 ZLIB: fatal error "
                                        "on deflateInit.\n");
                        } else {
                                comp_buffer = objcache_get(cache_buffer_write,
                                                           M_INTWAIT);
                                strm_compress.next_in = data;
                                strm_compress.avail_in = pblksize;
                                strm_compress.next_out = comp_buffer;
                                strm_compress.avail_out = pblksize / 2;
                                ret = deflate(&strm_compress, Z_FINISH);
                                if (ret == Z_STREAM_END) {
                                        comp_size = pblksize / 2 -
                                                    strm_compress.avail_out;
                                } else {
                                        comp_size = 0;
                                }
                                ret = deflateEnd(&strm_compress);
                        }
                        break;
                default:
                        kprintf("Error: Unknown compression method.\n");
                        kprintf("Comp_method = %d.\n", comp_algo);
                        break;
                }
        }

        if (comp_size == 0) {
                /*
                 * compression failed or turned off
                 */
                comp_block_size = pblksize;     /* safety */
                if (++ip->comp_heuristic > 128)
                        ip->comp_heuristic = 8;
        } else {
                /*
                 * compression succeeded
                 */
                ip->comp_heuristic = 0;
                if (comp_size <= 1024) {
                        comp_block_size = 1024;
                } else if (comp_size <= 2048) {
                        comp_block_size = 2048;
                } else if (comp_size <= 4096) {
                        comp_block_size = 4096;
                } else if (comp_size <= 8192) {
                        comp_block_size = 8192;
                } else if (comp_size <= 16384) {
                        comp_block_size = 16384;
                } else if (comp_size <= 32768) {
                        comp_block_size = 32768;
                } else {
                        panic("hammer2: WRITE PATH: "
                              "Weird comp_size value.");
                        /* NOT REACHED */
                        comp_block_size = pblksize;
                }

                /*
                 * Must zero the remainder or dedup (which operates on a
                 * physical block basis) will not find matches.
                 */
                if (comp_size < comp_block_size) {
                        bzero(comp_buffer + comp_size,
                              comp_block_size - comp_size);
                }
        }
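
        /*
         * Example: a comp_size of 5000 bytes selects an 8192-byte
         * physical block above, and bytes 5000-8191 of comp_buffer are
         * zeroed so that a later bcmp() against another copy of the
         * same logical data matches across the whole physical block.
         */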

        /*
         * Assign physical storage, bdata will be set to NULL if a live-dedup
         * was successful.
         */
        bdata = comp_size ? comp_buffer : data;
        chain = hammer2_assign_physical(ip, parentp, lbase, comp_block_size,
                                        mtid, &bdata, errorp);

        if (*errorp) {
                goto done;
        }

        if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
                hammer2_inode_data_t *wipdata;

                *errorp = hammer2_chain_modify_ip(ip, chain, mtid, 0);
                if (*errorp == 0) {
                        wipdata = &chain->data->ipdata;
                        KKASSERT(wipdata->meta.op_flags &
                                 HAMMER2_OPFLAG_DIRECTDATA);
                        bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
                        ++hammer2_iod_file_wembed;
                }
        } else if (bdata == NULL) {
                /*
                 * Live deduplication, a copy of the data is already present
                 * on the media.
                 */
                if (comp_size) {
                        chain->bref.methods =
                                HAMMER2_ENC_COMP(comp_algo) +
                                HAMMER2_ENC_CHECK(check_algo);
                } else {
                        chain->bref.methods =
                                HAMMER2_ENC_COMP(
                                        HAMMER2_COMP_NONE) +
                                HAMMER2_ENC_CHECK(check_algo);
                }
                bdata = comp_size ? comp_buffer : data;
                hammer2_chain_setcheck(chain, bdata);
                atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
        } else {
                hammer2_io_t *dio;

                KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);
                switch(chain->bref.type) {
                case HAMMER2_BREF_TYPE_INODE:
                        panic("hammer2_compress_and_write: unexpected inode\n");
                        break;
                case HAMMER2_BREF_TYPE_DATA:
                        /*
                         * Optimize out the read-before-write
                         * if possible.
                         */
                        *errorp = hammer2_io_newnz(chain->hmp,
                                                   chain->bref.type,
                                                   chain->bref.data_off,
                                                   chain->bytes,
                                                   &dio);
                        if (*errorp) {
                                hammer2_io_brelse(&dio);
                                kprintf("hammer2: WRITE PATH: "
                                        "dbp bread error\n");
                                break;
                        }
                        bdata = hammer2_io_data(dio, chain->bref.data_off);

                        /*
                         * When loading the block make sure we don't
                         * leave garbage after the compressed data.
                         */
                        if (comp_size) {
                                chain->bref.methods =
                                        HAMMER2_ENC_COMP(comp_algo) +
                                        HAMMER2_ENC_CHECK(check_algo);
                                bcopy(comp_buffer, bdata, comp_block_size);
                        } else {
                                chain->bref.methods =
                                        HAMMER2_ENC_COMP(
                                                HAMMER2_COMP_NONE) +
                                        HAMMER2_ENC_CHECK(check_algo);
                                bcopy(data, bdata, pblksize);
                        }

                        /*
                         * The flush code doesn't calculate check codes for
                         * file data (doing so can result in excessive I/O),
                         * so we do it here.
                         */
                        hammer2_chain_setcheck(chain, bdata);

                        /*
                         * Device buffer is now valid, chain is no longer in
                         * the initial state.
                         *
                         * (No blockref table worries with file data)
                         */
                        atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
                        hammer2_dedup_record(chain, dio, bdata);

                        /* Now write the related bdp. */
                        if (ioflag & IO_SYNC) {
                                /*
                                 * Synchronous I/O requested.
                                 */
                                hammer2_io_bwrite(&dio);
                        /*
                        } else if ((ioflag & IO_DIRECT) &&
                                   loff + n == pblksize) {
                                hammer2_io_bdwrite(&dio);
                        */
                        } else if (ioflag & IO_ASYNC) {
                                hammer2_io_bawrite(&dio);
                        } else {
                                hammer2_io_bdwrite(&dio);
                        }
                        break;
                default:
                        panic("hammer2_compress_and_write: bad chain type %d\n",
                                chain->bref.type);
                        /* NOT REACHED */
                        break;
                }
        }
done:
        if (chain) {
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
        }
        if (comp_buffer)
                objcache_put(cache_buffer_write, comp_buffer);
}

/*
 * Function that performs zero-checking and writing without compression,
 * it corresponds to default zero-checking path.
 */
static
void
hammer2_zero_check_and_write(char *data, hammer2_inode_t *ip,
                             hammer2_chain_t **parentp,
                             hammer2_key_t lbase, int ioflag, int pblksize,
                             hammer2_tid_t mtid, int *errorp,
                             int check_algo)
{
        hammer2_chain_t *chain;
        char *bdata;

        if (check_algo != HAMMER2_CHECK_NONE &&
            test_block_zeros(data, pblksize)) {
                /*
                 * An all-zeros write creates a hole unless the check code
                 * is disabled.  When the check code is disabled all writes
                 * are done in-place, including any all-zeros writes.
                 *
                 * NOTE: A snapshot will still force a copy-on-write
                 *       (see the HAMMER2_CHECK_NONE in hammer2_chain.c).
                 */
                zero_write(data, ip, parentp, lbase, mtid, errorp);
        } else {
                /*
                 * Normal write (bdata set to NULL if de-duplicated)
                 */
                bdata = data;
                chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
                                                mtid, &bdata, errorp);
                if (*errorp) {
                        /* do nothing */
                } else if (bdata) {
                        hammer2_write_bp(chain, data, ioflag, pblksize,
                                         mtid, errorp, check_algo);
                } else {
                        /* dedup occurred */
                        chain->bref.methods =
                                HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
                                HAMMER2_ENC_CHECK(check_algo);
                        hammer2_chain_setcheck(chain, data);
                        atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
                }
                if (chain) {
                        hammer2_chain_unlock(chain);
                        hammer2_chain_drop(chain);
                }
        }
}

/*
 * A function to test whether a block of data contains only zeros,
 * returns TRUE (non-zero) if the block is all zeros.
 */
static
int
test_block_zeros(const char *buf, size_t bytes)
{
        size_t i;

        for (i = 0; i < bytes; i += sizeof(long)) {
                if (*(const long *)(buf + i) != 0)
                        return (0);
        }
        return (1);
}

/*
 * Function to "write" a block that contains only zeros.
 */
static
void
zero_write(char *data, hammer2_inode_t *ip,
           hammer2_chain_t **parentp,
           hammer2_key_t lbase, hammer2_tid_t mtid, int *errorp)
{
        hammer2_chain_t *chain;
        hammer2_key_t key_dummy;

        chain = hammer2_chain_lookup(parentp, &key_dummy,
                                     lbase, lbase,
                                     errorp,
                                     HAMMER2_LOOKUP_NODATA);
        if (chain) {
                if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
                        hammer2_inode_data_t *wipdata;

                        *errorp = hammer2_chain_modify_ip(ip, chain,
                                                          mtid, 0);
                        if (*errorp == 0) {
                                wipdata = &chain->data->ipdata;
                                KKASSERT(wipdata->meta.op_flags &
                                         HAMMER2_OPFLAG_DIRECTDATA);
                                bzero(wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
                                ++hammer2_iod_file_wembed;
                        }
                } else {
                        /* chain->error ok for deletion */
                        hammer2_chain_delete(*parentp, chain,
                                             mtid, HAMMER2_DELETE_PERMANENT);
                        ++hammer2_iod_file_wzero;
                }
                atomic_set_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
        } else {
                ++hammer2_iod_file_wzero;
        }
}
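
/*
 * NOTE: For non-embedded data the zero "write" above is a deletion;
 *       the block simply ceases to exist on-media.  The read side maps
 *       such holes to HAMMER2_ERROR_ENOENT and bzero()s the logical
 *       buffer (see hammer2_xop_strategy_read() above).
 */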

/*
 * Function to write the data as it is, without performing any sort of
 * compression.  This function is used in path without compression and
 * default zero-checking path.
 */
static
void
hammer2_write_bp(hammer2_chain_t *chain, char *data, int ioflag,
                 int pblksize,
                 hammer2_tid_t mtid, int *errorp, int check_algo)
{
        hammer2_inode_data_t *wipdata;
        hammer2_io_t *dio;
        char *bdata;
        int error;

        error = 0;      /* XXX TODO below */

        KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);

        switch(chain->bref.type) {
        case HAMMER2_BREF_TYPE_INODE:
                wipdata = &chain->data->ipdata;
                KKASSERT(wipdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA);
                bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
                error = 0;
                ++hammer2_iod_file_wembed;
                break;
        case HAMMER2_BREF_TYPE_DATA:
                error = hammer2_io_newnz(chain->hmp,
                                         chain->bref.type,
                                         chain->bref.data_off,
                                         chain->bytes, &dio);
                if (error) {
                        hammer2_io_bqrelse(&dio);
                        kprintf("hammer2: WRITE PATH: "
                                "dbp bread error\n");
                        break;
                }
                bdata = hammer2_io_data(dio, chain->bref.data_off);

                chain->bref.methods = HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
                                      HAMMER2_ENC_CHECK(check_algo);
                bcopy(data, bdata, chain->bytes);

                /*
                 * The flush code doesn't calculate check codes for
                 * file data (doing so can result in excessive I/O),
                 * so we do it here.
                 */
                hammer2_chain_setcheck(chain, bdata);

                /*
                 * Device buffer is now valid, chain is no longer in
                 * the initial state.
                 *
                 * (No blockref table worries with file data)
                 */
                atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
                hammer2_dedup_record(chain, dio, bdata);

                if (ioflag & IO_SYNC) {
                        /*
                         * Synchronous I/O requested.
                         */
                        hammer2_io_bwrite(&dio);
                /*
                } else if ((ioflag & IO_DIRECT) &&
                           loff + n == pblksize) {
                        hammer2_io_bdwrite(&dio);
                */
                } else if (ioflag & IO_ASYNC) {
                        hammer2_io_bawrite(&dio);
                } else {
                        hammer2_io_bdwrite(&dio);
                }
                break;
        default:
                panic("hammer2_write_bp: bad chain type %d\n",
                      chain->bref.type);
                /* NOT REACHED */
                break;
        }
        *errorp = error;
}

/*
 * LIVE DEDUP HEURISTICS
 *
 * Record media and crc information for possible dedup operation.  Note
 * that the dedup mask bits must also be set in the related DIO for a dedup
 * to be fully validated (which is handled in the freemap allocation code).
 *
 * WARNING! This code is SMP safe but the heuristic allows SMP collisions.
 *          All fields must be loaded into locals and validated.
 *
 * WARNING! Should only be used for file data and directory entries,
 *          hammer2_chain_modify() only checks for the dedup case on data
 *          chains.  Also, dedup data can only be recorded for committed
 *          chains (so NOT strategy writes which can undergo further
 *          modification after the fact!).
 */
void
hammer2_dedup_record(hammer2_chain_t *chain, hammer2_io_t *dio,
                     const char *data)
{
        hammer2_dev_t *hmp;
        hammer2_dedup_t *dedup;
        uint64_t crc;
        uint64_t mask;
        int best = 0;
        int i;
        int dticks;

        /*
         * We can only record a dedup if we have media data to test against.
         * If dedup is not enabled, return early, which allows a chain to
         * remain marked MODIFIED (which might have benefits in special
         * situations, though typically it does not).
         */
        if (hammer2_dedup_enable == 0)
                return;
        if (dio == NULL) {
                dio = chain->dio;
                if (dio == NULL)
                        return;
        }

        hmp = chain->hmp;

        switch(HAMMER2_DEC_CHECK(chain->bref.methods)) {
        case HAMMER2_CHECK_ISCSI32:
                /*
                 * XXX use the built-in crc (the dedup lookup sequencing
                 * needs to be fixed so the check code is already present
                 * when dedup_lookup is called)
                 */
#if 0
                crc = (uint64_t)(uint32_t)chain->bref.check.iscsi32.value;
#endif
                crc = XXH64(data, chain->bytes, XXH_HAMMER2_SEED);
                break;
        case HAMMER2_CHECK_XXHASH64:
                crc = chain->bref.check.xxhash64.value;
                break;
        case HAMMER2_CHECK_SHA192:
                /*
                 * XXX use the built-in crc (the dedup lookup sequencing
                 * needs to be fixed so the check code is already present
                 * when dedup_lookup is called)
                 */
#if 0
                crc = ((uint64_t *)chain->bref.check.sha192.data)[0] ^
                      ((uint64_t *)chain->bref.check.sha192.data)[1] ^
                      ((uint64_t *)chain->bref.check.sha192.data)[2];
#endif
                crc = XXH64(data, chain->bytes, XXH_HAMMER2_SEED);
                break;
        default:
                /*
                 * Cannot dedup without a check code
                 *
                 * NOTE: In particular, CHECK_NONE allows a sector to be
                 *       overwritten without copy-on-write, recording
                 *       a dedup block for a CHECK_NONE object would be
                 *       a disaster!
                 */
                return;
        }

        atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEDUPABLE);

        dedup = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)];
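
        /*
         * Slot selection sketch: masking the low 2 bits off the 64-bit
         * hash maps each crc onto a 4-entry bucket, e.g. a crc ending
         * in binary ...1011 indexes bucket base ...1000 and the loop
         * below scans entries +0 through +3.  A matching crc reuses its
         * slot; otherwise the oldest slot is evicted (the hz * 60 * 30
         * test also catches tick wraparound).
         */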
        for (i = 0; i < 4; ++i) {
                if (dedup[i].data_crc == crc) {
                        best = i;
                        break;
                }
                dticks = (int)(dedup[i].ticks - dedup[best].ticks);
                if (dticks < 0 || dticks > hz * 60 * 30)
                        best = i;
        }
        dedup += best;
        if (hammer2_debug & 0x40000) {
                kprintf("REC %04x %016jx %016jx\n",
                        (int)(dedup - hmp->heur_dedup),
                        crc,
                        chain->bref.data_off);
        }
        dedup->ticks = ticks;
        dedup->data_off = chain->bref.data_off;
        dedup->data_crc = crc;

        /*
         * Set the valid bits for the dedup only after we know the data
         * buffer has been updated.  The alloc bits were set (and the valid
         * bits cleared) when the media was allocated.
         *
         * This is done in two stages because the bulkfree code can race
         * the gap between allocation and data population.  Both masks must
         * be set before a bcmp/dedup operation is able to use the block.
         */
        mask = hammer2_dedup_mask(dio, chain->bref.data_off, chain->bytes);
        atomic_set_64(&dio->dedup_valid, mask);

        /*
         * XXX removed.  MODIFIED is an integral part of the flush code,
         * let's not just clear it.
         */
#if 0
        /*
         * Once we record the dedup the chain must be marked clean to
         * prevent reuse of the underlying block.  Remember that this
         * write occurs when the buffer cache is flushed (i.e. on sync(),
         * fsync(), filesystem periodic sync, or when the kernel needs to
         * flush a buffer), and not whenever the user write()s.
         */
        if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
                atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
                atomic_add_long(&hammer2_count_modified_chains, -1);
                if (chain->pmp)
                        hammer2_pfs_memory_wakeup(chain->pmp, -1);
        }
#endif
}

static
hammer2_off_t
hammer2_dedup_lookup(hammer2_dev_t *hmp, char **datap, int pblksize)
{
        hammer2_dedup_t *dedup;
        hammer2_io_t *dio;
        hammer2_off_t off;
        uint64_t crc;
        uint64_t mask;
        char *data;
        char *dtmp;
        int i;

        if (hammer2_dedup_enable == 0)
                return 0;
        data = *datap;
        if (data == NULL)
                return 0;

        /*
         * XXX use the built-in crc (the dedup lookup sequencing
         * needs to be fixed so the check code is already present
         * when dedup_lookup is called)
         */
        crc = XXH64(data, pblksize, XXH_HAMMER2_SEED);
        dedup = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)];

        if (hammer2_debug & 0x40000) {
                kprintf("LOC %04x/4 %016jx\n",
                        (int)(dedup - hmp->heur_dedup),
                        crc);
        }

        for (i = 0; i < 4; ++i) {
                off = dedup[i].data_off;
                cpu_ccfence();
                if (dedup[i].data_crc != crc)
                        continue;
                if ((1 << (int)(off & HAMMER2_OFF_MASK_RADIX)) != pblksize)
                        continue;
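                /*
                 * The low bits of data_off (HAMMER2_OFF_MASK_RADIX)
                 * encode the block size as a power-of-2 radix, e.g. a
                 * radix of 14 denotes a 1<<14 = 16KB physical block;
                 * candidates whose size does not match the requested
                 * pblksize are skipped above.
                 */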
                dio = hammer2_io_getquick(hmp, off, pblksize);
                if (dio) {
                        dtmp = hammer2_io_data(dio, off);
                        mask = hammer2_dedup_mask(dio, off, pblksize);
                        if ((dio->dedup_alloc & mask) == mask &&
                            (dio->dedup_valid & mask) == mask &&
                            bcmp(data, dtmp, pblksize) == 0) {
                                if (hammer2_debug & 0x40000) {
                                        kprintf("DEDUP SUCCESS %016jx\n",
                                                (intmax_t)off);
                                }
                                hammer2_io_putblk(&dio);
                                *datap = NULL;
                                dedup[i].ticks = ticks; /* update use */
                                atomic_add_long(&hammer2_iod_file_wdedup,
                                                pblksize);
                                return off;     /* RETURN */
                        }
                        hammer2_io_putblk(&dio);
                }
        }
        return 0;
}

/*
 * Poof.  Races are ok, if someone gets in and reuses a dedup offset
 * before or while we are clearing it they will also recover the freemap
 * entry (set it to fully allocated), so a bulkfree race can only set it
 * to a possibly-free state.
 *
 * XXX ok, well, not really sure races are ok but going to run with it
 *     for the moment.
 */
void
hammer2_dedup_clear(hammer2_dev_t *hmp)
{
        int i;

        for (i = 0; i < HAMMER2_DEDUP_HEUR_SIZE; ++i) {
                hmp->heur_dedup[i].data_off = 0;
                hmp->heur_dedup[i].ticks = ticks - 1;
        }
}