2 * Copyright (c) 2011-2015 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@dragonflybsd.org>
6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in
17 * the documentation and/or other materials provided with the
19 * 3. Neither the name of The DragonFly Project nor the names of its
20 * contributors may be used to endorse or promote products derived
21 * from this software without specific, prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * This module handles low level logical file I/O (strategy) which backs
38 * the logical buffer cache.
40 * [De]compression, zero-block, check codes, and buffer cache operations
41 * for file data are handled here.
43 * Live dedup makes its home here as well.
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/fcntl.h>
52 #include <sys/namei.h>
53 #include <sys/mount.h>
54 #include <sys/vnode.h>
55 #include <sys/mountctl.h>
56 #include <sys/dirent.h>
58 #include <sys/objcache.h>
59 #include <sys/event.h>
61 #include <vfs/fifofs/fifo.h>
64 #include "hammer2_lz4.h"
66 #include "zlib/hammer2_zlib.h"
68 struct objcache *cache_buffer_read;
69 struct objcache *cache_buffer_write;
72 * Strategy code (async logical file buffer I/O from system)
74 * Except for the transaction init (which should normally not block),
75 * we essentially run the strategy operation asynchronously via a XOP.
77 * XXX This isn't supposed to be able to deadlock against vfs_sync vfsync()
78 * calls but it has in the past when multiple flushes are queued.
80 * XXX We currently terminate the transaction once we get a quorum, otherwise
81 * the frontend can stall, but this can leave the remaining nodes with
82 * a potential flush conflict. We need to delay flushes on those nodes
83 * until running transactions complete separately from the normal
84 * transaction sequencing. FIXME TODO.
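 *
 * Editorial summary (not in the original header): the pattern used below is
 * that the frontend (hammer2_strategy_read/write) allocates an xop, records
 * the bio, and fires off the per-node backends; each backend
 * (hammer2_strategy_xop_read/write) feeds its result into the xop and then,
 * roughly, the first backend to get through xop->lock after a successful
 * collection is the one that completes the frontend bp.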
86 static void hammer2_strategy_xop_read(hammer2_thread_t *thr,
88 static void hammer2_strategy_xop_write(hammer2_thread_t *thr,
90 static int hammer2_strategy_read(struct vop_strategy_args *ap);
91 static int hammer2_strategy_write(struct vop_strategy_args *ap);
92 static void hammer2_strategy_read_completion(hammer2_chain_t *chain,
93 char *data, struct bio *bio);
95 static hammer2_off_t hammer2_dedup_lookup(hammer2_dev_t *hmp,
96 char **datap, int pblksize);
102 #define TIMER(which) do { \
104 h2timer[h2lid] += (int)(ticks - h2last);\
110 hammer2_vop_strategy(struct vop_strategy_args *ap)
121 error = hammer2_strategy_read(ap);
122 ++hammer2_iod_file_read;
125 error = hammer2_strategy_write(ap);
126 ++hammer2_iod_file_write;
129 bp->b_error = error = EINVAL;
130 bp->b_flags |= B_ERROR;
138 * Return the largest contiguous physical disk range for the logical
141 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
143 * Basically disabled, the logical buffer write thread has to deal with
144 * buffers one-at-a-time. Note that this should not prevent cluster_read()
145 * from reading-ahead, it simply prevents it from trying to form a single
146 * cluster buffer for the logical request. H2 already uses 64KB buffers!
149 hammer2_vop_bmap(struct vop_bmap_args *ap)
151 *ap->a_doffsetp = NOOFFSET;
159 /****************************************************************************
161 ****************************************************************************/
163 * Callback used in the read path when a block is compressed with LZ4.
167 hammer2_decompress_LZ4_callback(const char *data, u_int bytes, struct bio *bio)
170 char *compressed_buffer;
/*
177 if (bio->bio_caller_info2.index &&
178     bio->bio_caller_info1.uvalue32 !=
179     crc32(bp->b_data, bp->b_bufsize) --- return error
*/
182 KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
183 compressed_size = *(const int *)data;
184 KKASSERT((uint32_t)compressed_size <= bytes - sizeof(int));
186 compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
187 result = LZ4_decompress_safe(__DECONST(char *, &data[sizeof(int)]),
192 kprintf("READ PATH: Error during decompression."
194 (intmax_t)bio->bio_offset, bytes);
195 /* make sure it isn't random garbage */
196 bzero(compressed_buffer, bp->b_bufsize);
198 KKASSERT(result <= bp->b_bufsize);
199 bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
200 if (result < bp->b_bufsize)
201 bzero(bp->b_data + result, bp->b_bufsize - result);
202 objcache_put(cache_buffer_read, compressed_buffer);
204 bp->b_flags |= B_AGE;
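/*
 * Editorial sketch (not part of the original file): the compression write
 * path prefixes each LZ4 block with an int holding the compressed length,
 * because LZ4 itself does not record it, and the callback above consumes
 * that prefix.  Assuming the LZ4_decompress_safe() prototype from
 * hammer2_lz4.h, decoding the on-media layout reduces to:
 */
#if 0
static int
example_lz4_decode(const char *media, char *out, int outsize)
{
	int csize = *(const int *)media;	/* length prefix */

	/* compressed payload begins right after the length prefix */
	return (LZ4_decompress_safe(__DECONST(char *, media + sizeof(int)),
				    out, csize, outsize));
}
#endif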
208 * Callback used in the read path when a block is compressed with ZLIB.
209 * It is almost identical to the LZ4 callback, so in theory the two could
210 * be unified, but we did not want to change the bio structure for that.
214 hammer2_decompress_ZLIB_callback(const char *data, u_int bytes, struct bio *bio)
217 char *compressed_buffer;
218 z_stream strm_decompress;
224 KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
225 strm_decompress.avail_in = 0;
226 strm_decompress.next_in = Z_NULL;
228 ret = inflateInit(&strm_decompress);
231 kprintf("HAMMER2 ZLIB: Fatal error in inflateInit.\n");
233 compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
234 strm_decompress.next_in = __DECONST(char *, data);
236 /* XXX supply proper size, subset of device bp */
237 strm_decompress.avail_in = bytes;
238 strm_decompress.next_out = compressed_buffer;
239 strm_decompress.avail_out = bp->b_bufsize;
241 ret = inflate(&strm_decompress, Z_FINISH);
242 if (ret != Z_STREAM_END) {
243 kprintf("HAMMER2 ZLIB: Fatal error during decompression.\n");
244 bzero(compressed_buffer, bp->b_bufsize);
246 bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
247 result = bp->b_bufsize - strm_decompress.avail_out;
248 if (result < bp->b_bufsize)
249 bzero(bp->b_data + result, strm_decompress.avail_out);
250 objcache_put(cache_buffer_read, compressed_buffer);
251 ret = inflateEnd(&strm_decompress);
254 bp->b_flags |= B_AGE;
258 * Logical buffer I/O, async read.
262 hammer2_strategy_read(struct vop_strategy_args *ap)
264 hammer2_xop_strategy_t *xop;
274 nbio = push_bio(bio);
276 lbase = bio->bio_offset;
277 KKASSERT(((int)lbase & HAMMER2_PBUFMASK) == 0);
279 xop = hammer2_xop_alloc(ip, HAMMER2_XOP_STRATEGY);
283 hammer2_mtx_init(&xop->lock, "h2bior");
284 hammer2_xop_start(&xop->head, hammer2_strategy_xop_read);
285 /* asynchronous completion */
291 * Per-node XOP (threaded), do a synchronous lookup of the chain and
292 * its data. The frontend is asynchronous, so we are also responsible
293 * for racing to terminate the frontend.
297 hammer2_strategy_xop_read(hammer2_thread_t *thr, hammer2_xop_t *arg)
299 hammer2_xop_strategy_t *xop = &arg->xop_strategy;
300 hammer2_chain_t *parent;
301 hammer2_chain_t *chain;
302 hammer2_key_t key_dummy;
306 int cache_index = -1;
310 * Note that we can race completion of the bio supplied by
311 * the front-end so we cannot access it until we determine
312 * that we are the ones finishing it up.
318 * This is difficult to optimize. The logical buffer might be
319 * partially dirty (contain dummy zero-fill pages), which would
320 * mess up our crc calculation if we were to try a direct read.
321 * So for now we always double-buffer through the underlying
324 * If not for the above problem we could conditionalize on
325 * (1) 64KB buffer, (2) one chain (not multi-master) and
326 * (3) !hammer2_double_buffer, and issue a direct read into the
329 parent = hammer2_inode_chain(xop->head.ip1, thr->clindex,
330 HAMMER2_RESOLVE_ALWAYS |
331 HAMMER2_RESOLVE_SHARED);
334 chain = hammer2_chain_lookup(&parent, &key_dummy,
337 HAMMER2_LOOKUP_ALWAYS |
338 HAMMER2_LOOKUP_SHARED);
339 error = chain ? chain->error : 0;
345 error = hammer2_xop_feed(&xop->head, chain, thr->clindex, error);
348 hammer2_chain_unlock(chain);
349 hammer2_chain_drop(chain);
352 hammer2_chain_unlock(parent);
353 hammer2_chain_drop(parent);
355 chain = NULL; /* safety */
356 parent = NULL; /* safety */
360 * Race to finish the frontend. First-to-complete. bio is only
361 * valid if we are determined to be the ones able to complete
366 hammer2_mtx_ex(&xop->lock);
368 hammer2_mtx_unlock(&xop->lock);
375 * Async operation has not completed and we now own the lock.
376 * Determine if we can complete the operation by issuing the
377 * frontend collection non-blocking.
379 * H2 double-buffers the data, setting B_NOTMETA on the logical
380 * buffer hints to the OS that the logical buffer should not be
381 * swapcached (since the device buffer can be).
383 * Also note that even for compressed data we would rather the
384 * kernel cache/swapcache device buffers more and (decompressed)
385 * logical buffers less, since that will significantly improve
386 * the amount of end-user data that can be cached.
388 error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT);
394 hammer2_mtx_unlock(&xop->lock);
395 bp->b_flags |= B_NOTMETA;
396 chain = xop->head.cluster.focus;
397 hammer2_strategy_read_completion(chain, (char *)chain->data,
400 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
404 hammer2_mtx_unlock(&xop->lock);
405 bp->b_flags |= B_NOTMETA;
408 bzero(bp->b_data, bp->b_bcount);
410 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
413 hammer2_mtx_unlock(&xop->lock);
416 kprintf("strategy_xop_read: error %d loff=%016jx\n",
417 error, bp->b_loffset);
419 hammer2_mtx_unlock(&xop->lock);
420 bp->b_flags |= B_ERROR;
423 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
431 hammer2_strategy_read_completion(hammer2_chain_t *chain, char *data,
434 struct buf *bp = bio->bio_buf;
436 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
438 * Copy from in-memory inode structure.
440 bcopy(((hammer2_inode_data_t *)data)->u.data,
441 bp->b_data, HAMMER2_EMBEDDED_BYTES);
442 bzero(bp->b_data + HAMMER2_EMBEDDED_BYTES,
443 bp->b_bcount - HAMMER2_EMBEDDED_BYTES);
446 } else if (chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
448 * Data is on-media, record for live dedup. Release the
449 * chain (try to free it) when done. The data is still
450 * cached by both the buffer cache in front and the
451 * block device behind us. This leaves more room in the
452 * LRU chain cache for meta-data chains which we really
455 hammer2_dedup_record(chain, data);
456 atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE);
459 * Decompression and copy.
461 switch (HAMMER2_DEC_COMP(chain->bref.methods)) {
462 case HAMMER2_COMP_LZ4:
463 hammer2_decompress_LZ4_callback(data, chain->bytes,
465 /* b_resid set by call */
467 case HAMMER2_COMP_ZLIB:
468 hammer2_decompress_ZLIB_callback(data, chain->bytes,
470 /* b_resid set by call */
472 case HAMMER2_COMP_NONE:
473 KKASSERT(chain->bytes <= bp->b_bcount);
474 bcopy(data, bp->b_data, chain->bytes);
475 if (chain->bytes < bp->b_bcount) {
476 bzero(bp->b_data + chain->bytes,
477 bp->b_bcount - chain->bytes);
483 panic("hammer2_strategy_read: "
484 "unknown compression type");
487 panic("hammer2_strategy_read: unknown bref type");
491 /****************************************************************************
493 ****************************************************************************/
496 * Functions for compression in threads,
497 * from hammer2_vnops.c
499 static void hammer2_write_file_core(char *data, hammer2_inode_t *ip,
500 hammer2_chain_t **parentp,
501 hammer2_key_t lbase, int ioflag, int pblksize,
502 hammer2_tid_t mtid, int *errorp);
503 static void hammer2_compress_and_write(char *data, hammer2_inode_t *ip,
504 hammer2_chain_t **parentp,
505 hammer2_key_t lbase, int ioflag, int pblksize,
506 hammer2_tid_t mtid, int *errorp,
507 int comp_algo, int check_algo);
508 static void hammer2_zero_check_and_write(char *data, hammer2_inode_t *ip,
509 hammer2_chain_t **parentp,
510 hammer2_key_t lbase, int ioflag, int pblksize,
511 hammer2_tid_t mtid, int *errorp,
513 static int test_block_zeros(const char *buf, size_t bytes);
514 static void zero_write(char *data, hammer2_inode_t *ip,
515 hammer2_chain_t **parentp,
517 hammer2_tid_t mtid, int *errorp);
518 static void hammer2_write_bp(hammer2_chain_t *chain, char *data,
519 int ioflag, int pblksize,
520 hammer2_tid_t mtid, int *errorp,
525 hammer2_strategy_write(struct vop_strategy_args *ap)
527 hammer2_xop_strategy_t *xop;
538 hammer2_lwinprog_ref(pmp);
539 hammer2_trans_assert_strategy(pmp);
540 hammer2_trans_init(pmp, HAMMER2_TRANS_BUFCACHE);
542 xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING |
543 HAMMER2_XOP_STRATEGY);
546 xop->lbase = bio->bio_offset;
547 hammer2_mtx_init(&xop->lock, "h2biow");
548 hammer2_xop_start(&xop->head, hammer2_strategy_xop_write);
549 /* asynchronous completion */
551 hammer2_lwinprog_wait(pmp, hammer2_flush_pipe);
557 * Per-node XOP (threaded). Write the logical buffer to the media.
559 * This is a bit problematic because there may be multiple targets and
560 * any of them may be able to release the bp. In addition, if our
561 * particular target is offline we don't want to block the bp (and thus
562 * the frontend). To accomplish this we copy the data to the per-thr
567 hammer2_strategy_xop_write(hammer2_thread_t *thr, hammer2_xop_t *arg)
569 hammer2_xop_strategy_t *xop = &arg->xop_strategy;
570 hammer2_chain_t *parent;
578 hammer2_off_t bio_offset;
582 * We can only access the bp/bio if the frontend has not yet
587 hammer2_mtx_sh(&xop->lock);
589 hammer2_mtx_unlock(&xop->lock);
594 bio = xop->bio; /* ephemeral */
595 bp = bio->bio_buf; /* ephemeral */
596 ip = xop->head.ip1; /* retained by ref */
597 bio_offset = bio->bio_offset;
598 bio_data = thr->scratch;
600 /* hammer2_trans_init(parent->hmp->spmp, HAMMER2_TRANS_BUFCACHE); */
602 lblksize = hammer2_calc_logical(ip, bio->bio_offset, &lbase, NULL);
603 pblksize = hammer2_calc_physical(ip, lbase);
604 bcopy(bp->b_data, bio_data, lblksize);
606 hammer2_mtx_unlock(&xop->lock);
607 bp = NULL; /* safety, illegal to access after unlock */
608 bio = NULL; /* safety, illegal to access after unlock */
613 parent = hammer2_inode_chain(ip, thr->clindex, HAMMER2_RESOLVE_ALWAYS);
614 hammer2_write_file_core(bio_data, ip, &parent,
615 lbase, IO_ASYNC, pblksize,
616 xop->head.mtid, &error);
618 hammer2_chain_unlock(parent);
619 hammer2_chain_drop(parent);
620 parent = NULL; /* safety */
622 hammer2_xop_feed(&xop->head, NULL, thr->clindex, error);
625 * Try to complete the operation on behalf of the front-end.
629 hammer2_mtx_ex(&xop->lock);
631 hammer2_mtx_unlock(&xop->lock);
636 * Async operation has not completed and we now own the lock.
637 * Determine if we can complete the operation by issuing the
638 * frontend collection non-blocking.
640 * H2 double-buffers the data, setting B_NOTMETA on the logical
641 * buffer hints to the OS that the logical buffer should not be
642 * swapcached (since the device buffer can be).
644 error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT);
646 if (error == EINPROGRESS) {
647 hammer2_mtx_unlock(&xop->lock);
652 * Async operation has completed.
655 hammer2_mtx_unlock(&xop->lock);
657 bio = xop->bio; /* now owned by us */
658 bp = bio->bio_buf; /* now owned by us */
660 if (error == ENOENT || error == 0) {
661 bp->b_flags |= B_NOTMETA;
666 kprintf("strategy_xop_write: error %d loff=%016jx\n",
667 error, bp->b_loffset);
668 bp->b_flags |= B_ERROR;
672 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
673 hammer2_trans_assert_strategy(ip->pmp);
674 hammer2_lwinprog_drop(ip->pmp);
675 hammer2_trans_done(ip->pmp);
679 * Wait for pending I/O to complete
682 hammer2_bioq_sync(hammer2_pfs_t *pmp)
684 hammer2_lwinprog_wait(pmp, 0);
688 * Create a new cluster at (cparent, lbase) and assign physical storage,
689 * returning a cluster suitable for I/O. The cluster will be in a modified
692 * cparent can wind up being anything.
694 * If datap is not NULL, *datap points to the real data we intend to write.
695 * If we can dedup the storage location we set *datap to NULL to indicate
696 * to the caller that a dedup occurred.
698 * NOTE: Special case for data embedded in inode.
702 hammer2_assign_physical(hammer2_inode_t *ip, hammer2_chain_t **parentp,
703 hammer2_key_t lbase, int pblksize,
704 hammer2_tid_t mtid, char **datap, int *errorp)
706 hammer2_chain_t *chain;
707 hammer2_key_t key_dummy;
708 hammer2_off_t dedup_off;
709 int pradix = hammer2_getradix(pblksize);
710 int cache_index = -1;
713 * Locate the chain associated with lbase, return a locked chain.
714 * However, do not instantiate any data reference (which utilizes a
715 * device buffer) because we will be using direct IO via the
716 * logical buffer cache buffer.
719 KKASSERT(pblksize >= HAMMER2_ALLOC_MIN);
722 chain = hammer2_chain_lookup(parentp, &key_dummy,
725 HAMMER2_LOOKUP_NODATA);
728 * The lookup code should not return a DELETED chain to us, unless
729 * it's a short file embedded in the inode. Then it is possible for
730 * the lookup to return a deleted inode.
732 if (chain && (chain->flags & HAMMER2_CHAIN_DELETED) &&
733 chain->bref.type != HAMMER2_BREF_TYPE_INODE) {
734 kprintf("assign physical deleted chain @ "
735 "%016jx (%016jx.%02x) ip %016jx\n",
736 lbase, chain->bref.data_off, chain->bref.type,
743 * We found a hole, create a new chain entry.
745 * NOTE: DATA chains are created without device backing
746 * store (nor do we want any).
748 dedup_off = hammer2_dedup_lookup((*parentp)->hmp, datap,
750 *errorp = hammer2_chain_create(parentp, &chain,
752 HAMMER2_ENC_CHECK(ip->meta.check_algo) |
753 HAMMER2_ENC_COMP(HAMMER2_COMP_NONE),
754 lbase, HAMMER2_PBUFRADIX,
755 HAMMER2_BREF_TYPE_DATA,
759 panic("hammer2_chain_create: par=%p error=%d\n",
763 /*ip->delta_dcount += pblksize;*/
765 switch (chain->bref.type) {
766 case HAMMER2_BREF_TYPE_INODE:
768 * The data is embedded in the inode, which requires
771 hammer2_chain_modify_ip(ip, chain, mtid, 0);
773 case HAMMER2_BREF_TYPE_DATA:
774 dedup_off = hammer2_dedup_lookup(chain->hmp, datap,
776 if (chain->bytes != pblksize) {
777 hammer2_chain_resize(chain,
780 HAMMER2_MODIFY_OPTDATA);
784 * DATA buffers must be marked modified whether the
785 * data is in a logical buffer or not. We also have
786 * to make this call to fixup the chain data pointers
787 * after resizing in case this is an encrypted or
790 hammer2_chain_modify(chain, mtid, dedup_off,
791 HAMMER2_MODIFY_OPTDATA);
794 panic("hammer2_assign_physical: bad type");
804 * hammer2_write_file_core() - hammer2_write_thread() helper
806 * The core write function which determines which path to take
807 * depending on compression settings. We also have to locate the
808 * related chains so we can calculate and set the check data for
813 hammer2_write_file_core(char *data, hammer2_inode_t *ip,
814 hammer2_chain_t **parentp,
815 hammer2_key_t lbase, int ioflag, int pblksize,
816 hammer2_tid_t mtid, int *errorp)
818 hammer2_chain_t *chain;
823 switch(HAMMER2_DEC_ALGO(ip->meta.comp_algo)) {
824 case HAMMER2_COMP_NONE:
826 * We have to assign physical storage to the buffer
827 * we intend to dirty or write now to avoid deadlocks
828 * in the strategy code later.
830 * This can return NOOFFSET for inode-embedded data.
831 * The strategy code will take care of it in that case.
834 chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
835 mtid, &bdata, errorp);
836 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
837 hammer2_inode_data_t *wipdata;
839 wipdata = &chain->data->ipdata;
840 KKASSERT(wipdata->meta.op_flags &
841 HAMMER2_OPFLAG_DIRECTDATA);
842 bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
843 ++hammer2_iod_file_wembed;
844 } else if (bdata == NULL) {
846 * Copy of data already present on-media.
848 chain->bref.methods =
849 HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
850 HAMMER2_ENC_CHECK(ip->meta.check_algo);
851 hammer2_chain_setcheck(chain, data);
853 hammer2_write_bp(chain, data, ioflag, pblksize,
854 mtid, errorp, ip->meta.check_algo);
857 hammer2_chain_unlock(chain);
858 hammer2_chain_drop(chain);
861 case HAMMER2_COMP_AUTOZERO:
863 * Check for zero-fill only
865 hammer2_zero_check_and_write(data, ip, parentp,
866 lbase, ioflag, pblksize,
868 ip->meta.check_algo);
870 case HAMMER2_COMP_LZ4:
871 case HAMMER2_COMP_ZLIB:
874 * Check for zero-fill and attempt compression.
876 hammer2_compress_and_write(data, ip, parentp,
877 lbase, ioflag, pblksize,
880 ip->meta.check_algo);
888 * Generic function that performs the compression in the compression
889 * write path. The compression algorithm is determined by the settings
890 * obtained from the inode.
894 hammer2_compress_and_write(char *data, hammer2_inode_t *ip,
895 hammer2_chain_t **parentp,
896 hammer2_key_t lbase, int ioflag, int pblksize,
897 hammer2_tid_t mtid, int *errorp, int comp_algo, int check_algo)
899 hammer2_chain_t *chain;
905 if (test_block_zeros(data, pblksize)) {
906 zero_write(data, ip, parentp, lbase, mtid, errorp);
913 KKASSERT(pblksize / 2 <= 32768);
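/*
 * Editorial note: the compressors below are only given pblksize / 2 bytes
 * of output room (see the LZ4 output limit and the ZLIB avail_out setting),
 * so a block is stored compressed only when it shrinks to at most half of
 * its physical size; anything larger leaves comp_size at 0 and falls back
 * to the uncompressed path.
 */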
915 if (ip->comp_heuristic < 8 || (ip->comp_heuristic & 7) == 0) {
916 z_stream strm_compress;
920 switch(HAMMER2_DEC_ALGO(comp_algo)) {
921 case HAMMER2_COMP_LZ4:
922 comp_buffer = objcache_get(cache_buffer_write,
924 comp_size = LZ4_compress_limitedOutput(
926 &comp_buffer[sizeof(int)],
928 pblksize / 2 - sizeof(int));
930 * We need to prefix with the size, LZ4
931 * doesn't do it for us. Add the related
934 *(int *)comp_buffer = comp_size;
936 comp_size += sizeof(int);
938 case HAMMER2_COMP_ZLIB:
939 comp_level = HAMMER2_DEC_LEVEL(comp_algo);
941 comp_level = 6; /* default zlib compression */
942 else if (comp_level < 6)
944 else if (comp_level > 9)
946 ret = deflateInit(&strm_compress, comp_level);
948 kprintf("HAMMER2 ZLIB: fatal error "
949 "on deflateInit.\n");
952 comp_buffer = objcache_get(cache_buffer_write,
954 strm_compress.next_in = data;
955 strm_compress.avail_in = pblksize;
956 strm_compress.next_out = comp_buffer;
957 strm_compress.avail_out = pblksize / 2;
958 ret = deflate(&strm_compress, Z_FINISH);
959 if (ret == Z_STREAM_END) {
960 comp_size = pblksize / 2 -
961 strm_compress.avail_out;
965 ret = deflateEnd(&strm_compress);
968 kprintf("Error: Unknown compression method.\n");
969 kprintf("Comp_method = %d.\n", comp_algo);
974 if (comp_size == 0) {
976 * compression failed or turned off
978 comp_block_size = pblksize; /* safety */
979 if (++ip->comp_heuristic > 128)
980 ip->comp_heuristic = 8;
983 * compression succeeded
985 ip->comp_heuristic = 0;
986 if (comp_size <= 1024) {
987 comp_block_size = 1024;
988 } else if (comp_size <= 2048) {
989 comp_block_size = 2048;
990 } else if (comp_size <= 4096) {
991 comp_block_size = 4096;
992 } else if (comp_size <= 8192) {
993 comp_block_size = 8192;
994 } else if (comp_size <= 16384) {
995 comp_block_size = 16384;
996 } else if (comp_size <= 32768) {
997 comp_block_size = 32768;
999 panic("hammer2: WRITE PATH: "
1000 "Weird comp_size value.");
1002 comp_block_size = pblksize;
1006 * Must zero the remainder or dedup (which operates on a
1007 * physical block basis) will not find matches.
1009 if (comp_size < comp_block_size) {
1010 bzero(comp_buffer + comp_size,
1011 comp_block_size - comp_size);
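/*
 * Editorial sketch: the size ladder above rounds comp_size up to the next
 * power-of-two block size in the range [1024, 32768].  Assuming comp_size
 * has already been limited to pblksize / 2 <= 32768, an equivalent
 * formulation of that ladder would be:
 *
 *	comp_block_size = 1024;
 *	while (comp_block_size < comp_size)
 *		comp_block_size <<= 1;
 */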
1016 * Assign physical storage, data will be set to NULL if a live-dedup
1019 bdata = comp_size ? comp_buffer : data;
1020 chain = hammer2_assign_physical(ip, parentp, lbase, comp_block_size,
1021 mtid, &bdata, errorp);
1024 kprintf("WRITE PATH: An error occurred while "
1025 "assigning physical space.\n");
1026 KKASSERT(chain == NULL);
1030 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
1031 hammer2_inode_data_t *wipdata;
1033 hammer2_chain_modify_ip(ip, chain, mtid, 0);
1034 wipdata = &chain->data->ipdata;
1035 KKASSERT(wipdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA);
1036 bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
1037 ++hammer2_iod_file_wembed;
1038 } else if (bdata == NULL) {
1040 * Live deduplication, a copy of the data is already present
1044 chain->bref.methods =
1045 HAMMER2_ENC_COMP(comp_algo) +
1046 HAMMER2_ENC_CHECK(check_algo);
1048 chain->bref.methods =
1050 HAMMER2_COMP_NONE) +
1051 HAMMER2_ENC_CHECK(check_algo);
1053 bdata = comp_size ? comp_buffer : data;
1054 hammer2_chain_setcheck(chain, bdata);
1055 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
1059 KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);
1061 switch(chain->bref.type) {
1062 case HAMMER2_BREF_TYPE_INODE:
1063 panic("hammer2_write_bp: unexpected inode\n");
1065 case HAMMER2_BREF_TYPE_DATA:
1067 * Optimize out the read-before-write
1070 *errorp = hammer2_io_newnz(chain->hmp,
1072 chain->bref.data_off,
1076 hammer2_io_brelse(&dio);
1077 kprintf("hammer2: WRITE PATH: "
1078 "dbp bread error\n");
1081 bdata = hammer2_io_data(dio, chain->bref.data_off);
1084 * When loading the block make sure we don't
1085 * leave garbage after the compressed data.
1088 chain->bref.methods =
1089 HAMMER2_ENC_COMP(comp_algo) +
1090 HAMMER2_ENC_CHECK(check_algo);
1091 bcopy(comp_buffer, bdata, comp_size);
1093 chain->bref.methods =
1095 HAMMER2_COMP_NONE) +
1096 HAMMER2_ENC_CHECK(check_algo);
1097 bcopy(data, bdata, pblksize);
1101 * The flush code doesn't calculate check codes for
1102 * file data (doing so can result in excessive I/O),
1105 hammer2_chain_setcheck(chain, bdata);
1106 hammer2_dedup_record(chain, bdata);
1109 * Device buffer is now valid, chain is no longer in
1110 * the initial state.
1112 * (No blockref table worries with file data)
1114 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
1116 /* Now write the related bdp. */
1117 if (ioflag & IO_SYNC) {
1119 * Synchronous I/O requested.
1121 hammer2_io_bwrite(&dio);
1123 } else if ((ioflag & IO_DIRECT) &&
1124 loff + n == pblksize) {
1125 hammer2_io_bdwrite(&dio);
1127 } else if (ioflag & IO_ASYNC) {
1128 hammer2_io_bawrite(&dio);
1130 hammer2_io_bdwrite(&dio);
1134 panic("hammer2_write_bp: bad chain type %d\n",
1142 hammer2_chain_unlock(chain);
1143 hammer2_chain_drop(chain);
1146 objcache_put(cache_buffer_write, comp_buffer);
1152 * Function that performs zero-checking and writing without compression;
1153 * it corresponds to the default zero-checking path.
1157 hammer2_zero_check_and_write(char *data, hammer2_inode_t *ip,
1158 hammer2_chain_t **parentp,
1159 hammer2_key_t lbase, int ioflag, int pblksize,
1160 hammer2_tid_t mtid, int *errorp,
1163 hammer2_chain_t *chain;
1165 if (test_block_zeros(data, pblksize)) {
1166 zero_write(data, ip, parentp, lbase, mtid, errorp);
1168 chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
1169 mtid, &data, errorp);
1171 hammer2_write_bp(chain, data, ioflag, pblksize,
1172 mtid, errorp, check_algo);
1173 } /* else dedup occurred */
1175 hammer2_chain_unlock(chain);
1176 hammer2_chain_drop(chain);
1184 * A function to test whether a block of data contains only zeros.
1185 * Returns TRUE (non-zero) if the block is all zeros.
1189 test_block_zeros(const char *buf, size_t bytes)
1193 for (i = 0; i < bytes; i += sizeof(long)) {
1194 if (*(const long *)(buf + i) != 0)
1203 * Function to "write" a block that contains only zeros.
1207 zero_write(char *data, hammer2_inode_t *ip,
1208 hammer2_chain_t **parentp,
1209 hammer2_key_t lbase, hammer2_tid_t mtid, int *errorp)
1211 hammer2_chain_t *chain;
1212 hammer2_key_t key_dummy;
1213 int cache_index = -1;
1216 chain = hammer2_chain_lookup(parentp, &key_dummy,
1219 HAMMER2_LOOKUP_NODATA);
1221 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
1222 hammer2_inode_data_t *wipdata;
1224 hammer2_chain_modify_ip(ip, chain, mtid, 0);
1225 wipdata = &chain->data->ipdata;
1226 KKASSERT(wipdata->meta.op_flags &
1227 HAMMER2_OPFLAG_DIRECTDATA);
1228 bzero(wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
1229 ++hammer2_iod_file_wembed;
1231 hammer2_chain_delete(*parentp, chain,
1232 mtid, HAMMER2_DELETE_PERMANENT);
1233 ++hammer2_iod_file_wzero;
1235 hammer2_chain_unlock(chain);
1236 hammer2_chain_drop(chain);
1238 ++hammer2_iod_file_wzero;
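/*
 * Editorial note: in each of the cases above an all-zero logical block ends
 * up consuming no media storage.  Inode-embedded data is zeroed in place,
 * an existing data chain is deleted permanently (leaving a hole that reads
 * back as zeros), and a block with no chain at all is already a hole, so
 * only the statistics counters differ.
 */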
1245 * Function to write the data as it is, without performing any sort of
1246 * compression. This function is used in the no-compression path and in
1247 * the default zero-checking path.
1251 hammer2_write_bp(hammer2_chain_t *chain, char *data, int ioflag,
1253 hammer2_tid_t mtid, int *errorp, int check_algo)
1255 hammer2_inode_data_t *wipdata;
1260 error = 0; /* XXX TODO below */
1262 KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);
1264 switch(chain->bref.type) {
1265 case HAMMER2_BREF_TYPE_INODE:
1266 wipdata = &chain->data->ipdata;
1267 KKASSERT(wipdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA);
1268 bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
1270 ++hammer2_iod_file_wembed;
1272 case HAMMER2_BREF_TYPE_DATA:
1273 error = hammer2_io_newnz(chain->hmp,
1275 chain->bref.data_off,
1276 chain->bytes, &dio);
1278 hammer2_io_bqrelse(&dio);
1279 kprintf("hammer2: WRITE PATH: "
1280 "dbp bread error\n");
1283 bdata = hammer2_io_data(dio, chain->bref.data_off);
1285 chain->bref.methods = HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
1286 HAMMER2_ENC_CHECK(check_algo);
1287 bcopy(data, bdata, chain->bytes);
1290 * The flush code doesn't calculate check codes for
1291 * file data (doing so can result in excessive I/O),
1294 hammer2_chain_setcheck(chain, bdata);
1295 hammer2_dedup_record(chain, bdata);
1298 * Device buffer is now valid, chain is no longer in
1299 * the initial state.
1301 * (No blockref table worries with file data)
1303 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
1305 if (ioflag & IO_SYNC) {
1307 * Synchronous I/O requested.
1309 hammer2_io_bwrite(&dio);
1311 } else if ((ioflag & IO_DIRECT) &&
1312 loff + n == pblksize) {
1313 hammer2_io_bdwrite(&dio);
1315 } else if (ioflag & IO_ASYNC) {
1316 hammer2_io_bawrite(&dio);
1318 hammer2_io_bdwrite(&dio);
1322 panic("hammer2_write_bp: bad chain type %d\n",
1328 KKASSERT(error == 0); /* XXX TODO */
1333 * LIVE DEDUP HEURISTIC
1335 * WARNING! This code is SMP safe but the heuristic allows SMP collisions.
1336 * All fields must be loaded into locals and validated.
1338 * WARNING! Should only be used for file data, hammer2_chain_modify() only
1339 * checks for the dedup case on data chains. Also, dedup data can
1340 * only be recorded for committed chains (so NOT strategy writes
1341 * which can undergo further modification after the fact!).
1344 hammer2_dedup_record(hammer2_chain_t *chain, char *data)
1347 hammer2_dedup_t *dedup;
1353 if (hammer2_dedup_enable == 0)
1357 * Only committed data can be recorded for de-duplication, otherwise
1358 * the contents may change out from under us. So, on read if the
1359 * chain is not modified, and on flush when the chain is committed.
1362 (HAMMER2_CHAIN_MODIFIED | HAMMER2_CHAIN_INITIAL)) == 0) {
1369 switch(HAMMER2_DEC_CHECK(chain->bref.methods)) {
1370 case HAMMER2_CHECK_ISCSI32:
1372 * XXX use the built-in crc (the dedup lookup sequencing
1373 * needs to be fixed so the check code is already present
1374 * when dedup_lookup is called)
1377 crc = (uint64_t)(uint32_t)chain->bref.check.iscsi32.value;
1379 crc = XXH64(data, chain->bytes, XXH_HAMMER2_SEED);
1381 case HAMMER2_CHECK_XXHASH64:
1382 crc = chain->bref.check.xxhash64.value;
1384 case HAMMER2_CHECK_SHA192:
1386 * XXX use the built-in crc (the dedup lookup sequencing
1387 * needs to be fixed so the check code is already present
1388 * when dedup_lookup is called)
1391 crc = ((uint64_t *)chain->bref.check.sha192.data)[0] ^
1392 ((uint64_t *)chain->bref.check.sha192.data)[1] ^
1393 ((uint64_t *)chain->bref.check.sha192.data)[2];
1395 crc = XXH64(data, chain->bytes, XXH_HAMMER2_SEED);
1399 * Cannot dedup without a check code
1401 * NOTE: In particular, CHECK_NONE allows a sector to be
1402 * overwritten without copy-on-write, recording
1403 * a dedup block for a CHECK_NONE object would be
1408 dedup = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)];
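/*
 * Editorial note: heur_dedup is used as a small 4-way set-associative
 * cache.  Masking the crc with (HAMMER2_DEDUP_HEUR_MASK & ~3) selects an
 * aligned group of four entries; the loop below reuses a matching entry
 * when one exists and otherwise falls back to an older entry in the group.
 */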
1409 for (i = 0; i < 4; ++i) {
1410 if (dedup[i].data_crc == crc) {
1414 dticks = (int)(dedup[i].ticks - dedup[best].ticks);
1415 if (dticks < 0 || dticks > hz * 60 * 30)
1419 if (hammer2_debug & 0x40000) {
1420 kprintf("REC %04x %016jx %016jx\n",
1421 (int)(dedup - hmp->heur_dedup),
1423 chain->bref.data_off);
1425 dedup->ticks = ticks;
1426 dedup->data_off = chain->bref.data_off;
1427 dedup->data_crc = crc;
1428 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEDUP);
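/*
 * Editorial note: bref.data_off encodes the block radix in its low bits,
 * which is why hammer2_dedup_lookup() below checks
 * (1 << (off & HAMMER2_OFF_MASK_RADIX)) against pblksize before it will
 * accept a recorded offset as a dedup match.
 */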
1433 hammer2_dedup_lookup(hammer2_dev_t *hmp, char **datap, int pblksize)
1435 hammer2_dedup_t *dedup;
1442 if (hammer2_dedup_enable == 0)
1449 * XXX use the built-in crc (the dedup lookup sequencing
1450 * needs to be fixed so the check code is already present
1451 * when dedup_lookup is called)
1453 crc = XXH64(data, pblksize, XXH_HAMMER2_SEED);
1454 dedup = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)];
1456 if (hammer2_debug & 0x40000) {
1457 kprintf("LOC %04x/4 %016jx\n",
1458 (int)(dedup - hmp->heur_dedup),
1462 for (i = 0; i < 4; ++i) {
1463 off = dedup[i].data_off;
1465 if (dedup[i].data_crc != crc)
1467 if ((1 << (int)(off & HAMMER2_OFF_MASK_RADIX)) != pblksize)
1469 dio = hammer2_io_getquick(hmp, off, pblksize);
1471 bcmp(data, hammer2_io_data(dio, off), pblksize) == 0) {
1473 * Make sure the INVALOK flag is cleared to prevent
1474 * the possibly-dirty bp from being invalidated now
1475 * that we are using it as part of a de-dup operation.
1477 if (hammer2_debug & 0x40000) {
1478 kprintf("DEDUP SUCCESS %016jx\n",
1481 atomic_clear_64(&dio->refs, HAMMER2_DIO_INVALOK);
1482 hammer2_io_putblk(&dio);
1484 dedup[i].ticks = ticks; /* update use */
1485 ++hammer2_iod_file_wdedup;
1487 return off; /* RETURN */
1490 hammer2_io_putblk(&dio);
1496 * Poof. Races are ok, if someone gets in and reuses a dedup offset
1497 * before or while we are clearing it they will also recover the freemap
1498 * entry (set it to fully allocated), so a bulkfree race can only set it
1499 * to a possibly-free state.
1501 * XXX ok, well, not really sure races are ok but going to run with it
1505 hammer2_dedup_clear(hammer2_dev_t *hmp)
1509 for (i = 0; i < HAMMER2_DEDUP_HEUR_SIZE; ++i) {
1510 hmp->heur_dedup[i].data_off = 0;
1511 hmp->heur_dedup[i].ticks = ticks - 1;