/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2017 by Delphix. All rights reserved.
 * Copyright 2014 HybridCluster. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/dsl_dataset.h>

/*
 * Each of the concurrent object allocators will grab
 * 2^dmu_object_alloc_chunk_shift dnode slots at a time.  The default is to
 * grab 128 slots, which is 4 blocks worth.  This was experimentally
 * determined to be the lowest value that eliminates the measurable effect
 * of lock contention from this code path.
 */
int dmu_object_alloc_chunk_shift = 7;

static uint64_t
dmu_object_alloc_impl(objset_t *os, dmu_object_type_t ot, int blocksize,
    int indirect_blockshift, dmu_object_type_t bonustype, int bonuslen,
    int dnodesize, dmu_tx_t *tx)
{
	uint64_t object;
	uint64_t L1_dnode_count = DNODES_PER_BLOCK <<
	    (DMU_META_DNODE(os)->dn_indblkshift - SPA_BLKPTRSHIFT);
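	/*
	 * Illustrative numbers (not guaranteed for every pool): with 16K
	 * meta dnode indirect blocks (dn_indblkshift == 14) and 128-byte
	 * block pointers (SPA_BLKPTRSHIFT == 7), an L1 covers 128 L0 blocks
	 * of DNODES_PER_BLOCK (32) dnodes each, i.e. 2^12 dnodes per L1.
	 */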
	dnode_t *dn = NULL;
	int dn_slots = dnodesize >> DNODE_SHIFT;
	boolean_t restarted = B_FALSE;
	uint64_t *cpuobj = &os->os_obj_next_percpu[CPU_SEQID %
	    os->os_obj_next_percpu_len];
	int dnodes_per_chunk = 1 << dmu_object_alloc_chunk_shift;
	int error;

	if (dn_slots == 0) {
		dn_slots = DNODE_MIN_SLOTS;
	} else {
		ASSERT3S(dn_slots, >=, DNODE_MIN_SLOTS);
		ASSERT3S(dn_slots, <=, DNODE_MAX_SLOTS);
	}

	/*
	 * The "chunk" of dnodes that is assigned to a CPU-specific
	 * allocator needs to be at least one block's worth, to avoid
	 * lock contention on the dbuf.  It can be at most one L1 block's
	 * worth, so that the "rescan after polishing off a L1's worth"
	 * logic below will be sure to kick in.
	 */
	if (dnodes_per_chunk < DNODES_PER_BLOCK)
		dnodes_per_chunk = DNODES_PER_BLOCK;
	if (dnodes_per_chunk > L1_dnode_count)
		dnodes_per_chunk = L1_dnode_count;
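
	/*
	 * After clamping, DNODES_PER_BLOCK <= dnodes_per_chunk <=
	 * L1_dnode_count, and the chunk size remains a power of two, so
	 * the P2PHASE() boundary checks below stay well-defined.
	 */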

	object = atomic_load_64(cpuobj);
	for (;;) {
		/*
		 * If we finished a chunk of dnodes, get a new one from
		 * the global allocator.
		 */
		if ((P2PHASE(object, dnodes_per_chunk) == 0) ||
		    (P2PHASE(object + dn_slots - 1, dnodes_per_chunk) <
		    dn_slots)) {
			DNODE_STAT_BUMP(dnode_alloc_next_chunk);
			mutex_enter(&os->os_obj_lock);
			ASSERT0(P2PHASE(os->os_obj_next_chunk,
			    dnodes_per_chunk));
			object = os->os_obj_next_chunk;

			/*
			 * Each time we polish off a L1 bp worth of dnodes
			 * (2^12 objects), move to another L1 bp that's
			 * still reasonably sparse (at most 1/4 full).  Look
			 * from the beginning at most once per txg.  If we
			 * still can't allocate from that L1 block, search
			 * for an empty L0 block, which will quickly skip
			 * to the end of the metadnode if no nearby L0
			 * blocks are empty.  This fallback avoids a
			 * pathology where full dnode blocks containing
			 * large dnodes appear sparse because they have a
			 * low blk_fill, leading to many failed allocation
			 * attempts.  In the long term a better mechanism to
			 * search for sparse metadnode regions, such as
			 * spacemaps, could be implemented.
			 *
			 * os_rescan_dnodes is set during txg sync if enough
			 * objects have been freed since the previous
			 * rescan to justify backfilling again.
			 *
			 * Note that dmu_traverse depends on the behavior
			 * that we use multiple blocks of the dnode object
			 * before going back to reuse objects.  Any change
			 * to this algorithm should preserve that property
			 * or find another solution to the issues described
			 * in traverse_visitbp.
			 */
			if (P2PHASE(object, L1_dnode_count) == 0) {
				uint64_t offset;
				uint64_t blkfill;
				int minlvl;

				if (os->os_rescan_dnodes) {
					offset = 0;
					os->os_rescan_dnodes = B_FALSE;
				} else {
					offset = object << DNODE_SHIFT;
				}

				blkfill = restarted ? 1 : DNODES_PER_BLOCK >> 2;
				minlvl = restarted ? 1 : 2;
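				/*
				 * First pass: demand an L1 (minlvl 2) that
				 * is at most 1/4 full (blkfill of 8 of the
				 * 32 dnodes per block).  On a restarted
				 * scan, accept any single free slot
				 * (minlvl 1, blkfill 1).
				 */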
				restarted = B_TRUE;
				error = dnode_next_offset(DMU_META_DNODE(os),
				    DNODE_FIND_HOLE, &offset, minlvl,
				    blkfill, 0);
				if (error == 0) {
					object = offset >> DNODE_SHIFT;
				}
			}
			/*
			 * Note: if "restarted", we may find a L0 that
			 * is not suitably aligned.
			 */
			os->os_obj_next_chunk =
			    P2ALIGN(object, dnodes_per_chunk) +
			    dnodes_per_chunk;
			(void) atomic_swap_64(cpuobj, object);
			mutex_exit(&os->os_obj_lock);
		}

		/*
		 * The value of (*cpuobj) before adding dn_slots is the object
		 * ID assigned to us.  The value afterwards is the object ID
		 * assigned to whoever wants to do an allocation next.
		 */
		object = atomic_add_64_nv(cpuobj, dn_slots) - dn_slots;
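
		/*
		 * Worked example (hypothetical values): if *cpuobj is 128
		 * and dn_slots is 2, atomic_add_64_nv() returns 130, so we
		 * are assigned object 128 (slots 128-129) and the next
		 * allocator starts at 130.
		 */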

		/*
		 * XXX We should check for an i/o error here and return
		 * up to our caller.  Actually we should pre-read it in
		 * dmu_tx_assign(), but there is currently no mechanism
		 * to do so.
		 */
		error = dnode_hold_impl(os, object, DNODE_MUST_BE_FREE,
		    dn_slots, FTAG, &dn);
		if (error == 0) {
			rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
			/*
			 * Another thread could have allocated it; check
			 * again now that we have the struct lock.
			 */
			if (dn->dn_type == DMU_OT_NONE) {
				dnode_allocate(dn, ot, blocksize, 0,
				    bonustype, bonuslen, dn_slots, tx);
				rw_exit(&dn->dn_struct_rwlock);
				dmu_tx_add_new_object(tx, dn);
				dnode_rele(dn, FTAG);
				return (object);
			}
			rw_exit(&dn->dn_struct_rwlock);
			dnode_rele(dn, FTAG);
			DNODE_STAT_BUMP(dnode_alloc_race);
		}

		/*
		 * Skip to next known valid starting point on error.  This
		 * is the start of the next block of dnodes.
		 */
		if (dmu_object_next(os, &object, B_TRUE, 0) != 0) {
			object = P2ROUNDUP(object + 1, DNODES_PER_BLOCK);
			DNODE_STAT_BUMP(dnode_alloc_next_block);
		}
		(void) atomic_swap_64(cpuobj, object);
	}
}

uint64_t
dmu_object_alloc(objset_t *os, dmu_object_type_t ot, int blocksize,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	return (dmu_object_alloc_impl(os, ot, blocksize, 0, bonustype,
	    bonuslen, 0, tx));
}

uint64_t
dmu_object_alloc_ibs(objset_t *os, dmu_object_type_t ot, int blocksize,
    int indirect_blockshift, dmu_object_type_t bonustype, int bonuslen,
    dmu_tx_t *tx)
{
	return (dmu_object_alloc_impl(os, ot, blocksize, indirect_blockshift,
	    bonustype, bonuslen, 0, tx));
}

uint64_t
dmu_object_alloc_dnsize(objset_t *os, dmu_object_type_t ot, int blocksize,
    dmu_object_type_t bonustype, int bonuslen, int dnodesize, dmu_tx_t *tx)
{
	return (dmu_object_alloc_impl(os, ot, blocksize, 0, bonustype,
	    bonuslen, dnodesize, tx));
}
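
/*
 * Example caller pattern (a sketch, not taken from this file): allocate a
 * new object inside an assigned transaction.
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
 *	if (dmu_tx_assign(tx, TXG_WAIT) != 0) {
 *		dmu_tx_abort(tx);
 *	} else {
 *		uint64_t obj = dmu_object_alloc(os, DMU_OT_UINT64_OTHER,
 *		    0, DMU_OT_NONE, 0, tx);
 *		dmu_tx_commit(tx);
 *	}
 */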

int
dmu_object_claim(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	return (dmu_object_claim_dnsize(os, object, ot, blocksize, bonustype,
	    bonuslen, 0, tx));
}

int
dmu_object_claim_dnsize(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonustype, int bonuslen,
    int dnodesize, dmu_tx_t *tx)
{
	dnode_t *dn;
	int dn_slots = dnodesize >> DNODE_SHIFT;
	int err;

	if (dn_slots == 0)
		dn_slots = DNODE_MIN_SLOTS;
	ASSERT3S(dn_slots, >=, DNODE_MIN_SLOTS);
	ASSERT3S(dn_slots, <=, DNODE_MAX_SLOTS);

	if (object == DMU_META_DNODE_OBJECT && !dmu_tx_private_ok(tx))
		return (SET_ERROR(EBADF));

	err = dnode_hold_impl(os, object, DNODE_MUST_BE_FREE, dn_slots,
	    FTAG, &dn);
	if (err)
		return (err);

	dnode_allocate(dn, ot, blocksize, 0, bonustype, bonuslen, dn_slots, tx);
	dmu_tx_add_new_object(tx, dn);

	dnode_rele(dn, FTAG);

	return (0);
}
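
/*
 * Unlike dmu_object_alloc(), the claim and reclaim entry points operate on a
 * caller-chosen object number (e.g. when recreating objects with known IDs,
 * as a receive stream does), rather than picking the next free one.
 */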

int
dmu_object_reclaim(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	return (dmu_object_reclaim_dnsize(os, object, ot, blocksize, bonustype,
	    bonuslen, DNODE_MIN_SIZE, tx));
}

int
dmu_object_reclaim_dnsize(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonustype, int bonuslen, int dnodesize,
    dmu_tx_t *tx)
{
	dnode_t *dn;
	int dn_slots = dnodesize >> DNODE_SHIFT;
	int err;

	if (dn_slots == 0)
		dn_slots = DNODE_MIN_SLOTS;

	if (object == DMU_META_DNODE_OBJECT)
		return (SET_ERROR(EBADF));

	err = dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, 0,
	    FTAG, &dn);
	if (err)
		return (err);

	dnode_reallocate(dn, ot, blocksize, bonustype, bonuslen, dn_slots, tx);

	dnode_rele(dn, FTAG);
	return (err);
}

int
dmu_object_free(objset_t *os, uint64_t object, dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	ASSERT(object != DMU_META_DNODE_OBJECT || dmu_tx_private_ok(tx));

	err = dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, 0,
	    FTAG, &dn);
	if (err)
		return (err);

	ASSERT(dn->dn_type != DMU_OT_NONE);
	/*
	 * If we don't create this free range, we'll leak indirect blocks when
	 * we get to freeing the dnode in syncing context.
	 */
	dnode_free_range(dn, 0, DMU_OBJECT_END, tx);
	dnode_free(dn, tx);
	dnode_rele(dn, FTAG);

	return (0);
}

/*
 * Return (in *objectp) the next object which is allocated (or a hole)
 * after *objectp, taking into account only objects that may have been
 * modified after the specified txg.
 */
int
dmu_object_next(objset_t *os, uint64_t *objectp, boolean_t hole, uint64_t txg)
{
	uint64_t offset;
	uint64_t start_obj;
	struct dsl_dataset *ds = os->os_dsl_dataset;
	int error;

	if (*objectp == 0) {
		start_obj = 1;
	} else if (ds && ds->ds_feature_inuse[SPA_FEATURE_LARGE_DNODE]) {
		uint64_t i = *objectp + 1;
		uint64_t last_obj = *objectp | (DNODES_PER_BLOCK - 1);
		dmu_object_info_t doi;

		/*
		 * Scan through the remaining meta dnode block.  The contents
		 * of each slot in the block are known so it can be quickly
		 * checked.  If the block is exhausted without a match then
		 * hand off to dnode_next_offset() for further scanning.
		 */
		while (i <= last_obj) {
			error = dmu_object_info(os, i, &doi);
			if (error == ENOENT) {
				if (hole) {
					*objectp = i;
					return (0);
				} else {
					i++;
				}
			} else if (error == EEXIST) {
				/*
				 * The slot is an interior slot of a
				 * multi-slot dnode; skip over it.
				 */
				i++;
			} else if (error == 0) {
				if (hole) {
					i += doi.doi_dnodesize >> DNODE_SHIFT;
				} else {
					*objectp = i;
					return (0);
				}
			} else {
				return (error);
			}
		}

		start_obj = i;
	} else {
		start_obj = *objectp + 1;
	}

	offset = start_obj << DNODE_SHIFT;

	error = dnode_next_offset(DMU_META_DNODE(os),
	    (hole ? DNODE_FIND_HOLE : 0), &offset, 0, DNODES_PER_BLOCK, txg);

	*objectp = offset >> DNODE_SHIFT;
	return (error);
}
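
/*
 * A typical iteration pattern (a sketch, not from this file): visit every
 * allocated object in an objset, starting from object 0.
 *
 *	uint64_t obj = 0;
 *
 *	while (dmu_object_next(os, &obj, B_FALSE, 0) == 0)
 *		examine_object(os, obj);	(hypothetical callback)
 *
 * The loop ends when dnode_next_offset() finds no further allocated
 * object and an error (e.g. ESRCH) is returned.
 */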

/*
 * Turn this object from old_type into DMU_OTN_ZAP_METADATA, and bump the
 * refcount on SPA_FEATURE_EXTENSIBLE_DATASET.
 *
 * Only for use from syncing context, on MOS objects.
 */
void
dmu_object_zapify(objset_t *mos, uint64_t object, dmu_object_type_t old_type,
    dmu_tx_t *tx)
{
	dnode_t *dn;

	ASSERT(dmu_tx_is_syncing(tx));

	VERIFY0(dnode_hold(mos, object, FTAG, &dn));
	if (dn->dn_type == DMU_OTN_ZAP_METADATA) {
		dnode_rele(dn, FTAG);
		return;
	}
	ASSERT3U(dn->dn_type, ==, old_type);
	ASSERT0(dn->dn_maxblkid);

	/*
	 * We must initialize the ZAP data before changing the type,
	 * so that concurrent calls to *_is_zapified() can determine if
	 * the object has been completely zapified by checking the type.
	 */
	mzap_create_impl(mos, object, 0, 0, tx);

	dn->dn_next_type[tx->tx_txg & TXG_MASK] = dn->dn_type =
	    DMU_OTN_ZAP_METADATA;
	dnode_setdirty(dn, tx);
	dnode_rele(dn, FTAG);

	spa_feature_incr(dmu_objset_spa(mos),
	    SPA_FEATURE_EXTENSIBLE_DATASET, tx);
}

void
dmu_object_free_zapified(objset_t *mos, uint64_t object, dmu_tx_t *tx)
{
	dnode_t *dn;
	dmu_object_type_t t;

	ASSERT(dmu_tx_is_syncing(tx));

	VERIFY0(dnode_hold(mos, object, FTAG, &dn));
	t = dn->dn_type;
	dnode_rele(dn, FTAG);

	if (t == DMU_OTN_ZAP_METADATA) {
		spa_feature_decr(dmu_objset_spa(mos),
		    SPA_FEATURE_EXTENSIBLE_DATASET, tx);
	}
	VERIFY0(dmu_object_free(mos, object, tx));
}