/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 */

/*
 * This file contains the code to implement file range locking in
 * ZFS, although there isn't much specific to ZFS (all that comes to mind is
 * support for growing the blocksize).
 *
 * Interface
 * ---------
 * Defined in zfs_rlock.h but essentially:
 *	lr = rangelock_enter(zp, off, len, lock_type);
 *	rangelock_reduce(lr, off, len); // optional
 *	rangelock_exit(lr);
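 *
 * As a hedged sketch of a typical caller (the zp variable and its
 * z_rangelock field are illustrative, not defined in this file):
 *
 *	lr = rangelock_enter(&zp->z_rangelock, off, len, RL_READER);
 *	... access the file range under the shared lock ...
 *	rangelock_exit(lr);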
 *
 * AVL tree
 * --------
 * An AVL tree is used to maintain the state of the existing ranges
 * that are locked for exclusive (writer) or shared (reader) use.
 * The starting range offset is used for searching and sorting the tree.
 *
 * Common case
 * -----------
 * The (hopefully) usual case is of no overlaps or contention for locks. On
 * entry to rangelock_enter(), a locked_range_t is allocated; the tree is
 * searched, no overlap is found, and *this* locked_range_t is placed in
 * the tree.
 *
 * Overlaps/Reference counting/Proxy locks
 * ---------------------------------------
 * The AVL code only allows one node at a particular offset. Also it's very
 * inefficient to search through all previous entries looking for overlaps
 * (because the very 1st in the ordered list might be at offset 0 but
 * cover the whole file).
 * So this implementation uses reference counts and proxy range locks.
 * Firstly, only reader locks use reference counts and proxy locks,
 * because writer locks are exclusive.
 * When a reader lock overlaps with another then a proxy lock is created
 * for that range and replaces the original lock. If the overlap
 * is exact then the reference count of the proxy is simply incremented.
 * Otherwise, the proxy lock is split into smaller lock ranges and
 * new proxy locks created for non-overlapping ranges.
 * The reference counts are adjusted accordingly.
 * Meanwhile, the original lock is kept around (this is the caller's handle)
 * and its offset and length are used when releasing the lock.
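 *
 * For example (a hedged illustration): if reader A holds [0, 100) and
 * reader B then locks [50, 150), A's entry is replaced in the tree by
 * proxies [0, 50) (count 1) and [50, 100) (count 2), and a new proxy
 * [100, 150) (count 1) is added for B. A's and B's own locked_range_t
 * handles stay outside the tree with lr_count == 0.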
 *
 * Thread Interactions
 * -------------------
 * In order to make wakeups efficient and to ensure multiple continuous
 * readers on a range don't starve a writer for the same range lock,
 * two condition variables are allocated in each locked_range_t.
 * If a writer (or reader) can't get a range it initialises the writer
 * (or reader) cv; sets a flag saying there's a writer (or reader) waiting;
 * and waits on that cv. When a thread unlocks that range it wakes up all
 * writers then all readers before destroying the lock.
 *
 * AppendMode writes
 * -----------------
 * Append mode writes need to lock a range at the end of a file.
 * The offset of the end of the file is determined under the
 * range locking mutex, and the lock type converted from RL_APPEND to
 * RL_WRITER and the range locked.
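 *
 * A hedged sketch of an appending writer (nbytes is illustrative; the
 * callback overwrites the offset with the current end of file):
 *
 *	lr = rangelock_enter(rl, 0, nbytes, RL_APPEND);
 *	... lr->lr_offset now holds the end-of-file write offset ...
 *	rangelock_exit(lr);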
 *
 * Grow block handling
 * -------------------
 * ZFS supports multiple block sizes, up to 16MB. The smallest
 * block size is used for the file which is grown as needed. During this
 * growth all other writers and readers must be excluded.
 * So if the block size needs to be grown then the whole file is
 * exclusively locked, then later the caller will reduce the lock
 * range to just the range to be written using rangelock_reduce().
 */
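
/*
 * A hedged sketch of the grow-blocksize pattern described above; off and
 * len are whatever range the caller actually intends to write:
 *
 *	lr = rangelock_enter(rl, 0, UINT64_MAX, RL_WRITER);
 *	... grow the blocksize with all other users excluded ...
 *	rangelock_reduce(lr, off, len);
 *	... perform the write, then rangelock_exit(lr) ...
 */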

#include <sys/zfs_context.h>
#include <sys/zfs_rlock.h>

/*
 * AVL comparison function used to order range locks.
 * Locks are ordered on the start offset of the range.
 */
static int
rangelock_compare(const void *arg1, const void *arg2)
{
	const locked_range_t *rl1 = (const locked_range_t *)arg1;
	const locked_range_t *rl2 = (const locked_range_t *)arg2;

	return (AVL_CMP(rl1->lr_offset, rl2->lr_offset));
}

/*
 * The callback is invoked when acquiring a RL_WRITER or RL_APPEND lock.
 * It must convert RL_APPEND to RL_WRITER (starting at the end of the file),
 * and may increase the range that's locked for RL_WRITER.
 */
void
rangelock_init(rangelock_t *rl, rangelock_cb_t *cb, void *arg)
{
	mutex_init(&rl->rl_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&rl->rl_tree, rangelock_compare,
	    sizeof (locked_range_t), offsetof(locked_range_t, lr_node));
	rl->rl_cb = cb;
	rl->rl_arg = arg;
}
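
/*
 * A hedged sketch of a callback satisfying the contract above;
 * get_file_size() is a placeholder for however the caller tracks
 * end-of-file:
 *
 *	static void
 *	example_cb(locked_range_t *new, void *arg)
 *	{
 *		if (new->lr_type == RL_APPEND) {
 *			new->lr_offset = get_file_size(arg);
 *			new->lr_type = RL_WRITER;
 *		}
 *	}
 */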

void
rangelock_fini(rangelock_t *rl)
{
	mutex_destroy(&rl->rl_lock);
	avl_destroy(&rl->rl_tree);
}

/*
 * Check if a write lock can be grabbed. If not, fail immediately or sleep and
 * recheck until available, depending on the value of the "nonblock" parameter.
 */
static boolean_t
rangelock_enter_writer(rangelock_t *rl, locked_range_t *new, boolean_t nonblock)
{
	avl_tree_t *tree = &rl->rl_tree;
	locked_range_t *lr;
	avl_index_t where;
	uint64_t orig_off = new->lr_offset;
	uint64_t orig_len = new->lr_length;
	rangelock_type_t orig_type = new->lr_type;

	for (;;) {
		/*
		 * Call callback which can modify new->lr_offset,length,type.
		 * Note, the callback is used by the ZPL to handle appending
		 * and changing blocksizes. It isn't needed for zvols.
		 */
		if (rl->rl_cb != NULL) {
			rl->rl_cb(new, rl->rl_arg);
		}

		/*
		 * If the type was APPEND, the callback must convert it to
		 * WRITER.
		 */
		ASSERT3U(new->lr_type, ==, RL_WRITER);

		/*
		 * First check for the usual case of no locks
		 */
		if (avl_numnodes(tree) == 0) {
			avl_add(tree, new);
			return (B_TRUE);
		}

		/*
		 * Look for any locks in the range.
		 */
		lr = avl_find(tree, new, &where);
		if (lr != NULL)
			goto wait; /* already locked at same offset */

		lr = (locked_range_t *)avl_nearest(tree, where, AVL_AFTER);
		if (lr != NULL &&
		    lr->lr_offset < new->lr_offset + new->lr_length)
			goto wait;

		lr = (locked_range_t *)avl_nearest(tree, where, AVL_BEFORE);
		if (lr != NULL &&
		    lr->lr_offset + lr->lr_length > new->lr_offset)
			goto wait;

		avl_insert(tree, new, where);
		return (B_TRUE);
wait:
		if (nonblock)
			return (B_FALSE);
		if (!lr->lr_write_wanted) {
			cv_init(&lr->lr_write_cv, NULL, CV_DEFAULT, NULL);
			lr->lr_write_wanted = B_TRUE;
		}
		cv_wait(&lr->lr_write_cv, &rl->rl_lock);

		/* reset to original */
		new->lr_offset = orig_off;
		new->lr_length = orig_len;
		new->lr_type = orig_type;
	}
}

/*
 * If this is an original (non-proxy) lock then replace it by
 * a proxy and return the proxy.
 */
static locked_range_t *
rangelock_proxify(avl_tree_t *tree, locked_range_t *lr)
{
	locked_range_t *proxy;

	if (lr->lr_proxy)
		return (lr); /* already a proxy */

	ASSERT3U(lr->lr_count, ==, 1);
	ASSERT(lr->lr_write_wanted == B_FALSE);
	ASSERT(lr->lr_read_wanted == B_FALSE);
	avl_remove(tree, lr);
	lr->lr_count = 0;

	/* create a proxy range lock */
	proxy = kmem_alloc(sizeof (locked_range_t), KM_SLEEP);
	proxy->lr_offset = lr->lr_offset;
	proxy->lr_length = lr->lr_length;
	proxy->lr_count = 1;
	proxy->lr_type = RL_READER;
	proxy->lr_proxy = B_TRUE;
	proxy->lr_write_wanted = B_FALSE;
	proxy->lr_read_wanted = B_FALSE;
	avl_add(tree, proxy);

	return (proxy);
}

/*
 * Split the range lock at the supplied offset
 * returning the *front* proxy.
 */
static locked_range_t *
rangelock_split(avl_tree_t *tree, locked_range_t *lr, uint64_t off)
{
	ASSERT3U(lr->lr_length, >, 1);
	ASSERT3U(off, >, lr->lr_offset);
	ASSERT3U(off, <, lr->lr_offset + lr->lr_length);
	ASSERT(lr->lr_write_wanted == B_FALSE);
	ASSERT(lr->lr_read_wanted == B_FALSE);

	/* create the rear proxy range lock */
	locked_range_t *rear = kmem_alloc(sizeof (locked_range_t), KM_SLEEP);
	rear->lr_offset = off;
	rear->lr_length = lr->lr_offset + lr->lr_length - off;
	rear->lr_count = lr->lr_count;
	rear->lr_type = RL_READER;
	rear->lr_proxy = B_TRUE;
	rear->lr_write_wanted = B_FALSE;
	rear->lr_read_wanted = B_FALSE;

	locked_range_t *front = rangelock_proxify(tree, lr);
	front->lr_length = off - lr->lr_offset;

	avl_insert_here(tree, rear, front, AVL_AFTER);
	return (front);
}
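
/*
 * Illustration (hedged): splitting a proxy covering [0, 100) at
 * off == 30 shrinks the front proxy to [0, 30) and inserts a rear
 * proxy [30, 100); both sides carry the range's prior reference count.
 */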

/*
 * Create and add a new proxy range lock for the supplied range.
 */
static void
rangelock_new_proxy(avl_tree_t *tree, uint64_t off, uint64_t len)
{
	ASSERT(len != 0);
	locked_range_t *lr = kmem_alloc(sizeof (locked_range_t), KM_SLEEP);
	lr->lr_offset = off;
	lr->lr_length = len;
	lr->lr_count = 1;
	lr->lr_type = RL_READER;
	lr->lr_proxy = B_TRUE;
	lr->lr_write_wanted = B_FALSE;
	lr->lr_read_wanted = B_FALSE;
	avl_add(tree, lr);
}

static void
rangelock_add_reader(avl_tree_t *tree, locked_range_t *new,
    locked_range_t *prev, avl_index_t where)
{
	locked_range_t *next;
	uint64_t off = new->lr_offset;
	uint64_t len = new->lr_length;

	/*
	 * prev arrives either:
	 * - pointing to an entry at the same offset
	 * - pointing to the entry with the closest previous offset whose
	 *   range may overlap with the new range
	 * - null, if there were no ranges starting before the new one
	 */
	if (prev != NULL) {
		if (prev->lr_offset + prev->lr_length <= off) {
			prev = NULL;
		} else if (prev->lr_offset != off) {
			/*
			 * convert to proxy if needed then
			 * split this entry and bump ref count
			 */
			prev = rangelock_split(tree, prev, off);
			prev = AVL_NEXT(tree, prev); /* move to rear range */
		}
	}
	ASSERT((prev == NULL) || (prev->lr_offset == off));

	if (prev != NULL)
		next = prev;
	else
		next = avl_nearest(tree, where, AVL_AFTER);

	if (next == NULL || off + len <= next->lr_offset) {
		/* no overlaps, use the original new locked_range_t */
		avl_insert(tree, new, where);
		return;
	}

	if (off < next->lr_offset) {
		/* Add a proxy for initial range before the overlap */
		rangelock_new_proxy(tree, off, next->lr_offset - off);
	}

	new->lr_count = 0; /* will use proxies in tree */

	/*
	 * We now search forward through the ranges, until we go past the end
	 * of the new range. For each entry we make it a proxy if it
	 * isn't already, then bump its reference count. If there's any
	 * gaps between the ranges then we create a new proxy range.
	 */
	for (prev = NULL; next; prev = next, next = AVL_NEXT(tree, next)) {
		if (off + len <= next->lr_offset)
			break;
		if (prev != NULL && prev->lr_offset + prev->lr_length <
		    next->lr_offset) {
			/* there's a gap */
			ASSERT3U(next->lr_offset, >,
			    prev->lr_offset + prev->lr_length);
			rangelock_new_proxy(tree,
			    prev->lr_offset + prev->lr_length,
			    next->lr_offset -
			    (prev->lr_offset + prev->lr_length));
		}
		if (off + len == next->lr_offset + next->lr_length) {
			/* exact overlap with end */
			next = rangelock_proxify(tree, next);
			next->lr_count++;
			return;
		}
		if (off + len < next->lr_offset + next->lr_length) {
			/* new range ends in the middle of this block */
			next = rangelock_split(tree, next, off + len);
			next->lr_count++;
			return;
		}
		ASSERT3U(off + len, >, next->lr_offset + next->lr_length);
		next = rangelock_proxify(tree, next);
		next->lr_count++;
	}

	/* Add the remaining end range. */
	rangelock_new_proxy(tree, prev->lr_offset + prev->lr_length,
	    (off + len) - (prev->lr_offset + prev->lr_length));
}

/*
 * Check if a reader lock can be grabbed. If not, fail immediately or sleep and
 * recheck until available, depending on the value of the "nonblock" parameter.
 */
static boolean_t
rangelock_enter_reader(rangelock_t *rl, locked_range_t *new, boolean_t nonblock)
{
	avl_tree_t *tree = &rl->rl_tree;
	locked_range_t *prev, *next;
	avl_index_t where;
	uint64_t off = new->lr_offset;
	uint64_t len = new->lr_length;

	/*
	 * Look for any writer locks in the range.
	 */
retry:
	prev = avl_find(tree, new, &where);
	if (prev == NULL)
		prev = (locked_range_t *)avl_nearest(tree, where, AVL_BEFORE);

	/*
	 * Check the previous range for a writer lock overlap.
	 */
	if (prev && (off < prev->lr_offset + prev->lr_length)) {
		if ((prev->lr_type == RL_WRITER) || (prev->lr_write_wanted)) {
			if (nonblock)
				return (B_FALSE);
			if (!prev->lr_read_wanted) {
				cv_init(&prev->lr_read_cv,
				    NULL, CV_DEFAULT, NULL);
				prev->lr_read_wanted = B_TRUE;
			}
			cv_wait(&prev->lr_read_cv, &rl->rl_lock);
			goto retry;
		}
		if (off + len < prev->lr_offset + prev->lr_length)
			goto got_lock;
	}

	/*
	 * Search through the following ranges to see if there's
	 * any write lock overlap.
	 */
	if (prev != NULL)
		next = AVL_NEXT(tree, prev);
	else
		next = (locked_range_t *)avl_nearest(tree, where, AVL_AFTER);
	for (; next != NULL; next = AVL_NEXT(tree, next)) {
		if (off + len <= next->lr_offset)
			goto got_lock;
		if ((next->lr_type == RL_WRITER) || (next->lr_write_wanted)) {
			if (nonblock)
				return (B_FALSE);
			if (!next->lr_read_wanted) {
				cv_init(&next->lr_read_cv,
				    NULL, CV_DEFAULT, NULL);
				next->lr_read_wanted = B_TRUE;
			}
			cv_wait(&next->lr_read_cv, &rl->rl_lock);
			goto retry;
		}
		if (off + len <= next->lr_offset + next->lr_length)
			goto got_lock;
	}

got_lock:
	/*
	 * Add the read lock, which may involve splitting existing
	 * locks and bumping ref counts (lr_count).
	 */
	rangelock_add_reader(tree, new, prev, where);
	return (B_TRUE);
}

/*
 * Lock a range (offset, length) as either shared (RL_READER) or exclusive
 * (RL_WRITER or RL_APPEND). If RL_APPEND is specified, rl_cb() will convert
 * it to a RL_WRITER lock (with the offset at the end of the file). Returns
 * the range lock structure for later unlocking (or reduce range if the
 * entire file is locked as RL_WRITER).
 */
static locked_range_t *
_rangelock_enter(rangelock_t *rl, uint64_t off, uint64_t len,
    rangelock_type_t type, boolean_t nonblock)
{
	ASSERT(type == RL_READER || type == RL_WRITER || type == RL_APPEND);

	locked_range_t *new = kmem_alloc(sizeof (*new), KM_SLEEP);
	new->lr_rangelock = rl;
	new->lr_offset = off;
	if (len + off < off)	/* overflow */
		len = UINT64_MAX - off;
	new->lr_length = len;
	new->lr_count = 1; /* assume it's going to be in the tree */
	new->lr_type = type;
	new->lr_proxy = B_FALSE;
	new->lr_write_wanted = B_FALSE;
	new->lr_read_wanted = B_FALSE;

	mutex_enter(&rl->rl_lock);
	if (type == RL_READER) {
		/*
		 * First check for the usual case of no locks
		 */
		if (avl_numnodes(&rl->rl_tree) == 0) {
			avl_add(&rl->rl_tree, new);
		} else if (!rangelock_enter_reader(rl, new, nonblock)) {
			kmem_free(new, sizeof (*new));
			new = NULL;
		}
	} else if (!rangelock_enter_writer(rl, new, nonblock)) {
		kmem_free(new, sizeof (*new));
		new = NULL;
	}
	mutex_exit(&rl->rl_lock);
	return (new);
}

locked_range_t *
rangelock_enter(rangelock_t *rl, uint64_t off, uint64_t len,
    rangelock_type_t type)
{
	return (_rangelock_enter(rl, off, len, type, B_FALSE));
}

locked_range_t *
rangelock_tryenter(rangelock_t *rl, uint64_t off, uint64_t len,
    rangelock_type_t type)
{
	return (_rangelock_enter(rl, off, len, type, B_TRUE));
}
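
/*
 * A hedged usage sketch for the non-blocking variant; the fallback
 * policy is the caller's choice:
 *
 *	lr = rangelock_tryenter(rl, off, len, RL_WRITER);
 *	if (lr == NULL) {
 *		... contended: back off, or block via rangelock_enter() ...
 *	}
 */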

/*
 * Unlock a reader lock
 */
static void
rangelock_exit_reader(rangelock_t *rl, locked_range_t *remove)
{
	avl_tree_t *tree = &rl->rl_tree;
	uint64_t len;

	/*
	 * The common case is when the remove entry is in the tree
	 * (lr_count == 1) meaning there have been no other reader locks
	 * overlapping with this one. Otherwise the remove entry will have
	 * been removed from the tree and replaced by proxies (one or
	 * more ranges mapping to the entire range).
	 */
	if (remove->lr_count == 1) {
		avl_remove(tree, remove);
		if (remove->lr_write_wanted) {
			cv_broadcast(&remove->lr_write_cv);
			cv_destroy(&remove->lr_write_cv);
		}
		if (remove->lr_read_wanted) {
			cv_broadcast(&remove->lr_read_cv);
			cv_destroy(&remove->lr_read_cv);
		}
	} else {
		ASSERT0(remove->lr_count);
		ASSERT0(remove->lr_write_wanted);
		ASSERT0(remove->lr_read_wanted);
		/*
		 * Find start proxy representing this reader lock,
		 * then decrement ref count on all proxies
		 * that make up this range, freeing them as needed.
		 */
		locked_range_t *lr = avl_find(tree, remove, NULL);
		ASSERT3P(lr, !=, NULL);
		ASSERT3U(lr->lr_count, !=, 0);
		ASSERT3U(lr->lr_type, ==, RL_READER);
		locked_range_t *next = NULL;
		for (len = remove->lr_length; len != 0; lr = next) {
			len -= lr->lr_length;
			if (len != 0) {
				next = AVL_NEXT(tree, lr);
				ASSERT3P(next, !=, NULL);
				ASSERT3U(lr->lr_offset + lr->lr_length, ==,
				    next->lr_offset);
				ASSERT3U(next->lr_count, !=, 0);
				ASSERT3U(next->lr_type, ==, RL_READER);
			}
			lr->lr_count--;
			if (lr->lr_count == 0) {
				avl_remove(tree, lr);
				if (lr->lr_write_wanted) {
					cv_broadcast(&lr->lr_write_cv);
					cv_destroy(&lr->lr_write_cv);
				}
				if (lr->lr_read_wanted) {
					cv_broadcast(&lr->lr_read_cv);
					cv_destroy(&lr->lr_read_cv);
				}
				kmem_free(lr, sizeof (locked_range_t));
			}
		}
	}
	kmem_free(remove, sizeof (locked_range_t));
}

/*
 * Unlock range and destroy range lock structure.
 */
void
rangelock_exit(locked_range_t *lr)
{
	rangelock_t *rl = lr->lr_rangelock;

	ASSERT(lr->lr_type == RL_WRITER || lr->lr_type == RL_READER);
	ASSERT(lr->lr_count == 1 || lr->lr_count == 0);
	ASSERT(!lr->lr_proxy);

	mutex_enter(&rl->rl_lock);
	if (lr->lr_type == RL_WRITER) {
		/* writer locks can't be shared or split */
		avl_remove(&rl->rl_tree, lr);
		mutex_exit(&rl->rl_lock);
		if (lr->lr_write_wanted) {
			cv_broadcast(&lr->lr_write_cv);
			cv_destroy(&lr->lr_write_cv);
		}
		if (lr->lr_read_wanted) {
			cv_broadcast(&lr->lr_read_cv);
			cv_destroy(&lr->lr_read_cv);
		}
		kmem_free(lr, sizeof (locked_range_t));
	} else {
		/*
		 * Lock may be shared; let rangelock_exit_reader()
		 * release the lock and free the locked_range_t.
		 */
		rangelock_exit_reader(rl, lr);
		mutex_exit(&rl->rl_lock);
	}
}

/*
 * Reduce range locked as RL_WRITER from whole file to specified range.
 * Asserts the whole file is exclusively locked and so there's only one
 * entry in the tree.
 */
void
rangelock_reduce(locked_range_t *lr, uint64_t off, uint64_t len)
{
	rangelock_t *rl = lr->lr_rangelock;

	/* Ensure there are no other locks */
	ASSERT3U(avl_numnodes(&rl->rl_tree), ==, 1);
	ASSERT3U(lr->lr_offset, ==, 0);
	ASSERT3U(lr->lr_type, ==, RL_WRITER);
	ASSERT(!lr->lr_proxy);
	ASSERT3U(lr->lr_length, ==, UINT64_MAX);
	ASSERT3U(lr->lr_count, ==, 1);

	mutex_enter(&rl->rl_lock);
	lr->lr_offset = off;
	lr->lr_length = len;
	mutex_exit(&rl->rl_lock);
	if (lr->lr_write_wanted)
		cv_broadcast(&lr->lr_write_cv);
	if (lr->lr_read_wanted)
		cv_broadcast(&lr->lr_read_cv);
}