/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * standalone DLM module
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
#include "cluster/masklog.h"

static void dlm_mle_node_down(struct dlm_ctxt *dlm,
			      struct dlm_master_list_entry *mle,
			      struct o2nm_node *node,
			      int idx);
static void dlm_mle_node_up(struct dlm_ctxt *dlm,
			    struct dlm_master_list_entry *mle,
			    struct o2nm_node *node,
			    int idx);

static void dlm_assert_master_worker(struct dlm_work_item *item, void *data);
static int dlm_do_assert_master(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res,
				void *nodemap, u32 flags);
static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data);

static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
				struct dlm_master_list_entry *mle,
				const char *name,
				unsigned int namelen)
{
	if (namelen != mle->mnamelen ||
	    memcmp(name, mle->mname, namelen) != 0)
		return 0;
	return 1;
}

static struct kmem_cache *dlm_lockres_cache;
static struct kmem_cache *dlm_lockname_cache;
static struct kmem_cache *dlm_mle_cache;

static void dlm_mle_release(struct kref *kref);
static void dlm_init_mle(struct dlm_master_list_entry *mle,
			 enum dlm_mle_type type,
			 struct dlm_ctxt *dlm,
			 struct dlm_lock_resource *res,
			 const char *name,
			 unsigned int namelen);
static void dlm_put_mle(struct dlm_master_list_entry *mle);
static void __dlm_put_mle(struct dlm_master_list_entry *mle);
static int dlm_find_mle(struct dlm_ctxt *dlm,
			struct dlm_master_list_entry **mle,
			char *name, unsigned int namelen);

static int dlm_do_master_request(struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle, int to);

static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_master_list_entry *mle,
				     int *blocked);
static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res,
				    struct dlm_master_list_entry *mle,
				    int blocked);
static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
				 struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle,
				 struct dlm_master_list_entry **oldmle,
				 const char *name, unsigned int namelen,
				 u8 new_master, u8 master);

static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res);
static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res);
static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 target);
static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res);

int dlm_is_host_down(int errno)
{
	switch (errno) {
	case -EBADF:
	case -ECONNREFUSED:
	case -ENOTCONN:
	case -ECONNRESET:
	case -EPIPE:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ETIMEDOUT:
	case -ECONNABORTED:
	case -ENETDOWN:
	case -ENETUNREACH:
	case -ENETRESET:
	case -ESHUTDOWN:
	case -ENOPROTOOPT:
	case -EINVAL:	/* if returned from our tcp code,
			   this means there is no socket */
		return 1;
	}
	return 0;
}

/*
 * MASTER LIST FUNCTIONS
 */


/*
 * regarding master list entries and heartbeat callbacks:
 *
 * in order to avoid sleeping and allocation that occurs in
 * heartbeat, master list entries are simply attached to the
 * dlm's established heartbeat callbacks.  the mle is attached
 * when it is created, and since the dlm->spinlock is held at
 * that time, any heartbeat event will be properly discovered
 * by the mle.  the mle needs to be detached from the
 * dlm->mle_hb_events list as soon as heartbeat events are no
 * longer useful to the mle, and before the mle is freed.
 *
 * as a general rule, heartbeat events are no longer needed by
 * the mle once an "answer" regarding the lock master has been
 * received.
 */

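/*
 * a minimal sketch of that lifecycle, for illustration only (the real
 * callers are dlm_get_lock_resource and the migration paths below;
 * error handling and the inuse refcounting are omitted here):
 *
 *	spin_lock(&dlm->spinlock);
 *	spin_lock(&dlm->master_lock);
 *	dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
 *						<-- attaches hb events
 *	__dlm_insert_mle(dlm, mle);
 *	spin_unlock(&dlm->master_lock);
 *	spin_unlock(&dlm->spinlock);
 *
 *	... master requests go out, an answer comes back ...
 *
 *	dlm_mle_detach_hb_events(dlm, mle);	<-- answer known, detach
 *	dlm_put_mle(mle);			<-- may free the mle
 */
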
static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
					      struct dlm_master_list_entry *mle)
{
	assert_spin_locked(&dlm->spinlock);

	list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
}


static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
					      struct dlm_master_list_entry *mle)
{
	if (!list_empty(&mle->hb_events))
		list_del_init(&mle->hb_events);
}


static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
					    struct dlm_master_list_entry *mle)
{
	spin_lock(&dlm->spinlock);
	__dlm_mle_detach_hb_events(dlm, mle);
	spin_unlock(&dlm->spinlock);
}

static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);
	mle->inuse++;
	kref_get(&mle->mle_refs);
}

static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	mle->inuse--;
	__dlm_put_mle(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);
}

/* remove from list and free */
static void __dlm_put_mle(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);
	if (!atomic_read(&mle->mle_refs.refcount)) {
		/* this may or may not crash, but who cares.
		 * it's a BUG. */
		mlog(ML_ERROR, "bad mle: %p\n", mle);
		dlm_print_one_mle(mle);
		BUG();
	} else
		kref_put(&mle->mle_refs, dlm_mle_release);
}

/* must not have any spinlocks coming in */
static void dlm_put_mle(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	__dlm_put_mle(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);
}

static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
{
	kref_get(&mle->mle_refs);
}

static void dlm_init_mle(struct dlm_master_list_entry *mle,
			 enum dlm_mle_type type,
			 struct dlm_ctxt *dlm,
			 struct dlm_lock_resource *res,
			 const char *name,
			 unsigned int namelen)
{
	assert_spin_locked(&dlm->spinlock);

	mle->dlm = dlm;
	mle->type = type;
	INIT_HLIST_NODE(&mle->master_hash_node);
	INIT_LIST_HEAD(&mle->hb_events);
	memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
	spin_lock_init(&mle->spinlock);
	init_waitqueue_head(&mle->wq);
	atomic_set(&mle->woken, 0);
	kref_init(&mle->mle_refs);
	memset(mle->response_map, 0, sizeof(mle->response_map));
	mle->master = O2NM_MAX_NODES;
	mle->new_master = O2NM_MAX_NODES;
	mle->inuse = 0;

	BUG_ON(mle->type != DLM_MLE_BLOCK &&
	       mle->type != DLM_MLE_MASTER &&
	       mle->type != DLM_MLE_MIGRATION);

	if (mle->type == DLM_MLE_MASTER) {
		BUG_ON(!res);
		mle->mleres = res;
		memcpy(mle->mname, res->lockname.name, res->lockname.len);
		mle->mnamelen = res->lockname.len;
		mle->mnamehash = res->lockname.hash;
	} else {
		BUG_ON(!name);
		mle->mleres = NULL;
		memcpy(mle->mname, name, namelen);
		mle->mnamelen = namelen;
		mle->mnamehash = dlm_lockid_hash(name, namelen);
	}

	atomic_inc(&dlm->mle_tot_count[mle->type]);
	atomic_inc(&dlm->mle_cur_count[mle->type]);

	/* copy off the node_map and register hb callbacks on our copy */
	memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
	memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
	clear_bit(dlm->node_num, mle->vote_map);
	clear_bit(dlm->node_num, mle->node_map);

	/* attach the mle to the domain node up/down events */
	__dlm_mle_attach_hb_events(dlm, mle);
}

void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
{
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);

	if (!hlist_unhashed(&mle->master_hash_node))
		hlist_del_init(&mle->master_hash_node);
}

void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
{
	struct hlist_head *bucket;

	assert_spin_locked(&dlm->master_lock);

	bucket = dlm_master_hash(dlm, mle->mnamehash);
	hlist_add_head(&mle->master_hash_node, bucket);
}

/* returns 1 if found, 0 if not */
static int dlm_find_mle(struct dlm_ctxt *dlm,
			struct dlm_master_list_entry **mle,
			char *name, unsigned int namelen)
{
	struct dlm_master_list_entry *tmpmle;
	struct hlist_head *bucket;
	unsigned int hash;

	assert_spin_locked(&dlm->master_lock);

	hash = dlm_lockid_hash(name, namelen);
	bucket = dlm_master_hash(dlm, hash);
	hlist_for_each_entry(tmpmle, bucket, master_hash_node) {
		if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
			continue;
		dlm_get_mle(tmpmle);
		*mle = tmpmle;
		return 1;
	}
	return 0;
}

void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
{
	struct dlm_master_list_entry *mle;

	assert_spin_locked(&dlm->spinlock);

	list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
		if (node_up)
			dlm_mle_node_up(dlm, mle, NULL, idx);
		else
			dlm_mle_node_down(dlm, mle, NULL, idx);
	}
}

static void dlm_mle_node_down(struct dlm_ctxt *dlm,
			      struct dlm_master_list_entry *mle,
			      struct o2nm_node *node, int idx)
{
	spin_lock(&mle->spinlock);

	if (!test_bit(idx, mle->node_map))
		mlog(0, "node %u already removed from nodemap!\n", idx);
	else
		clear_bit(idx, mle->node_map);

	spin_unlock(&mle->spinlock);
}

static void dlm_mle_node_up(struct dlm_ctxt *dlm,
			    struct dlm_master_list_entry *mle,
			    struct o2nm_node *node, int idx)
{
	spin_lock(&mle->spinlock);

	if (test_bit(idx, mle->node_map))
		mlog(0, "node %u already in node map!\n", idx);
	else
		set_bit(idx, mle->node_map);

	spin_unlock(&mle->spinlock);
}

int dlm_init_mle_cache(void)
{
	dlm_mle_cache = kmem_cache_create("o2dlm_mle",
					  sizeof(struct dlm_master_list_entry),
					  0, SLAB_HWCACHE_ALIGN,
					  NULL);
	if (dlm_mle_cache == NULL)
		return -ENOMEM;
	return 0;
}

void dlm_destroy_mle_cache(void)
{
	if (dlm_mle_cache)
		kmem_cache_destroy(dlm_mle_cache);
}

static void dlm_mle_release(struct kref *kref)
{
	struct dlm_master_list_entry *mle;
	struct dlm_ctxt *dlm;

	mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
	dlm = mle->dlm;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);

	mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname,
	     mle->type);

	/* remove from list if not already */
	__dlm_unlink_mle(dlm, mle);

	/* detach the mle from the domain node up/down events */
	__dlm_mle_detach_hb_events(dlm, mle);

	atomic_dec(&dlm->mle_cur_count[mle->type]);

	/* NOTE: kfree under spinlock here.
	 * if this is bad, we can move this to a freelist. */
	kmem_cache_free(dlm_mle_cache, mle);
}

/*
 * LOCK RESOURCE FUNCTIONS
 */

int dlm_init_master_caches(void)
{
	dlm_lockres_cache = kmem_cache_create("o2dlm_lockres",
					      sizeof(struct dlm_lock_resource),
					      0, SLAB_HWCACHE_ALIGN, NULL);
	if (!dlm_lockres_cache)
		goto bail;

	dlm_lockname_cache = kmem_cache_create("o2dlm_lockname",
					       DLM_LOCKID_NAME_MAX, 0,
					       SLAB_HWCACHE_ALIGN, NULL);
	if (!dlm_lockname_cache)
		goto bail;

	return 0;
bail:
	dlm_destroy_master_caches();
	return -ENOMEM;
}

void dlm_destroy_master_caches(void)
{
	if (dlm_lockname_cache) {
		kmem_cache_destroy(dlm_lockname_cache);
		dlm_lockname_cache = NULL;
	}

	if (dlm_lockres_cache) {
		kmem_cache_destroy(dlm_lockres_cache);
		dlm_lockres_cache = NULL;
	}
}

static void dlm_lockres_release(struct kref *kref)
{
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm;

	res = container_of(kref, struct dlm_lock_resource, refs);
	dlm = res->dlm;

	/* This should not happen -- all lockres' have a name
	 * associated with them at init time. */
	BUG_ON(!res->lockname.name);

	mlog(0, "destroying lockres %.*s\n", res->lockname.len,
	     res->lockname.name);

	spin_lock(&dlm->track_lock);
	if (!list_empty(&res->tracking))
		list_del_init(&res->tracking);
	else {
		mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n",
		     res->lockname.len, res->lockname.name);
		dlm_print_one_lock_resource(res);
	}
	spin_unlock(&dlm->track_lock);

	atomic_dec(&dlm->res_cur_count);

	if (!hlist_unhashed(&res->hash_node) ||
	    !list_empty(&res->granted) ||
	    !list_empty(&res->converting) ||
	    !list_empty(&res->blocked) ||
	    !list_empty(&res->dirty) ||
	    !list_empty(&res->recovering) ||
	    !list_empty(&res->purge)) {
		mlog(ML_ERROR,
		     "Going to BUG for resource %.*s."
		     " We're on a list! [%c%c%c%c%c%c%c]\n",
		     res->lockname.len, res->lockname.name,
		     !hlist_unhashed(&res->hash_node) ? 'H' : ' ',
		     !list_empty(&res->granted) ? 'G' : ' ',
		     !list_empty(&res->converting) ? 'C' : ' ',
		     !list_empty(&res->blocked) ? 'B' : ' ',
		     !list_empty(&res->dirty) ? 'D' : ' ',
		     !list_empty(&res->recovering) ? 'R' : ' ',
		     !list_empty(&res->purge) ? 'P' : ' ');
		dlm_print_one_lock_resource(res);
	}

	/* By the time we're ready to blow this guy away, we shouldn't
	 * be on any lists. */
	BUG_ON(!hlist_unhashed(&res->hash_node));
	BUG_ON(!list_empty(&res->granted));
	BUG_ON(!list_empty(&res->converting));
	BUG_ON(!list_empty(&res->blocked));
	BUG_ON(!list_empty(&res->dirty));
	BUG_ON(!list_empty(&res->recovering));
	BUG_ON(!list_empty(&res->purge));

	kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);

	kmem_cache_free(dlm_lockres_cache, res);
}

void dlm_lockres_put(struct dlm_lock_resource *res)
{
	kref_put(&res->refs, dlm_lockres_release);
}

static void dlm_init_lockres(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res,
			     const char *name, unsigned int namelen)
{
	char *qname;

	/* If we memset here, we lose our reference to the kmalloc'd
	 * res->lockname.name, so be sure to init every field
	 * correctly! */

	qname = (char *) res->lockname.name;
	memcpy(qname, name, namelen);

	res->lockname.len = namelen;
	res->lockname.hash = dlm_lockid_hash(name, namelen);

	init_waitqueue_head(&res->wq);
	spin_lock_init(&res->spinlock);
	INIT_HLIST_NODE(&res->hash_node);
	INIT_LIST_HEAD(&res->granted);
	INIT_LIST_HEAD(&res->converting);
	INIT_LIST_HEAD(&res->blocked);
	INIT_LIST_HEAD(&res->dirty);
	INIT_LIST_HEAD(&res->recovering);
	INIT_LIST_HEAD(&res->purge);
	INIT_LIST_HEAD(&res->tracking);
	atomic_set(&res->asts_reserved, 0);
	res->migration_pending = 0;
	res->inflight_locks = 0;
	res->inflight_assert_workers = 0;

	res->dlm = dlm;

	kref_init(&res->refs);

	atomic_inc(&dlm->res_tot_count);
	atomic_inc(&dlm->res_cur_count);

	/* just for consistency */
	spin_lock(&res->spinlock);
	dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
	spin_unlock(&res->spinlock);

	res->state = DLM_LOCK_RES_IN_PROGRESS;

	res->last_used = 0;

	spin_lock(&dlm->spinlock);
	list_add_tail(&res->tracking, &dlm->tracking_list);
	spin_unlock(&dlm->spinlock);

	memset(res->lvb, 0, DLM_LVB_LEN);
	memset(res->refmap, 0, sizeof(res->refmap));
}

struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
					  const char *name,
					  unsigned int namelen)
{
	struct dlm_lock_resource *res = NULL;

	res = kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS);
	if (!res)
		goto error;

	res->lockname.name = kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS);
	if (!res->lockname.name)
		goto error;

	dlm_init_lockres(dlm, res, name, namelen);
	return res;

error:
	if (res && res->lockname.name)
		kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);

	if (res)
		kmem_cache_free(dlm_lockres_cache, res);
	return NULL;
}

void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res, int bit)
{
	assert_spin_locked(&res->spinlock);

	mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len,
	     res->lockname.name, bit, __builtin_return_address(0));

	set_bit(bit, res->refmap);
}

void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
				  struct dlm_lock_resource *res, int bit)
{
	assert_spin_locked(&res->spinlock);

	mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len,
	     res->lockname.name, bit, __builtin_return_address(0));

	clear_bit(bit, res->refmap);
}

void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
				   struct dlm_lock_resource *res)
{
	assert_spin_locked(&res->spinlock);

	res->inflight_locks++;

	mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name,
	     res->lockname.len, res->lockname.name, res->inflight_locks,
	     __builtin_return_address(0));
}

void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
				   struct dlm_lock_resource *res)
{
	assert_spin_locked(&res->spinlock);

	BUG_ON(res->inflight_locks == 0);

	res->inflight_locks--;

	mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name,
	     res->lockname.len, res->lockname.name, res->inflight_locks,
	     __builtin_return_address(0));

	wake_up(&res->wq);
}

void __dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
					struct dlm_lock_resource *res)
{
	assert_spin_locked(&res->spinlock);
	res->inflight_assert_workers++;
	mlog(0, "%s:%.*s: inflight assert worker++: now %u\n",
	     dlm->name, res->lockname.len, res->lockname.name,
	     res->inflight_assert_workers);
}

static void dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
					     struct dlm_lock_resource *res)
{
	spin_lock(&res->spinlock);
	__dlm_lockres_grab_inflight_worker(dlm, res);
	spin_unlock(&res->spinlock);
}

static void __dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm,
					       struct dlm_lock_resource *res)
{
	assert_spin_locked(&res->spinlock);
	BUG_ON(res->inflight_assert_workers == 0);
	res->inflight_assert_workers--;
	mlog(0, "%s:%.*s: inflight assert worker--: now %u\n",
	     dlm->name, res->lockname.len, res->lockname.name,
	     res->inflight_assert_workers);
}

static void dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm,
					     struct dlm_lock_resource *res)
{
	spin_lock(&res->spinlock);
	__dlm_lockres_drop_inflight_worker(dlm, res);
	spin_unlock(&res->spinlock);
}

/*
 * lookup a lock resource by name.
 * may already exist in the hashtable.
 * lockid is null terminated
 *
 * if not, allocate enough for the lockres and for
 * the temporary structure used in doing the mastering.
 *
 * also, do a lookup in the dlm->master_list to see
 * if another node has begun mastering the same lock.
 * if so, there should be a block entry in there
 * for this name, and we should *not* attempt to master
 * the lock here. need to wait around for that node
 * to assert_master (or die).
 *
 */

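/*
 * a rough sketch of the calling pattern, for orientation only (dlmlock()
 * is the real caller; the status code and error handling here are
 * illustrative, not copied from that code):
 *
 *	struct dlm_lock_resource *res;
 *
 *	res = dlm_get_lock_resource(dlm, name, namelen, flags);
 *	if (!res)
 *		return DLM_IVLOCKID;	<-- allocation failed
 *	... use res; an inflight reference is already held ...
 *	dlm_lockres_put(res);
 */
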
struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
						 const char *lockid,
						 int namelen,
						 int flags)
{
	struct dlm_lock_resource *tmpres=NULL, *res=NULL;
	struct dlm_master_list_entry *mle = NULL;
	struct dlm_master_list_entry *alloc_mle = NULL;
	int blocked = 0;
	int ret, nodenum;
	struct dlm_node_iter iter;
	unsigned int hash;
	int tries = 0;
	int bit, wait_on_recovery = 0;

	BUG_ON(!lockid);

	hash = dlm_lockid_hash(lockid, namelen);

	mlog(0, "get lockres %s (len %d)\n", lockid, namelen);

lookup:
	spin_lock(&dlm->spinlock);
	tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
	if (tmpres) {
		spin_unlock(&dlm->spinlock);
		spin_lock(&tmpres->spinlock);
		/* Wait on the thread that is mastering the resource */
		if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
			__dlm_wait_on_lockres(tmpres);
			BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN);
			spin_unlock(&tmpres->spinlock);
			dlm_lockres_put(tmpres);
			tmpres = NULL;
			goto lookup;
		}

		/* Wait on the resource purge to complete before continuing */
		if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) {
			BUG_ON(tmpres->owner == dlm->node_num);
			__dlm_wait_on_lockres_flags(tmpres,
						    DLM_LOCK_RES_DROPPING_REF);
			spin_unlock(&tmpres->spinlock);
			dlm_lockres_put(tmpres);
			tmpres = NULL;
			goto lookup;
		}

		/* Grab inflight ref to pin the resource */
		dlm_lockres_grab_inflight_ref(dlm, tmpres);

		spin_unlock(&tmpres->spinlock);
		if (res)
			dlm_lockres_put(res);
		res = tmpres;
		goto leave;
	}

	if (!res) {
		spin_unlock(&dlm->spinlock);
		mlog(0, "allocating a new resource\n");
		/* nothing found and we need to allocate one. */
		alloc_mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
		if (!alloc_mle)
			goto leave;
		res = dlm_new_lockres(dlm, lockid, namelen);
		if (!res)
			goto leave;
		goto lookup;
	}

	mlog(0, "no lockres found, allocated our own: %p\n", res);

	if (flags & LKM_LOCAL) {
		/* caller knows it's safe to assume it's not mastered elsewhere
		 * DONE!  return right away */
		spin_lock(&res->spinlock);
		dlm_change_lockres_owner(dlm, res, dlm->node_num);
		__dlm_insert_lockres(dlm, res);
		dlm_lockres_grab_inflight_ref(dlm, res);
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);
		/* lockres still marked IN_PROGRESS */
		goto wake_waiters;
	}

	/* check master list to see if another node has started mastering it */
	spin_lock(&dlm->master_lock);

	/* if we found a block, wait for lock to be mastered by another node */
	blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
	if (blocked) {
		int mig;
		if (mle->type == DLM_MLE_MASTER) {
			mlog(ML_ERROR, "master entry for nonexistent lock!\n");
			BUG();
		}
		mig = (mle->type == DLM_MLE_MIGRATION);
		/* if there is a migration in progress, let the migration
		 * finish before continuing.  we can wait for the absence
		 * of the MIGRATION mle: either the migrate finished or
		 * one of the nodes died and the mle was cleaned up.
		 * if there is a BLOCK here, but it already has a master
		 * set, we are too late.  the master does not have a ref
		 * for us in the refmap.  detach the mle and drop it.
		 * either way, go back to the top and start over. */
		if (mig || mle->master != O2NM_MAX_NODES) {
			BUG_ON(mig && mle->master == dlm->node_num);
			/* we arrived too late.  the master does not
			 * have a ref for us. retry. */
			mlog(0, "%s:%.*s: late on %s\n",
			     dlm->name, namelen, lockid,
			     mig ?  "MIGRATION" : "BLOCK");
			spin_unlock(&dlm->master_lock);
			spin_unlock(&dlm->spinlock);

			/* master is known, detach */
			if (!mig)
				dlm_mle_detach_hb_events(dlm, mle);
			dlm_put_mle(mle);
			mle = NULL;

			/* this is lame, but we can't wait on either
			 * the mle or lockres waitqueue here */
			msleep(100);
			goto lookup;
		}
	} else {
		/* go ahead and try to master lock on this node */
		mle = alloc_mle;
		/* make sure this does not get freed below */
		alloc_mle = NULL;
		dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
		set_bit(dlm->node_num, mle->maybe_map);
		__dlm_insert_mle(dlm, mle);

		/* still holding the dlm spinlock, check the recovery map
		 * to see if there are any nodes that still need to be
		 * considered.  these will not appear in the mle nodemap
		 * but they might own this lockres.  wait on them. */
		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
		if (bit < O2NM_MAX_NODES) {
			mlog(0, "%s: res %.*s, At least one node (%d) "
			     "to recover before lock mastery can begin\n",
			     dlm->name, namelen, (char *)lockid, bit);
			wait_on_recovery = 1;
		}
	}

	/* at this point there is either a DLM_MLE_BLOCK or a
	 * DLM_MLE_MASTER on the master list, so it's safe to add the
	 * lockres to the hashtable.  anyone who finds the lock will
	 * still have to wait on the IN_PROGRESS. */

	/* finally add the lockres to its hash bucket */
	__dlm_insert_lockres(dlm, res);

	/* Grab inflight ref to pin the resource */
	spin_lock(&res->spinlock);
	dlm_lockres_grab_inflight_ref(dlm, res);
	spin_unlock(&res->spinlock);

	/* get an extra ref on the mle in case this is a BLOCK
	 * if so, the creator of the BLOCK may try to put the last
	 * ref at this time in the assert master handler, so we
	 * need an extra one to keep from a bad ptr deref. */
	dlm_get_mle_inuse(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

redo_request:
	while (wait_on_recovery) {
		/* any cluster changes that occurred after dropping the
		 * dlm spinlock would be detectable by a change on the mle,
		 * so we only need to clear out the recovery map once. */
		if (dlm_is_recovery_lock(lockid, namelen)) {
			mlog(0, "%s: Recovery map is not empty, but must "
			     "master $RECOVERY lock now\n", dlm->name);
			if (!dlm_pre_master_reco_lockres(dlm, res))
				wait_on_recovery = 0;
			else {
				mlog(0, "%s: waiting 500ms for heartbeat state "
				     "change\n", dlm->name);
				msleep(500);
			}
			continue;
		}

		dlm_kick_recovery_thread(dlm);
		msleep(1000);
		dlm_wait_for_recovery(dlm);

		spin_lock(&dlm->spinlock);
		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
		if (bit < O2NM_MAX_NODES) {
			mlog(0, "%s: res %.*s, At least one node (%d) "
			     "to recover before lock mastery can begin\n",
			     dlm->name, namelen, (char *)lockid, bit);
			wait_on_recovery = 1;
		} else
			wait_on_recovery = 0;
		spin_unlock(&dlm->spinlock);

		if (wait_on_recovery)
			dlm_wait_for_node_recovery(dlm, bit, 10000);
	}

	/* must wait for lock to be mastered elsewhere */
	if (blocked)
		goto wait;

	ret = -EINVAL;
	dlm_node_iter_init(mle->vote_map, &iter);
	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		ret = dlm_do_master_request(res, mle, nodenum);
		if (ret < 0)
			mlog_errno(ret);
		if (mle->master != O2NM_MAX_NODES) {
			/* found a master ! */
			if (mle->master <= nodenum)
				break;
			/* if our master request has not reached the master
			 * yet, keep going until it does.  this is how the
			 * master will know that asserts are needed back to
			 * the lower nodes. */
			mlog(0, "%s: res %.*s, Requests only up to %u but "
			     "master is %u, keep going\n", dlm->name, namelen,
			     lockid, nodenum, mle->master);
		}
	}

wait:
	/* keep going until the response map includes all nodes */
	ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
	if (ret < 0) {
		wait_on_recovery = 1;
		mlog(0, "%s: res %.*s, Node map changed, redo the master "
		     "request now, blocked=%d\n", dlm->name, res->lockname.len,
		     res->lockname.name, blocked);
		if (++tries > 20) {
			mlog(ML_ERROR, "%s: res %.*s, Spinning on "
			     "dlm_wait_for_lock_mastery, blocked = %d\n",
			     dlm->name, res->lockname.len,
			     res->lockname.name, blocked);
			dlm_print_one_lock_resource(res);
			dlm_print_one_mle(mle);
			tries = 0;
		}
		goto redo_request;
	}

	mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len,
	     res->lockname.name, res->owner);
	/* make sure we never continue without this */
	BUG_ON(res->owner == O2NM_MAX_NODES);

	/* master is known, detach if not already detached */
	dlm_mle_detach_hb_events(dlm, mle);
	dlm_put_mle(mle);
	/* put the extra ref */
	dlm_put_mle_inuse(mle);

wake_waiters:
	spin_lock(&res->spinlock);
	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);

leave:
	/* need to free the unused mle */
	if (alloc_mle)
		kmem_cache_free(dlm_mle_cache, alloc_mle);

	return res;
}

#define DLM_MASTERY_TIMEOUT_MS   5000

static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_master_list_entry *mle,
				     int *blocked)
{
	u8 m;
	int ret, bit;
	int map_changed, voting_done;
	int assert, sleep;

recheck:
	ret = 0;
	assert = 0;

	/* check if another node has already become the owner */
	spin_lock(&res->spinlock);
	if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
		mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name,
		     res->lockname.len, res->lockname.name, res->owner);
		spin_unlock(&res->spinlock);
		/* this will cause the master to re-assert across
		 * the whole cluster, freeing up mles */
		if (res->owner != dlm->node_num) {
			ret = dlm_do_master_request(res, mle, res->owner);
			if (ret < 0) {
				/* give recovery a chance to run */
				mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
				msleep(500);
				goto recheck;
			}
		}
		ret = 0;
		goto leave;
	}
	spin_unlock(&res->spinlock);

	spin_lock(&mle->spinlock);
	m = mle->master;
	map_changed = (memcmp(mle->vote_map, mle->node_map,
			      sizeof(mle->vote_map)) != 0);
	voting_done = (memcmp(mle->vote_map, mle->response_map,
			      sizeof(mle->vote_map)) == 0);

	/* restart if we hit any errors */
	if (map_changed) {
		int b;
		mlog(0, "%s: %.*s: node map changed, restarting\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
		b = (mle->type == DLM_MLE_BLOCK);
		if ((*blocked && !b) || (!*blocked && b)) {
			mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
			     dlm->name, res->lockname.len, res->lockname.name,
			     *blocked, b);
			*blocked = b;
		}
		spin_unlock(&mle->spinlock);
		if (ret < 0) {
			mlog_errno(ret);
			goto leave;
		}
		mlog(0, "%s:%.*s: restart lock mastery succeeded, "
		     "rechecking now\n", dlm->name, res->lockname.len,
		     res->lockname.name);
		goto recheck;
	} else {
		if (!voting_done) {
			mlog(0, "map not changed and voting not done "
			     "for %s:%.*s\n", dlm->name, res->lockname.len,
			     res->lockname.name);
		}
	}

	if (m != O2NM_MAX_NODES) {
		/* another node has done an assert!
		 * all done! */
		sleep = 0;
	} else {
		sleep = 1;
		/* have all nodes responded? */
		if (voting_done && !*blocked) {
			bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
			if (dlm->node_num <= bit) {
				/* my node number is lowest.
				 * now tell other nodes that I am
				 * mastering this. */
				mle->master = dlm->node_num;
				/* ref was grabbed in get_lock_resource
				 * will be dropped in dlmlock_master */
				assert = 1;
				sleep = 0;
			}
			/* if voting is done, but we have not received
			 * an assert master yet, we must sleep */
		}
	}

	spin_unlock(&mle->spinlock);

	/* sleep if we haven't finished voting yet */
	if (sleep) {
		unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);

		if (atomic_read(&mle->mle_refs.refcount) < 2)
			mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle,
			     atomic_read(&mle->mle_refs.refcount),
			     res->lockname.len, res->lockname.name);

		atomic_set(&mle->woken, 0);
		(void)wait_event_timeout(mle->wq,
					 (atomic_read(&mle->woken) == 1),
					 timeo);
		if (res->owner == O2NM_MAX_NODES) {
			mlog(0, "%s:%.*s: waiting again\n", dlm->name,
			     res->lockname.len, res->lockname.name);
			goto recheck;
		}
		mlog(0, "done waiting, master is %u\n", res->owner);
		ret = 0;
		goto leave;
	}

	ret = 0;	/* done */
	if (assert) {
		m = dlm->node_num;
		mlog(0, "about to master %.*s here, this=%u\n",
		     res->lockname.len, res->lockname.name, m);
		ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0);
		if (ret) {
			/* This is a failure in the network path,
			 * not in the response to the assert_master
			 * (any nonzero response is a BUG on this node).
			 * Most likely a socket just got disconnected
			 * due to node death. */
			mlog_errno(ret);
		}
		/* no longer need to restart lock mastery.
		 * all living nodes have been contacted. */
		ret = 0;
	}

	/* set the lockres owner */
	spin_lock(&res->spinlock);
	/* mastery reference obtained either during
	 * assert_master_handler or in get_lock_resource */
	dlm_change_lockres_owner(dlm, res, m);
	spin_unlock(&res->spinlock);

leave:
	return ret;
}

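/*
 * helper for dlm_restart_lock_mastery below: walks the bits that differ
 * between two node bitmaps (the vote map snapshot vs the live node map)
 * and reports each changed node as either NODE_UP or NODE_DOWN.
 */
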
struct dlm_bitmap_diff_iter
{
	int curnode;
	unsigned long *orig_bm;
	unsigned long *cur_bm;
	unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
};

enum dlm_node_state_change
{
	NODE_DOWN = -1,
	NODE_NO_CHANGE = 0,
	NODE_UP
};

static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
				      unsigned long *orig_bm,
				      unsigned long *cur_bm)
{
	unsigned long p1, p2;
	int i;

	iter->curnode = -1;
	iter->orig_bm = orig_bm;
	iter->cur_bm = cur_bm;

	for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) {
		p1 = *(iter->orig_bm + i);
		p2 = *(iter->cur_bm + i);
		iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
	}
}

static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
				     enum dlm_node_state_change *state)
{
	int bit;

	if (iter->curnode >= O2NM_MAX_NODES)
		return -ENOENT;

	bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
			    iter->curnode+1);
	if (bit >= O2NM_MAX_NODES) {
		iter->curnode = O2NM_MAX_NODES;
		return -ENOENT;
	}

	/* if it was there in the original then this node died */
	if (test_bit(bit, iter->orig_bm))
		*state = NODE_DOWN;
	else
		*state = NODE_UP;

	iter->curnode = bit;
	return bit;
}

static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res,
				    struct dlm_master_list_entry *mle,
				    int blocked)
{
	struct dlm_bitmap_diff_iter bdi;
	enum dlm_node_state_change sc;
	int node;
	int ret = 0;

	mlog(0, "something happened such that the "
	     "master process may need to be restarted!\n");

	assert_spin_locked(&mle->spinlock);

	dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
	node = dlm_bitmap_diff_iter_next(&bdi, &sc);
	while (node >= 0) {
		if (sc == NODE_UP) {
			/* a node came up.  clear any old vote from
			 * the response map and set it in the vote map
			 * then restart the mastery. */
			mlog(ML_NOTICE, "node %d up while restarting\n", node);

			/* redo the master request, but only for the new node */
			mlog(0, "sending request to new node\n");
			clear_bit(node, mle->response_map);
			set_bit(node, mle->vote_map);
		} else {
			mlog(ML_ERROR, "node down! %d\n", node);
			if (blocked) {
				int lowest = find_next_bit(mle->maybe_map,
						O2NM_MAX_NODES, 0);

				/* act like it was never there */
				clear_bit(node, mle->maybe_map);

				if (node == lowest) {
					mlog(0, "expected master %u died"
					    " while this node was blocked "
					    "waiting on it!\n", node);
					lowest = find_next_bit(mle->maybe_map,
						       	O2NM_MAX_NODES,
						       	lowest+1);
					if (lowest < O2NM_MAX_NODES) {
						mlog(0, "%s:%.*s:still "
						     "blocked. waiting on %u "
						     "now\n", dlm->name,
						     res->lockname.len,
						     res->lockname.name,
						     lowest);
					} else {
						/* mle is an MLE_BLOCK, but
						 * there is now nothing left to
						 * block on.  we need to return
						 * all the way back out and try
						 * again with an MLE_MASTER.
						 * dlm_do_local_recovery_cleanup
						 * has already run, so the mle
						 * refcount is ok */
						mlog(0, "%s:%.*s: no "
						     "longer blocking. try to "
						     "master this here\n",
						     dlm->name,
						     res->lockname.len,
						     res->lockname.name);
						mle->type = DLM_MLE_MASTER;
						mle->mleres = res;
					}
				}
			}

			/* now blank out everything, as if we had never
			 * contacted anyone */
			memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
			memset(mle->response_map, 0, sizeof(mle->response_map));
			/* reset the vote_map to the current node_map */
			memcpy(mle->vote_map, mle->node_map,
			       sizeof(mle->node_map));
			/* put myself into the maybe map */
			if (mle->type != DLM_MLE_BLOCK)
				set_bit(dlm->node_num, mle->maybe_map);
		}
		ret = -EAGAIN;
		node = dlm_bitmap_diff_iter_next(&bdi, &sc);
	}
	return ret;
}

/*
 * DLM_MASTER_REQUEST_MSG
 *
 * returns: 0 on success,
 *          -errno on a network error
 *
 * on error, the caller should assume the target node is "dead"
 *
 */

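/*
 * so a caller typically treats any -errno as node death and moves on,
 * e.g. (illustrative sketch, mirroring the loop in dlm_get_lock_resource):
 *
 *	ret = dlm_do_master_request(res, mle, nodenum);
 *	if (ret < 0)
 *		mlog_errno(ret);	<-- node assumed dead; heartbeat
 *					    will update the mle node_map
 */
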
static int dlm_do_master_request(struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle, int to)
{
	struct dlm_ctxt *dlm = mle->dlm;
	struct dlm_master_request request;
	int ret, response=0, resend;

	memset(&request, 0, sizeof(request));
	request.node_idx = dlm->node_num;

	BUG_ON(mle->type == DLM_MLE_MIGRATION);

	request.namelen = (u8)mle->mnamelen;
	memcpy(request.name, mle->mname, request.namelen);

again:
	ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request,
				 sizeof(request), to, &response);
	if (ret < 0)  {
		if (ret == -ESRCH) {
			/* should never happen */
			mlog(ML_ERROR, "TCP stack not ready!\n");
			BUG();
		} else if (ret == -EINVAL) {
			mlog(ML_ERROR, "bad args passed to o2net!\n");
			BUG();
		} else if (ret == -ENOMEM) {
			mlog(ML_ERROR, "out of memory while trying to send "
			     "network message!  retrying\n");
			/* this is totally crude */
			msleep(50);
			goto again;
		} else if (!dlm_is_host_down(ret)) {
			/* not a network error. bad. */
			mlog_errno(ret);
			mlog(ML_ERROR, "unhandled error!");
			BUG();
		}
		/* all other errors should be network errors,
		 * and likely indicate node death */
		mlog(ML_ERROR, "link to %d went down!\n", to);
		goto out;
	}

	ret = 0;
	resend = 0;
	spin_lock(&mle->spinlock);
	switch (response) {
	case DLM_MASTER_RESP_YES:
		set_bit(to, mle->response_map);
		mlog(0, "node %u is the master, response=YES\n", to);
		mlog(0, "%s:%.*s: master node %u now knows I have a "
		     "reference\n", dlm->name, res->lockname.len,
		     res->lockname.name, to);
		mle->master = to;
		break;
	case DLM_MASTER_RESP_NO:
		mlog(0, "node %u not master, response=NO\n", to);
		set_bit(to, mle->response_map);
		break;
	case DLM_MASTER_RESP_MAYBE:
		mlog(0, "node %u not master, response=MAYBE\n", to);
		set_bit(to, mle->response_map);
		set_bit(to, mle->maybe_map);
		break;
	case DLM_MASTER_RESP_ERROR:
		mlog(0, "node %u hit an error, resending\n", to);
		resend = 1;
		response = 0;
		break;
	default:
		mlog(ML_ERROR, "bad response! %u\n", response);
		BUG();
	}
	spin_unlock(&mle->spinlock);
	if (resend) {
		/* this is also totally crude */
		msleep(50);
		goto again;
	}

out:
	return ret;
}

/*
 * locks that can be taken here:
 * dlm->spinlock
 * res->spinlock
 * mle->spinlock
 * dlm->master_list
 *
 * if possible, TRIM THIS DOWN!!!
 */

int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data)
{
	u8 response = DLM_MASTER_RESP_MAYBE;
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_resource *res = NULL;
	struct dlm_master_request *request = (struct dlm_master_request *) msg->buf;
	struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
	char *name;
	unsigned int namelen, hash;
	int found, ret;
	int set_maybe;
	int dispatch_assert = 0;

	if (!dlm_grab(dlm))
		return DLM_MASTER_RESP_NO;

	if (!dlm_domain_fully_joined(dlm)) {
		response = DLM_MASTER_RESP_NO;
		goto send_response;
	}

	name = request->name;
	namelen = request->namelen;
	hash = dlm_lockid_hash(name, namelen);

	if (namelen > DLM_LOCKID_NAME_MAX) {
		response = DLM_IVBUFLEN;
		goto send_response;
	}

way_up_top:
	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
	if (res) {
		spin_unlock(&dlm->spinlock);

		/* take care of the easy cases up front */
		spin_lock(&res->spinlock);
		if (res->state & (DLM_LOCK_RES_RECOVERING|
				  DLM_LOCK_RES_MIGRATING)) {
			spin_unlock(&res->spinlock);
			mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
			     "being recovered/migrated\n");
			response = DLM_MASTER_RESP_ERROR;
			if (mle)
				kmem_cache_free(dlm_mle_cache, mle);
			goto send_response;
		}

		if (res->owner == dlm->node_num) {
			dlm_lockres_set_refmap_bit(dlm, res, request->node_idx);
			spin_unlock(&res->spinlock);
			response = DLM_MASTER_RESP_YES;
			if (mle)
				kmem_cache_free(dlm_mle_cache, mle);

			/* this node is the owner.
			 * there is some extra work that needs to
			 * happen now.  the requesting node has
			 * caused all nodes up to this one to
			 * create mles.  this node now needs to
			 * go back and clean those up. */
			dispatch_assert = 1;
			goto send_response;
		} else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
			spin_unlock(&res->spinlock);
			// mlog(0, "node %u is the master\n", res->owner);
			response = DLM_MASTER_RESP_NO;
			if (mle)
				kmem_cache_free(dlm_mle_cache, mle);
			goto send_response;
		}

		/* ok, there is no owner.  either this node is
		 * being blocked, or it is actively trying to
		 * master this lock. */
		if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
			mlog(ML_ERROR, "lock with no owner should be "
			     "in-progress!\n");
			BUG();
		}

		// mlog(0, "lockres is in progress...\n");
		spin_lock(&dlm->master_lock);
		found = dlm_find_mle(dlm, &tmpmle, name, namelen);
		if (!found) {
			mlog(ML_ERROR, "no mle found for this lock!\n");
			BUG();
		}
		set_maybe = 1;
		spin_lock(&tmpmle->spinlock);
		if (tmpmle->type == DLM_MLE_BLOCK) {
			// mlog(0, "this node is waiting for "
			// "lockres to be mastered\n");
			response = DLM_MASTER_RESP_NO;
		} else if (tmpmle->type == DLM_MLE_MIGRATION) {
			mlog(0, "node %u is master, but trying to migrate to "
			     "node %u.\n", tmpmle->master, tmpmle->new_master);
			if (tmpmle->master == dlm->node_num) {
				mlog(ML_ERROR, "no owner on lockres, but this "
				     "node is trying to migrate it to %u?!\n",
				     tmpmle->new_master);
				BUG();
			} else {
				/* the real master can respond on its own */
				response = DLM_MASTER_RESP_NO;
			}
		} else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) {
			set_maybe = 0;
			if (tmpmle->master == dlm->node_num) {
				response = DLM_MASTER_RESP_YES;
				/* this node will be the owner.
				 * go back and clean the mles on any
				 * other nodes */
				dispatch_assert = 1;
				dlm_lockres_set_refmap_bit(dlm, res,
							   request->node_idx);
			} else
				response = DLM_MASTER_RESP_NO;
		} else {
			// mlog(0, "this node is attempting to "
			// "master lockres\n");
			response = DLM_MASTER_RESP_MAYBE;
		}
		if (set_maybe)
			set_bit(request->node_idx, tmpmle->maybe_map);
		spin_unlock(&tmpmle->spinlock);

		spin_unlock(&dlm->master_lock);
		spin_unlock(&res->spinlock);

		/* keep the mle attached to heartbeat events */
		dlm_put_mle(tmpmle);
		if (mle)
			kmem_cache_free(dlm_mle_cache, mle);
		goto send_response;
	}

	/*
	 * lockres doesn't exist on this node
	 * if there is an MLE_BLOCK, return NO
	 * if there is an MLE_MASTER, return MAYBE
	 * otherwise, add an MLE_BLOCK, return NO
	 */
	spin_lock(&dlm->master_lock);
	found = dlm_find_mle(dlm, &tmpmle, name, namelen);
	if (!found) {
		/* this lockid has never been seen on this node yet */
		// mlog(0, "no mle found\n");
		if (!mle) {
			spin_unlock(&dlm->master_lock);
			spin_unlock(&dlm->spinlock);

			mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
			if (!mle) {
				response = DLM_MASTER_RESP_ERROR;
				mlog_errno(-ENOMEM);
				goto send_response;
			}
			goto way_up_top;
		}

		// mlog(0, "this is second time thru, already allocated, "
		// "add the block.\n");
		dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
		set_bit(request->node_idx, mle->maybe_map);
		__dlm_insert_mle(dlm, mle);
		response = DLM_MASTER_RESP_NO;
	} else {
		// mlog(0, "mle was found\n");
		set_maybe = 1;
		spin_lock(&tmpmle->spinlock);
		if (tmpmle->master == dlm->node_num) {
			mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n");
			BUG();
		}
		if (tmpmle->type == DLM_MLE_BLOCK)
			response = DLM_MASTER_RESP_NO;
		else if (tmpmle->type == DLM_MLE_MIGRATION) {
			mlog(0, "migration mle was found (%u->%u)\n",
			     tmpmle->master, tmpmle->new_master);
			/* real master can respond on its own */
			response = DLM_MASTER_RESP_NO;
		} else
			response = DLM_MASTER_RESP_MAYBE;
		if (set_maybe)
			set_bit(request->node_idx, tmpmle->maybe_map);
		spin_unlock(&tmpmle->spinlock);
	}
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	if (found) {
		/* keep the mle attached to heartbeat events */
		dlm_put_mle(tmpmle);
	}
send_response:
	/*
	 * __dlm_lookup_lockres() grabbed a reference to this lockres.
	 * The reference is released by dlm_assert_master_worker() under
	 * the call to dlm_dispatch_assert_master().  If
	 * dlm_assert_master_worker() isn't called, we drop it here.
	 */
	if (dispatch_assert) {
		if (response != DLM_MASTER_RESP_YES)
			mlog(ML_ERROR, "invalid response %d\n", response);
		if (!res) {
			mlog(ML_ERROR, "bad lockres while trying to assert!\n");
			BUG();
		}
		mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
		     dlm->node_num, res->lockname.len, res->lockname.name);
		ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
						 DLM_ASSERT_MASTER_MLE_CLEANUP);
		if (ret < 0) {
			mlog(ML_ERROR, "failed to dispatch assert master work\n");
			response = DLM_MASTER_RESP_ERROR;
			dlm_lockres_put(res);
		} else
			dlm_lockres_grab_inflight_worker(dlm, res);
	} else {
		if (res)
			dlm_lockres_put(res);
	}

	dlm_put(dlm);
	return response;
}

/*
 * DLM_ASSERT_MASTER_MSG
 */


/*
 * NOTE: this can be used for debugging
 * can periodically run all locks owned by this node
 * and re-assert across the cluster...
 */

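/*
 * such a debug sweep might look like this (illustrative sketch only,
 * assuming the dlm_lockres_hash()/DLM_HASH_BUCKETS helpers from
 * dlmcommon.h; nothing in this file is wired up to do it):
 *
 *	int i;
 *	struct hlist_head *bucket;
 *	struct dlm_lock_resource *res;
 *
 *	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
 *		bucket = dlm_lockres_hash(dlm, i);
 *		hlist_for_each_entry(res, bucket, hash_node)
 *			if (res->owner == dlm->node_num)
 *				dlm_do_assert_master(dlm, res,
 *						     dlm->domain_map, 0);
 *	}
 */
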
static int dlm_do_assert_master(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res,
				void *nodemap, u32 flags)
{
	struct dlm_assert_master assert;
	int to, tmpret;
	struct dlm_node_iter iter;
	int ret = 0;
	int reassert;
	const char *lockname = res->lockname.name;
	unsigned int namelen = res->lockname.len;

	BUG_ON(namelen > O2NM_MAX_NAME_LEN);

	spin_lock(&res->spinlock);
	res->state |= DLM_LOCK_RES_SETREF_INPROG;
	spin_unlock(&res->spinlock);

again:
	reassert = 0;

	/* note that if this nodemap is empty, it returns 0 */
	dlm_node_iter_init(nodemap, &iter);
	while ((to = dlm_node_iter_next(&iter)) >= 0) {
		int r = 0;
		struct dlm_master_list_entry *mle = NULL;

		mlog(0, "sending assert master to %d (%.*s)\n", to,
		     namelen, lockname);
		memset(&assert, 0, sizeof(assert));
		assert.node_idx = dlm->node_num;
		assert.namelen = namelen;
		memcpy(assert.name, lockname, namelen);
		assert.flags = cpu_to_be32(flags);

		tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
					    &assert, sizeof(assert), to, &r);
		if (tmpret < 0) {
			mlog(ML_ERROR, "Error %d when sending message %u (key "
			     "0x%x) to node %u\n", tmpret,
			     DLM_ASSERT_MASTER_MSG, dlm->key, to);
			if (!dlm_is_host_down(tmpret)) {
				mlog(ML_ERROR, "unhandled error=%d!\n", tmpret);
				BUG();
			}
			/* a node died.  finish out the rest of the nodes. */
			mlog(0, "link to %d went down!\n", to);
			/* any nonzero status return will do */
			ret = tmpret;
			r = 0;
		} else if (r < 0) {
			/* ok, something horribly messed.  kill thyself. */
			mlog(ML_ERROR,"during assert master of %.*s to %u, "
			     "got %d.\n", namelen, lockname, to, r);
			spin_lock(&dlm->spinlock);
			spin_lock(&dlm->master_lock);
			if (dlm_find_mle(dlm, &mle, (char *)lockname,
					 namelen)) {
				dlm_print_one_mle(mle);
				__dlm_put_mle(mle);
			}
			spin_unlock(&dlm->master_lock);
			spin_unlock(&dlm->spinlock);
			BUG();
		}

		if (r & DLM_ASSERT_RESPONSE_REASSERT &&
		    !(r & DLM_ASSERT_RESPONSE_MASTERY_REF)) {
			mlog(ML_ERROR, "%.*s: very strange, "
			     "master MLE but no lockres on %u\n",
			     namelen, lockname, to);
		}

		if (r & DLM_ASSERT_RESPONSE_REASSERT) {
			mlog(0, "%.*s: node %u create mles on other "
			     "nodes and requests a re-assert\n",
			     namelen, lockname, to);
			reassert = 1;
		}
		if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) {
			mlog(0, "%.*s: node %u has a reference to this "
			     "lockres, set the bit in the refmap\n",
			     namelen, lockname, to);
			spin_lock(&res->spinlock);
			dlm_lockres_set_refmap_bit(dlm, res, to);
			spin_unlock(&res->spinlock);
		}
	}

	if (reassert)
		goto again;

	spin_lock(&res->spinlock);
	res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);

	return ret;
}

/*
 * locks that can be taken here:
 * dlm->spinlock
 * res->spinlock
 * mle->spinlock
 * dlm->master_list
 *
 * if possible, TRIM THIS DOWN!!!
 */

int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_master_list_entry *mle = NULL;
	struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	char *name;
	unsigned int namelen, hash;
	u32 flags;
	int master_request = 0, have_lockres_ref = 0;
	int ret = 0;

	if (!dlm_grab(dlm))
		return 0;

	name = assert->name;
	namelen = assert->namelen;
	hash = dlm_lockid_hash(name, namelen);
	flags = be32_to_cpu(assert->flags);

	if (namelen > DLM_LOCKID_NAME_MAX) {
		mlog(ML_ERROR, "Invalid name length!");
		goto done;
	}

	spin_lock(&dlm->spinlock);

	if (flags)
		mlog(0, "assert_master with flags: %u\n", flags);

	/* find the MLE */
	spin_lock(&dlm->master_lock);
	if (!dlm_find_mle(dlm, &mle, name, namelen)) {
		/* not an error, could be master just re-asserting */
		mlog(0, "just got an assert_master from %u, but no "
		     "MLE for it! (%.*s)\n", assert->node_idx,
		     namelen, name);
	} else {
		int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0);
		if (bit >= O2NM_MAX_NODES) {
			/* not necessarily an error, though less likely.
			 * could be master just re-asserting. */
			mlog(0, "no bits set in the maybe_map, but %u "
			     "is asserting! (%.*s)\n", assert->node_idx,
			     namelen, name);
		} else if (bit != assert->node_idx) {
			if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
				mlog(0, "master %u was found, %u should "
				     "back off\n", assert->node_idx, bit);
			} else {
				/* with the fix for bug 569, a higher node
				 * number winning the mastery will respond
				 * YES to mastery requests, but this node
				 * had no way of knowing.  let it pass. */
				mlog(0, "%u is the lowest node, "
				     "%u is asserting. (%.*s)  %u must "
				     "have begun after %u won.\n", bit,
				     assert->node_idx, namelen, name, bit,
				     assert->node_idx);
			}
		}
		if (mle->type == DLM_MLE_MIGRATION) {
			if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
				mlog(0, "%s:%.*s: got cleanup assert"
				     " from %u for migration\n",
				     dlm->name, namelen, name,
				     assert->node_idx);
			} else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) {
				mlog(0, "%s:%.*s: got unrelated assert"
				     " from %u for migration, ignoring\n",
				     dlm->name, namelen, name,
				     assert->node_idx);
				__dlm_put_mle(mle);
				spin_unlock(&dlm->master_lock);
				spin_unlock(&dlm->spinlock);
				goto done;
			}
		}
	}
	spin_unlock(&dlm->master_lock);

	/* ok everything checks out with the MLE
	 * now check to see if there is a lockres */
	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
	if (res) {
		spin_lock(&res->spinlock);
		if (res->state & DLM_LOCK_RES_RECOVERING)  {
			mlog(ML_ERROR, "%u asserting but %.*s is "
			     "RECOVERING!\n", assert->node_idx, namelen, name);
			goto kill;
		}
		if (!mle) {
			if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN &&
			    res->owner != assert->node_idx) {
				mlog(ML_ERROR, "DIE! Mastery assert from %u, "
				     "but current owner is %u! (%.*s)\n",
				     assert->node_idx, res->owner, namelen,
				     name);
				__dlm_print_one_lock_resource(res);
				BUG();
			}
		} else if (mle->type != DLM_MLE_MIGRATION) {
			if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
				/* owner is just re-asserting */
				if (res->owner == assert->node_idx) {
					mlog(0, "owner %u re-asserting on "
					     "lock %.*s\n", assert->node_idx,
					     namelen, name);
					goto ok;
				}
				mlog(ML_ERROR, "got assert_master from "
				     "node %u, but %u is the owner! "
				     "(%.*s)\n", assert->node_idx,
				     res->owner, namelen, name);
				goto kill;
			}
			if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
				mlog(ML_ERROR, "got assert from %u, but lock "
				     "with no owner should be "
				     "in-progress! (%.*s)\n",
				     assert->node_idx,
				     namelen, name);
				goto kill;
			}
		} else /* mle->type == DLM_MLE_MIGRATION */ {
			/* should only be getting an assert from new master */
			if (assert->node_idx != mle->new_master) {
				mlog(ML_ERROR, "got assert from %u, but "
				     "new master is %u, and old master "
				     "was %u (%.*s)\n",
				     assert->node_idx, mle->new_master,
				     mle->master, namelen, name);
				goto kill;
			}
		}
ok:
		spin_unlock(&res->spinlock);
	}

	// mlog(0, "woo!  got an assert_master from node %u!\n",
	//	     assert->node_idx);
	if (mle) {
		int extra_ref = 0;
		int nn = -1;
		int rr, err = 0;

		spin_lock(&mle->spinlock);
		if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
			extra_ref = 1;
		else {
			/* MASTER mle: if any bits set in the response map
			 * then the calling node needs to re-assert to clear
			 * up nodes that this node contacted */
			while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
						    nn+1)) < O2NM_MAX_NODES) {
				if (nn != dlm->node_num && nn != assert->node_idx) {
					master_request = 1;
					break;
				}
			}
		}
		mle->master = assert->node_idx;
		atomic_set(&mle->woken, 1);
		wake_up(&mle->wq);
		spin_unlock(&mle->spinlock);

		if (res) {
			int wake = 0;
			spin_lock(&res->spinlock);
			if (mle->type == DLM_MLE_MIGRATION) {
				mlog(0, "finishing off migration of lockres %.*s, "
				     "from %u to %u\n",
				     res->lockname.len, res->lockname.name,
				     dlm->node_num, mle->new_master);
				res->state &= ~DLM_LOCK_RES_MIGRATING;
				wake = 1;
				dlm_change_lockres_owner(dlm, res, mle->new_master);
				BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
			} else
				dlm_change_lockres_owner(dlm, res, mle->master);
			spin_unlock(&res->spinlock);
			have_lockres_ref = 1;
			if (wake)
				wake_up(&res->wq);
		}

		/* master is known, detach if not already detached.
		 * ensures that only one assert_master call will happen
		 * on this mle. */
		spin_lock(&dlm->master_lock);

		rr = atomic_read(&mle->mle_refs.refcount);
		if (mle->inuse > 0) {
			if (extra_ref && rr < 3)
				err = 1;
			else if (!extra_ref && rr < 2)
				err = 1;
		} else {
			if (extra_ref && rr < 2)
				err = 1;
			else if (!extra_ref && rr < 1)
				err = 1;
		}
		if (err) {
			mlog(ML_ERROR, "%s:%.*s: got assert master from %u "
			     "that will mess up this node, refs=%d, extra=%d, "
			     "inuse=%d\n", dlm->name, namelen, name,
			     assert->node_idx, rr, extra_ref, mle->inuse);
			dlm_print_one_mle(mle);
		}
		__dlm_unlink_mle(dlm, mle);
		__dlm_mle_detach_hb_events(dlm, mle);
		__dlm_put_mle(mle);
		if (extra_ref) {
			/* the assert master message now balances the extra
			 * ref given by the master / migration request message.
			 * if this is the last put, it will be removed
			 * from the list. */
			__dlm_put_mle(mle);
		}
		spin_unlock(&dlm->master_lock);
	} else if (res) {
		if (res->owner != assert->node_idx) {
			mlog(0, "assert_master from %u, but current "
			     "owner is %u (%.*s), no mle\n", assert->node_idx,
			     res->owner, namelen, name);
		}
	}
	spin_unlock(&dlm->spinlock);

done:
	ret = 0;
	if (res) {
		spin_lock(&res->spinlock);
		res->state |= DLM_LOCK_RES_SETREF_INPROG;
		spin_unlock(&res->spinlock);
		*ret_data = (void *)res;
	}
	dlm_put(dlm);
	if (master_request) {
		mlog(0, "need to tell master to reassert\n");
		/* positive. negative would shoot down the node. */
		ret |= DLM_ASSERT_RESPONSE_REASSERT;
		if (!have_lockres_ref) {
			mlog(ML_ERROR, "strange, got assert from %u, MASTER "
			     "mle present here for %s:%.*s, but no lockres!\n",
			     assert->node_idx, dlm->name, namelen, name);
		}
	}
	if (have_lockres_ref) {
		/* let the master know we have a reference to the lockres */
		ret |= DLM_ASSERT_RESPONSE_MASTERY_REF;
		mlog(0, "%s:%.*s: got assert from %u, need a ref\n",
		     dlm->name, namelen, name, assert->node_idx);
	}
	return ret;

kill:
	/* kill the caller! */
	mlog(ML_ERROR, "Bad message received from another node.  Dumping state "
	     "and killing the other node now!  This node is OK and can continue.\n");
	__dlm_print_one_lock_resource(res);
	spin_unlock(&res->spinlock);
	spin_unlock(&dlm->spinlock);
	*ret_data = (void *)res;
	dlm_put(dlm);
	return -EINVAL;
}

void dlm_assert_master_post_handler(int status, void *data, void *ret_data)
{
	struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data;

	if (ret_data) {
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
		spin_unlock(&res->spinlock);
		wake_up(&res->wq);
		dlm_lockres_put(res);
	}
	return;
}

int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res,
			       int ignore_higher, u8 request_from, u32 flags)
{
	struct dlm_work_item *item;
	item = kzalloc(sizeof(*item), GFP_ATOMIC);
	if (!item)
		return -ENOMEM;

	/* queue up work for dlm_assert_master_worker */
	dlm_grab(dlm);	/* get an extra ref for the work item */
	dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
	item->u.am.lockres = res; /* already have a ref */
	/* can optionally ignore node numbers higher than this node */
	item->u.am.ignore_higher = ignore_higher;
	item->u.am.request_from = request_from;
	item->u.am.flags = flags;

	if (ignore_higher)
		mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
		     res->lockname.name);

	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);

	queue_work(dlm->dlm_worker, &dlm->dispatched_work);
	return 0;
}

static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_ctxt *dlm = data;
	int ret = 0;
	struct dlm_lock_resource *res;
	unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
	int ignore_higher;
	int bit;
	u8 request_from;
	u32 flags;

	dlm = item->dlm;
	res = item->u.am.lockres;
	ignore_higher = item->u.am.ignore_higher;
	request_from = item->u.am.request_from;
	flags = item->u.am.flags;

	spin_lock(&dlm->spinlock);
	memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
	spin_unlock(&dlm->spinlock);

	clear_bit(dlm->node_num, nodemap);
	if (ignore_higher) {
		/* if is this just to clear up mles for nodes below
		 * this node, do not send the message to the original
		 * caller or any node number higher than this */
		clear_bit(request_from, nodemap);
		bit = dlm->node_num;
		while (1) {
			bit = find_next_bit(nodemap, O2NM_MAX_NODES,
					    bit+1);
			if (bit >= O2NM_MAX_NODES)
				break;
			clear_bit(bit, nodemap);
		}
	}

	/*
	 * If we're migrating this lock to someone else, we are no
	 * longer allowed to assert our own mastery.  OTOH, we need to
	 * prevent migration from starting while we're still asserting
	 * our dominance.  The reserved ast delays migration.
	 */
	spin_lock(&res->spinlock);
	if (res->state & DLM_LOCK_RES_MIGRATING) {
		mlog(0, "Someone asked us to assert mastery, but we're "
		     "in the middle of migration.  Skipping assert, "
		     "the new master will handle that.\n");
		spin_unlock(&res->spinlock);
		goto put;
	} else
		__dlm_lockres_reserve_ast(res);
	spin_unlock(&res->spinlock);

	/* this call now finishes out the nodemap
	 * even if one or more nodes die */
	mlog(0, "worker about to master %.*s here, this=%u\n",
	     res->lockname.len, res->lockname.name, dlm->node_num);
	ret = dlm_do_assert_master(dlm, res, nodemap, flags);
	if (ret < 0) {
		/* no need to restart, we are done */
		if (!dlm_is_host_down(ret))
			mlog_errno(ret);
	}

	/* Ok, we've asserted ourselves.  Let's let migration start. */
	dlm_lockres_release_ast(dlm, res);

put:
	dlm_lockres_drop_inflight_worker(dlm, res);

	dlm_lockres_put(res);

	mlog(0, "finished with dlm_assert_master_worker\n");
}

/* SPECIAL CASE for the $RECOVERY lock used by the recovery thread.
 * We cannot wait for node recovery to complete to begin mastering this
 * lockres because this lockres is used to kick off recovery! ;-)
 * So, do a pre-check on all living nodes to see if any of those nodes
 * think that $RECOVERY is currently mastered by a dead node.  If so,
 * we wait a short time to allow that node to get notified by its own
 * heartbeat stack, then check again.  All $RECOVERY lock resources
 * mastered by dead nodes are purged when the heartbeat callback is
 * fired, so we can know for sure that it is safe to continue once
 * the node returns a live node or no node.  */
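/*
 * returns 0 when it is safe to proceed (no live node thinks a dead node
 * masters $RECOVERY), and -EAGAIN when the caller should wait and retry;
 * dlm_get_lock_resource() loops on exactly that result.
 */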
static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res)
{
	struct dlm_node_iter iter;
	int nodenum;
	int ret = 0;
	u8 master = DLM_LOCK_RES_OWNER_UNKNOWN;

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		/* do not send to self */
		if (nodenum == dlm->node_num)
			continue;
		ret = dlm_do_master_requery(dlm, res, nodenum, &master);
		if (ret < 0) {
			mlog_errno(ret);
			if (!dlm_is_host_down(ret))
				BUG();
			/* host is down, so answer for that node would be
			 * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
			ret = 0;
		}

		if (master != DLM_LOCK_RES_OWNER_UNKNOWN) {
			/* check to see if this master is in the recovery map */
			spin_lock(&dlm->spinlock);
			if (test_bit(master, dlm->recovery_map)) {
				mlog(ML_NOTICE, "%s: node %u has not seen "
				     "node %u go down yet, and thinks the "
				     "dead node is mastering the recovery "
				     "lock.  must wait.\n", dlm->name,
				     nodenum, master);
				ret = -EAGAIN;
			}
			spin_unlock(&dlm->spinlock);
			mlog(0, "%s: reco lock master is %u\n", dlm->name,
			     master);
			break;
		}
	}
	return ret;
}

2224 * DLM_DEREF_LOCKRES_MSG
2227 int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2229 struct dlm_deref_lockres deref;
2231 const char *lockname;
2232 unsigned int namelen;
2234 lockname = res->lockname.name;
2235 namelen = res->lockname.len;
2236 BUG_ON(namelen > O2NM_MAX_NAME_LEN);
2238 memset(&deref, 0, sizeof(deref));
2239 deref.node_idx = dlm->node_num;
2240 deref.namelen = namelen;
2241 memcpy(deref.name, lockname, namelen);
2243 ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
2244 &deref, sizeof(deref), res->owner, &r);
2246 mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF to node %u\n",
2247 dlm->name, namelen, lockname, ret, res->owner);
2249 /* BAD. other node says I did not have a ref. */
2250 mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n",
2251 dlm->name, namelen, lockname, res->owner, r);
2252 dlm_print_one_lock_resource(res);
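/*
 * Summarizing the sender above: a negative return means the network
 * send itself failed, while a negative out-status r means the master
 * had no record of our reference (refmap corruption, logged loudly).
 * A minimal caller sketch, assuming the lockres is no longer in use
 * locally:
 *
 *	if (dlm_drop_lockres_ref(dlm, res) < 0)
 *		mlog(ML_ERROR, "%s: deref of %.*s failed\n", dlm->name,
 *		     res->lockname.len, res->lockname.name);
 */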
2258 int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
2261 struct dlm_ctxt *dlm = data;
2262 struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf;
2263 struct dlm_lock_resource *res = NULL;
2265 unsigned int namelen;
2269 struct dlm_work_item *item;
2276 name = deref->name;
2277 namelen = deref->namelen;
2278 node = deref->node_idx;
2280 if (namelen > DLM_LOCKID_NAME_MAX) {
2281 mlog(ML_ERROR, "Invalid name length!");
2284 if (deref->node_idx >= O2NM_MAX_NODES) {
2285 mlog(ML_ERROR, "Invalid node number: %u\n", node);
2289 hash = dlm_lockid_hash(name, namelen);
2291 spin_lock(&dlm->spinlock);
2292 res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
2294 spin_unlock(&dlm->spinlock);
2295 mlog(ML_ERROR, "%s:%.*s: bad lockres name\n",
2296 dlm->name, namelen, name);
2299 spin_unlock(&dlm->spinlock);
2301 spin_lock(&res->spinlock);
2302 if (res->state & DLM_LOCK_RES_SETREF_INPROG)
2305 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2306 if (test_bit(node, res->refmap)) {
2307 dlm_lockres_clear_refmap_bit(dlm, res, node);
2311 spin_unlock(&res->spinlock);
2315 dlm_lockres_calc_usage(dlm, res);
2317 mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2318 "but it is already dropped!\n", dlm->name,
2319 res->lockname.len, res->lockname.name, node);
2320 dlm_print_one_lock_resource(res);
2326 item = kzalloc(sizeof(*item), GFP_NOFS);
2333 dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL);
2334 item->u.dl.deref_res = res;
2335 item->u.dl.deref_node = node;
2337 spin_lock(&dlm->work_lock);
2338 list_add_tail(&item->list, &dlm->work_list);
2339 spin_unlock(&dlm->work_lock);
2341 queue_work(dlm->dlm_worker, &dlm->dispatched_work);
2346 dlm_lockres_put(res);
2352 static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
2354 struct dlm_ctxt *dlm;
2355 struct dlm_lock_resource *res;
2360 res = item->u.dl.deref_res;
2361 node = item->u.dl.deref_node;
2363 spin_lock(&res->spinlock);
2364 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2365 if (test_bit(node, res->refmap)) {
2366 __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
2367 dlm_lockres_clear_refmap_bit(dlm, res, node);
2370 spin_unlock(&res->spinlock);
2373 mlog(0, "%s:%.*s node %u ref dropped in dispatch\n",
2374 dlm->name, res->lockname.len, res->lockname.name, node);
2375 dlm_lockres_calc_usage(dlm, res);
2377 mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2378 "but it is already dropped!\n", dlm->name,
2379 res->lockname.len, res->lockname.name, node);
2380 dlm_print_one_lock_resource(res);
2383 dlm_lockres_put(res);
2387 * A migrateable resource is one that is:
2388 * 1. locally mastered, and,
2389 * 2. zero local locks, and,
2390 * 3. one or more non-local locks, or one or more references
2391 * Returns 1 if yes, 0 if not. (See the condensed sketch below.)
2393 static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
2394 struct dlm_lock_resource *res)
2396 enum dlm_lockres_list idx;
2397 int nonlocal = 0, node_ref;
2398 struct list_head *queue;
2399 struct dlm_lock *lock;
2402 assert_spin_locked(&res->spinlock);
2404 /* delay migration when the lockres is in MIGRATING state */
2405 if (res->state & DLM_LOCK_RES_MIGRATING)
2408 if (res->owner != dlm->node_num)
2411 for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) {
2412 queue = dlm_list_idx_to_ptr(res, idx);
2413 list_for_each_entry(lock, queue, list) {
2414 if (lock->ml.node != dlm->node_num) {
2418 cookie = be64_to_cpu(lock->ml.cookie);
2419 mlog(0, "%s: Not migrateable res %.*s, lock %u:%llu on "
2420 "%s list\n", dlm->name, res->lockname.len,
2422 dlm_get_lock_cookie_node(cookie),
2423 dlm_get_lock_cookie_seq(cookie),
2424 dlm_list_in_text(idx));
2430 node_ref = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
2431 if (node_ref >= O2NM_MAX_NODES)
2435 mlog(0, "%s: res %.*s, Migrateable\n", dlm->name, res->lockname.len,
2436 res->lockname.name);
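/*
 * Condensed form of the rules above (a sketch only; has_local_lock()
 * and has_nonlocal_lock() are hypothetical helpers standing in for
 * the queue walk the real code performs inline):
 *
 *	if ((res->state & DLM_LOCK_RES_MIGRATING) ||
 *	    res->owner != dlm->node_num || has_local_lock(res))
 *		return 0;	// not migratable
 *	return has_nonlocal_lock(res) ||
 *	       find_next_bit(res->refmap, O2NM_MAX_NODES, 0) <
 *	       O2NM_MAX_NODES;
 */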
2442 * DLM_MIGRATE_LOCKRES
2446 static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
2447 struct dlm_lock_resource *res, u8 target)
2449 struct dlm_master_list_entry *mle = NULL;
2450 struct dlm_master_list_entry *oldmle = NULL;
2451 struct dlm_migratable_lockres *mres = NULL;
2454 unsigned int namelen;
2461 BUG_ON(target == O2NM_MAX_NODES);
2463 name = res->lockname.name;
2464 namelen = res->lockname.len;
2466 mlog(0, "%s: Migrating %.*s to node %u\n", dlm->name, namelen, name,
2469 /* preallocate up front. if this fails, abort */
2471 mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS);
2477 mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
2485 * clear any existing master requests and
2486 * add the migration mle to the list
2488 spin_lock(&dlm->spinlock);
2489 spin_lock(&dlm->master_lock);
2490 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
2491 namelen, target, dlm->node_num);
2492 spin_unlock(&dlm->master_lock);
2493 spin_unlock(&dlm->spinlock);
2495 if (ret == -EEXIST) {
2496 mlog(0, "another process is already migrating it\n");
2502 * set the MIGRATING flag and flush asts
2503 * if we fail after this we need to re-dirty the lockres
2505 if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
2506 mlog(ML_ERROR, "tried to migrate %.*s to %u, but "
2507 "the target went down.\n", res->lockname.len,
2508 res->lockname.name, target);
2509 spin_lock(&res->spinlock);
2510 res->state &= ~DLM_LOCK_RES_MIGRATING;
2512 spin_unlock(&res->spinlock);
2518 /* master is known, detach if not already detached */
2519 dlm_mle_detach_hb_events(dlm, oldmle);
2520 dlm_put_mle(oldmle);
2525 dlm_mle_detach_hb_events(dlm, mle);
2528 kmem_cache_free(dlm_mle_cache, mle);
2535 * at this point, we have a migration target, an mle
2536 * in the master list, and the MIGRATING flag set on
2540 /* now that remote nodes are spinning on the MIGRATING flag,
2541 * ensure that all assert_master work is flushed. */
2542 flush_workqueue(dlm->dlm_worker);
2544 /* get an extra reference on the mle.
2545 * otherwise the assert_master from the new
2546 * master will destroy this.
2547 * also, make sure that all callers of dlm_get_mle
2548 * take both dlm->spinlock and dlm->master_lock */
2549 spin_lock(&dlm->spinlock);
2550 spin_lock(&dlm->master_lock);
2551 dlm_get_mle_inuse(mle);
2552 spin_unlock(&dlm->master_lock);
2553 spin_unlock(&dlm->spinlock);
2555 /* notify new node and send all lock state */
2556 /* call send_one_lockres with migration flag.
2557 * this serves as notice to the target node that a
2558 * migration is starting. */
2559 ret = dlm_send_one_lockres(dlm, res, mres, target,
2560 DLM_MRES_MIGRATION);
2563 mlog(0, "migration to node %u failed with %d\n",
2565 /* migration failed, detach and clean up mle */
2566 dlm_mle_detach_hb_events(dlm, mle);
2568 dlm_put_mle_inuse(mle);
2569 spin_lock(&res->spinlock);
2570 res->state &= ~DLM_LOCK_RES_MIGRATING;
2572 spin_unlock(&res->spinlock);
2573 if (dlm_is_host_down(ret))
2574 dlm_wait_for_node_death(dlm, target,
2575 DLM_NODE_DEATH_WAIT_MAX);
2579 /* at this point, the target sends a message to all nodes,
2580 * (using dlm_do_migrate_request). this node is skipped since
2581 * we had to put an mle in the list to begin the process. this
2582 * node now waits for target to do an assert master. this node
2583 * will be the last one notified, ensuring that the migration
2584 * is complete everywhere. if the target dies while this is
2585 * going on, some nodes could potentially see the target as the
2586 * master, so it is important that my recovery finds the migration
2587 * mle and sets the master to UNKNOWN. */
2590 /* wait for new node to assert master */
2592 ret = wait_event_interruptible_timeout(mle->wq,
2593 (atomic_read(&mle->woken) == 1),
2594 msecs_to_jiffies(5000));
2597 if (atomic_read(&mle->woken) == 1 ||
2598 res->owner == target)
2601 mlog(0, "%s:%.*s: timed out during migration\n",
2602 dlm->name, res->lockname.len, res->lockname.name);
2603 /* avoid hang during shutdown when migrating lockres
2604 * to a node which also goes down */
2605 if (dlm_is_node_dead(dlm, target)) {
2606 mlog(0, "%s:%.*s: expected migration "
2607 "target %u is no longer up, restarting\n",
2608 dlm->name, res->lockname.len,
2609 res->lockname.name, target);
2611 /* migration failed, detach and clean up mle */
2612 dlm_mle_detach_hb_events(dlm, mle);
2614 dlm_put_mle_inuse(mle);
2615 spin_lock(&res->spinlock);
2616 res->state &= ~DLM_LOCK_RES_MIGRATING;
2618 spin_unlock(&res->spinlock);
2622 mlog(0, "%s:%.*s: caught signal during migration\n",
2623 dlm->name, res->lockname.len, res->lockname.name);
2626 /* all done, set the owner, clear the flag */
2627 spin_lock(&res->spinlock);
2628 dlm_set_lockres_owner(dlm, res, target);
2629 res->state &= ~DLM_LOCK_RES_MIGRATING;
2630 dlm_remove_nonlocal_locks(dlm, res);
2631 spin_unlock(&res->spinlock);
2634 /* master is known, detach if not already detached */
2635 dlm_mle_detach_hb_events(dlm, mle);
2636 dlm_put_mle_inuse(mle);
2639 dlm_lockres_calc_usage(dlm, res);
2642 /* re-dirty the lockres if we failed */
2644 dlm_kick_thread(dlm, res);
2646 /* wake up waiters if the MIGRATING flag got set
2647 * but migration failed */
2652 free_page((unsigned long)mres);
2656 mlog(0, "%s: Migrating %.*s to %u, returns %d\n", dlm->name, namelen,
2661 #define DLM_MIGRATION_RETRY_MS 100
2664 * Should be called only after beginning the domain leave process.
2665 * There should not be any remaining locks on nonlocal lock resources,
2666 * and there should be no local locks left on locally mastered resources.
2668 * Called with the dlm spinlock held, may drop it to do migration, but
2669 * will re-acquire before exit.
2671 * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped
2673 int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2676 int lock_dropped = 0;
2677 u8 target = O2NM_MAX_NODES;
2679 assert_spin_locked(&dlm->spinlock);
2681 spin_lock(&res->spinlock);
2682 if (dlm_is_lockres_migrateable(dlm, res))
2683 target = dlm_pick_migration_target(dlm, res);
2684 spin_unlock(&res->spinlock);
2686 if (target == O2NM_MAX_NODES)
2689 /* Wheee! Migrate lockres here! Will sleep so drop spinlock. */
2690 spin_unlock(&dlm->spinlock);
2692 ret = dlm_migrate_lockres(dlm, res, target);
2694 mlog(0, "%s: res %.*s, Migrate to node %u failed with %d\n",
2695 dlm->name, res->lockname.len, res->lockname.name,
2697 spin_lock(&dlm->spinlock);
2699 return lock_dropped;
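/*
 * The expected caller is the domain-leave path (dlm_migrate_all_locks()
 * in dlmdomain.c). Since a nonzero return means dlm->spinlock was
 * dropped and retaken, the caller must treat its hash-bucket iterator
 * as invalid, roughly (a sketch):
 *
 *	dropped = dlm_empty_lockres(dlm, res);
 *	if (dropped)
 *		goto redo_bucket;	// iterator no longer valid
 */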
2702 int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock)
2705 spin_lock(&dlm->ast_lock);
2706 spin_lock(&lock->spinlock);
2707 ret = (list_empty(&lock->bast_list) && !lock->bast_pending);
2708 spin_unlock(&lock->spinlock);
2709 spin_unlock(&dlm->ast_lock);
2713 static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
2714 struct dlm_lock_resource *res,
2718 spin_lock(&res->spinlock);
2719 can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
2720 spin_unlock(&res->spinlock);
2722 /* target has died, so make the caller break out of the
2723 * wait_event, but caller must recheck the domain_map */
2724 spin_lock(&dlm->spinlock);
2725 if (!test_bit(mig_target, dlm->domain_map))
2727 spin_unlock(&dlm->spinlock);
2731 static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm,
2732 struct dlm_lock_resource *res)
2735 spin_lock(&res->spinlock);
2736 ret = !!(res->state & DLM_LOCK_RES_DIRTY);
2737 spin_unlock(&res->spinlock);
2742 static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
2743 struct dlm_lock_resource *res,
2748 mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n",
2749 res->lockname.len, res->lockname.name, dlm->node_num,
2751 /* need to set MIGRATING flag on lockres. this is done by
2752 * ensuring that all asts have been flushed for this lockres. */
2753 spin_lock(&res->spinlock);
2754 BUG_ON(res->migration_pending);
2755 res->migration_pending = 1;
2756 /* strategy is to reserve an extra ast then release
2757 * it below, letting the release do all of the work */
2758 __dlm_lockres_reserve_ast(res);
2759 spin_unlock(&res->spinlock);
2761 /* now flush all the pending asts */
2762 dlm_kick_thread(dlm, res);
2763 /* before waiting on DIRTY, block processes which may
2764 * try to dirty the lockres before MIGRATING is set */
2765 spin_lock(&res->spinlock);
2766 BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY);
2767 res->state |= DLM_LOCK_RES_BLOCK_DIRTY;
2768 spin_unlock(&res->spinlock);
2769 /* now wait on any pending asts and the DIRTY state */
2770 wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
2771 dlm_lockres_release_ast(dlm, res);
2773 mlog(0, "about to wait on migration_wq, dirty=%s\n",
2774 res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
2775 /* if the extra ref we just put was the final one, this
2776 * will pass thru immediately. otherwise, we need to wait
2777 * for the last ast to finish. */
2779 ret = wait_event_interruptible_timeout(dlm->migration_wq,
2780 dlm_migration_can_proceed(dlm, res, target),
2781 msecs_to_jiffies(1000));
2783 mlog(0, "woken again: migrating? %s, dead? %s\n",
2784 res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2785 test_bit(target, dlm->domain_map) ? "no":"yes");
2787 mlog(0, "all is well: migrating? %s, dead? %s\n",
2788 res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2789 test_bit(target, dlm->domain_map) ? "no":"yes");
2791 if (!dlm_migration_can_proceed(dlm, res, target)) {
2792 mlog(0, "trying again...\n");
2797 /* did the target go down or die? */
2798 spin_lock(&dlm->spinlock);
2799 if (!test_bit(target, dlm->domain_map)) {
2800 mlog(ML_ERROR, "aha. migration target %u just went down\n",
2804 spin_unlock(&dlm->spinlock);
2807 * if target is down, we need to clear DLM_LOCK_RES_BLOCK_DIRTY for
2808 * another try; otherwise, we are sure the MIGRATING state is there,
2809 * drop the unneeded state which blocked threads trying to DIRTY
2810 * the lockres. */
2811 spin_lock(&res->spinlock);
2812 BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
2813 res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
2815 BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
2816 spin_unlock(&res->spinlock);
2821 * o the DLM_LOCK_RES_MIGRATING flag is set if target not down
2822 * o there are no pending asts on this lockres
2823 * o all processes trying to reserve an ast on this
2824 * lockres must wait for the MIGRATING flag to clear
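/*
 * The flag-setting barrier in miniature (a sketch; the BLOCK_DIRTY
 * handling, timeouts, and error paths above are omitted): hold an
 * extra reserved ast so the count cannot reach zero prematurely,
 * flush the dlm thread, then let the final release set MIGRATING
 * atomically under res->spinlock.
 *
 *	spin_lock(&res->spinlock);
 *	res->migration_pending = 1;
 *	__dlm_lockres_reserve_ast(res);
 *	spin_unlock(&res->spinlock);
 *
 *	dlm_kick_thread(dlm, res);
 *	dlm_lockres_release_ast(dlm, res);	// may set MIGRATING
 */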
2829 /* last step in the migration process.
2830 * original master calls this to free all of the dlm_lock
2831 * structures that used to be for other nodes. */
2832 static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
2833 struct dlm_lock_resource *res)
2835 struct list_head *queue = &res->granted;
2837 struct dlm_lock *lock, *next;
2839 assert_spin_locked(&res->spinlock);
2841 BUG_ON(res->owner == dlm->node_num);
2843 for (i = 0; i < 3; i++) {
2844 list_for_each_entry_safe(lock, next, queue, list) {
2845 if (lock->ml.node != dlm->node_num) {
2846 mlog(0, "putting lock for node %u\n",
2848 /* be extra careful */
2849 BUG_ON(!list_empty(&lock->ast_list));
2850 BUG_ON(!list_empty(&lock->bast_list));
2851 BUG_ON(lock->ast_pending);
2852 BUG_ON(lock->bast_pending);
2853 dlm_lockres_clear_refmap_bit(dlm, res,
2855 list_del_init(&lock->list);
2857 /* In a normal unlock, we would have added a
2858 * DLM_UNLOCK_FREE_LOCK action. Force it. */
2866 bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit);
2867 if (bit >= O2NM_MAX_NODES)
2869 /* do not clear the local node reference; if there is a
2870 * process holding this, let it drop the ref itself */
2871 if (bit != dlm->node_num) {
2872 mlog(0, "%s:%.*s: node %u had a ref to this "
2873 "migrating lockres, clearing\n", dlm->name,
2874 res->lockname.len, res->lockname.name, bit);
2875 dlm_lockres_clear_refmap_bit(dlm, res, bit);
2882 * Pick a node to migrate the lock resource to. This function selects a
2883 * potential target based first on the locks and then on the refmap. It skips
2884 * nodes that are in the process of exiting the domain.
2886 static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
2887 struct dlm_lock_resource *res)
2889 enum dlm_lockres_list idx;
2890 struct list_head *queue = &res->granted;
2891 struct dlm_lock *lock;
2893 u8 nodenum = O2NM_MAX_NODES;
2895 assert_spin_locked(&dlm->spinlock);
2896 assert_spin_locked(&res->spinlock);
2898 /* Go through all the locks */
2899 for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) {
2900 queue = dlm_list_idx_to_ptr(res, idx);
2901 list_for_each_entry(lock, queue, list) {
2902 if (lock->ml.node == dlm->node_num)
2904 if (test_bit(lock->ml.node, dlm->exit_domain_map))
2906 nodenum = lock->ml.node;
2911 /* Go through the refmap */
2914 noderef = find_next_bit(res->refmap, O2NM_MAX_NODES,
2916 if (noderef >= O2NM_MAX_NODES)
2918 if (noderef == dlm->node_num)
2920 if (test_bit(noderef, dlm->exit_domain_map))
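/*
 * Summarized: prefer a node that already holds a lock on this
 * resource, then fall back to any node with a refmap bit set; never
 * pick this node or one in dlm->exit_domain_map. A return of
 * O2NM_MAX_NODES means "no candidate", which dlm_empty_lockres()
 * above checks for.
 */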
2930 /* this is called by the new master once all lockres
2931 * data has been received */
2932 static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
2933 struct dlm_lock_resource *res,
2934 u8 master, u8 new_master,
2935 struct dlm_node_iter *iter)
2937 struct dlm_migrate_request migrate;
2938 int ret, skip, status = 0;
2941 memset(&migrate, 0, sizeof(migrate));
2942 migrate.namelen = res->lockname.len;
2943 memcpy(migrate.name, res->lockname.name, migrate.namelen);
2944 migrate.new_master = new_master;
2945 migrate.master = master;
2949 /* send message to all nodes, except the master and myself */
2950 while ((nodenum = dlm_node_iter_next(iter)) >= 0) {
2951 if (nodenum == master ||
2952 nodenum == new_master)
2955 /* We could race exit domain. If exited, skip. */
2956 spin_lock(&dlm->spinlock);
2957 skip = (!test_bit(nodenum, dlm->domain_map));
2958 spin_unlock(&dlm->spinlock);
2960 clear_bit(nodenum, iter->node_map);
2964 ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key,
2965 &migrate, sizeof(migrate), nodenum,
2968 mlog(ML_ERROR, "%s: res %.*s, Error %d send "
2969 "MIGRATE_REQUEST to node %u\n", dlm->name,
2970 migrate.namelen, migrate.name, ret, nodenum);
2971 if (!dlm_is_host_down(ret)) {
2972 mlog(ML_ERROR, "unhandled error=%d!\n", ret);
2975 clear_bit(nodenum, iter->node_map);
2977 } else if (status < 0) {
2978 mlog(0, "migrate request (node %u) returned %d!\n",
2981 } else if (status == DLM_MIGRATE_RESPONSE_MASTERY_REF) {
2982 /* during the migration request we short-circuited
2983 * the mastery of the lockres. make sure we have
2984 * a mastery ref for nodenum */
2985 mlog(0, "%s:%.*s: need ref for node %u\n",
2986 dlm->name, res->lockname.len, res->lockname.name,
2988 spin_lock(&res->spinlock);
2989 dlm_lockres_set_refmap_bit(dlm, res, nodenum);
2990 spin_unlock(&res->spinlock);
2997 mlog(0, "returning ret=%d\n", ret);
3002 /* if there is an existing mle for this lockres, we now know who the master is.
3003 * (the one who sent us *this* message) we can clear it up right away.
3004 * since the process that put the mle on the list still has a reference to it,
3005 * we can unhash it now, set the master and wake the process. as a result,
3006 * we will have no mle in the list to start with. now we can add an mle for
3007 * the migration and this should be the only one found for those scanning the
3008 * master list. */
3009 int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
3012 struct dlm_ctxt *dlm = data;
3013 struct dlm_lock_resource *res = NULL;
3014 struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf;
3015 struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
3017 unsigned int namelen, hash;
3023 name = migrate->name;
3024 namelen = migrate->namelen;
3025 hash = dlm_lockid_hash(name, namelen);
3027 /* preallocate.. if this fails, abort */
3028 mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
3035 /* check for pre-existing lock */
3036 spin_lock(&dlm->spinlock);
3037 res = __dlm_lookup_lockres(dlm, name, namelen, hash);
3039 spin_lock(&res->spinlock);
3040 if (res->state & DLM_LOCK_RES_RECOVERING) {
3041 /* if all is working ok, this can only mean that we got
3042 * a migrate request from a node that we now see as
3043 * dead. what can we do here? drop it to the floor? */
3044 spin_unlock(&res->spinlock);
3045 mlog(ML_ERROR, "Got a migrate request, but the "
3046 "lockres is marked as recovering!");
3047 kmem_cache_free(dlm_mle_cache, mle);
3048 ret = -EINVAL; /* need a better solution */
3051 res->state |= DLM_LOCK_RES_MIGRATING;
3052 spin_unlock(&res->spinlock);
3055 spin_lock(&dlm->master_lock);
3056 /* ignore status. only nonzero status would BUG. */
3057 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
3059 migrate->new_master,
3062 spin_unlock(&dlm->master_lock);
3064 spin_unlock(&dlm->spinlock);
3067 /* master is known, detach if not already detached */
3068 dlm_mle_detach_hb_events(dlm, oldmle);
3069 dlm_put_mle(oldmle);
3073 dlm_lockres_put(res);
3079 /* must be holding dlm->spinlock and dlm->master_lock
3080 * when adding a migration mle, we can clear any other mles
3081 * in the master list because we know with certainty that
3082 * the master is "master". so we remove any old mle from
3083 * the list after setting its master field, and then add
3084 * the new migration mle. this way we keep to the rule
3085 * of having only one mle for a given lock name at all times. */
3086 static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
3087 struct dlm_lock_resource *res,
3088 struct dlm_master_list_entry *mle,
3089 struct dlm_master_list_entry **oldmle,
3090 const char *name, unsigned int namelen,
3091 u8 new_master, u8 master)
3098 assert_spin_locked(&dlm->spinlock);
3099 assert_spin_locked(&dlm->master_lock);
3101 /* caller is responsible for any ref taken here on oldmle */
3102 found = dlm_find_mle(dlm, oldmle, (char *)name, namelen);
3104 struct dlm_master_list_entry *tmp = *oldmle;
3105 spin_lock(&tmp->spinlock);
3106 if (tmp->type == DLM_MLE_MIGRATION) {
3107 if (master == dlm->node_num) {
3108 /* ah another process raced me to it */
3109 mlog(0, "tried to migrate %.*s, but some "
3110 "process beat me to it\n",
3114 /* bad. 2 NODES are trying to migrate! */
3115 mlog(ML_ERROR, "migration error mle: "
3116 "master=%u new_master=%u // request: "
3117 "master=%u new_master=%u // "
3119 tmp->master, tmp->new_master,
3125 /* this is essentially what assert_master does */
3126 tmp->master = master;
3127 atomic_set(&tmp->woken, 1);
3129 /* remove it so that only one mle will be found */
3130 __dlm_unlink_mle(dlm, tmp);
3131 __dlm_mle_detach_hb_events(dlm, tmp);
3132 if (tmp->type == DLM_MLE_MASTER) {
3133 ret = DLM_MIGRATE_RESPONSE_MASTERY_REF;
3134 mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
3135 "telling master to get ref "
3136 "for cleared out mle during "
3137 "migration\n", dlm->name,
3138 namelen, name, master,
3142 spin_unlock(&tmp->spinlock);
3145 /* now add a migration mle to the tail of the list */
3146 dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
3147 mle->new_master = new_master;
3148 /* the new master will be sending an assert master for this.
3149 * at that point we will get the refmap reference */
3150 mle->master = master;
3151 /* do this for consistency with other mle types */
3152 set_bit(new_master, mle->maybe_map);
3153 __dlm_insert_mle(dlm, mle);
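/*
 * Caller locking sketch (both call sites in this file follow it):
 * dlm->spinlock is taken before dlm->master_lock, and both are held
 * across the add, e.g.
 *
 *	spin_lock(&dlm->spinlock);
 *	spin_lock(&dlm->master_lock);
 *	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
 *				    namelen, target, dlm->node_num);
 *	spin_unlock(&dlm->master_lock);
 *	spin_unlock(&dlm->spinlock);
 */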
3159 * Sets the owner of the lockres, associated with the mle, to UNKNOWN
3161 static struct dlm_lock_resource *dlm_reset_mleres_owner(struct dlm_ctxt *dlm,
3162 struct dlm_master_list_entry *mle)
3164 struct dlm_lock_resource *res;
3166 /* Find the lockres associated with the mle and set its owner to UNK */
3167 res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen,
3170 spin_unlock(&dlm->master_lock);
3172 /* move lockres onto recovery list */
3173 spin_lock(&res->spinlock);
3174 dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
3175 dlm_move_lockres_to_recovery_list(dlm, res);
3176 spin_unlock(&res->spinlock);
3177 dlm_lockres_put(res);
3179 /* about to get rid of mle, detach from heartbeat */
3180 __dlm_mle_detach_hb_events(dlm, mle);
3183 spin_lock(&dlm->master_lock);
3185 spin_unlock(&dlm->master_lock);
3191 static void dlm_clean_migration_mle(struct dlm_ctxt *dlm,
3192 struct dlm_master_list_entry *mle)
3194 __dlm_mle_detach_hb_events(dlm, mle);
3196 spin_lock(&mle->spinlock);
3197 __dlm_unlink_mle(dlm, mle);
3198 atomic_set(&mle->woken, 1);
3199 spin_unlock(&mle->spinlock);
3204 static void dlm_clean_block_mle(struct dlm_ctxt *dlm,
3205 struct dlm_master_list_entry *mle, u8 dead_node)
3209 BUG_ON(mle->type != DLM_MLE_BLOCK);
3211 spin_lock(&mle->spinlock);
3212 bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
3213 if (bit != dead_node) {
3214 mlog(0, "mle found, but dead node %u would not have been "
3215 "master\n", dead_node);
3216 spin_unlock(&mle->spinlock);
3218 /* Must drop the refcount by one since the assert_master will
3219 * never arrive. This may result in the mle being unlinked and
3220 * freed, but there may still be a process waiting in the
3221 * dlmlock path which is fine. */
3222 mlog(0, "node %u was expected master\n", dead_node);
3223 atomic_set(&mle->woken, 1);
3224 spin_unlock(&mle->spinlock);
3227 /* Do not need events any longer, so detach from heartbeat */
3228 __dlm_mle_detach_hb_events(dlm, mle);
3233 void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
3235 struct dlm_master_list_entry *mle;
3236 struct dlm_lock_resource *res;
3237 struct hlist_head *bucket;
3238 struct hlist_node *tmp;
3241 mlog(0, "dlm=%s, dead node=%u\n", dlm->name, dead_node);
3243 assert_spin_locked(&dlm->spinlock);
3245 /* clean the master list */
3246 spin_lock(&dlm->master_lock);
3247 for (i = 0; i < DLM_HASH_BUCKETS; i++) {
3248 bucket = dlm_master_hash(dlm, i);
3249 hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) {
3250 BUG_ON(mle->type != DLM_MLE_BLOCK &&
3251 mle->type != DLM_MLE_MASTER &&
3252 mle->type != DLM_MLE_MIGRATION);
3254 /* MASTER mles are initiated locally. The waiting
3255 * process will notice the node map change shortly.
3256 * Let that happen as normal. */
3257 if (mle->type == DLM_MLE_MASTER)
3260 /* BLOCK mles are initiated by other nodes. Need to
3261 * clean up if the dead node would have been the
3263 if (mle->type == DLM_MLE_BLOCK) {
3264 dlm_clean_block_mle(dlm, mle, dead_node);
3268 /* Everything else is a MIGRATION mle */
3270 /* The rule for MIGRATION mles is that the master
3271 * becomes UNKNOWN if *either* the original or the new
3272 * master dies. All UNKNOWN lockres' are sent to
3273 * whichever node becomes the recovery master. The new
3274 * master is responsible for determining if there is
3275 * still a master for this lockres, or if it needs to
3276 * take over mastery. Either way, this node should
3277 * expect another message to resolve this. */
3279 if (mle->master != dead_node &&
3280 mle->new_master != dead_node)
3283 /* If we have reached this point, this mle needs to be
3284 * removed from the list and freed. */
3285 dlm_clean_migration_mle(dlm, mle);
3287 mlog(0, "%s: node %u died during migration from "
3288 "%u to %u!\n", dlm->name, dead_node, mle->master,
3291 /* If we find a lockres associated with the mle, we've
3292 * hit this rare case that messes up our lock ordering.
3293 * If so, we need to drop the master lock so that we can
3294 * take the lockres lock, meaning that we will have to
3295 * restart from the head of list. */
3296 res = dlm_reset_mleres_owner(dlm, mle);
3301 /* This may be the last reference */
3305 spin_unlock(&dlm->master_lock);
3308 int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
3311 struct dlm_node_iter iter;
3314 spin_lock(&dlm->spinlock);
3315 dlm_node_iter_init(dlm->domain_map, &iter);
3316 clear_bit(old_master, iter.node_map);
3317 clear_bit(dlm->node_num, iter.node_map);
3318 spin_unlock(&dlm->spinlock);
3320 /* ownership of the lockres is changing. account for the
3321 * mastery reference here since old_master will briefly have
3322 * a reference after the migration completes */
3323 spin_lock(&res->spinlock);
3324 dlm_lockres_set_refmap_bit(dlm, res, old_master);
3325 spin_unlock(&res->spinlock);
3327 mlog(0, "now time to do a migrate request to other nodes\n");
3328 ret = dlm_do_migrate_request(dlm, res, old_master,
3329 dlm->node_num, &iter);
3335 mlog(0, "doing assert master of %.*s to all except the original node\n",
3336 res->lockname.len, res->lockname.name);
3337 /* this call now finishes out the nodemap
3338 * even if one or more nodes die */
3339 ret = dlm_do_assert_master(dlm, res, iter.node_map,
3340 DLM_ASSERT_MASTER_FINISH_MIGRATION);
3342 /* no longer need to retry. all living nodes contacted. */
3347 memset(iter.node_map, 0, sizeof(iter.node_map));
3348 set_bit(old_master, iter.node_map);
3349 mlog(0, "doing assert master of %.*s back to %u\n",
3350 res->lockname.len, res->lockname.name, old_master);
3351 ret = dlm_do_assert_master(dlm, res, iter.node_map,
3352 DLM_ASSERT_MASTER_FINISH_MIGRATION);
3354 mlog(0, "assert master to original master failed "
3356 /* the only nonzero status here would be because of
3357 * a dead original node. we're done. */
3361 /* all done, set the owner, clear the flag */
3362 spin_lock(&res->spinlock);
3363 dlm_set_lockres_owner(dlm, res, dlm->node_num);
3364 res->state &= ~DLM_LOCK_RES_MIGRATING;
3365 spin_unlock(&res->spinlock);
3366 /* re-dirty it on the new master */
3367 dlm_kick_thread(dlm, res);
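/*
 * Note the ordering above: the new master asserts to every live node
 * except old_master first, and to old_master only at the end, so the
 * old master (whose refmap bit was set before the migrate request)
 * keeps the lockres around until the rest of the domain already
 * agrees on the new owner.
 */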
3374 * LOCKRES AST REFCOUNT
3375 * this is integral to migration
3378 /* to record future intent to call an ast, reserve one ahead of time.
3379 * this should be called only after waiting on the lockres
3380 * with dlm_wait_on_lockres, and while still holding the
3381 * spinlock after the call. */
3382 void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res)
3384 assert_spin_locked(&res->spinlock);
3385 if (res->state & DLM_LOCK_RES_MIGRATING) {
3386 __dlm_print_one_lock_resource(res);
3388 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3390 atomic_inc(&res->asts_reserved);
3394 * used to drop the reserved ast, either because it went unused,
3395 * or because the ast/bast was actually called.
3397 * also, if there is a pending migration on this lockres,
3398 * and this was the last pending ast on the lockres,
3399 * atomically set the MIGRATING flag before we drop the lock.
3400 * this is how we ensure that migration can proceed with no
3401 * asts in progress. note that it is ok if the state of the
3402 * queues is such that a lock should be granted in the future
3403 * or that a bast should be fired, because the new master will
3404 * shuffle the lists on this lockres as soon as it is migrated.
3406 void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
3407 struct dlm_lock_resource *res)
3409 if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
3412 if (!res->migration_pending) {
3413 spin_unlock(&res->spinlock);
3417 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3418 res->migration_pending = 0;
3419 res->state |= DLM_LOCK_RES_MIGRATING;
3420 spin_unlock(&res->spinlock);
3422 wake_up(&dlm->migration_wq);
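/*
 * Pairing sketch: every reserved ast must be released exactly once,
 * whether or not the ast/bast actually fired. Per the comment above
 * __dlm_lockres_reserve_ast(), the reserve happens under
 * res->spinlock:
 *
 *	spin_lock(&res->spinlock);
 *	__dlm_lockres_reserve_ast(res);
 *	spin_unlock(&res->spinlock);
 *	...
 *	dlm_lockres_release_ast(dlm, res);	// last release may set
 *						// DLM_LOCK_RES_MIGRATING
 */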
3425 void dlm_force_free_mles(struct dlm_ctxt *dlm)
3428 struct hlist_head *bucket;
3429 struct dlm_master_list_entry *mle;
3430 struct hlist_node *tmp;
3433 * We notified all other nodes that we are exiting the domain and
3434 * marked the dlm state to DLM_CTXT_LEAVING. If any mles are still
3435 * around we force free them and wake any processes that are waiting
3436 * on the mles.
3437 */
3438 spin_lock(&dlm->spinlock);
3439 spin_lock(&dlm->master_lock);
3441 BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING);
3442 BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES));
3444 for (i = 0; i < DLM_HASH_BUCKETS; i++) {
3445 bucket = dlm_master_hash(dlm, i);
3446 hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) {
3447 if (mle->type != DLM_MLE_BLOCK) {
3448 mlog(ML_ERROR, "bad mle: %p\n", mle);
3449 dlm_print_one_mle(mle);
3451 atomic_set(&mle->woken, 1);
3454 __dlm_unlink_mle(dlm, mle);
3455 __dlm_mle_detach_hb_events(dlm, mle);
3459 spin_unlock(&dlm->master_lock);
3460 spin_unlock(&dlm->spinlock);