2 * Copyright (c) 2009, 2010 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Alex Hornung <ahornung@gmail.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
38 #include <sys/sysctl.h>
41 #include <sys/diskslice.h>
43 #include <sys/malloc.h>
44 #include <machine/md_var.h>
45 #include <sys/ctype.h>
46 #include <sys/syslog.h>
47 #include <sys/device.h>
48 #include <sys/msgport.h>
49 #include <sys/msgport2.h>
51 #include <sys/dsched.h>
52 #include <sys/fcntl.h>
53 #include <machine/varargs.h>
/*
 * Module-global state for the dsched framework: the policy list, the
 * per-subsystem locks, object caches for the three context structures,
 * and the built-in noop fallback policy.
 * NOTE(review): extraction dropped blank/structural lines; residual line
 * numbers are artifacts of the extraction, not code.
 */
55 TAILQ_HEAD(tdio_list_head, dsched_thread_io);
57 MALLOC_DEFINE(M_DSCHED, "dsched", "dsched allocs");
/* Handlers of the built-in noop policy (defined at the bottom of the file) */
59 static dsched_prepare_t noop_prepare;
60 static dsched_teardown_t noop_teardown;
61 static dsched_cancel_t noop_cancel;
62 static dsched_queue_t noop_queue;
/* File-local destructors / sysctl attach helper */
64 static void dsched_sysctl_add_disk(struct dsched_disk_ctx *diskctx, char *name);
65 static void dsched_disk_ctx_destroy(struct dsched_disk_ctx *diskctx);
66 static void dsched_thread_io_destroy(struct dsched_thread_io *tdio);
67 static void dsched_thread_ctx_destroy(struct dsched_thread_ctx *tdctx);
/* dsched_inited: set once initialization ran; guards the new/exit hooks */
69 static int dsched_inited = 0;
/* default_set: nonzero once a default policy was explicitly configured */
70 static int default_set = 0;
72 struct lock dsched_lock;
/* Debug verbosity threshold consumed by dsched_debug(); sysctl-controlled */
73 static int dsched_debug_enable = 0;
75 struct dsched_stats dsched_stats;
/* malloc-backed objcache argument triples for the three context caches */
77 struct objcache_malloc_args dsched_disk_ctx_malloc_args = {
78 DSCHED_DISK_CTX_MAX_SZ, M_DSCHED };
79 struct objcache_malloc_args dsched_thread_io_malloc_args = {
80 DSCHED_THREAD_IO_MAX_SZ, M_DSCHED };
81 struct objcache_malloc_args dsched_thread_ctx_malloc_args = {
82 DSCHED_THREAD_CTX_MAX_SZ, M_DSCHED };
84 static struct objcache *dsched_diskctx_cache;
85 static struct objcache *dsched_tdctx_cache;
86 static struct objcache *dsched_tdio_cache;
/* Global list of all live thread contexts, protected by the global tdctx lock */
88 TAILQ_HEAD(, dsched_thread_ctx) dsched_tdctx_list =
89 TAILQ_HEAD_INITIALIZER(dsched_tdctx_list);
91 struct lock dsched_tdctx_lock;
93 static struct dsched_policy_head dsched_policy_list =
94 TAILQ_HEAD_INITIALIZER(dsched_policy_list);
/* Built-in passthrough policy; also the initial default */
96 static struct dsched_policy dsched_noop_policy = {
99 .prepare = noop_prepare,
100 .teardown = noop_teardown,
101 .cancel_all = noop_cancel,
102 .bio_queue = noop_queue
105 static struct dsched_policy *default_policy = &dsched_noop_policy;
108 * dsched_debug() is a SYSCTL and TUNABLE controlled debug output function
/*
 * Emit a kprintf-style message only when the requested level is at or
 * below the dsched_debug_enable threshold.
 * NOTE(review): varargs body not visible in this extraction — verify
 * against upstream.
 */
112 dsched_debug(int level, char *fmt, ...)
117 if (level <= dsched_debug_enable)
125 * Called on disk_create()
126 * tries to read which policy to use from loader.conf, if there's
127 * none specified, the default policy is used.
/*
 * Tunable lookup order: "dsched.policy.<head><unit>", then
 * "dsched.policy.<head>", then "dsched.policy.default"; falls back to
 * default_policy when none resolves.  Finally registers the disk's
 * policy sysctl node.  Runs under dsched_lock.
 * NOTE(review): several structural lines (braces/else) were dropped by
 * the extraction — confirm control flow against upstream.
 */
130 dsched_disk_create_callback(struct disk *dp, const char *head_name, int unit)
132 char tunable_key[SPECNAMELEN + 48];
133 char sched_policy[DSCHED_POLICY_NAME_LENGTH];
135 struct dsched_policy *policy = NULL;
137 /* Also look for serno stuff? */
138 /* kprintf("dsched_disk_create_callback() for disk %s%d\n", head_name, unit); */
139 lockmgr(&dsched_lock, LK_EXCLUSIVE);
/* Most specific key first: policy for this exact head+unit */
141 ksnprintf(tunable_key, sizeof(tunable_key), "dsched.policy.%s%d",
143 if (TUNABLE_STR_FETCH(tunable_key, sched_policy,
144 sizeof(sched_policy)) != 0) {
145 policy = dsched_find_policy(sched_policy);
/* Then a per-head (driver-wide) key */
148 ksnprintf(tunable_key, sizeof(tunable_key), "dsched.policy.%s",
150 for (ptr = tunable_key; *ptr; ptr++) {
154 if (!policy && (TUNABLE_STR_FETCH(tunable_key, sched_policy,
155 sizeof(sched_policy)) != 0)) {
156 policy = dsched_find_policy(sched_policy);
/* Last resort: the global default key (only if no default set yet) */
159 ksnprintf(tunable_key, sizeof(tunable_key), "dsched.policy.default");
160 if (!policy && !default_set && (TUNABLE_STR_FETCH(tunable_key, sched_policy,
161 sizeof(sched_policy)) != 0)) {
162 policy = dsched_find_policy(sched_policy);
166 if (!default_set && bootverbose) {
168 "No policy for %s%d specified, "
169 "or policy not found\n",
172 dsched_set_policy(dp, default_policy);
174 dsched_set_policy(dp, policy);
/* dm devices ("mapper/...") keep their full name; others get head+unit */
177 if (strncmp(head_name, "mapper/", strlen("mapper/")) == 0)
178 ksnprintf(tunable_key, sizeof(tunable_key), "%s", head_name);
180 ksnprintf(tunable_key, sizeof(tunable_key), "%s%d", head_name, unit);
181 for (ptr = tunable_key; *ptr; ptr++) {
185 dsched_sysctl_add_disk(
186 (struct dsched_disk_ctx *)dsched_get_disk_priv(dp),
189 lockmgr(&dsched_lock, LK_RELEASE);
193 * Called from disk_setdiskinfo (or rather _setdiskinfo). This will check if
194 * there's any policy associated with the serial number of the device.
/*
 * Looks up "dsched.policy.<serialno>" and, if it names a registered
 * policy, switches the disk to it and (re)adds the sysctl node.
 * Returns early when the disk has no serial number.
 */
197 dsched_disk_update_callback(struct disk *dp, struct disk_info *info)
199 char tunable_key[SPECNAMELEN + 48];
200 char sched_policy[DSCHED_POLICY_NAME_LENGTH];
201 struct dsched_policy *policy = NULL;
203 if (info->d_serialno == NULL)
206 lockmgr(&dsched_lock, LK_EXCLUSIVE);
208 ksnprintf(tunable_key, sizeof(tunable_key), "dsched.policy.%s",
211 if((TUNABLE_STR_FETCH(tunable_key, sched_policy,
212 sizeof(sched_policy)) != 0)) {
213 policy = dsched_find_policy(sched_policy);
217 dsched_switch(dp, policy);
/* Expose the (possibly new) policy under dsched.policy.<serialno> */
220 dsched_sysctl_add_disk(
221 (struct dsched_disk_ctx *)dsched_get_disk_priv(dp),
224 lockmgr(&dsched_lock, LK_RELEASE);
228 * Called on disk_destroy()
229 * shuts down the scheduler core and cancels all remaining bios
/*
 * Swaps in the noop policy first so new I/O passes through, then lets
 * the old policy cancel outstanding bios and tear down its per-disk
 * state.  Frees the disk's sysctl context and drops the policy ref.
 */
232 dsched_disk_destroy_callback(struct disk *dp)
234 struct dsched_policy *old_policy;
235 struct dsched_disk_ctx *diskctx;
237 lockmgr(&dsched_lock, LK_EXCLUSIVE);
239 diskctx = dsched_get_disk_priv(dp);
241 old_policy = dp->d_sched_policy;
242 dp->d_sched_policy = &dsched_noop_policy;
243 old_policy->cancel_all(dsched_get_disk_priv(dp));
244 old_policy->teardown(dsched_get_disk_priv(dp));
246 if (diskctx->flags & DSCHED_SYSCTL_CTX_INITED)
247 sysctl_ctx_free(&diskctx->sysctl_ctx);
250 atomic_subtract_int(&old_policy->ref_count, 1);
251 KKASSERT(old_policy->ref_count >= 0);
253 lockmgr(&dsched_lock, LK_RELEASE);
/*
 * Main I/O entry point: routes a bio for a disk through the disk's
 * current policy.  Bios without an attached thread context, and bios
 * the policy declines (nonzero bio_queue return — structural lines
 * dropped here by the extraction), are dispatched raw.
 */
258 dsched_queue(struct disk *dp, struct bio *bio)
260 struct dsched_thread_ctx *tdctx;
261 struct dsched_thread_io *tdio;
262 struct dsched_disk_ctx *diskctx;
264 int found = 0, error = 0;
266 tdctx = dsched_get_buf_priv(bio->bio_buf);
268 /* We don't handle this case, let dsched dispatch */
269 atomic_add_int(&dsched_stats.no_tdctx, 1);
270 dsched_strategy_raw(dp, bio);
274 DSCHED_THREAD_CTX_LOCK(tdctx);
276 KKASSERT(!TAILQ_EMPTY(&tdctx->tdio_list));
279 * iterate in reverse to make sure we find the most up-to-date
280 * tdio for a given disk. After a switch it may take some time
281 * for everything to clean up.
283 TAILQ_FOREACH_REVERSE(tdio, &tdctx->tdio_list, tdio_list_head, link) {
284 if (tdio->dp == dp) {
285 dsched_thread_io_ref(tdio);
291 DSCHED_THREAD_CTX_UNLOCK(tdctx);
/* The bio no longer needs its thread-ctx ref once a tdio was found */
292 dsched_clr_buf_priv(bio->bio_buf);
293 dsched_thread_ctx_unref(tdctx); /* acquired on new_buf */
295 KKASSERT(found == 1);
296 diskctx = dsched_get_disk_priv(dp);
297 dsched_disk_ctx_ref(diskctx);
299 if (dp->d_sched_policy != &dsched_noop_policy)
300 KKASSERT(tdio->debug_policy == dp->d_sched_policy);
302 KKASSERT(tdio->debug_inited == 0xF00F1234);
304 error = dp->d_sched_policy->bio_queue(diskctx, tdio, bio);
307 dsched_strategy_raw(dp, bio);
309 dsched_disk_ctx_unref(diskctx);
310 dsched_thread_io_unref(tdio);
315 * Called from each module_init or module_attach of each policy
316 * registers the policy in the local policy list.
/*
 * Inserts the policy into dsched_policy_list under dsched_lock and
 * takes a ref; logs and refuses duplicates by name.  Return value line
 * not visible in this extraction.
 */
319 dsched_register(struct dsched_policy *d_policy)
321 struct dsched_policy *policy;
324 lockmgr(&dsched_lock, LK_EXCLUSIVE);
326 policy = dsched_find_policy(d_policy->name);
329 TAILQ_INSERT_TAIL(&dsched_policy_list, d_policy, link);
330 atomic_add_int(&d_policy->ref_count, 1);
332 dsched_debug(LOG_ERR, "Policy with name %s already registered!\n",
337 lockmgr(&dsched_lock, LK_RELEASE);
342 * Called from each module_detach of each policy
343 * unregisters the policy
/*
 * Removes the policy from the list if it is not in use (ref_count must
 * drop to exactly 0 after removing the registration ref); bails out —
 * presumably returning busy, return lines not visible — when extra
 * references remain.
 */
346 dsched_unregister(struct dsched_policy *d_policy)
348 struct dsched_policy *policy;
350 lockmgr(&dsched_lock, LK_EXCLUSIVE);
351 policy = dsched_find_policy(d_policy->name);
354 if (policy->ref_count > 1) {
355 lockmgr(&dsched_lock, LK_RELEASE);
358 TAILQ_REMOVE(&dsched_policy_list, policy, link);
359 atomic_subtract_int(&policy->ref_count, 1);
360 KKASSERT(policy->ref_count == 0);
362 lockmgr(&dsched_lock, LK_RELEASE);
369 * switches the policy by first removing the old one and then
370 * enabling the new one.
/*
 * No-op when the disk already runs new_policy.  Otherwise drops the
 * old policy's ref, parks the disk on the noop policy while the old
 * one tears down, then installs the new policy via dsched_set_policy().
 * Entire switch runs under dsched_lock.
 */
373 dsched_switch(struct disk *dp, struct dsched_policy *new_policy)
375 struct dsched_policy *old_policy;
377 /* If we are asked to set the same policy, do nothing */
378 if (dp->d_sched_policy == new_policy)
381 /* lock everything down, diskwise */
382 lockmgr(&dsched_lock, LK_EXCLUSIVE);
383 old_policy = dp->d_sched_policy;
385 atomic_subtract_int(&old_policy->ref_count, 1);
386 KKASSERT(old_policy->ref_count >= 0);
388 dp->d_sched_policy = &dsched_noop_policy;
389 old_policy->teardown(dsched_get_disk_priv(dp));
392 /* Bring everything back to life */
393 dsched_set_policy(dp, new_policy);
394 lockmgr(&dsched_lock, LK_RELEASE);
401 * Loads a given policy and attaches it to the specified disk.
402 * Also initializes the core for the policy
405 dsched_set_policy(struct disk *dp, struct dsched_policy *new_policy)
409 /* Check if it is locked already. if not, we acquire the devfs lock */
410 if (!(lockstatus(&dsched_lock, curthread)) == LK_EXCLUSIVE) {
411 lockmgr(&dsched_lock, LK_EXCLUSIVE);
415 DSCHED_GLOBAL_THREAD_CTX_LOCK();
417 policy_new(dp, new_policy);
418 new_policy->prepare(dsched_get_disk_priv(dp));
419 dp->d_sched_policy = new_policy;
421 DSCHED_GLOBAL_THREAD_CTX_UNLOCK();
423 atomic_add_int(&new_policy->ref_count, 1);
424 kprintf("disk scheduler: set policy of %s to %s\n", dp->d_cdev->si_name,
427 /* If we acquired the lock, we also get rid of it */
429 lockmgr(&dsched_lock, LK_RELEASE);
432 struct dsched_policy*
433 dsched_find_policy(char *search)
435 struct dsched_policy *policy;
436 struct dsched_policy *policy_found = NULL;
439 /* Check if it is locked already. if not, we acquire the devfs lock */
440 if (!(lockstatus(&dsched_lock, curthread)) == LK_EXCLUSIVE) {
441 lockmgr(&dsched_lock, LK_EXCLUSIVE);
445 TAILQ_FOREACH(policy, &dsched_policy_list, link) {
446 if (!strcmp(policy->name, search)) {
447 policy_found = policy;
452 /* If we acquired the lock, we also get rid of it */
454 lockmgr(&dsched_lock, LK_RELEASE);
/*
 * Scan all disks for one whose cdev name matches `search`.
 * NOTE(review): the match/return lines were dropped by the extraction;
 * presumably dp_found is set on match and returned — verify upstream.
 */
460 dsched_find_disk(char *search)
462 struct disk *dp_found = NULL;
463 struct disk *dp = NULL;
465 while((dp = disk_enumerate(dp))) {
466 if (!strcmp(dp->d_cdev->si_name, search)) {
/*
 * Iterator: starting after `dp`, return the next disk currently
 * attached to `policy` (return line not visible in this extraction).
 */
476 dsched_disk_enumerate(struct disk *dp, struct dsched_policy *policy)
478 while ((dp = disk_enumerate(dp))) {
479 if (dp->d_sched_policy == policy)
/*
 * Iterator over the registered policy list: NULL argument yields the
 * head, otherwise the successor of `pol`.
 */
486 struct dsched_policy *
487 dsched_policy_enumerate(struct dsched_policy *pol)
490 return (TAILQ_FIRST(&dsched_policy_list));
492 return (TAILQ_NEXT(pol, link));
/*
 * Fail a bio with ENXIO: mark the buffer errored with nothing
 * transferred (a biodone() call is presumably below — not visible in
 * this extraction).
 */
496 dsched_cancel_bio(struct bio *bp)
498 bp->bio_buf->b_error = ENXIO;
499 bp->bio_buf->b_flags |= B_ERROR;
500 bp->bio_buf->b_resid = bp->bio_buf->b_bcount;
/*
 * Bypass scheduling entirely: push the bio straight to the raw device.
 * Clears a stale bio_track (with a debug warning) as a safety net.
 */
506 dsched_strategy_raw(struct disk *dp, struct bio *bp)
509 * Ideally, this stuff shouldn't be needed... but just in case, we leave it in
512 KASSERT(dp->d_rawdev != NULL, ("dsched_strategy_raw sees NULL d_rawdev!!"));
513 if(bp->bio_track != NULL) {
514 dsched_debug(LOG_INFO,
515 "dsched_strategy_raw sees non-NULL bio_track!! "
517 bp->bio_track = NULL;
519 dev_dstrategy(dp->d_rawdev, bp);
/*
 * Issue a bio synchronously: clone the caller's buf/bio onto a private
 * pair (allocation lines not visible in this extraction), dispatch it,
 * biowait() for completion, then copy resid/error back to the caller.
 */
523 dsched_strategy_sync(struct disk *dp, struct bio *bio)
525 struct buf *bp, *nbp;
/* Mirror the relevant fields of the original buf onto the clone */
533 nbp->b_cmd = bp->b_cmd;
534 nbp->b_bufsize = bp->b_bufsize;
535 nbp->b_runningbufspace = bp->b_runningbufspace;
536 nbp->b_bcount = bp->b_bcount;
537 nbp->b_resid = bp->b_resid;
538 nbp->b_data = bp->b_data;
541 * Buffers undergoing device I/O do not need a kvabase/size.
543 nbp->b_kvabase = bp->b_kvabase;
544 nbp->b_kvasize = bp->b_kvasize;
546 nbp->b_dirtyend = bp->b_dirtyend;
/* Arrange for synchronous completion via biodone_sync()/biowait() */
548 nbio->bio_done = biodone_sync;
549 nbio->bio_flags |= BIO_SYNC;
550 nbio->bio_track = NULL;
552 nbio->bio_caller_info1.ptr = dp;
553 nbio->bio_offset = bio->bio_offset;
555 dev_dstrategy(dp->d_rawdev, nbio);
556 biowait(nbio, "dschedsync");
/* Propagate the result back to the caller's buf */
557 bp->b_resid = nbp->b_resid;
558 bp->b_error = nbp->b_error;
/* Clear borrowed kva before the clone is released (release not visible) */
561 nbp->b_kvabase = NULL;
/*
 * Issue a bio asynchronously with a caller-supplied completion
 * callback and private data; stamps the submit time into
 * bio_caller_info3 for the policy's use.
 */
568 dsched_strategy_async(struct disk *dp, struct bio *bio, biodone_t *done, void *priv)
572 nbio = push_bio(bio);
573 nbio->bio_done = done;
574 nbio->bio_offset = bio->bio_offset;
576 dsched_set_bio_dp(nbio, dp);
577 dsched_set_bio_priv(nbio, priv);
579 getmicrotime(&nbio->bio_caller_info3.tv);
580 dev_dstrategy(dp->d_rawdev, nbio);
584 * Ref and deref various structures. The 1->0 transition of the reference
585 * count actually transitions 1->0x80000000 and causes the object to be
586 * destroyed. It is possible for transitory references to occur on the
587 * object while it is being destroyed. We use bit 31 to indicate that
588 * destruction is in progress and to prevent nested destructions.
/* Take a reference on a disk context (sanity KKASSERTs not visible here) */
591 dsched_disk_ctx_ref(struct dsched_disk_ctx *diskctx)
595 refcount = atomic_fetchadd_int(&diskctx->refcount, 1);
/* Take a reference on a thread I/O context */
599 dsched_thread_io_ref(struct dsched_thread_io *tdio)
603 refcount = atomic_fetchadd_int(&tdio->refcount, 1);
/* Take a reference on a thread context */
607 dsched_thread_ctx_ref(struct dsched_thread_ctx *tdctx)
611 refcount = atomic_fetchadd_int(&tdctx->refcount, 1);
/*
 * Drop a diskctx reference; on the 1->0 transition atomically swing the
 * count to 0x80000000 (destruction-in-progress marker) and destroy.
 * The cmpset loop structure (for(;;), nrefs computation) was partially
 * dropped by the extraction.
 */
615 dsched_disk_ctx_unref(struct dsched_disk_ctx *diskctx)
621 * Handle 1->0 transitions for diskctx and nested destruction
622 * recursions. If the refs are already in destruction mode (bit 31
623 * set) on the 1->0 transition we don't try to destruct it again.
625 * 0x80000001->0x80000000 transitions are handled normally and
626 * thus avoid nested dstruction.
629 refs = diskctx->refcount;
633 KKASSERT(((refs ^ nrefs) & 0x80000000) == 0);
635 if (atomic_cmpset_int(&diskctx->refcount, refs, nrefs))
640 if (atomic_cmpset_int(&diskctx->refcount, refs, nrefs)) {
641 dsched_disk_ctx_destroy(diskctx);
/*
 * Final destruction of a disk context: detach every linked tdio (each
 * drops its back-reference), give the policy a destroy callback, then
 * return the object to its cache.  refcount must equal the bare
 * destruction marker 0x80000000 at this point.
 */
649 dsched_disk_ctx_destroy(struct dsched_disk_ctx *diskctx)
651 struct dsched_thread_io *tdio;
654 kprintf("diskctx (%p) destruction started, trace:\n", diskctx);
657 lockmgr(&diskctx->lock, LK_EXCLUSIVE);
658 while ((tdio = TAILQ_FIRST(&diskctx->tdio_list)) != NULL) {
659 KKASSERT(tdio->flags & DSCHED_LINKED_DISK_CTX);
660 TAILQ_REMOVE(&diskctx->tdio_list, tdio, dlink);
661 atomic_clear_int(&tdio->flags, DSCHED_LINKED_DISK_CTX);
662 tdio->diskctx = NULL;
663 /* XXX tdio->diskctx->dp->d_sched_policy->destroy_tdio(tdio);*/
664 dsched_thread_io_unref(tdio);
666 lockmgr(&diskctx->lock, LK_RELEASE);
667 if (diskctx->dp->d_sched_policy->destroy_diskctx)
668 diskctx->dp->d_sched_policy->destroy_diskctx(diskctx);
669 KKASSERT(diskctx->refcount == 0x80000000);
670 objcache_put(dsched_diskctx_cache, diskctx);
671 atomic_subtract_int(&dsched_stats.diskctx_allocations, 1);
/*
 * Drop a tdio reference; same bit-31 destruction protocol as
 * dsched_disk_ctx_unref() (cmpset retry-loop lines partially dropped
 * by the extraction).
 */
675 dsched_thread_io_unref(struct dsched_thread_io *tdio)
681 * Handle 1->0 transitions for tdio and nested destruction
682 * recursions. If the refs are already in destruction mode (bit 31
683 * set) on the 1->0 transition we don't try to destruct it again.
685 * 0x80000001->0x80000000 transitions are handled normally and
686 * thus avoid nested dstruction.
689 refs = tdio->refcount;
693 KKASSERT(((refs ^ nrefs) & 0x80000000) == 0);
695 if (atomic_cmpset_int(&tdio->refcount, refs, nrefs))
700 if (atomic_cmpset_int(&tdio->refcount, refs, nrefs)) {
701 dsched_thread_io_destroy(tdio);
/*
 * Final destruction of a tdio.  Unlinks it from its diskctx and tdctx
 * using ref/lock/recheck loops (the pointer may change while we wait
 * for the lock, hence the recheck-and-retry), then frees it back to
 * the cache.  Queue must already be empty.
 */
708 dsched_thread_io_destroy(struct dsched_thread_io *tdio)
710 struct dsched_thread_ctx *tdctx;
711 struct dsched_disk_ctx *diskctx;
714 kprintf("tdio (%p) destruction started, trace:\n", tdio);
717 KKASSERT(tdio->qlength == 0);
719 while ((diskctx = tdio->diskctx) != NULL) {
720 dsched_disk_ctx_ref(diskctx);
721 lockmgr(&diskctx->lock, LK_EXCLUSIVE);
/* Raced a concurrent unlink/relink: drop and retry */
722 if (diskctx != tdio->diskctx) {
723 lockmgr(&diskctx->lock, LK_RELEASE);
724 dsched_disk_ctx_unref(diskctx);
727 KKASSERT(tdio->flags & DSCHED_LINKED_DISK_CTX);
728 if (diskctx->dp->d_sched_policy->destroy_tdio)
729 diskctx->dp->d_sched_policy->destroy_tdio(tdio);
730 TAILQ_REMOVE(&diskctx->tdio_list, tdio, dlink);
731 atomic_clear_int(&tdio->flags, DSCHED_LINKED_DISK_CTX);
732 tdio->diskctx = NULL;
733 lockmgr(&diskctx->lock, LK_RELEASE);
734 dsched_disk_ctx_unref(diskctx);
736 while ((tdctx = tdio->tdctx) != NULL) {
737 dsched_thread_ctx_ref(tdctx);
738 lockmgr(&tdctx->lock, LK_EXCLUSIVE);
/* Same race-recheck pattern for the thread-ctx side */
739 if (tdctx != tdio->tdctx) {
740 lockmgr(&tdctx->lock, LK_RELEASE);
741 dsched_thread_ctx_unref(tdctx);
744 KKASSERT(tdio->flags & DSCHED_LINKED_THREAD_CTX);
745 TAILQ_REMOVE(&tdctx->tdio_list, tdio, link);
746 atomic_clear_int(&tdio->flags, DSCHED_LINKED_THREAD_CTX);
748 lockmgr(&tdctx->lock, LK_RELEASE);
749 dsched_thread_ctx_unref(tdctx);
751 KKASSERT(tdio->refcount == 0x80000000);
752 objcache_put(dsched_tdio_cache, tdio);
753 atomic_subtract_int(&dsched_stats.tdio_allocations, 1);
755 dsched_disk_ctx_unref(diskctx);
/*
 * Drop a tdctx reference; same bit-31 destruction protocol as the
 * other unref helpers (cmpset retry-loop lines partially dropped by
 * the extraction).
 */
760 dsched_thread_ctx_unref(struct dsched_thread_ctx *tdctx)
766 * Handle 1->0 transitions for tdctx and nested destruction
767 * recursions. If the refs are already in destruction mode (bit 31
768 * set) on the 1->0 transition we don't try to destruct it again.
770 * 0x80000001->0x80000000 transitions are handled normally and
771 * thus avoid nested dstruction.
774 refs = tdctx->refcount;
778 KKASSERT(((refs ^ nrefs) & 0x80000000) == 0);
780 if (atomic_cmpset_int(&tdctx->refcount, refs, nrefs))
785 if (atomic_cmpset_int(&tdctx->refcount, refs, nrefs)) {
786 dsched_thread_ctx_destroy(tdctx);
/*
 * Final destruction of a thread context: under the global tdctx lock,
 * unlink and unref every attached tdio, remove the tdctx from the
 * global list, and return it to its cache.
 */
793 dsched_thread_ctx_destroy(struct dsched_thread_ctx *tdctx)
795 struct dsched_thread_io *tdio;
798 kprintf("tdctx (%p) destruction started, trace:\n", tdctx);
801 DSCHED_GLOBAL_THREAD_CTX_LOCK();
803 lockmgr(&tdctx->lock, LK_EXCLUSIVE);
805 while ((tdio = TAILQ_FIRST(&tdctx->tdio_list)) != NULL) {
806 KKASSERT(tdio->flags & DSCHED_LINKED_THREAD_CTX);
807 TAILQ_REMOVE(&tdctx->tdio_list, tdio, link);
808 atomic_clear_int(&tdio->flags, DSCHED_LINKED_THREAD_CTX);
810 dsched_thread_io_unref(tdio);
812 KKASSERT(tdctx->refcount == 0x80000000);
813 TAILQ_REMOVE(&dsched_tdctx_list, tdctx, link);
815 lockmgr(&tdctx->lock, LK_RELEASE);
817 DSCHED_GLOBAL_THREAD_CTX_UNLOCK();
819 objcache_put(dsched_tdctx_cache, tdctx);
820 atomic_subtract_int(&dsched_stats.tdctx_allocations, 1);
/*
 * Allocate and initialize a tdio for disk `dp`, link it onto the
 * disk's tdio list and (when tdctx != NULL — conditional lines dropped
 * by the extraction) onto the thread context's list.  Takes a diskctx
 * ref up front and a tdio self-ref.
 */
823 struct dsched_thread_io *
824 dsched_thread_io_alloc(struct disk *dp, struct dsched_thread_ctx *tdctx,
825 struct dsched_policy *pol)
827 struct dsched_thread_io *tdio;
829 dsched_disk_ctx_ref(dsched_get_disk_priv(dp));
831 tdio = objcache_get(dsched_tdio_cache, M_WAITOK);
832 bzero(tdio, DSCHED_THREAD_IO_MAX_SZ);
834 /* XXX: maybe we do need another ref for the disk list for tdio */
835 dsched_thread_io_ref(tdio);
837 DSCHED_THREAD_IO_LOCKINIT(tdio);
840 tdio->diskctx = dsched_get_disk_priv(dp);
841 TAILQ_INIT(&tdio->queue);
/* Link onto the disk context's tdio list */
846 lockmgr(&tdio->diskctx->lock, LK_EXCLUSIVE);
847 TAILQ_INSERT_TAIL(&tdio->diskctx->tdio_list, tdio, dlink);
848 atomic_set_int(&tdio->flags, DSCHED_LINKED_DISK_CTX);
849 lockmgr(&tdio->diskctx->lock, LK_RELEASE);
855 /* Put the tdio in the tdctx list */
856 DSCHED_THREAD_CTX_LOCK(tdctx);
857 TAILQ_INSERT_TAIL(&tdctx->tdio_list, tdio, link);
858 DSCHED_THREAD_CTX_UNLOCK(tdctx);
859 atomic_set_int(&tdio->flags, DSCHED_LINKED_THREAD_CTX);
/* Debug breadcrumbs checked by dsched_queue() */
862 tdio->debug_policy = pol;
863 tdio->debug_inited = 0xF00F1234;
865 atomic_add_int(&dsched_stats.tdio_allocations, 1);
/*
 * Allocate and zero a per-disk context, take the initial ref, init its
 * lock and tdio list, then give the policy its new_diskctx callback.
 * Return statement not visible in this extraction.
 */
870 struct dsched_disk_ctx *
871 dsched_disk_ctx_alloc(struct disk *dp, struct dsched_policy *pol)
873 struct dsched_disk_ctx *diskctx;
875 diskctx = objcache_get(dsched_diskctx_cache, M_WAITOK);
876 bzero(diskctx, DSCHED_DISK_CTX_MAX_SZ);
877 dsched_disk_ctx_ref(diskctx);
879 DSCHED_DISK_CTX_LOCKINIT(diskctx);
880 TAILQ_INIT(&diskctx->tdio_list);
882 atomic_add_int(&dsched_stats.diskctx_allocations, 1);
883 if (pol->new_diskctx)
884 pol->new_diskctx(diskctx);
/*
 * Allocate a thread context for proc `p` (NULL for kernel threads),
 * create a tdio for every existing disk under the global lock, and
 * link the tdctx onto the global list.
 */
889 struct dsched_thread_ctx *
890 dsched_thread_ctx_alloc(struct proc *p)
892 struct dsched_thread_ctx *tdctx;
893 struct dsched_thread_io *tdio;
894 struct disk *dp = NULL;
896 tdctx = objcache_get(dsched_tdctx_cache, M_WAITOK);
897 bzero(tdctx, DSCHED_THREAD_CTX_MAX_SZ);
898 dsched_thread_ctx_ref(tdctx);
900 kprintf("dsched_thread_ctx_alloc, new tdctx = %p\n", tdctx);
902 DSCHED_THREAD_CTX_LOCKINIT(tdctx);
903 TAILQ_INIT(&tdctx->tdio_list);
906 DSCHED_GLOBAL_THREAD_CTX_LOCK();
907 while ((dp = disk_enumerate(dp))) {
908 tdio = dsched_thread_io_alloc(dp, tdctx, dp->d_sched_policy);
911 TAILQ_INSERT_TAIL(&dsched_tdctx_list, tdctx, link);
912 DSCHED_GLOBAL_THREAD_CTX_UNLOCK();
914 atomic_add_int(&dsched_stats.tdctx_allocations, 1);
915 /* XXX: no callback here */
/*
 * Attach `pol` to disk `dp`: allocate the disk context (plus an extra
 * ref held until policy_destroy), stash it as the disk's private data,
 * and create a tdio for every existing thread context.
 */
920 policy_new(struct disk *dp, struct dsched_policy *pol) {
921 struct dsched_thread_ctx *tdctx;
922 struct dsched_disk_ctx *diskctx;
923 struct dsched_thread_io *tdio;
925 diskctx = dsched_disk_ctx_alloc(dp, pol);
926 dsched_disk_ctx_ref(diskctx);
927 dsched_set_disk_priv(dp, diskctx);
929 TAILQ_FOREACH(tdctx, &dsched_tdctx_list, link) {
930 tdio = dsched_thread_io_alloc(dp, tdctx, pol);
/*
 * Detach the current policy's disk context from `dp`, dropping both
 * references taken at attach time (alloc + policy_new's extra ref).
 */
935 policy_destroy(struct disk *dp) {
936 struct dsched_disk_ctx *diskctx;
938 diskctx = dsched_get_disk_priv(dp);
939 KKASSERT(diskctx != NULL);
941 dsched_disk_ctx_unref(diskctx); /* from prepare */
942 dsched_disk_ctx_unref(diskctx); /* from alloc */
944 dsched_set_disk_priv(dp, NULL);
/*
 * Hook called when a new buf is created: attach the current thread's
 * (or proc's) dsched context to the buf, taking a reference that
 * dsched_queue()/dsched_exit_buf() later drops.  The NULL-tdctx
 * early-return path is among the lines dropped by the extraction.
 */
948 dsched_new_buf(struct buf *bp)
950 struct dsched_thread_ctx *tdctx = NULL;
952 if (dsched_inited == 0)
955 if (curproc != NULL) {
956 tdctx = dsched_get_proc_priv(curproc);
958 /* This is a kernel thread, so no proc info is available */
959 tdctx = dsched_get_thread_priv(curthread);
964 * XXX: hack. we don't want this assert because we aren't catching all
965 * threads. mi_startup() is still getting away without an tdctx.
968 /* by now we should have an tdctx. if not, something bad is going on */
969 KKASSERT(tdctx != NULL);
973 dsched_thread_ctx_ref(tdctx);
975 dsched_set_buf_priv(bp, tdctx);
/*
 * Hook called when a buf is released: drop the thread-context
 * reference taken in dsched_new_buf (the NULL check guarding these
 * calls is among the lines dropped by the extraction).
 */
979 dsched_exit_buf(struct buf *bp)
981 struct dsched_thread_ctx *tdctx;
983 tdctx = dsched_get_buf_priv(bp);
985 dsched_clr_buf_priv(bp);
986 dsched_thread_ctx_unref(tdctx);
/*
 * Process-creation hook: allocate a tdctx for the new proc, keep an
 * extra reference, and store it as the proc's dsched private data.
 */
991 dsched_new_proc(struct proc *p)
993 struct dsched_thread_ctx *tdctx;
995 if (dsched_inited == 0)
1000 tdctx = dsched_thread_ctx_alloc(p);
1002 dsched_thread_ctx_ref(tdctx);
1004 dsched_set_proc_priv(p, tdctx);
1005 atomic_add_int(&dsched_stats.nprocs, 1);
/*
 * Kernel-thread creation hook: same as dsched_new_proc but keyed off
 * the thread (no proc), so the tdctx is allocated with NULL.
 */
1010 dsched_new_thread(struct thread *td)
1012 struct dsched_thread_ctx *tdctx;
1014 if (dsched_inited == 0)
1017 KKASSERT(td != NULL);
1019 tdctx = dsched_thread_ctx_alloc(NULL);
1021 dsched_thread_ctx_ref(tdctx);
1023 dsched_set_thread_priv(td, tdctx);
1024 atomic_add_int(&dsched_stats.nthreads, 1);
/*
 * Process-exit hook: mark the tdctx dead, detach it from the proc and
 * drop both references (alloc ref + the ref taken in dsched_new_proc).
 */
1028 dsched_exit_proc(struct proc *p)
1030 struct dsched_thread_ctx *tdctx;
1032 if (dsched_inited == 0)
1035 KKASSERT(p != NULL);
1037 tdctx = dsched_get_proc_priv(p);
1038 KKASSERT(tdctx != NULL);
1040 tdctx->dead = 0xDEAD;
1041 dsched_set_proc_priv(p, NULL);
1043 dsched_thread_ctx_unref(tdctx); /* one for alloc, */
1044 dsched_thread_ctx_unref(tdctx); /* one for ref */
1045 atomic_subtract_int(&dsched_stats.nprocs, 1);
/*
 * Thread-exit hook: mirror of dsched_exit_proc for kernel threads.
 */
1050 dsched_exit_thread(struct thread *td)
1052 struct dsched_thread_ctx *tdctx;
1054 if (dsched_inited == 0)
1057 KKASSERT(td != NULL);
1059 tdctx = dsched_get_thread_priv(td);
1060 KKASSERT(tdctx != NULL);
1062 tdctx->dead = 0xDEAD;
1063 dsched_set_thread_priv(td, 0);
1065 dsched_thread_ctx_unref(tdctx); /* one for alloc, */
1066 dsched_thread_ctx_unref(tdctx); /* one for ref */
1067 atomic_subtract_int(&dsched_stats.nthreads, 1);
/*
 * Convenience for policies: create a tdio binding the CURRENT thread's
 * tdctx to the given diskctx's disk, under the global tdctx lock.
 * Return statement not visible in this extraction.
 */
1070 struct dsched_thread_io *
1071 dsched_new_policy_thread_tdio(struct dsched_disk_ctx *diskctx,
1072 struct dsched_policy *pol) {
1073 struct dsched_thread_ctx *tdctx;
1074 struct dsched_thread_io *tdio;
1076 DSCHED_GLOBAL_THREAD_CTX_LOCK();
1078 tdctx = dsched_get_thread_priv(curthread);
1079 KKASSERT(tdctx != NULL);
1080 tdio = dsched_thread_io_alloc(diskctx->dp, tdctx, pol);
1082 DSCHED_GLOBAL_THREAD_CTX_UNLOCK();
1087 /* DEFAULT NOOP POLICY */
/* No-op prepare/teardown/cancel handlers; bodies not visible here */
1090 noop_prepare(struct dsched_disk_ctx *diskctx)
1096 noop_teardown(struct dsched_disk_ctx *diskctx)
1102 noop_cancel(struct dsched_disk_ctx *diskctx)
/*
 * Passthrough queueing: dispatch the bio directly, raw or async
 * (the branch selecting between the two was dropped by the extraction).
 */
1108 noop_queue(struct dsched_disk_ctx *diskctx, struct dsched_thread_io *tdio,
1111 dsched_strategy_raw(diskctx->dp, bio);
1113 dsched_strategy_async(diskctx->dp, bio, noop_completed, NULL);
/*
 * Framework initialization (entry line dropped by the extraction —
 * presumably dsched_init, referenced by the SYSINIT below): create the
 * three object caches, zero the stats, init the locks, and register
 * the built-in noop policy.
 */
1124 dsched_tdio_cache = objcache_create("dsched-tdio-cache", 0, 0,
1126 objcache_malloc_alloc,
1127 objcache_malloc_free,
1128 &dsched_thread_io_malloc_args );
1130 dsched_tdctx_cache = objcache_create("dsched-tdctx-cache", 0, 0,
1132 objcache_malloc_alloc,
1133 objcache_malloc_free,
1134 &dsched_thread_ctx_malloc_args );
1136 dsched_diskctx_cache = objcache_create("dsched-diskctx-cache", 0, 0,
1138 objcache_malloc_alloc,
1139 objcache_malloc_free,
1140 &dsched_disk_ctx_malloc_args );
1142 bzero(&dsched_stats, sizeof(struct dsched_stats));
1144 lockinit(&dsched_lock, "dsched lock", 0, LK_CANRECURSE);
1145 DSCHED_GLOBAL_THREAD_CTX_LOCKINIT();
1147 dsched_register(&dsched_noop_policy);
/* Run dsched_init/dsched_uninit just before the disk subsystem creation stage */
1157 SYSINIT(subr_dsched_register, SI_SUB_CREATE_INIT-1, SI_ORDER_FIRST, dsched_init, NULL);
1158 SYSUNINIT(subr_dsched_register, SI_SUB_CREATE_INIT-1, SI_ORDER_ANY, dsched_uninit, NULL);
/* sysctl handler: export dsched_stats as an opaque blob */
1164 sysctl_dsched_stats(SYSCTL_HANDLER_ARGS)
1166 return (sysctl_handle_opaque(oidp, &dsched_stats, sizeof(struct dsched_stats), req));
/*
 * sysctl handler: emit the space-separated names of all registered
 * policies, NUL-terminated, under dsched_lock.  Error-break lines were
 * dropped by the extraction.
 */
1170 sysctl_dsched_list_policies(SYSCTL_HANDLER_ARGS)
1172 struct dsched_policy *pol = NULL;
1173 int error, first = 1;
1175 lockmgr(&dsched_lock, LK_EXCLUSIVE);
1177 while ((pol = dsched_policy_enumerate(pol))) {
1179 error = SYSCTL_OUT(req, " ", 1);
1185 error = SYSCTL_OUT(req, pol->name, strlen(pol->name));
1191 lockmgr(&dsched_lock, LK_RELEASE);
1193 error = SYSCTL_OUT(req, "", 1);
/*
 * Per-disk sysctl handler (arg1 = diskctx): reads report the disk's
 * current policy name; writes look up the named policy and switch the
 * disk to it.  Error-path lines (ENOENT on bad arg/name) were dropped
 * by the extraction.
 */
1199 sysctl_dsched_policy(SYSCTL_HANDLER_ARGS)
1201 char buf[DSCHED_POLICY_NAME_LENGTH];
1202 struct dsched_disk_ctx *diskctx = arg1;
1203 struct dsched_policy *pol = NULL;
1206 if (diskctx == NULL) {
1210 lockmgr(&dsched_lock, LK_EXCLUSIVE);
1212 pol = diskctx->dp->d_sched_policy;
1213 memcpy(buf, pol->name, DSCHED_POLICY_NAME_LENGTH);
1215 error = sysctl_handle_string(oidp, buf, DSCHED_POLICY_NAME_LENGTH, req);
1216 if (error || req->newptr == NULL) {
1217 lockmgr(&dsched_lock, LK_RELEASE);
1221 pol = dsched_find_policy(buf);
1223 lockmgr(&dsched_lock, LK_RELEASE);
1227 dsched_switch(diskctx->dp, pol);
1229 lockmgr(&dsched_lock, LK_RELEASE);
/*
 * sysctl handler for the default policy: reads report its name; writes
 * look up the named policy and install it as default_policy (the
 * default_set update presumably sits among the dropped lines).
 */
1235 sysctl_dsched_default_policy(SYSCTL_HANDLER_ARGS)
1237 char buf[DSCHED_POLICY_NAME_LENGTH];
1238 struct dsched_policy *pol = NULL;
1241 lockmgr(&dsched_lock, LK_EXCLUSIVE);
1243 pol = default_policy;
1244 memcpy(buf, pol->name, DSCHED_POLICY_NAME_LENGTH);
1246 error = sysctl_handle_string(oidp, buf, DSCHED_POLICY_NAME_LENGTH, req);
1247 if (error || req->newptr == NULL) {
1248 lockmgr(&dsched_lock, LK_RELEASE);
1252 pol = dsched_find_policy(buf);
1254 lockmgr(&dsched_lock, LK_RELEASE);
1259 default_policy = pol;
1261 lockmgr(&dsched_lock, LK_RELEASE);
/* Static sysctl tree: dsched root, policy subtree, debug knob, and the
 * stats / policies / default-policy proc handlers defined above. */
1266 SYSCTL_NODE(, OID_AUTO, dsched, CTLFLAG_RD, NULL,
1267 "Disk Scheduler Framework (dsched) magic");
1268 SYSCTL_NODE(_dsched, OID_AUTO, policy, CTLFLAG_RW, NULL,
1269 "List of disks and their policies");
1270 SYSCTL_INT(_dsched, OID_AUTO, debug, CTLFLAG_RW, &dsched_debug_enable,
1271 0, "Enable dsched debugging");
1272 SYSCTL_PROC(_dsched, OID_AUTO, stats, CTLTYPE_OPAQUE|CTLFLAG_RD,
1273 0, sizeof(struct dsched_stats), sysctl_dsched_stats, "dsched_stats",
1274 "dsched statistics");
1275 SYSCTL_PROC(_dsched, OID_AUTO, policies, CTLTYPE_STRING|CTLFLAG_RD,
1276 NULL, 0, sysctl_dsched_list_policies, "A", "names of available policies");
1277 SYSCTL_PROC(_dsched_policy, OID_AUTO, default, CTLTYPE_STRING|CTLFLAG_RW,
1278 NULL, 0, sysctl_dsched_default_policy, "A", "default dsched policy");
/*
 * Register a dynamic dsched.policy.<name> node for a disk, lazily
 * initializing the diskctx's sysctl context on first use; the node is
 * serviced by sysctl_dsched_policy with the diskctx as arg1.
 */
1281 dsched_sysctl_add_disk(struct dsched_disk_ctx *diskctx, char *name)
1283 if (!(diskctx->flags & DSCHED_SYSCTL_CTX_INITED)) {
1284 diskctx->flags |= DSCHED_SYSCTL_CTX_INITED;
1285 sysctl_ctx_init(&diskctx->sysctl_ctx);
1288 SYSCTL_ADD_PROC(&diskctx->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_dsched_policy),
1289 OID_AUTO, name, CTLTYPE_STRING|CTLFLAG_RW,
1290 diskctx, 0, sysctl_dsched_policy, "A", "policy");