/*
 * sys/kern/subr_dsched.c — "dsched: change loader tunables"
 * (dragonfly.git, commit b80a9543 by Alex Hornung)
 */
1/*
2 * Copyright (c) 2009, 2010 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Alex Hornung <ahornung@gmail.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34#include <sys/param.h>
35#include <sys/systm.h>
36#include <sys/kernel.h>
37#include <sys/proc.h>
38#include <sys/sysctl.h>
39#include <sys/buf.h>
40#include <sys/conf.h>
41#include <sys/diskslice.h>
42#include <sys/disk.h>
43#include <sys/malloc.h>
44#include <sys/sysctl.h>
45#include <machine/md_var.h>
46#include <sys/ctype.h>
47#include <sys/syslog.h>
48#include <sys/device.h>
49#include <sys/msgport.h>
50#include <sys/msgport2.h>
51#include <sys/buf2.h>
52#include <sys/dsched.h>
53#include <sys/fcntl.h>
54#include <machine/varargs.h>
55
56MALLOC_DEFINE(M_DSCHED, "dsched", "Disk Scheduler Framework allocations");
57
58static dsched_prepare_t default_prepare;
59static dsched_teardown_t default_teardown;
60static dsched_flush_t default_flush;
61static dsched_cancel_t default_cancel;
62static dsched_queue_t default_queue;
63#if 0
64static biodone_t default_completed;
65#endif
66
67dsched_new_buf_t *default_new_buf;
68dsched_new_proc_t *default_new_proc;
69dsched_new_thread_t *default_new_thread;
aa166ad1 70dsched_exit_buf_t *default_exit_buf;
b80a9543
AH
71dsched_exit_proc_t *default_exit_proc;
72dsched_exit_thread_t *default_exit_thread;
73
74static d_open_t dsched_dev_open;
75static d_close_t dsched_dev_close;
76static d_ioctl_t dsched_dev_ioctl;
77
78static int dsched_dev_list_disks(struct dsched_ioctl *data);
79static int dsched_dev_list_disk(struct dsched_ioctl *data);
80static int dsched_dev_list_policies(struct dsched_ioctl *data);
81static int dsched_dev_handle_switch(char *disk, char *policy);
82
83
84struct lock dsched_lock;
85static int dsched_debug_enable = 0;
86static int dsched_test1 = 0;
87static cdev_t dsched_dev;
88
89static struct dsched_policy_head dsched_policy_list =
90 TAILQ_HEAD_INITIALIZER(dsched_policy_list);
91
92static struct dsched_ops dsched_default_ops = {
93 .head = {
aa166ad1 94 .name = "noop"
b80a9543
AH
95 },
96 .prepare = default_prepare,
97 .teardown = default_teardown,
98 .flush = default_flush,
99 .cancel_all = default_cancel,
100 .bio_queue = default_queue,
101};
102
103
104static struct dev_ops dsched_dev_ops = {
105 { "dsched", 0, 0 },
106 .d_open = dsched_dev_open,
107 .d_close = dsched_dev_close,
108 .d_ioctl = dsched_dev_ioctl
109};
110
111/*
112 * dsched_debug() is a SYSCTL and TUNABLE controlled debug output function
113 * using kvprintf
114 */
115int
116dsched_debug(int level, char *fmt, ...)
117{
118 __va_list ap;
119
120 __va_start(ap, fmt);
121 if (level <= dsched_debug_enable)
122 kvprintf(fmt, ap);
123 __va_end(ap);
124
125 return 0;
126}
127
128/*
129 * Called on disk_create()
130 * tries to read which policy to use from loader.conf, if there's
131 * none specified, the default policy is used.
132 */
133void
134dsched_create(struct disk *dp, const char *head_name, int unit)
135{
136 char tunable_key[SPECNAMELEN + 11];
137 char sched_policy[DSCHED_POLICY_NAME_LENGTH];
138 struct dsched_policy *policy = NULL;
139
140 /* Also look for serno stuff? */
141 /* kprintf("dsched_create() for disk %s%d\n", head_name, unit); */
142 lockmgr(&dsched_lock, LK_EXCLUSIVE);
143
27cbb2ca 144 ksnprintf(tunable_key, sizeof(tunable_key), "dsched_pol_%s%d",
b80a9543
AH
145 head_name, unit);
146 if (TUNABLE_STR_FETCH(tunable_key, sched_policy,
147 sizeof(sched_policy)) != 0) {
148 policy = dsched_find_policy(sched_policy);
149 }
150
27cbb2ca 151 ksnprintf(tunable_key, sizeof(tunable_key), "dsched_pol_%s",
b80a9543
AH
152 head_name);
153 if (!policy && (TUNABLE_STR_FETCH(tunable_key, sched_policy,
154 sizeof(sched_policy)) != 0)) {
155 policy = dsched_find_policy(sched_policy);
156 }
157
27cbb2ca 158 ksnprintf(tunable_key, sizeof(tunable_key), "dsched_pol");
b80a9543
AH
159 if (!policy && (TUNABLE_STR_FETCH(tunable_key, sched_policy,
160 sizeof(sched_policy)) != 0)) {
161 policy = dsched_find_policy(sched_policy);
162 }
163
164 if (!policy) {
165 dsched_debug(0, "No policy for %s%d specified, "
166 "or policy not found\n", head_name, unit);
167 dsched_set_policy(dp, &dsched_default_ops);
168 } else {
169 dsched_set_policy(dp, policy->d_ops);
170 }
171
172 lockmgr(&dsched_lock, LK_RELEASE);
173}
174
175/*
176 * Called on disk_destroy()
177 * shuts down the scheduler core and cancels all remaining bios
178 */
179void
180dsched_destroy(struct disk *dp)
181{
182 struct dsched_ops *old_ops;
183
184 lockmgr(&dsched_lock, LK_EXCLUSIVE);
185
186 old_ops = dp->d_sched_ops;
187 dp->d_sched_ops = &dsched_default_ops;
188 old_ops->cancel_all(dp);
189 old_ops->teardown(dp);
190 atomic_subtract_int(&old_ops->head.ref_count, 1);
191 KKASSERT(old_ops->head.ref_count >= 0);
192
193 lockmgr(&dsched_lock, LK_RELEASE);
194}
195
196
197void
198dsched_queue(struct disk *dp, struct bio *bio)
199{
200 int error = 0;
201 error = dp->d_sched_ops->bio_queue(dp, bio);
202
203 if (error) {
204 if (bio->bio_buf->b_cmd == BUF_CMD_FLUSH) {
205 dp->d_sched_ops->flush(dp, bio);
206 }
207 dsched_strategy_raw(dp, bio);
208 }
209}
210
211
212/*
213 * Called from each module_init or module_attach of each policy
214 * registers the policy in the local policy list.
215 */
216int
217dsched_register(struct dsched_ops *d_ops)
218{
219 struct dsched_policy *policy;
220 int error = 0;
221
222 lockmgr(&dsched_lock, LK_EXCLUSIVE);
223
224 policy = dsched_find_policy(d_ops->head.name);
225
226 if (!policy) {
227 if ((d_ops->new_buf != NULL) || (d_ops->new_proc != NULL) ||
228 (d_ops->new_thread != NULL)) {
229 /*
230 * Policy ops has hooks for proc/thread/buf creation,
231 * so check if there are already hooks for those present
232 * and if so, stop right now.
233 */
234 if ((default_new_buf != NULL) || (default_new_proc != NULL) ||
235 (default_new_thread != NULL) || (default_exit_proc != NULL) ||
236 (default_exit_thread != NULL)) {
237 dsched_debug(LOG_ERR, "A policy with "
238 "proc/thread/buf hooks is already in use!");
239 error = 1;
240 goto done;
241 }
242
243 /* If everything is fine, just register the hooks */
244 default_new_buf = d_ops->new_buf;
245 default_new_proc = d_ops->new_proc;
246 default_new_thread = d_ops->new_thread;
aa166ad1 247 default_exit_buf = d_ops->exit_buf;
b80a9543
AH
248 default_exit_proc = d_ops->exit_proc;
249 default_exit_thread = d_ops->exit_thread;
250 }
251
252 policy = kmalloc(sizeof(struct dsched_policy), M_DSCHED, M_WAITOK);
253 policy->d_ops = d_ops;
254 TAILQ_INSERT_TAIL(&dsched_policy_list, policy, link);
255 atomic_add_int(&policy->d_ops->head.ref_count, 1);
256 } else {
257 dsched_debug(LOG_ERR, "Policy with name %s already registered!\n",
258 d_ops->head.name);
259 error = 1;
260 }
261
262done:
263 lockmgr(&dsched_lock, LK_RELEASE);
264 return error;
265}
266
267/*
268 * Called from each module_detach of each policy
269 * unregisters the policy
270 */
271int
272dsched_unregister(struct dsched_ops *d_ops)
273{
274 struct dsched_policy *policy;
275
276 lockmgr(&dsched_lock, LK_EXCLUSIVE);
277 policy = dsched_find_policy(d_ops->head.name);
278
279 if (policy) {
280 if (policy->d_ops->head.ref_count > 1)
281 return 1;
282 TAILQ_REMOVE(&dsched_policy_list, policy, link);
283 atomic_subtract_int(&policy->d_ops->head.ref_count, 1);
284 KKASSERT(policy->d_ops->head.ref_count >= 0);
285 kfree(policy, M_DSCHED);
286 }
287 lockmgr(&dsched_lock, LK_RELEASE);
288 return 0;
289}
290
291
292/*
293 * switches the policy by first removing the old one and then
294 * enabling the new one.
295 */
296int
297dsched_switch(struct disk *dp, struct dsched_ops *new_ops)
298{
299 struct dsched_ops *old_ops;
300
301 /* If we are asked to set the same policy, do nothing */
302 if (dp->d_sched_ops == new_ops)
303 return 0;
304
305 /* lock everything down, diskwise */
306 lockmgr(&dsched_lock, LK_EXCLUSIVE);
307 old_ops = dp->d_sched_ops;
308
309 atomic_subtract_int(&dp->d_sched_ops->head.ref_count, 1);
310 KKASSERT(dp->d_sched_ops->head.ref_count >= 0);
311
312 dp->d_sched_ops = &dsched_default_ops;
313 old_ops->teardown(dp);
314
315 /* Bring everything back to life */
316 dsched_set_policy(dp, new_ops);
317 lockmgr(&dsched_lock, LK_RELEASE);
318 return 0;
319}
320
321
322/*
323 * Loads a given policy and attaches it to the specified disk.
324 * Also initializes the core for the policy
325 */
326void
327dsched_set_policy(struct disk *dp, struct dsched_ops *new_ops)
328{
329 int locked = 0;
330
331 /* Check if it is locked already. if not, we acquire the devfs lock */
332 if (!(lockstatus(&dsched_lock, curthread)) == LK_EXCLUSIVE) {
333 lockmgr(&dsched_lock, LK_EXCLUSIVE);
334 locked = 1;
335 }
336
337 new_ops->prepare(dp);
338 dp->d_sched_ops = new_ops;
339 atomic_add_int(&new_ops->head.ref_count, 1);
340 kprintf("disk scheduler: set policy of %s to %s\n", dp->d_cdev->si_name,
341 new_ops->head.name);
342
343 /* If we acquired the lock, we also get rid of it */
344 if (locked)
345 lockmgr(&dsched_lock, LK_RELEASE);
346}
347
348struct dsched_policy*
349dsched_find_policy(char *search)
350{
351 struct dsched_policy *policy;
352 struct dsched_policy *policy_found = NULL;
353 int locked = 0;
354
355 /* Check if it is locked already. if not, we acquire the devfs lock */
356 if (!(lockstatus(&dsched_lock, curthread)) == LK_EXCLUSIVE) {
357 lockmgr(&dsched_lock, LK_EXCLUSIVE);
358 locked = 1;
359 }
360
361 TAILQ_FOREACH(policy, &dsched_policy_list, link) {
362 if (!strcmp(policy->d_ops->head.name, search)) {
363 policy_found = policy;
364 break;
365 }
366 }
367
368 /* If we acquired the lock, we also get rid of it */
369 if (locked)
370 lockmgr(&dsched_lock, LK_RELEASE);
371
372 return policy_found;
373}
374
375struct disk*
376dsched_find_disk(char *search)
377{
378 struct disk *dp_found = NULL;
379 struct disk *dp = NULL;
380
381 while((dp = disk_enumerate(dp))) {
382 if (!strcmp(dp->d_cdev->si_name, search)) {
383 dp_found = dp;
384 break;
385 }
386 }
387
388 return dp_found;
389}
390
391struct disk*
392dsched_disk_enumerate(struct disk *dp, struct dsched_ops *ops)
393{
394 while ((dp = disk_enumerate(dp))) {
395 if (dp->d_sched_ops == ops)
396 return dp;
397 }
398
399 return NULL;
400}
401
402struct dsched_policy *
403dsched_policy_enumerate(struct dsched_policy *pol)
404{
405 if (!pol)
406 return (TAILQ_FIRST(&dsched_policy_list));
407 else
408 return (TAILQ_NEXT(pol, link));
409}
410
411void
412dsched_cancel_bio(struct bio *bp)
413{
414 bp->bio_buf->b_error = ENXIO;
415 bp->bio_buf->b_flags |= B_ERROR;
416 bp->bio_buf->b_resid = bp->bio_buf->b_bcount;
417
418 biodone(bp);
419}
420
421void
422dsched_strategy_raw(struct disk *dp, struct bio *bp)
423{
424 /*
425 * Ideally, this stuff shouldn't be needed... but just in case, we leave it in
426 * to avoid panics
427 */
428 KASSERT(dp->d_rawdev != NULL, ("dsched_strategy_raw sees NULL d_rawdev!!"));
429 if(bp->bio_track != NULL) {
430 dsched_debug(LOG_INFO,
431 "dsched_strategy_raw sees non-NULL bio_track!! "
432 "bio: %x\n", (uint32_t)bp);
433 bp->bio_track = NULL;
434 }
435 dev_dstrategy(dp->d_rawdev, bp);
436}
437
438void
439dsched_strategy_sync(struct disk *dp, struct bio *bio)
440{
441 struct buf *bp, *nbp;
442 struct bio *nbio;
443
444 bp = bio->bio_buf;
445
446 nbp = getpbuf(NULL);
447 nbio = &nbp->b_bio1;
448
449 nbp->b_cmd = bp->b_cmd;
450 nbp->b_bufsize = bp->b_bufsize;
451 nbp->b_runningbufspace = bp->b_runningbufspace;
452 nbp->b_bcount = bp->b_bcount;
453 nbp->b_resid = bp->b_resid;
454 nbp->b_data = bp->b_data;
455 nbp->b_kvabase = bp->b_kvabase;
456 nbp->b_kvasize = bp->b_kvasize;
457 nbp->b_dirtyend = bp->b_dirtyend;
458
459 nbio->bio_done = biodone_sync;
460 nbio->bio_flags |= BIO_SYNC;
461 nbio->bio_track = NULL;
462
463 nbio->bio_caller_info1.ptr = dp;
464 nbio->bio_offset = bio->bio_offset;
465
466 dev_dstrategy(dp->d_rawdev, nbio);
467 biowait(nbio, "dschedsync");
468 bp->b_resid = nbp->b_resid;
469 bp->b_error = nbp->b_error;
470 biodone(bio);
471}
472
473void
474dsched_strategy_async(struct disk *dp, struct bio *bio, biodone_t *done, void *priv)
475{
476 struct bio *nbio;
477
478 nbio = push_bio(bio);
479 nbio->bio_done = done;
480 nbio->bio_offset = bio->bio_offset;
481
482 dsched_set_bio_dp(nbio, dp);
483 dsched_set_bio_priv(nbio, priv);
484
485 getmicrotime(&nbio->bio_caller_info3.tv);
486 dev_dstrategy(dp->d_rawdev, nbio);
487}
488
489void
490dsched_new_buf(struct buf *bp)
491{
492 if (default_new_buf != NULL)
493 default_new_buf(bp);
494}
495
aa166ad1
AH
496void
497dsched_exit_buf(struct buf *bp)
498{
499 if (default_exit_buf != NULL)
500 default_exit_buf(bp);
501}
b80a9543
AH
502
503void
504dsched_new_proc(struct proc *p)
505{
506 if (default_new_proc != NULL)
507 default_new_proc(p);
508}
509
510
511void
512dsched_new_thread(struct thread *td)
513{
514 if (default_new_thread != NULL)
515 default_new_thread(td);
516}
517
518void
519dsched_exit_proc(struct proc *p)
520{
521 if (default_exit_proc != NULL)
522 default_exit_proc(p);
523}
524
525
526void
527dsched_exit_thread(struct thread *td)
528{
529 if (default_exit_thread != NULL)
530 default_exit_thread(td);
531}
532
/*
 * Methods of the built-in "noop" policy.  prepare/teardown/flush/cancel
 * do nothing; the queue method pushes every bio straight to the raw
 * device.
 */

int
default_prepare(struct disk *dp)
{
	return 0;
}

void
default_teardown(struct disk *dp)
{
}

void
default_flush(struct disk *dp, struct bio *bio)
{
}

void
default_cancel(struct disk *dp)
{
}

int
default_queue(struct disk *dp, struct bio *bio)
{
	dsched_strategy_raw(dp, bio);
#if 0
	dsched_strategy_async(dp, bio, default_completed, NULL);
#endif
	return 0;
}

#if 0
void
default_completed(struct bio *bp)
{
	struct bio *obio;

	obio = pop_bio(bp);
	biodone(obio);
}
#endif
577
578/*
579 * dsched device stuff
580 */
581
582static int
583dsched_dev_list_disks(struct dsched_ioctl *data)
584{
585 struct disk *dp = NULL;
586 uint32_t i;
587
588 for (i = 0; (i <= data->num_elem) && (dp = disk_enumerate(dp)); i++);
589
590 if (dp == NULL)
591 return -1;
592
593 strncpy(data->dev_name, dp->d_cdev->si_name, sizeof(data->dev_name));
594
595 if (dp->d_sched_ops) {
596 strncpy(data->pol_name, dp->d_sched_ops->head.name,
597 sizeof(data->pol_name));
598 } else {
599 strncpy(data->pol_name, "N/A (error)", 12);
600 }
601
602 return 0;
603}
604
605static int
606dsched_dev_list_disk(struct dsched_ioctl *data)
607{
608 struct disk *dp = NULL;
609 int found = 0;
610
611 while ((dp = disk_enumerate(dp))) {
612 if (!strncmp(dp->d_cdev->si_name, data->dev_name,
613 sizeof(data->dev_name))) {
614 KKASSERT(dp->d_sched_ops != NULL);
615
616 found = 1;
617 strncpy(data->pol_name, dp->d_sched_ops->head.name,
618 sizeof(data->pol_name));
619 break;
620 }
621 }
622 if (!found)
623 return -1;
624
625 return 0;
626}
627
628static int
629dsched_dev_list_policies(struct dsched_ioctl *data)
630{
631 struct dsched_policy *pol = NULL;
632 uint32_t i;
633
634 for (i = 0; (i <= data->num_elem) && (pol = dsched_policy_enumerate(pol)); i++);
635
636 if (pol == NULL)
637 return -1;
638
639 strncpy(data->pol_name, pol->d_ops->head.name, sizeof(data->pol_name));
640 return 0;
641}
642
643static int
644dsched_dev_handle_switch(char *disk, char *policy)
645{
646 struct disk *dp;
647 struct dsched_policy *pol;
648
649 dp = dsched_find_disk(disk);
650 pol = dsched_find_policy(policy);
651
652 if ((dp == NULL) || (pol == NULL))
653 return -1;
654
655 return (dsched_switch(dp, pol->d_ops));
656}
657
658static int
659dsched_dev_open(struct dev_open_args *ap)
660{
661 /*
662 * Only allow read-write access.
663 */
664 if (((ap->a_oflags & FWRITE) == 0) || ((ap->a_oflags & FREAD) == 0))
665 return(EPERM);
666
667 /*
668 * We don't allow nonblocking access.
669 */
670 if ((ap->a_oflags & O_NONBLOCK) != 0) {
671 kprintf("dsched_dev: can't do nonblocking access\n");
672 return(ENODEV);
673 }
674
675 return 0;
676}
677
/* /dev/dsched close handler: nothing to clean up. */
static int
dsched_dev_close(struct dev_close_args *ap)
{
	return 0;
}
683
684static int
685dsched_dev_ioctl(struct dev_ioctl_args *ap)
686{
687 int error;
688 struct dsched_ioctl *data;
689
690 error = 0;
691 data = (struct dsched_ioctl *)ap->a_data;
692
693 switch(ap->a_cmd) {
694 case DSCHED_SET_DEVICE_POLICY:
695 if (dsched_dev_handle_switch(data->dev_name, data->pol_name))
696 error = ENOENT; /* No such file or directory */
697 break;
698
699 case DSCHED_LIST_DISK:
700 if (dsched_dev_list_disk(data) != 0) {
701 error = EINVAL; /* Invalid argument */
702 }
703 break;
704
705 case DSCHED_LIST_DISKS:
706 if (dsched_dev_list_disks(data) != 0) {
707 error = EINVAL; /* Invalid argument */
708 }
709 break;
710
711 case DSCHED_LIST_POLICIES:
712 if (dsched_dev_list_policies(data) != 0) {
713 error = EINVAL; /* Invalid argument */
714 }
715 break;
716
717
718 default:
719 error = ENOTTY; /* Inappropriate ioctl for device */
720 break;
721 }
722
723 return(error);
724}
725
726/*
727 * SYSINIT stuff
728 */
729
730
731static void
732dsched_init(void)
733{
734 lockinit(&dsched_lock, "dsched lock", 0, 0);
735 dsched_register(&dsched_default_ops);
736}
737
/* SYSUNINIT counterpart of dsched_init(); currently nothing to undo. */
static void
dsched_uninit(void)
{
}
742
743static void
744dsched_dev_init(void)
745{
746 dsched_dev = make_dev(&dsched_dev_ops,
747 0,
748 UID_ROOT,
749 GID_WHEEL,
750 0600,
751 "dsched");
752}
753
754static void
755dsched_dev_uninit(void)
756{
757 destroy_dev(dsched_dev);
758}
759
760SYSINIT(subr_dsched_register, SI_SUB_CREATE_INIT-2, SI_ORDER_FIRST, dsched_init, NULL);
761SYSUNINIT(subr_dsched_register, SI_SUB_CREATE_INIT-2, SI_ORDER_ANY, dsched_uninit, NULL);
762SYSINIT(subr_dsched_dev_register, SI_SUB_DRIVERS, SI_ORDER_ANY, dsched_dev_init, NULL);
763SYSUNINIT(subr_dsched_dev_register, SI_SUB_DRIVERS, SI_ORDER_ANY, dsched_dev_uninit, NULL);
764
765/*
766 * SYSCTL stuff
767 */
768SYSCTL_INT(_kern, OID_AUTO, dsched_debug, CTLFLAG_RW, &dsched_debug_enable,
769 0, "Enable dsched debugging");
770SYSCTL_INT(_kern, OID_AUTO, dsched_test1, CTLFLAG_RW, &dsched_test1,
771 0, "Switch dsched test1 method");