// SPDX-License-Identifier: GPL-2.0+
//
// Scalability test comparing RCU vs other mechanisms
// for acquiring references on objects.
//
// Copyright (C) Google, 2020.
//
// Author: Joel Fernandes <joel@joelfernandes.org>

#define pr_fmt(fmt) fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/seq_buf.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/types.h>

#define SCALE_FLAG "-ref-scale: "

#define SCALEOUT(s, x...) \
	pr_alert("%s" SCALE_FLAG s, scale_type, ## x)

#define VERBOSE_SCALEOUT(s, x...) \
	do { \
		if (verbose) \
			pr_alert("%s" SCALE_FLAG s "\n", scale_type, ## x); \
	} while (0)

static atomic_t verbose_batch_ctr;

#define VERBOSE_SCALEOUT_BATCH(s, x...)						\
do {										\
	if (verbose &&								\
	    (verbose_batched <= 0 ||						\
	     !(atomic_inc_return(&verbose_batch_ctr) % verbose_batched))) {	\
		schedule_timeout_uninterruptible(1);				\
		pr_alert("%s" SCALE_FLAG s "\n", scale_type, ## x);		\
	}									\
} while (0)

#define SCALEOUT_ERRSTRING(s, x...) pr_alert("%s" SCALE_FLAG "!!! " s "\n", scale_type, ## x)

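// For example, with the default scale_type of "rcu",
// SCALEOUT("Starting\n") prints "rcu-ref-scale: Starting".
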
MODULE_DESCRIPTION("Scalability test for object reference mechanisms");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joel Fernandes (Google) <joel@joelfernandes.org>");

static char *scale_type = "rcu";
module_param(scale_type, charp, 0444);
MODULE_PARM_DESC(scale_type, "Type of test (rcu, srcu, refcnt, rwsem, rwlock, ...).");

torture_param(int, verbose, 0, "Enable verbose debugging printk()s");
torture_param(int, verbose_batched, 0, "Batch verbose debugging printk()s");

// Wait until there are multiple CPUs before starting test.
torture_param(int, holdoff, IS_BUILTIN(CONFIG_RCU_REF_SCALE_TEST) ? 10 : 0,
	      "Holdoff time before test start (s)");
// Number of typesafe_lookup structures, that is, the degree of concurrency.
torture_param(long, lookup_instances, 0, "Number of typesafe_lookup structures.");
// Number of loops per experiment, all readers execute operations concurrently.
torture_param(long, loops, 10000, "Number of loops per experiment.");
// Number of readers, with -1 defaulting to about 75% of the CPUs.
torture_param(int, nreaders, -1, "Number of readers, -1 for 75% of CPUs.");
// Number of experiments.
torture_param(int, nruns, 30, "Number of experiments to run.");
// Reader delay in nanoseconds, 0 for no delay.
torture_param(int, readdelay, 0, "Read-side delay in nanoseconds.");

// Shut down automatically at the end of a built-in test, but leave
// module-based runs in place for inspection.
#ifdef MODULE
# define REFSCALE_SHUTDOWN 0
#else
# define REFSCALE_SHUTDOWN 1
#endif

torture_param(bool, shutdown, REFSCALE_SHUTDOWN,
	      "Shutdown at end of scalability tests.");

// Structure definition for a reader kthread.
struct reader_task {
	struct task_struct *task;
	int start_reader;
	wait_queue_head_t wq;
	u64 last_duration_ns;
};

static struct task_struct *shutdown_task;
static wait_queue_head_t shutdown_wq;

static struct task_struct *main_task;
static wait_queue_head_t main_wq;
static int shutdown_start;

static struct reader_task *reader_tasks;

// Number of readers that are part of the current experiment.
static atomic_t nreaders_exp;

// Use to wait for all threads to start.
static atomic_t n_init;
static atomic_t n_started;
static atomic_t n_warmedup;
static atomic_t n_cooleddown;

// Track which experiment is currently running.
static int exp_idx;

// Operations vector for selecting different types of tests.
struct ref_scale_ops {
	bool (*init)(void);
	void (*cleanup)(void);
	void (*readsection)(const int nloops);
	void (*delaysection)(const int nloops, const int udl, const int ndl);
	const char *name;
};

static const struct ref_scale_ops *cur_ops;

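// Adding a new mechanism amounts to defining read-side functions and an
// ops instance.  A minimal sketch (a hypothetical "noop" flavor, not part
// of this test):
//
//	static void ref_noop_section(const int nloops)
//	{
//		int i;
//
//		for (i = nloops; i >= 0; i--)
//			barrier();
//	}
//
//	static void ref_noop_delay_section(const int nloops, const int udl, const int ndl)
//	{
//		int i;
//
//		for (i = nloops; i >= 0; i--)
//			un_delay(udl, ndl);
//	}
//
//	static const struct ref_scale_ops noop_ops = {
//		.readsection = ref_noop_section,
//		.delaysection = ref_noop_delay_section,
//		.name = "noop"
//	};
//
// The new instance must also be listed in ref_scale_init()'s scale_ops[]
// array so that scale_type can select it.
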
static void un_delay(const int udl, const int ndl)
{
	if (udl)
		udelay(udl);
	if (ndl)
		ndelay(ndl);
}

// Definitions for RCU ref scale testing.
static void ref_rcu_read_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock();
		rcu_read_unlock();
	}
}

static void ref_rcu_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock();
		un_delay(udl, ndl);
		rcu_read_unlock();
	}
}

static bool rcu_sync_scale_init(void)
{
	return true;
}

static const struct ref_scale_ops rcu_ops = {
	.init = rcu_sync_scale_init,
	.readsection = ref_rcu_read_section,
	.delaysection = ref_rcu_delay_section,
	.name = "rcu"
};

// Definitions for SRCU ref scale testing.
DEFINE_STATIC_SRCU(srcu_refctl_scale);
static struct srcu_struct *srcu_ctlp = &srcu_refctl_scale;

static void srcu_ref_scale_read_section(const int nloops)
{
	int i;
	int idx;

	for (i = nloops; i >= 0; i--) {
		idx = srcu_read_lock(srcu_ctlp);
		srcu_read_unlock(srcu_ctlp, idx);
	}
}

static void srcu_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;
	int idx;

	for (i = nloops; i >= 0; i--) {
		idx = srcu_read_lock(srcu_ctlp);
		un_delay(udl, ndl);
		srcu_read_unlock(srcu_ctlp, idx);
	}
}

static const struct ref_scale_ops srcu_ops = {
	.init = rcu_sync_scale_init,
	.readsection = srcu_ref_scale_read_section,
	.delaysection = srcu_ref_scale_delay_section,
	.name = "srcu"
};

#ifdef CONFIG_TASKS_RCU

// Definitions for RCU Tasks ref scale testing: Empty read markers.
// These definitions also work for RCU Rude readers.
static void rcu_tasks_ref_scale_read_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--)
		continue;
}

static void rcu_tasks_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--)
		un_delay(udl, ndl);
}

static const struct ref_scale_ops rcu_tasks_ops = {
	.init = rcu_sync_scale_init,
	.readsection = rcu_tasks_ref_scale_read_section,
	.delaysection = rcu_tasks_ref_scale_delay_section,
	.name = "rcu-tasks"
};

#define RCU_TASKS_OPS &rcu_tasks_ops,

#else // #ifdef CONFIG_TASKS_RCU

#define RCU_TASKS_OPS

#endif // #else // #ifdef CONFIG_TASKS_RCU

#ifdef CONFIG_TASKS_TRACE_RCU

// Definitions for RCU Tasks Trace ref scale testing.
static void rcu_trace_ref_scale_read_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock_trace();
		rcu_read_unlock_trace();
	}
}

static void rcu_trace_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock_trace();
		un_delay(udl, ndl);
		rcu_read_unlock_trace();
	}
}

static const struct ref_scale_ops rcu_trace_ops = {
	.init = rcu_sync_scale_init,
	.readsection = rcu_trace_ref_scale_read_section,
	.delaysection = rcu_trace_ref_scale_delay_section,
	.name = "rcu-trace"
};

#define RCU_TRACE_OPS &rcu_trace_ops,

#else // #ifdef CONFIG_TASKS_TRACE_RCU

#define RCU_TRACE_OPS

#endif // #else // #ifdef CONFIG_TASKS_TRACE_RCU

// Definitions for reference count.
static atomic_t refcnt;

static void ref_refcnt_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		atomic_inc(&refcnt);
		atomic_dec(&refcnt);
	}
}

static void ref_refcnt_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		atomic_inc(&refcnt);
		un_delay(udl, ndl);
		atomic_dec(&refcnt);
	}
}

static const struct ref_scale_ops refcnt_ops = {
	.init = rcu_sync_scale_init,
	.readsection = ref_refcnt_section,
	.delaysection = ref_refcnt_delay_section,
	.name = "refcnt"
};

// Definitions for rwlock.
static rwlock_t test_rwlock;

static bool ref_rwlock_init(void)
{
	rwlock_init(&test_rwlock);
	return true;
}

static void ref_rwlock_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		read_lock(&test_rwlock);
		read_unlock(&test_rwlock);
	}
}

static void ref_rwlock_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		read_lock(&test_rwlock);
		un_delay(udl, ndl);
		read_unlock(&test_rwlock);
	}
}

static const struct ref_scale_ops rwlock_ops = {
	.init = ref_rwlock_init,
	.readsection = ref_rwlock_section,
	.delaysection = ref_rwlock_delay_section,
	.name = "rwlock"
};

// Definitions for rwsem.
static struct rw_semaphore test_rwsem;

static bool ref_rwsem_init(void)
{
	init_rwsem(&test_rwsem);
	return true;
}

static void ref_rwsem_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		down_read(&test_rwsem);
		up_read(&test_rwsem);
	}
}

static void ref_rwsem_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		down_read(&test_rwsem);
		un_delay(udl, ndl);
		up_read(&test_rwsem);
	}
}

static const struct ref_scale_ops rwsem_ops = {
	.init = ref_rwsem_init,
	.readsection = ref_rwsem_section,
	.delaysection = ref_rwsem_delay_section,
	.name = "rwsem"
};

// Definitions for global spinlock.
static DEFINE_RAW_SPINLOCK(test_lock);

static void ref_lock_section(const int nloops)
{
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		raw_spin_lock(&test_lock);
		raw_spin_unlock(&test_lock);
	}
	preempt_enable();
}

static void ref_lock_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		raw_spin_lock(&test_lock);
		un_delay(udl, ndl);
		raw_spin_unlock(&test_lock);
	}
	preempt_enable();
}

static const struct ref_scale_ops lock_ops = {
	.readsection = ref_lock_section,
	.delaysection = ref_lock_delay_section,
	.name = "lock"
};

// Definitions for global irq-save spinlock.

static void ref_lock_irq_section(const int nloops)
{
	unsigned long flags;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		raw_spin_lock_irqsave(&test_lock, flags);
		raw_spin_unlock_irqrestore(&test_lock, flags);
	}
	preempt_enable();
}

static void ref_lock_irq_delay_section(const int nloops, const int udl, const int ndl)
{
	unsigned long flags;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		raw_spin_lock_irqsave(&test_lock, flags);
		un_delay(udl, ndl);
		raw_spin_unlock_irqrestore(&test_lock, flags);
	}
	preempt_enable();
}

static const struct ref_scale_ops lock_irq_ops = {
	.readsection = ref_lock_irq_section,
	.delaysection = ref_lock_irq_delay_section,
	.name = "lock-irq"
};

// Definitions for acquire-release.
static DEFINE_PER_CPU(unsigned long, test_acqrel);

static void ref_acqrel_section(const int nloops)
{
	unsigned long x;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		x = smp_load_acquire(this_cpu_ptr(&test_acqrel));
		smp_store_release(this_cpu_ptr(&test_acqrel), x + 1);
	}
	preempt_enable();
}

static void ref_acqrel_delay_section(const int nloops, const int udl, const int ndl)
{
	unsigned long x;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		x = smp_load_acquire(this_cpu_ptr(&test_acqrel));
		un_delay(udl, ndl);
		smp_store_release(this_cpu_ptr(&test_acqrel), x + 1);
	}
	preempt_enable();
}

static const struct ref_scale_ops acqrel_ops = {
	.readsection = ref_acqrel_section,
	.delaysection = ref_acqrel_delay_section,
	.name = "acqrel"
};

// Definitions for clock reading.  The volatile sink prevents the
// compiler from optimizing away the clock reads.
static volatile u64 stopopts;

static void ref_clock_section(const int nloops)
{
	u64 x = 0;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--)
		x += ktime_get_real_fast_ns();
	preempt_enable();
	stopopts = x;
}

static void ref_clock_delay_section(const int nloops, const int udl, const int ndl)
{
	u64 x = 0;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		x += ktime_get_real_fast_ns();
		un_delay(udl, ndl);
	}
	preempt_enable();
	stopopts = x;
}

static const struct ref_scale_ops clock_ops = {
	.readsection = ref_clock_section,
	.delaysection = ref_clock_delay_section,
	.name = "clock"
};

// Definitions for jiffies reading.
static void ref_jiffies_section(const int nloops)
{
	u64 x = 0;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--)
		x += jiffies;
	preempt_enable();
	stopopts = x;
}

static void ref_jiffies_delay_section(const int nloops, const int udl, const int ndl)
{
	u64 x = 0;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		x += jiffies;
		un_delay(udl, ndl);
	}
	preempt_enable();
	stopopts = x;
}

static const struct ref_scale_ops jiffies_ops = {
	.readsection = ref_jiffies_section,
	.delaysection = ref_jiffies_delay_section,
	.name = "jiffies"
};

////////////////////////////////////////////////////////////////////////
//
// Methods leveraging SLAB_TYPESAFE_BY_RCU.

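// With SLAB_TYPESAFE_BY_RCU, a freed object's memory can be reused for
// a new object of the same type before an RCU grace period has elapsed,
// so readers must (1) acquire a reference or lock on the object and then
// (2) revalidate that it is still the object they looked up, retrying on
// failure.  A sketch of the lookup pattern implemented below (the actual
// code interleaves the release and unlock differently):
//
//	rcu_read_lock();
//	p = rcu_dereference(array[idx]);
//	if (!rts_acquire(p, &start) || object_changed(p)) {
//		rcu_read_unlock();
//		goto retry;
//	}
//	...use p...
//	rts_release(p, start);
//	rcu_read_unlock();
//
// Here object_changed() stands in for the ->a revalidation done in
// typesafe_delay_section().
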
// Item to look up in a typesafe manner.  Array of pointers to these.
struct refscale_typesafe {
	atomic_t rts_refctr;   // Used by all flavors
	spinlock_t rts_lock;
	seqlock_t rts_seqlock;
	unsigned int a;
	unsigned int b;
};

static struct kmem_cache *typesafe_kmem_cachep;
static struct refscale_typesafe **rtsarray;
static long rtsarray_size;
static DEFINE_TORTURE_RANDOM_PERCPU(refscale_rand);
static bool (*rts_acquire)(struct refscale_typesafe *rtsp, unsigned int *start);
static bool (*rts_release)(struct refscale_typesafe *rtsp, unsigned int start);

// Conditionally acquire an explicit in-structure reference count.
static bool typesafe_ref_acquire(struct refscale_typesafe *rtsp, unsigned int *start)
{
	return atomic_inc_not_zero(&rtsp->rts_refctr);
}

// Unconditionally release an explicit in-structure reference count.
static bool typesafe_ref_release(struct refscale_typesafe *rtsp, unsigned int start)
{
	if (!atomic_dec_return(&rtsp->rts_refctr)) {
		WRITE_ONCE(rtsp->a, rtsp->a + 1);
		kmem_cache_free(typesafe_kmem_cachep, rtsp);
	}
	return true;
}

// Unconditionally acquire an explicit in-structure spinlock.
static bool typesafe_lock_acquire(struct refscale_typesafe *rtsp, unsigned int *start)
{
	spin_lock(&rtsp->rts_lock);
	return true;
}

// Unconditionally release an explicit in-structure spinlock.
static bool typesafe_lock_release(struct refscale_typesafe *rtsp, unsigned int start)
{
	spin_unlock(&rtsp->rts_lock);
	return true;
}

// Unconditionally acquire an explicit in-structure sequence lock.
static bool typesafe_seqlock_acquire(struct refscale_typesafe *rtsp, unsigned int *start)
{
	*start = read_seqbegin(&rtsp->rts_seqlock);
	return true;
}

// Conditionally release an explicit in-structure sequence lock.  Return
// true if this release was successful, that is, if no retry is required.
static bool typesafe_seqlock_release(struct refscale_typesafe *rtsp, unsigned int start)
{
	return !read_seqretry(&rtsp->rts_seqlock, start);
}

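// These two functions wrap the standard seqlock read pattern, with the
// retry check deferred to the caller:
//
//	do {
//		seq = read_seqbegin(&sl);
//		/* speculatively read data */
//	} while (read_seqretry(&sl, seq));
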
// Do a read-side critical section with the specified delay in
// microseconds and nanoseconds inserted so as to increase probability
// of failure.
static void typesafe_delay_section(const int nloops, const int udl, const int ndl)
{
	unsigned int a;
	unsigned int b;
	int i;
	long idx;
	struct refscale_typesafe *rtsp;
	unsigned int start;

	for (i = nloops; i >= 0; i--) {
		preempt_disable();
		idx = torture_random(this_cpu_ptr(&refscale_rand)) % rtsarray_size;
		preempt_enable();
retry:
		rcu_read_lock();
		rtsp = rcu_dereference(rtsarray[idx]);
		a = READ_ONCE(rtsp->a);
		if (!rts_acquire(rtsp, &start)) {
			rcu_read_unlock();
			goto retry;
		}
		if (a != READ_ONCE(rtsp->a)) {
			(void)rts_release(rtsp, start);
			rcu_read_unlock();
			goto retry;
		}
		un_delay(udl, ndl);
		b = READ_ONCE(rtsp->a);
		// Remember, seqlock read-side release can fail.
		if (!rts_release(rtsp, start)) {
			rcu_read_unlock();
			goto retry;
		}
		WARN_ONCE(a != b, "Re-read of ->a changed from %u to %u.\n", a, b);
		b = rtsp->b;
		rcu_read_unlock();
		WARN_ON_ONCE(a * a != b);
	}
}

// Because the acquisition and release methods are expensive, there
// is no point in optimizing away the un_delay() function's two checks.
// Thus simply define typesafe_read_section() as a simple wrapper around
// typesafe_delay_section().
static void typesafe_read_section(const int nloops)
{
	typesafe_delay_section(nloops, 0, 0);
}

// Allocate and initialize one refscale_typesafe structure.
static struct refscale_typesafe *typesafe_alloc_one(void)
{
	struct refscale_typesafe *rtsp;

	rtsp = kmem_cache_alloc(typesafe_kmem_cachep, GFP_KERNEL);
	if (!rtsp)
		return NULL;
	atomic_set(&rtsp->rts_refctr, 1);
	WRITE_ONCE(rtsp->a, rtsp->a + 1);
	WRITE_ONCE(rtsp->b, rtsp->a * rtsp->a);
	return rtsp;
}

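// The ->a/->b pair set up above provides the identity check used by
// typesafe_delay_section(): ->b always holds the square of ->a, and ->a
// is also incremented when the object is freed, so a re-read mismatch of
// ->a (or a WARN_ON_ONCE(a * a != b) failure) flags object reuse.
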
// Slab-allocator constructor for refscale_typesafe structures created
// out of a new slab of system memory.
static void refscale_typesafe_ctor(void *rtsp_in)
{
	struct refscale_typesafe *rtsp = rtsp_in;

	spin_lock_init(&rtsp->rts_lock);
	seqlock_init(&rtsp->rts_seqlock);
	preempt_disable();
	rtsp->a = torture_random(this_cpu_ptr(&refscale_rand));
	preempt_enable();
}

static const struct ref_scale_ops typesafe_ref_ops;
static const struct ref_scale_ops typesafe_lock_ops;
static const struct ref_scale_ops typesafe_seqlock_ops;

// Initialize for a typesafe test.
static bool typesafe_init(void)
{
	long idx;
	long si = lookup_instances;

	typesafe_kmem_cachep = kmem_cache_create("refscale_typesafe",
						 sizeof(struct refscale_typesafe), sizeof(void *),
						 SLAB_TYPESAFE_BY_RCU, refscale_typesafe_ctor);
	if (!typesafe_kmem_cachep)
		return false;
	// A negative lookup_instances value scales with the number of CPUs;
	// zero selects a single instance.
	if (si < 0)
		si = -si * nr_cpu_ids;
	else if (si == 0)
		si = 1;
	rtsarray_size = si;
	rtsarray = kcalloc(si, sizeof(*rtsarray), GFP_KERNEL);
	if (!rtsarray)
		return false;
	for (idx = 0; idx < rtsarray_size; idx++) {
		rtsarray[idx] = typesafe_alloc_one();
		if (!rtsarray[idx])
			return false;
	}
	if (cur_ops == &typesafe_ref_ops) {
		rts_acquire = typesafe_ref_acquire;
		rts_release = typesafe_ref_release;
	} else if (cur_ops == &typesafe_lock_ops) {
		rts_acquire = typesafe_lock_acquire;
		rts_release = typesafe_lock_release;
	} else if (cur_ops == &typesafe_seqlock_ops) {
		rts_acquire = typesafe_seqlock_acquire;
		rts_release = typesafe_seqlock_release;
	} else {
		WARN_ON_ONCE(1);
		return false;
	}
	return true;
}

// Clean up after a typesafe test.
static void typesafe_cleanup(void)
{
	long idx;

	if (rtsarray) {
		for (idx = 0; idx < rtsarray_size; idx++)
			kmem_cache_free(typesafe_kmem_cachep, rtsarray[idx]);
		kfree(rtsarray);
		rtsarray = NULL;
		rtsarray_size = 0;
	}
	kmem_cache_destroy(typesafe_kmem_cachep);
	typesafe_kmem_cachep = NULL;
}

// The typesafe_init() function distinguishes these structures by address.
static const struct ref_scale_ops typesafe_ref_ops = {
	.init = typesafe_init,
	.cleanup = typesafe_cleanup,
	.readsection = typesafe_read_section,
	.delaysection = typesafe_delay_section,
	.name = "typesafe_ref"
};

static const struct ref_scale_ops typesafe_lock_ops = {
	.init = typesafe_init,
	.cleanup = typesafe_cleanup,
	.readsection = typesafe_read_section,
	.delaysection = typesafe_delay_section,
	.name = "typesafe_lock"
};

static const struct ref_scale_ops typesafe_seqlock_ops = {
	.init = typesafe_init,
	.cleanup = typesafe_cleanup,
	.readsection = typesafe_read_section,
	.delaysection = typesafe_delay_section,
	.name = "typesafe_seqlock"
};

// Do one pass of read-side sections of the type selected by cur_ops.
static void rcu_scale_one_reader(void)
{
	if (readdelay <= 0)
		cur_ops->readsection(loops);
	else
		cur_ops->delaysection(loops, readdelay / 1000, readdelay % 1000);
}

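// For example, readdelay=1500 (nanoseconds) yields
// delaysection(loops, 1, 500), that is, udelay(1) followed by
// ndelay(500) in each pass through the critical section.
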
// Reader kthread.  Repeatedly does empty RCU read-side
// critical section, minimizing update-side interference.
static int
ref_scale_reader(void *arg)
{
	unsigned long flags;
	long me = (long)arg;
	struct reader_task *rt = &(reader_tasks[me]);
	u64 start;
	s64 duration;

	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: task started", me);
	WARN_ON_ONCE(set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids)));
	set_user_nice(current, MAX_NICE);
	if (holdoff)
		schedule_timeout_interruptible(holdoff * HZ);
repeat:
	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: waiting to start next experiment on cpu %d", me, raw_smp_processor_id());

	// Wait for signal that this reader can start.
	wait_event(rt->wq, (atomic_read(&nreaders_exp) && smp_load_acquire(&rt->start_reader)) ||
		   torture_must_stop());

	if (torture_must_stop())
		goto end;

	// Make sure that the CPU is affinitized appropriately during testing.
	WARN_ON_ONCE(raw_smp_processor_id() != me);

	WRITE_ONCE(rt->start_reader, 0);
	if (!atomic_dec_return(&n_started))
		while (atomic_read_acquire(&n_started))
			cpu_relax();

	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: experiment %d started", me, exp_idx);

	// To reduce noise, do an initial cache-warming invocation, check
	// in, and then keep warming until everyone has checked in.
	rcu_scale_one_reader();
	if (!atomic_dec_return(&n_warmedup))
		while (atomic_read_acquire(&n_warmedup))
			rcu_scale_one_reader();
	// Also keep interrupts disabled.  This also has the effect
	// of preventing entries into slow path for rcu_read_unlock().
	local_irq_save(flags);
	start = ktime_get_mono_fast_ns();

	rcu_scale_one_reader();

	duration = ktime_get_mono_fast_ns() - start;
	local_irq_restore(flags);

	rt->last_duration_ns = WARN_ON_ONCE(duration < 0) ? 0 : duration;
	// To reduce runtime-skew noise, do maintain-load invocations until
	// everyone is done.
	if (!atomic_dec_return(&n_cooleddown))
		while (atomic_read_acquire(&n_cooleddown))
			rcu_scale_one_reader();

	if (atomic_dec_and_test(&nreaders_exp))
		wake_up(&main_wq);

	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: experiment %d ended, (readers remaining=%d)",
			       me, exp_idx, atomic_read(&nreaders_exp));

	if (!torture_must_stop())
		goto repeat;
end:
	torture_kthread_stopping("ref_scale_reader");
	return 0;
}

// Reset all readers' durations before the next experiment.
static void reset_readers(void)
{
	int i;
	struct reader_task *rt;

	for (i = 0; i < nreaders; i++) {
		rt = &(reader_tasks[i]);
		rt->last_duration_ns = 0;
	}
}

// Print the results of each reader and return the sum of all their durations.
static u64 process_durations(int n)
{
	int i;
	struct reader_task *rt;
	struct seq_buf s;
	char *buf;
	u64 sum = 0;

	buf = kmalloc(800 + 64, GFP_KERNEL);
	if (!buf)
		return 0;
	seq_buf_init(&s, buf, 800 + 64);

	seq_buf_printf(&s, "Experiment #%d (Format: <THREAD-NUM>:<Total loop time in ns>)",
		       exp_idx);

	for (i = 0; i < n && !torture_must_stop(); i++) {
		rt = &(reader_tasks[i]);

		if (i % 5 == 0)
			seq_buf_putc(&s, '\n');

		if (seq_buf_used(&s) >= 800) {
			pr_alert("%s", seq_buf_str(&s));
			seq_buf_clear(&s);
		}
		seq_buf_printf(&s, "%d: %llu\t", i, rt->last_duration_ns);

		sum += rt->last_duration_ns;
	}

	pr_alert("%s\n", seq_buf_str(&s));

	kfree(buf);
	return sum;
}

// main_func is the main orchestrator: it performs a set of experiments.
// For each experiment, it tells all the readers involved to start, waits
// for them to finish the experiment, then reads their timestamps and
// starts the next experiment.  Once all experiments have run, all of the
// per-experiment averages are printed.
static int main_func(void *arg)
{
	int exp;
	int r;
	char buf1[64];
	char *buf;
	u64 *result_avg;

	set_cpus_allowed_ptr(current, cpumask_of(nreaders % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);

	VERBOSE_SCALEOUT("main_func task started");
	result_avg = kzalloc(nruns * sizeof(*result_avg), GFP_KERNEL);
	buf = kzalloc(800 + 64, GFP_KERNEL);
	if (!result_avg || !buf) {
		SCALEOUT_ERRSTRING("out of memory");
		goto oom_exit;
	}
	if (holdoff)
		schedule_timeout_interruptible(holdoff * HZ);

	// Wait for all threads to start.
	atomic_inc(&n_init);
	while (atomic_read(&n_init) < nreaders + 1)
		schedule_timeout_uninterruptible(1);

	// Start all readers for each experiment.
	for (exp = 0; exp < nruns && !torture_must_stop(); exp++) {
		if (torture_must_stop())
			goto end;

		reset_readers();
		atomic_set(&nreaders_exp, nreaders);
		atomic_set(&n_started, nreaders);
		atomic_set(&n_warmedup, nreaders);
		atomic_set(&n_cooleddown, nreaders);

		exp_idx = exp;

		for (r = 0; r < nreaders; r++) {
			smp_store_release(&reader_tasks[r].start_reader, 1);
			wake_up(&reader_tasks[r].wq);
		}

		VERBOSE_SCALEOUT("main_func: experiment started, waiting for %d readers",
				 nreaders);

		wait_event(main_wq,
			   !atomic_read(&nreaders_exp) || torture_must_stop());

		VERBOSE_SCALEOUT("main_func: experiment ended");

		if (torture_must_stop())
			goto end;

		result_avg[exp] = div_u64(1000 * process_durations(nreaders), nreaders * loops);
	}

	// Print the average of all experiments.
	SCALEOUT("END OF TEST. Calculating average duration per loop (nanoseconds)...\n");

	pr_alert("Runs\tTime(ns)\n");
	for (exp = 0; exp < nruns; exp++) {
		u64 avg;
		u32 rem;

		avg = div_u64_rem(result_avg[exp], 1000, &rem);
		sprintf(buf1, "%d\t%llu.%03u\n", exp + 1, avg, rem);
		strcat(buf, buf1);
		if (strlen(buf) >= 800) {
			pr_alert("%s", buf);
			buf[0] = 0;
		}
	}

	pr_alert("%s", buf);

oom_exit:
	// This will shutdown everything including us.
	if (shutdown) {
		shutdown_start = 1;
		wake_up(&shutdown_wq);
	}

	// Wait for torture to stop us.
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);

end:
	torture_kthread_stopping("main_func");
	kfree(buf);
	kfree(result_avg);
	return 0;
}

static void
ref_scale_print_module_parms(const struct ref_scale_ops *cur_ops, const char *tag)
{
	pr_alert("%s" SCALE_FLAG
		 "--- %s: verbose=%d verbose_batched=%d shutdown=%d holdoff=%d lookup_instances=%ld loops=%ld nreaders=%d nruns=%d readdelay=%d\n", scale_type, tag,
		 verbose, verbose_batched, shutdown, holdoff, lookup_instances, loops, nreaders, nruns, readdelay);
}

static void
ref_scale_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	if (reader_tasks) {
		for (i = 0; i < nreaders; i++)
			torture_stop_kthread("ref_scale_reader",
					     reader_tasks[i].task);
	}
	kfree(reader_tasks);

	torture_stop_kthread("main_task", main_task);

	// Do scale-type-specific cleanup operations.
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	torture_cleanup_end();
}

// Shutdown kthread.  Just waits to be awakened, then shuts down system.
static int
ref_scale_shutdown(void *arg)
{
	wait_event_idle(shutdown_wq, shutdown_start);

	smp_mb(); // Wake before output.
	ref_scale_cleanup();
	kernel_power_off();

	return -EINVAL;
}

static int __init
ref_scale_init(void)
{
	long i;
	int firsterr = 0;
	static const struct ref_scale_ops *scale_ops[] = {
		&rcu_ops, &srcu_ops, RCU_TRACE_OPS RCU_TASKS_OPS &refcnt_ops, &rwlock_ops,
		&rwsem_ops, &lock_ops, &lock_irq_ops, &acqrel_ops, &clock_ops, &jiffies_ops,
		&typesafe_ref_ops, &typesafe_lock_ops, &typesafe_seqlock_ops,
	};

	if (!torture_init_begin(scale_type, verbose))
		return -EBUSY;

	for (i = 0; i < ARRAY_SIZE(scale_ops); i++) {
		cur_ops = scale_ops[i];
		if (strcmp(scale_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(scale_ops)) {
		pr_alert("rcu-scale: invalid scale type: \"%s\"\n", scale_type);
		pr_alert("rcu-scale types:");
		for (i = 0; i < ARRAY_SIZE(scale_ops); i++)
			pr_cont(" %s", scale_ops[i]->name);
		pr_cont("\n");
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->init)
		if (!cur_ops->init()) {
			firsterr = -EUCLEAN;
			goto unwind;
		}

	ref_scale_print_module_parms(cur_ops, "Start of test");

	// Shutdown task.
	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(ref_scale_shutdown, NULL,
						  shutdown_task);
		if (torture_init_error(firsterr))
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}

	// Reader tasks (default to ~75% of online CPUs).
	if (nreaders < 0)
		nreaders = (num_online_cpus() >> 1) + (num_online_cpus() >> 2);
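	// For example, with 8 CPUs online: (8 >> 1) + (8 >> 2) = 4 + 2 = 6
	// readers, which is 75% of the CPUs.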
	if (WARN_ONCE(loops <= 0, "%s: loops = %ld, adjusted to 1\n", __func__, loops))
		loops = 1;
	if (WARN_ONCE(nreaders <= 0, "%s: nreaders = %d, adjusted to 1\n", __func__, nreaders))
		nreaders = 1;
	if (WARN_ONCE(nruns <= 0, "%s: nruns = %d, adjusted to 1\n", __func__, nruns))
		nruns = 1;
	reader_tasks = kcalloc(nreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (!reader_tasks) {
		SCALEOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}

	VERBOSE_SCALEOUT("Starting %d reader threads", nreaders);

	for (i = 0; i < nreaders; i++) {
		init_waitqueue_head(&reader_tasks[i].wq);
		firsterr = torture_create_kthread(ref_scale_reader, (void *)i,
						  reader_tasks[i].task);
		if (torture_init_error(firsterr))
			goto unwind;
	}

	// Main task.
	init_waitqueue_head(&main_wq);
	firsterr = torture_create_kthread(main_func, NULL, main_task);
	if (torture_init_error(firsterr))
		goto unwind;

	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	ref_scale_cleanup();
	if (shutdown) {
		WARN_ON(!IS_MODULE(CONFIG_RCU_REF_SCALE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(ref_scale_init);
module_exit(ref_scale_cleanup);