/*-
 * Copyright (C) 2012 Oleg Moskalenko <mom040267@gmail.com>
 * Copyright (C) 2012 Gabor Kovesdan <gabor@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <errno.h>
#include <err.h>
#include <langinfo.h>
#include <math.h>
#if defined(SORT_THREADS)
#include <pthread.h>
#include <semaphore.h>
#endif
#include <stdlib.h>
#include <string.h>
#include <wchar.h>
#include <wctype.h>
#include <unistd.h>

#include "coll.h"
#include "radixsort.h"
#define DEFAULT_SORT_FUNC_RADIXSORT mergesort
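/*
 * Node-size thresholds: nodes below TINY_NODE size are finished with a
 * comparison sort instead of being radix-split further, and SMALL_NODE
 * sub-levels are sorted inline instead of being pushed on the stack.
 */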
#define TINY_NODE(sl) ((sl)->tosort_num < 65)
#define SMALL_NODE(sl) ((sl)->tosort_num < 5)
/* are we sorting in reverse order? */
static bool reverse_sort;
/* sort sub-levels array size */
static const size_t slsz = 256 * sizeof(struct sort_level*);
/* one sort level structure */
struct sort_level
{
	struct sort_level	**sublevels;
	struct sort_list_item	**leaves;
	struct sort_list_item	**sorted;
	struct sort_list_item	**tosort;
	size_t			  leaves_num;
	size_t			  leaves_sz;
	size_t			  level;
	size_t			  start_position;
	size_t			  sln;
	size_t			  tosort_num;
	size_t			  tosort_sz;
};
/* stack of sort levels ready to be sorted */
struct level_stack {
	struct level_stack	*next;
	struct sort_level	*sl;
};

static struct level_stack *g_ls;
#if defined(SORT_THREADS)
/* stack guarding mutex */
static pthread_mutex_t g_ls_mutex;

/* counter: how many items are left */
static size_t sort_left;
/* guarding mutex */
static pthread_mutex_t sort_left_mutex;

/* semaphore to count threads */
static sem_t mtsem;
/*
 * Decrement items counter
 */
static inline void
sort_left_dec(size_t n)
{

	pthread_mutex_lock(&sort_left_mutex);
	sort_left -= n;
	pthread_mutex_unlock(&sort_left_mutex);
}
/*
 * Do we have something to sort?
 */
static bool
have_sort_left(void)
{
	bool ret;

	pthread_mutex_lock(&sort_left_mutex);
	ret = (sort_left > 0);
	pthread_mutex_unlock(&sort_left_mutex);

	return (ret);
}
#else

#define sort_left_dec(n)

#endif /* SORT_THREADS */
/*
 * Push sort level to the stack
 */
static inline void
push_ls(struct sort_level *sl)
{
	struct level_stack *new_ls;

	new_ls = sort_malloc(sizeof(struct level_stack));
	new_ls->sl = sl;

#if defined(SORT_THREADS)
	if (nthreads > 1)
		pthread_mutex_lock(&g_ls_mutex);
#endif

	new_ls->next = g_ls;
	g_ls = new_ls;

#if defined(SORT_THREADS)
	if (nthreads > 1)
		pthread_mutex_unlock(&g_ls_mutex);
#endif
}
/*
 * Pop sort level from the stack (single-threaded style)
 */
static inline struct sort_level*
pop_ls_st(void)
{
	struct sort_level *sl;

	if (g_ls) {
		struct level_stack *saved_ls;

		sl = g_ls->sl;
		saved_ls = g_ls;
		g_ls = g_ls->next;
		sort_free(saved_ls);
	} else
		sl = NULL;

	return (sl);
}
#if defined(SORT_THREADS)

/*
 * Pop sort level from the stack (multi-threaded style)
 */
static inline struct sort_level*
pop_ls_mt(void)
{
	struct level_stack *saved_ls;
	struct sort_level *sl;

	pthread_mutex_lock(&g_ls_mutex);

	if (g_ls) {
		sl = g_ls->sl;
		saved_ls = g_ls;
		g_ls = g_ls->next;
	} else {
		sl = NULL;
		saved_ls = NULL;
	}

	pthread_mutex_unlock(&g_ls_mutex);

	sort_free(saved_ls);

	return (sl);
}

#endif /* defined(SORT_THREADS) */
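/*
 * Distribute an item into the sub-level bucket selected by the character
 * index indx, growing the bucket's tosort array in 128-slot steps.
 */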
static void
add_to_sublevel(struct sort_level *sl, struct sort_list_item *item, size_t indx)
{
	struct sort_level *ssl;

	ssl = sl->sublevels[indx];

	if (ssl == NULL) {
		ssl = sort_malloc(sizeof(struct sort_level));
		memset(ssl, 0, sizeof(struct sort_level));

		ssl->level = sl->level + 1;
		sl->sublevels[indx] = ssl;
	}

	if (++(ssl->tosort_num) > ssl->tosort_sz) {
		ssl->tosort_sz = ssl->tosort_num + 128;
		ssl->tosort = sort_realloc(ssl->tosort,
		    sizeof(struct sort_list_item*) * (ssl->tosort_sz));
	}

	ssl->tosort[ssl->tosort_num - 1] = item;
}
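/*
 * Append an item whose key is exhausted at this level to the list of
 * leaves, growing the array in 128-slot steps.
 */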
static void
add_leaf(struct sort_level *sl, struct sort_list_item *item)
{

	if (++(sl->leaves_num) > sl->leaves_sz) {
		sl->leaves_sz = sl->leaves_num + 128;
		sl->leaves = sort_realloc(sl->leaves,
		    (sizeof(struct sort_list_item*) * (sl->leaves_sz)));
	}
	sl->leaves[sl->leaves_num - 1] = item;
}
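/*
 * Return the character of the sort key at the given depth, or -1 if
 * the key is shorter than that depth.
 */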
static inline int
get_wc_index(struct sort_list_item *sli, size_t level)
{
	const struct bwstring *bws;

	bws = sli->ka.key[0].k;

	if ((BWSLEN(bws) > level))
		return (unsigned char) BWS_GET(bws, level);

	return (-1);
}
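/*
 * Route one item either to the leaves list (key exhausted) or to the
 * sub-level bucket selected by its character at the current depth.
 */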
static void
place_item(struct sort_level *sl, size_t item)
{
	struct sort_list_item *sli;
	int c;

	sli = sl->tosort[item];
	c = get_wc_index(sli, sl->level);

	if (c == -1)
		add_leaf(sl, sli);
	else
		add_to_sublevel(sl, sli, c);
}
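/*
 * Recursively free a sort level and all of its sub-levels.
 */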
static void
free_sort_level(struct sort_level *sl)
{

	if (sl) {
		if (sl->leaves)
			sort_free(sl->leaves);

		if (sl->level > 0)
			sort_free(sl->tosort);

		if (sl->sublevels) {
			struct sort_level *slc;
			size_t sln;

			sln = sl->sln;

			for (size_t i = 0; i < sln; ++i) {
				slc = sl->sublevels[i];
				if (slc)
					free_sort_level(slc);
			}

			sort_free(sl->sublevels);
		}

		sort_free(sl);
	}
}
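/*
 * Sort one level: handle one- and two-item nodes directly, finish tiny
 * or very deep nodes with a comparison sort, and otherwise distribute
 * the items into 256 sub-levels by their next key character.
 */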
static void
run_sort_level_next(struct sort_level *sl)
{
	struct sort_level *slc;
	size_t i, sln, tosort_num;

	sort_free(sl->sublevels);
	sl->sublevels = NULL;

	switch (sl->tosort_num) {
	case 0:
		goto end;
	case 1:
		sl->sorted[sl->start_position] = sl->tosort[0];
		sort_left_dec(1);
		goto end;
	case 2:
		/* two items can be ordered with a single comparison */
		if (list_coll_offset(&(sl->tosort[0]), &(sl->tosort[1]),
		    sl->level) > 0) {
			sl->sorted[sl->start_position++] = sl->tosort[1];
			sl->sorted[sl->start_position] = sl->tosort[0];
		} else {
			sl->sorted[sl->start_position++] = sl->tosort[0];
			sl->sorted[sl->start_position] = sl->tosort[1];
		}
		sort_left_dec(2);
		goto end;
	default:
		if (TINY_NODE(sl) || (sl->level > 15)) {
			listcoll_t func;

			func = get_list_call_func(sl->level);

			sl->leaves = sl->tosort;
			sl->leaves_num = sl->tosort_num;
			sl->leaves_sz = sl->leaves_num;
			sl->leaves = sort_realloc(sl->leaves,
			    (sizeof(struct sort_list_item *) *
			    (sl->leaves_sz)));
			sl->tosort = NULL;
			sl->tosort_num = 0;
			sl->tosort_sz = 0;

			if (sort_opts_vals.sflag) {
				if (mergesort(sl->leaves, sl->leaves_num,
				    sizeof(struct sort_list_item *),
				    (int(*)(const void *, const void *)) func) == -1)
					err(2, "Radix sort error 3");
			} else
				DEFAULT_SORT_FUNC_RADIXSORT(sl->leaves,
				    sl->leaves_num,
				    sizeof(struct sort_list_item *),
				    (int(*)(const void *, const void *)) func);

			memcpy(sl->sorted + sl->start_position,
			    sl->leaves, sl->leaves_num *
			    sizeof(struct sort_list_item*));

			sort_left_dec(sl->leaves_num);

			goto end;
		} else {
			sl->tosort_sz = sl->tosort_num;
			sl->tosort = sort_realloc(sl->tosort,
			    sizeof(struct sort_list_item*) * (sl->tosort_sz));
		}
	}

	sl->sln = 256;
	sl->sublevels = sort_malloc(slsz);
	memset(sl->sublevels, 0, slsz);

	tosort_num = sl->tosort_num;
	for (i = 0; i < tosort_num; ++i)
		place_item(sl, i);

	sort_free(sl->tosort);
	sl->tosort = NULL;
	sl->tosort_num = 0;
	sl->tosort_sz = 0;

	if (sl->leaves_num > 1) {
		if (keys_num > 1) {
			if (sort_opts_vals.sflag) {
				mergesort(sl->leaves, sl->leaves_num,
				    sizeof(struct sort_list_item *),
				    (int(*)(const void *, const void *)) list_coll);
			} else {
				DEFAULT_SORT_FUNC_RADIXSORT(sl->leaves,
				    sl->leaves_num,
				    sizeof(struct sort_list_item *),
				    (int(*)(const void *, const void *)) list_coll);
			}
		} else if (!sort_opts_vals.sflag &&
		    sort_opts_vals.complex_sort) {
			DEFAULT_SORT_FUNC_RADIXSORT(sl->leaves, sl->leaves_num,
			    sizeof(struct sort_list_item *),
			    (int(*)(const void *, const void *)) list_coll_by_str_only);
		}
	}

	sl->leaves_sz = sl->leaves_num;
	sl->leaves = sort_realloc(sl->leaves, (sizeof(struct sort_list_item *) *
	    (sl->leaves_sz)));

	sln = sl->sln;

	if (!reverse_sort) {
		memcpy(sl->sorted + sl->start_position, sl->leaves,
		    sl->leaves_num * sizeof(struct sort_list_item*));
		sl->start_position += sl->leaves_num;
		sort_left_dec(sl->leaves_num);

		sort_free(sl->leaves);
		sl->leaves = NULL;
		sl->leaves_num = 0;

		for (i = 0; i < sln; ++i) {
			slc = sl->sublevels[i];

			if (slc) {
				slc->sorted = sl->sorted;
				slc->start_position = sl->start_position;
				sl->start_position += slc->tosort_num;
				if (SMALL_NODE(slc))
					run_sort_level_next(slc);
				else
					push_ls(slc);
				sl->sublevels[i] = NULL;
			}
		}

	} else {
		size_t n;

		for (i = 0; i < sln; ++i) {
			n = sln - i - 1;
			slc = sl->sublevels[n];

			if (slc) {
				slc->sorted = sl->sorted;
				slc->start_position = sl->start_position;
				sl->start_position += slc->tosort_num;
				if (SMALL_NODE(slc))
					run_sort_level_next(slc);
				else
					push_ls(slc);
				sl->sublevels[n] = NULL;
			}
		}

		memcpy(sl->sorted + sl->start_position, sl->leaves,
		    sl->leaves_num * sizeof(struct sort_list_item*));

		sort_left_dec(sl->leaves_num);
	}

end:
	free_sort_level(sl);
}
/*
 * Single-threaded sort cycle
 */
static void
run_sort_cycle_st(void)
{
	struct sort_level *slc;

	for (;;) {
		slc = pop_ls_st();
		if (slc == NULL)
			break;
		run_sort_level_next(slc);
	}
}
#if defined(SORT_THREADS)

/*
 * Multi-threaded sort cycle
 */
static void
run_sort_cycle_mt(void)
{
	struct sort_level *slc;

	for (;;) {
		slc = pop_ls_mt();
		if (slc == NULL) {
			if (have_sort_left()) {
				pthread_yield();
				continue;
			}
			break;
		}
		run_sort_level_next(slc);
	}
}

/*
 * Sort cycle thread (in multi-threaded mode)
 */
static void*
sort_thread(void* arg)
{

	run_sort_cycle_mt();

	sem_post(&mtsem);

	return (arg);
}

#endif /* defined(SORT_THREADS) */
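/*
 * Sort the top level: distribute all items into sub-levels by their
 * first key character, then drain the stack of sub-levels either in
 * this thread or across the worker threads.
 */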
static void
run_top_sort_level(struct sort_level *sl)
{
	struct sort_level *slc;

	reverse_sort = sort_opts_vals.kflag ? keys[0].sm.rflag :
	    default_sort_mods->rflag;

	sl->start_position = 0;
	sl->sln = 256;
	sl->sublevels = sort_malloc(slsz);
	memset(sl->sublevels, 0, slsz);

	for (size_t i = 0; i < sl->tosort_num; ++i)
		place_item(sl, i);

	if (sl->leaves_num > 1) {
		if (keys_num > 1) {
			if (sort_opts_vals.sflag) {
				mergesort(sl->leaves, sl->leaves_num,
				    sizeof(struct sort_list_item *),
				    (int(*)(const void *, const void *)) list_coll);
			} else {
				DEFAULT_SORT_FUNC_RADIXSORT(sl->leaves,
				    sl->leaves_num,
				    sizeof(struct sort_list_item *),
				    (int(*)(const void *, const void *)) list_coll);
			}
		} else if (!sort_opts_vals.sflag &&
		    sort_opts_vals.complex_sort) {
			DEFAULT_SORT_FUNC_RADIXSORT(sl->leaves, sl->leaves_num,
			    sizeof(struct sort_list_item *),
			    (int(*)(const void *, const void *)) list_coll_by_str_only);
		}
	}

	if (!reverse_sort) {
		memcpy(sl->tosort + sl->start_position, sl->leaves,
		    sl->leaves_num * sizeof(struct sort_list_item*));
		sl->start_position += sl->leaves_num;
		sort_left_dec(sl->leaves_num);

		for (size_t i = 0; i < sl->sln; ++i) {
			slc = sl->sublevels[i];

			if (slc) {
				slc->sorted = sl->tosort;
				slc->start_position = sl->start_position;
				sl->start_position += slc->tosort_num;
				push_ls(slc);
				sl->sublevels[i] = NULL;
			}
		}

	} else {
		size_t n;

		for (size_t i = 0; i < sl->sln; ++i) {
			n = sl->sln - i - 1;
			slc = sl->sublevels[n];

			if (slc) {
				slc->sorted = sl->tosort;
				slc->start_position = sl->start_position;
				sl->start_position += slc->tosort_num;
				push_ls(slc);
				sl->sublevels[n] = NULL;
			}
		}

		memcpy(sl->tosort + sl->start_position, sl->leaves,
		    sl->leaves_num * sizeof(struct sort_list_item*));

		sort_left_dec(sl->leaves_num);
	}

#if defined(SORT_THREADS)
	if (nthreads < 2) {
#endif
		run_sort_cycle_st();
#if defined(SORT_THREADS)
	} else {
		size_t i;

		for (i = 0; i < nthreads; ++i) {
			pthread_attr_t attr;
			pthread_t pth;

			pthread_attr_init(&attr);
			pthread_attr_setdetachstate(&attr,
			    PTHREAD_DETACHED);

			for (;;) {
				/*
				 * pthread_create() returns an error number
				 * and does not set errno.
				 */
				int res = pthread_create(&pth, &attr,
				    sort_thread, NULL);
				if (res == 0)
					break;
				if (res == EAGAIN) {
					pthread_yield();
					continue;
				}
				errc(2, res, NULL);
			}

			pthread_attr_destroy(&attr);
		}

		for (i = 0; i < nthreads; ++i)
			sem_wait(&mtsem);
	}
#endif /* defined(SORT_THREADS) */
}
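/*
 * Set up threading state and the root sort level, then run the sort.
 */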
static void
run_sort(struct sort_list_item **base, size_t nmemb)
{
	struct sort_level *sl;

#if defined(SORT_THREADS)
	size_t nthreads_save = nthreads;

	if (nmemb < MT_SORT_THRESHOLD)
		nthreads = 1;

	if (nthreads > 1) {
		pthread_mutexattr_t mattr;

		pthread_mutexattr_init(&mattr);
		pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_ADAPTIVE_NP);

		pthread_mutex_init(&g_ls_mutex, &mattr);
		pthread_mutex_init(&sort_left_mutex, &mattr);

		pthread_mutexattr_destroy(&mattr);

		sem_init(&mtsem, 0, 0);
	}
#endif

	sl = sort_malloc(sizeof(struct sort_level));
	memset(sl, 0, sizeof(struct sort_level));

	sl->tosort = base;
	sl->tosort_num = nmemb;
	sl->tosort_sz = nmemb;

#if defined(SORT_THREADS)
	sort_left = nmemb;
#endif

	run_top_sort_level(sl);

	free_sort_level(sl);

#if defined(SORT_THREADS)
	if (nthreads > 1) {
		sem_destroy(&mtsem);
		pthread_mutex_destroy(&g_ls_mutex);
		pthread_mutex_destroy(&sort_left_mutex);
	}
	nthreads = nthreads_save;
#endif
}
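/*
 * Entry point of the radix sort.
 */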
void
rxsort(struct sort_list_item **base, size_t nmemb)
{

	run_sort(base, nmemb);
}