From 44ebca5044832ac4e5a24a7ddbfa2829227d0061 Mon Sep 17 00:00:00 2001 From: Sascha Wildner Date: Sat, 20 Oct 2012 10:39:26 +0200 Subject: [PATCH 01/16] newfs(8): Remove an unused variable. Found-by: gcc47 --- sbin/newfs/mkfs.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/sbin/newfs/mkfs.c b/sbin/newfs/mkfs.c index e07fd5be39..70e5057cfc 100644 --- a/sbin/newfs/mkfs.c +++ b/sbin/newfs/mkfs.c @@ -32,7 +32,6 @@ * * @(#)mkfs.c 8.11 (Berkeley) 5/3/95 * $FreeBSD: src/sbin/newfs/mkfs.c,v 1.29.2.6 2001/09/21 19:15:21 dillon Exp $ - * $DragonFly: src/sbin/newfs/mkfs.c,v 1.14 2007/05/20 19:29:21 dillon Exp $ */ #include "defs.h" @@ -1140,12 +1139,10 @@ iput(struct ufs1_dinode *ip, ino_t ino) { struct ufs1_dinode inobuf[MAXINOPB]; daddr_t d; - int __unused c; #ifdef FSIRAND ip->di_gen = random(); #endif - c = ino_to_cg(&sblock, ino); rdfs(fsbtodb(&sblock, cgtod(&sblock, 0)), sblock.fs_cgsize, (char *)&acg); if (acg.cg_magic != CG_MAGIC) { -- 2.41.0 From 6d2121bf2c0a60c3e5363bdeb914dc5961111b46 Mon Sep 17 00:00:00 2001 From: Sascha Wildner Date: Sat, 20 Oct 2012 10:49:14 +0200 Subject: [PATCH 02/16] udevd(8): Reduce the scope of a yet unused variable. 
--- sbin/udevd/udevd_monitor.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/sbin/udevd/udevd_monitor.c b/sbin/udevd/udevd_monitor.c index f0b6421753..286a668b76 100644 --- a/sbin/udevd/udevd_monitor.c +++ b/sbin/udevd/udevd_monitor.c @@ -110,7 +110,6 @@ struct udev_monitor * udev_monitor_init(struct client_info *cli, prop_array_t filters) { struct udev_monitor *udm; - int __unused error; udm = malloc(sizeof(struct udev_monitor)); if (udm == NULL) @@ -124,7 +123,7 @@ udev_monitor_init(struct client_info *cli, prop_array_t filters) udm->cli = cli; if (filters != NULL) { - error = _parse_filter_prop(udm, filters); + __unused int error = _parse_filter_prop(udm, filters); /* XXX: ignore error for now */ } -- 2.41.0 From 629ff9f7b980ce64c3aea92486ca4bb1a64f1bf3 Mon Sep 17 00:00:00 2001 From: John Marino Date: Sat, 20 Oct 2012 08:35:34 +0200 Subject: [PATCH 03/16] Add libgomp source files to gcc 4.7 vendor branch --- contrib/gcc-4.7/libgomp/alloc.c | 58 ++ contrib/gcc-4.7/libgomp/barrier.c | 41 + contrib/gcc-4.7/libgomp/config.h.in | 135 ++++ contrib/gcc-4.7/libgomp/config/bsd/proc.c | 114 +++ .../gcc-4.7/libgomp/config/posix/affinity.c | 38 + contrib/gcc-4.7/libgomp/config/posix/bar.c | 178 ++++ contrib/gcc-4.7/libgomp/config/posix/bar.h | 115 +++ contrib/gcc-4.7/libgomp/config/posix/lock.c | 307 +++++++ contrib/gcc-4.7/libgomp/config/posix/mutex.c | 1 + contrib/gcc-4.7/libgomp/config/posix/mutex.h | 57 ++ .../gcc-4.7/libgomp/config/posix/omp-lock.h | 23 + contrib/gcc-4.7/libgomp/config/posix/proc.c | 101 +++ .../gcc-4.7/libgomp/config/posix/ptrlock.c | 1 + .../gcc-4.7/libgomp/config/posix/ptrlock.h | 66 ++ contrib/gcc-4.7/libgomp/config/posix/sem.c | 123 +++ contrib/gcc-4.7/libgomp/config/posix/sem.h | 87 ++ contrib/gcc-4.7/libgomp/config/posix/time.c | 78 ++ contrib/gcc-4.7/libgomp/critical.c | 148 ++++ contrib/gcc-4.7/libgomp/env.c | 758 ++++++++++++++++++ contrib/gcc-4.7/libgomp/error.c | 66 ++ contrib/gcc-4.7/libgomp/fortran.c | 437 ++++++++++ 
contrib/gcc-4.7/libgomp/iter.c | 337 ++++++++ contrib/gcc-4.7/libgomp/iter_ull.c | 344 ++++++++ contrib/gcc-4.7/libgomp/libgomp.h | 591 ++++++++++++++ contrib/gcc-4.7/libgomp/libgomp.map | 186 +++++ contrib/gcc-4.7/libgomp/libgomp.spec.in | 3 + contrib/gcc-4.7/libgomp/libgomp_f.h.in | 93 +++ contrib/gcc-4.7/libgomp/libgomp_g.h | 183 +++++ contrib/gcc-4.7/libgomp/loop.c | 620 ++++++++++++++ contrib/gcc-4.7/libgomp/loop_ull.c | 571 +++++++++++++ contrib/gcc-4.7/libgomp/omp.h.in | 107 +++ contrib/gcc-4.7/libgomp/omp_lib.f90.in | 299 +++++++ contrib/gcc-4.7/libgomp/omp_lib.h.in | 70 ++ contrib/gcc-4.7/libgomp/ordered.c | 251 ++++++ contrib/gcc-4.7/libgomp/parallel.c | 202 +++++ contrib/gcc-4.7/libgomp/sections.c | 159 ++++ contrib/gcc-4.7/libgomp/single.c | 104 +++ contrib/gcc-4.7/libgomp/task.c | 387 +++++++++ contrib/gcc-4.7/libgomp/team.c | 564 +++++++++++++ contrib/gcc-4.7/libgomp/work.c | 264 ++++++ 40 files changed, 8267 insertions(+) create mode 100644 contrib/gcc-4.7/libgomp/alloc.c create mode 100644 contrib/gcc-4.7/libgomp/barrier.c create mode 100644 contrib/gcc-4.7/libgomp/config.h.in create mode 100644 contrib/gcc-4.7/libgomp/config/bsd/proc.c create mode 100644 contrib/gcc-4.7/libgomp/config/posix/affinity.c create mode 100644 contrib/gcc-4.7/libgomp/config/posix/bar.c create mode 100644 contrib/gcc-4.7/libgomp/config/posix/bar.h create mode 100644 contrib/gcc-4.7/libgomp/config/posix/lock.c create mode 100644 contrib/gcc-4.7/libgomp/config/posix/mutex.c create mode 100644 contrib/gcc-4.7/libgomp/config/posix/mutex.h create mode 100644 contrib/gcc-4.7/libgomp/config/posix/omp-lock.h create mode 100644 contrib/gcc-4.7/libgomp/config/posix/proc.c create mode 100644 contrib/gcc-4.7/libgomp/config/posix/ptrlock.c create mode 100644 contrib/gcc-4.7/libgomp/config/posix/ptrlock.h create mode 100644 contrib/gcc-4.7/libgomp/config/posix/sem.c create mode 100644 contrib/gcc-4.7/libgomp/config/posix/sem.h create mode 100644 
contrib/gcc-4.7/libgomp/config/posix/time.c create mode 100644 contrib/gcc-4.7/libgomp/critical.c create mode 100644 contrib/gcc-4.7/libgomp/env.c create mode 100644 contrib/gcc-4.7/libgomp/error.c create mode 100644 contrib/gcc-4.7/libgomp/fortran.c create mode 100644 contrib/gcc-4.7/libgomp/iter.c create mode 100644 contrib/gcc-4.7/libgomp/iter_ull.c create mode 100644 contrib/gcc-4.7/libgomp/libgomp.h create mode 100644 contrib/gcc-4.7/libgomp/libgomp.map create mode 100644 contrib/gcc-4.7/libgomp/libgomp.spec.in create mode 100644 contrib/gcc-4.7/libgomp/libgomp_f.h.in create mode 100644 contrib/gcc-4.7/libgomp/libgomp_g.h create mode 100644 contrib/gcc-4.7/libgomp/loop.c create mode 100644 contrib/gcc-4.7/libgomp/loop_ull.c create mode 100644 contrib/gcc-4.7/libgomp/omp.h.in create mode 100644 contrib/gcc-4.7/libgomp/omp_lib.f90.in create mode 100644 contrib/gcc-4.7/libgomp/omp_lib.h.in create mode 100644 contrib/gcc-4.7/libgomp/ordered.c create mode 100644 contrib/gcc-4.7/libgomp/parallel.c create mode 100644 contrib/gcc-4.7/libgomp/sections.c create mode 100644 contrib/gcc-4.7/libgomp/single.c create mode 100644 contrib/gcc-4.7/libgomp/task.c create mode 100644 contrib/gcc-4.7/libgomp/team.c create mode 100644 contrib/gcc-4.7/libgomp/work.c diff --git a/contrib/gcc-4.7/libgomp/alloc.c b/contrib/gcc-4.7/libgomp/alloc.c new file mode 100644 index 0000000000..3a0deb0323 --- /dev/null +++ b/contrib/gcc-4.7/libgomp/alloc.c @@ -0,0 +1,58 @@ +/* Copyright (C) 2005, 2009 Free Software Foundation, Inc. + Contributed by Richard Henderson . + + This file is part of the GNU OpenMP Library (libgomp). + + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. 
+ + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* This file contains wrappers for the system allocation routines. Most + places in the OpenMP API do not make any provision for failure, so in + general we cannot allow memory allocation to fail. */ + +#include "libgomp.h" +#include + + +void * +gomp_malloc (size_t size) +{ + void *ret = malloc (size); + if (ret == NULL) + gomp_fatal ("Out of memory allocating %lu bytes", (unsigned long) size); + return ret; +} + +void * +gomp_malloc_cleared (size_t size) +{ + void *ret = calloc (1, size); + if (ret == NULL) + gomp_fatal ("Out of memory allocating %lu bytes", (unsigned long) size); + return ret; +} + +void * +gomp_realloc (void *old, size_t size) +{ + void *ret = realloc (old, size); + if (ret == NULL) + gomp_fatal ("Out of memory allocating %lu bytes", (unsigned long) size); + return ret; +} diff --git a/contrib/gcc-4.7/libgomp/barrier.c b/contrib/gcc-4.7/libgomp/barrier.c new file mode 100644 index 0000000000..8ed2298002 --- /dev/null +++ b/contrib/gcc-4.7/libgomp/barrier.c @@ -0,0 +1,41 @@ +/* Copyright (C) 2005, 2009 Free Software Foundation, Inc. + Contributed by Richard Henderson . + + This file is part of the GNU OpenMP Library (libgomp). 
+ + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* This file handles the BARRIER construct. */ + +#include "libgomp.h" + + +void +GOMP_barrier (void) +{ + struct gomp_thread *thr = gomp_thread (); + struct gomp_team *team = thr->ts.team; + + /* It is legal to have orphaned barriers. */ + if (team == NULL) + return; + + gomp_team_barrier_wait (&team->barrier); +} diff --git a/contrib/gcc-4.7/libgomp/config.h.in b/contrib/gcc-4.7/libgomp/config.h.in new file mode 100644 index 0000000000..14c7e2a9a1 --- /dev/null +++ b/contrib/gcc-4.7/libgomp/config.h.in @@ -0,0 +1,135 @@ +/* config.h.in. Generated from configure.ac by autoheader. */ + +/* Define to 1 if the target assembler supports .symver directive. */ +#undef HAVE_AS_SYMVER_DIRECTIVE + +/* Define to 1 if the target supports __attribute__((alias(...))). */ +#undef HAVE_ATTRIBUTE_ALIAS + +/* Define to 1 if the target supports __attribute__((dllexport)). */ +#undef HAVE_ATTRIBUTE_DLLEXPORT + +/* Define to 1 if the target supports __attribute__((visibility(...))). */ +#undef HAVE_ATTRIBUTE_VISIBILITY + +/* Define if the POSIX Semaphores do not work on your system. 
*/ +#undef HAVE_BROKEN_POSIX_SEMAPHORES + +/* Define to 1 if the target assembler supports thread-local storage. */ +#undef HAVE_CC_TLS + +/* Define to 1 if you have the `clock_gettime' function. */ +#undef HAVE_CLOCK_GETTIME + +/* Define to 1 if you have the header file. */ +#undef HAVE_DLFCN_H + +/* Define to 1 if you have the `getloadavg' function. */ +#undef HAVE_GETLOADAVG + +/* Define to 1 if you have the header file. */ +#undef HAVE_INTTYPES_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_MEMORY_H + +/* Define if pthread_{,attr_}{g,s}etaffinity_np is supported. */ +#undef HAVE_PTHREAD_AFFINITY_NP + +/* Define to 1 if you have the header file. */ +#undef HAVE_SEMAPHORE_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STDINT_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STDLIB_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STRINGS_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STRING_H + +/* Define to 1 if you have the `strtoull' function. */ +#undef HAVE_STRTOULL + +/* Define to 1 if the target runtime linker supports binding the same symbol + to different versions. */ +#undef HAVE_SYMVER_SYMBOL_RENAMING_RUNTIME_SUPPORT + +/* Define to 1 if the target supports __sync_*_compare_and_swap */ +#undef HAVE_SYNC_BUILTINS + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_LOADAVG_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_STAT_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_TIME_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_TYPES_H + +/* Define to 1 if the target supports thread-local storage. */ +#undef HAVE_TLS + +/* Define to 1 if you have the header file. */ +#undef HAVE_UNISTD_H + +/* Define to 1 if GNU symbol versioning is used for libgomp. */ +#undef LIBGOMP_GNU_SYMBOL_VERSIONING + +/* Define to the sub-directory in which libtool stores uninstalled libraries. 
+ */ +#undef LT_OBJDIR + +/* Name of package */ +#undef PACKAGE + +/* Define to the address where bug reports for this package should be sent. */ +#undef PACKAGE_BUGREPORT + +/* Define to the full name of this package. */ +#undef PACKAGE_NAME + +/* Define to the full name and version of this package. */ +#undef PACKAGE_STRING + +/* Define to the one symbol short name of this package. */ +#undef PACKAGE_TARNAME + +/* Define to the home page for this package. */ +#undef PACKAGE_URL + +/* Define to the version of this package. */ +#undef PACKAGE_VERSION + +/* The size of `char', as computed by sizeof. */ +#undef SIZEOF_CHAR + +/* The size of `int', as computed by sizeof. */ +#undef SIZEOF_INT + +/* The size of `long', as computed by sizeof. */ +#undef SIZEOF_LONG + +/* The size of `short', as computed by sizeof. */ +#undef SIZEOF_SHORT + +/* The size of `void *', as computed by sizeof. */ +#undef SIZEOF_VOID_P + +/* Define to 1 if you have the ANSI C header files. */ +#undef STDC_HEADERS + +/* Define if you can safely include both and . */ +#undef STRING_WITH_STRINGS + +/* Define to 1 if you can safely include both and . */ +#undef TIME_WITH_SYS_TIME + +/* Version number of package */ +#undef VERSION diff --git a/contrib/gcc-4.7/libgomp/config/bsd/proc.c b/contrib/gcc-4.7/libgomp/config/bsd/proc.c new file mode 100644 index 0000000000..ec16f2c891 --- /dev/null +++ b/contrib/gcc-4.7/libgomp/config/bsd/proc.c @@ -0,0 +1,114 @@ +/* Copyright (C) 2005, 2006, 2008, 2009 Free Software Foundation, Inc. + Contributed by Richard Henderson . + + This file is part of the GNU OpenMP Library (libgomp). + + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. 
+ + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* This file contains system specific routines related to counting + online processors and dynamic load balancing. It is expected that + a system may well want to write special versions of each of these. + + The following implementation uses a mix of POSIX and BSD routines. */ + +#include "libgomp.h" +#include +#include +#ifdef HAVE_GETLOADAVG +# ifdef HAVE_SYS_LOADAVG_H +# include +# endif +#endif +#ifdef HAVE_SYS_SYSCTL_H +# include +#endif + +static int +get_num_procs (void) +{ +#ifdef _SC_NPROCESSORS_ONLN + return sysconf (_SC_NPROCESSORS_ONLN); +#elif defined HW_NCPU + int ncpus = 1; + size_t len = sizeof(ncpus); + sysctl((int[2]) {CTL_HW, HW_NCPU}, 2, &ncpus, &len, NULL, 0); + return ncpus; +#else + return 0; +#endif +} + +/* At startup, determine the default number of threads. It would seem + this should be related to the number of cpus online. */ + +void +gomp_init_num_threads (void) +{ + int ncpus = get_num_procs (); + + if (ncpus > 0) + gomp_global_icv.nthreads_var = ncpus; +} + +/* When OMP_DYNAMIC is set, at thread launch determine the number of + threads we should spawn for this team. */ +/* ??? I have no idea what best practice for this is. Surely some + function of the number of processors that are *still* online and + the load average. 
Here I use the number of processors online + minus the 15 minute load average. */ + +unsigned +gomp_dynamic_max_threads (void) +{ + unsigned n_onln, loadavg; + unsigned nthreads_var = gomp_icv (false)->nthreads_var; + + n_onln = get_num_procs (); + if (!n_onln || n_onln > nthreads_var) + n_onln = nthreads_var; + + loadavg = 0; +#ifdef HAVE_GETLOADAVG + { + double dloadavg[3]; + if (getloadavg (dloadavg, 3) == 3) + { + /* Add 0.1 to get a kind of biased rounding. */ + loadavg = dloadavg[2] + 0.1; + } + } +#endif + + if (loadavg >= n_onln) + return 1; + else + return n_onln - loadavg; +} + +int +omp_get_num_procs (void) +{ + int ncpus = get_num_procs (); + if (ncpus <= 0) + ncpus = gomp_icv (false)->nthreads_var; + return ncpus; +} + +ialias (omp_get_num_procs) diff --git a/contrib/gcc-4.7/libgomp/config/posix/affinity.c b/contrib/gcc-4.7/libgomp/config/posix/affinity.c new file mode 100644 index 0000000000..25865fcb5d --- /dev/null +++ b/contrib/gcc-4.7/libgomp/config/posix/affinity.c @@ -0,0 +1,38 @@ +/* Copyright (C) 2006, 2009 Free Software Foundation, Inc. + Contributed by Jakub Jelinek . + + This file is part of the GNU OpenMP Library (libgomp). + + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. 
+ + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* This is a generic stub implementation of a CPU affinity setting. */ + +#include "libgomp.h" + +void +gomp_init_affinity (void) +{ +} + +void +gomp_init_thread_affinity (pthread_attr_t *attr) +{ + (void) attr; +} diff --git a/contrib/gcc-4.7/libgomp/config/posix/bar.c b/contrib/gcc-4.7/libgomp/config/posix/bar.c new file mode 100644 index 0000000000..0101d1f25c --- /dev/null +++ b/contrib/gcc-4.7/libgomp/config/posix/bar.c @@ -0,0 +1,178 @@ +/* Copyright (C) 2005, 2008, 2009 Free Software Foundation, Inc. + Contributed by Richard Henderson . + + This file is part of the GNU OpenMP Library (libgomp). + + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* This is the default implementation of a barrier synchronization mechanism + for libgomp. This type is private to the library. Note that we rely on + being able to adjust the barrier count while threads are blocked, so the + POSIX pthread_barrier_t won't work. 
*/ + +#include "libgomp.h" + + +void +gomp_barrier_init (gomp_barrier_t *bar, unsigned count) +{ + gomp_mutex_init (&bar->mutex1); +#ifndef HAVE_SYNC_BUILTINS + gomp_mutex_init (&bar->mutex2); +#endif + gomp_sem_init (&bar->sem1, 0); + gomp_sem_init (&bar->sem2, 0); + bar->total = count; + bar->arrived = 0; + bar->generation = 0; +} + +void +gomp_barrier_destroy (gomp_barrier_t *bar) +{ + /* Before destroying, make sure all threads have left the barrier. */ + gomp_mutex_lock (&bar->mutex1); + gomp_mutex_unlock (&bar->mutex1); + + gomp_mutex_destroy (&bar->mutex1); +#ifndef HAVE_SYNC_BUILTINS + gomp_mutex_destroy (&bar->mutex2); +#endif + gomp_sem_destroy (&bar->sem1); + gomp_sem_destroy (&bar->sem2); +} + +void +gomp_barrier_reinit (gomp_barrier_t *bar, unsigned count) +{ + gomp_mutex_lock (&bar->mutex1); + bar->total = count; + gomp_mutex_unlock (&bar->mutex1); +} + +void +gomp_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state) +{ + unsigned int n; + + if (state & 1) + { + n = --bar->arrived; + if (n > 0) + { + do + gomp_sem_post (&bar->sem1); + while (--n != 0); + gomp_sem_wait (&bar->sem2); + } + gomp_mutex_unlock (&bar->mutex1); + } + else + { + gomp_mutex_unlock (&bar->mutex1); + gomp_sem_wait (&bar->sem1); + +#ifdef HAVE_SYNC_BUILTINS + n = __sync_add_and_fetch (&bar->arrived, -1); +#else + gomp_mutex_lock (&bar->mutex2); + n = --bar->arrived; + gomp_mutex_unlock (&bar->mutex2); +#endif + + if (n == 0) + gomp_sem_post (&bar->sem2); + } +} + +void +gomp_barrier_wait (gomp_barrier_t *barrier) +{ + gomp_barrier_wait_end (barrier, gomp_barrier_wait_start (barrier)); +} + +void +gomp_team_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state) +{ + unsigned int n; + + if (state & 1) + { + n = --bar->arrived; + struct gomp_thread *thr = gomp_thread (); + struct gomp_team *team = thr->ts.team; + + if (team->task_count) + { + gomp_barrier_handle_tasks (state); + if (n > 0) + gomp_sem_wait (&bar->sem2); + gomp_mutex_unlock (&bar->mutex1); + 
return; + } + + bar->generation = state + 3; + if (n > 0) + { + do + gomp_sem_post (&bar->sem1); + while (--n != 0); + gomp_sem_wait (&bar->sem2); + } + gomp_mutex_unlock (&bar->mutex1); + } + else + { + gomp_mutex_unlock (&bar->mutex1); + do + { + gomp_sem_wait (&bar->sem1); + if (bar->generation & 1) + gomp_barrier_handle_tasks (state); + } + while (bar->generation != state + 4); + +#ifdef HAVE_SYNC_BUILTINS + n = __sync_add_and_fetch (&bar->arrived, -1); +#else + gomp_mutex_lock (&bar->mutex2); + n = --bar->arrived; + gomp_mutex_unlock (&bar->mutex2); +#endif + + if (n == 0) + gomp_sem_post (&bar->sem2); + } +} + +void +gomp_team_barrier_wait (gomp_barrier_t *barrier) +{ + gomp_team_barrier_wait_end (barrier, gomp_barrier_wait_start (barrier)); +} + +void +gomp_team_barrier_wake (gomp_barrier_t *bar, int count) +{ + if (count == 0) + count = bar->total - 1; + while (count-- > 0) + gomp_sem_post (&bar->sem1); +} diff --git a/contrib/gcc-4.7/libgomp/config/posix/bar.h b/contrib/gcc-4.7/libgomp/config/posix/bar.h new file mode 100644 index 0000000000..ac8ae6f85b --- /dev/null +++ b/contrib/gcc-4.7/libgomp/config/posix/bar.h @@ -0,0 +1,115 @@ +/* Copyright (C) 2005, 2008, 2009 Free Software Foundation, Inc. + Contributed by Richard Henderson . + + This file is part of the GNU OpenMP Library (libgomp). + + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. 
+ + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* This is the default implementation of a barrier synchronization mechanism + for libgomp. This type is private to the library. Note that we rely on + being able to adjust the barrier count while threads are blocked, so the + POSIX pthread_barrier_t won't work. */ + +#ifndef GOMP_BARRIER_H +#define GOMP_BARRIER_H 1 + +#include + +typedef struct +{ + gomp_mutex_t mutex1; +#ifndef HAVE_SYNC_BUILTINS + gomp_mutex_t mutex2; +#endif + gomp_sem_t sem1; + gomp_sem_t sem2; + unsigned total; + unsigned arrived; + unsigned generation; +} gomp_barrier_t; +typedef unsigned int gomp_barrier_state_t; + +extern void gomp_barrier_init (gomp_barrier_t *, unsigned); +extern void gomp_barrier_reinit (gomp_barrier_t *, unsigned); +extern void gomp_barrier_destroy (gomp_barrier_t *); + +extern void gomp_barrier_wait (gomp_barrier_t *); +extern void gomp_barrier_wait_end (gomp_barrier_t *, gomp_barrier_state_t); +extern void gomp_team_barrier_wait (gomp_barrier_t *); +extern void gomp_team_barrier_wait_end (gomp_barrier_t *, + gomp_barrier_state_t); +extern void gomp_team_barrier_wake (gomp_barrier_t *, int); + +static inline gomp_barrier_state_t +gomp_barrier_wait_start (gomp_barrier_t *bar) +{ + unsigned int ret; + gomp_mutex_lock (&bar->mutex1); + ret = bar->generation & ~3; + ret += ++bar->arrived == bar->total; + return ret; +} + +static inline bool +gomp_barrier_last_thread (gomp_barrier_state_t state) +{ + return state & 1; +} + +static inline void +gomp_barrier_wait_last (gomp_barrier_t *bar) +{ + gomp_barrier_wait (bar); +} + +/* All the inlines below must be called with team->task_lock + held. 
*/ + +static inline void +gomp_team_barrier_set_task_pending (gomp_barrier_t *bar) +{ + bar->generation |= 1; +} + +static inline void +gomp_team_barrier_clear_task_pending (gomp_barrier_t *bar) +{ + bar->generation &= ~1; +} + +static inline void +gomp_team_barrier_set_waiting_for_tasks (gomp_barrier_t *bar) +{ + bar->generation |= 2; +} + +static inline bool +gomp_team_barrier_waiting_for_tasks (gomp_barrier_t *bar) +{ + return (bar->generation & 2) != 0; +} + +static inline void +gomp_team_barrier_done (gomp_barrier_t *bar, gomp_barrier_state_t state) +{ + bar->generation = (state & ~3) + 4; +} + +#endif /* GOMP_BARRIER_H */ diff --git a/contrib/gcc-4.7/libgomp/config/posix/lock.c b/contrib/gcc-4.7/libgomp/config/posix/lock.c new file mode 100644 index 0000000000..8cd715e645 --- /dev/null +++ b/contrib/gcc-4.7/libgomp/config/posix/lock.c @@ -0,0 +1,307 @@ +/* Copyright (C) 2005, 2008, 2009 Free Software Foundation, Inc. + Contributed by Richard Henderson . + + This file is part of the GNU OpenMP Library (libgomp). + + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . 
*/ + +/* This is the default PTHREADS implementation of the public OpenMP + locking primitives. + + Because OpenMP uses different entry points for normal and recursive + locks, and pthreads uses only one entry point, a system may be able + to do better and streamline the locking as well as reduce the size + of the types exported. */ + +/* We need Unix98 extensions to get recursive locks. On Tru64 UNIX V4.0F, + the declarations are available without _XOPEN_SOURCE, which actually + breaks compilation. */ +#ifndef __osf__ +#define _XOPEN_SOURCE 500 +#endif + +#include "libgomp.h" + +#ifdef HAVE_BROKEN_POSIX_SEMAPHORES +void +gomp_init_lock_30 (omp_lock_t *lock) +{ + pthread_mutex_init (lock, NULL); +} + +void +gomp_destroy_lock_30 (omp_lock_t *lock) +{ + pthread_mutex_destroy (lock); +} + +void +gomp_set_lock_30 (omp_lock_t *lock) +{ + pthread_mutex_lock (lock); +} + +void +gomp_unset_lock_30 (omp_lock_t *lock) +{ + pthread_mutex_unlock (lock); +} + +int +gomp_test_lock_30 (omp_lock_t *lock) +{ + return pthread_mutex_trylock (lock) == 0; +} + +void +gomp_init_nest_lock_30 (omp_nest_lock_t *lock) +{ + pthread_mutex_init (&lock->lock, NULL); + lock->count = 0; + lock->owner = NULL; +} + +void +gomp_destroy_nest_lock_30 (omp_nest_lock_t *lock) +{ + pthread_mutex_destroy (&lock->lock); +} + +void +gomp_set_nest_lock_30 (omp_nest_lock_t *lock) +{ + void *me = gomp_icv (true); + + if (lock->owner != me) + { + pthread_mutex_lock (&lock->lock); + lock->owner = me; + } + lock->count++; +} + +void +gomp_unset_nest_lock_30 (omp_nest_lock_t *lock) +{ + if (--lock->count == 0) + { + lock->owner = NULL; + pthread_mutex_unlock (&lock->lock); + } +} + +int +gomp_test_nest_lock_30 (omp_nest_lock_t *lock) +{ + void *me = gomp_icv (true); + + if (lock->owner != me) + { + if (pthread_mutex_trylock (&lock->lock) != 0) + return 0; + lock->owner = me; + } + + return ++lock->count; +} + +#else + +void +gomp_init_lock_30 (omp_lock_t *lock) +{ + sem_init (lock, 0, 1); +} + +void 
+gomp_destroy_lock_30 (omp_lock_t *lock) +{ + sem_destroy (lock); +} + +void +gomp_set_lock_30 (omp_lock_t *lock) +{ + while (sem_wait (lock) != 0) + ; +} + +void +gomp_unset_lock_30 (omp_lock_t *lock) +{ + sem_post (lock); +} + +int +gomp_test_lock_30 (omp_lock_t *lock) +{ + return sem_trywait (lock) == 0; +} + +void +gomp_init_nest_lock_30 (omp_nest_lock_t *lock) +{ + sem_init (&lock->lock, 0, 1); + lock->count = 0; + lock->owner = NULL; +} + +void +gomp_destroy_nest_lock_30 (omp_nest_lock_t *lock) +{ + sem_destroy (&lock->lock); +} + +void +gomp_set_nest_lock_30 (omp_nest_lock_t *lock) +{ + void *me = gomp_icv (true); + + if (lock->owner != me) + { + while (sem_wait (&lock->lock) != 0) + ; + lock->owner = me; + } + lock->count++; +} + +void +gomp_unset_nest_lock_30 (omp_nest_lock_t *lock) +{ + if (--lock->count == 0) + { + lock->owner = NULL; + sem_post (&lock->lock); + } +} + +int +gomp_test_nest_lock_30 (omp_nest_lock_t *lock) +{ + void *me = gomp_icv (true); + + if (lock->owner != me) + { + if (sem_trywait (&lock->lock) != 0) + return 0; + lock->owner = me; + } + + return ++lock->count; +} +#endif + +#ifdef LIBGOMP_GNU_SYMBOL_VERSIONING +void +gomp_init_lock_25 (omp_lock_25_t *lock) +{ + pthread_mutex_init (lock, NULL); +} + +void +gomp_destroy_lock_25 (omp_lock_25_t *lock) +{ + pthread_mutex_destroy (lock); +} + +void +gomp_set_lock_25 (omp_lock_25_t *lock) +{ + pthread_mutex_lock (lock); +} + +void +gomp_unset_lock_25 (omp_lock_25_t *lock) +{ + pthread_mutex_unlock (lock); +} + +int +gomp_test_lock_25 (omp_lock_25_t *lock) +{ + return pthread_mutex_trylock (lock) == 0; +} + +void +gomp_init_nest_lock_25 (omp_nest_lock_25_t *lock) +{ + pthread_mutexattr_t attr; + + pthread_mutexattr_init (&attr); + pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE); + pthread_mutex_init (&lock->lock, &attr); + lock->count = 0; + pthread_mutexattr_destroy (&attr); +} + +void +gomp_destroy_nest_lock_25 (omp_nest_lock_25_t *lock) +{ + pthread_mutex_destroy 
(&lock->lock); +} + +void +gomp_set_nest_lock_25 (omp_nest_lock_25_t *lock) +{ + pthread_mutex_lock (&lock->lock); + lock->count++; +} + +void +gomp_unset_nest_lock_25 (omp_nest_lock_25_t *lock) +{ + lock->count--; + pthread_mutex_unlock (&lock->lock); +} + +int +gomp_test_nest_lock_25 (omp_nest_lock_25_t *lock) +{ + if (pthread_mutex_trylock (&lock->lock) == 0) + return ++lock->count; + return 0; +} + +omp_lock_symver (omp_init_lock) +omp_lock_symver (omp_destroy_lock) +omp_lock_symver (omp_set_lock) +omp_lock_symver (omp_unset_lock) +omp_lock_symver (omp_test_lock) +omp_lock_symver (omp_init_nest_lock) +omp_lock_symver (omp_destroy_nest_lock) +omp_lock_symver (omp_set_nest_lock) +omp_lock_symver (omp_unset_nest_lock) +omp_lock_symver (omp_test_nest_lock) + +#else + +ialias (omp_init_lock) +ialias (omp_init_nest_lock) +ialias (omp_destroy_lock) +ialias (omp_destroy_nest_lock) +ialias (omp_set_lock) +ialias (omp_set_nest_lock) +ialias (omp_unset_lock) +ialias (omp_unset_nest_lock) +ialias (omp_test_lock) +ialias (omp_test_nest_lock) + +#endif diff --git a/contrib/gcc-4.7/libgomp/config/posix/mutex.c b/contrib/gcc-4.7/libgomp/config/posix/mutex.c new file mode 100644 index 0000000000..39bb64da0f --- /dev/null +++ b/contrib/gcc-4.7/libgomp/config/posix/mutex.c @@ -0,0 +1 @@ +/* Everything is in the header. */ diff --git a/contrib/gcc-4.7/libgomp/config/posix/mutex.h b/contrib/gcc-4.7/libgomp/config/posix/mutex.h new file mode 100644 index 0000000000..b6617a4bd0 --- /dev/null +++ b/contrib/gcc-4.7/libgomp/config/posix/mutex.h @@ -0,0 +1,57 @@ +/* Copyright (C) 2005, 2009 Free Software Foundation, Inc. + Contributed by Richard Henderson . + + This file is part of the GNU OpenMP Library (libgomp). + + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. 
+ + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* This is the default PTHREADS implementation of a mutex synchronization + mechanism for libgomp. This type is private to the library. */ + +#ifndef GOMP_MUTEX_H +#define GOMP_MUTEX_H 1 + +#include + +typedef pthread_mutex_t gomp_mutex_t; + +#define GOMP_MUTEX_INIT_0 0 + +static inline void gomp_mutex_init (gomp_mutex_t *mutex) +{ + pthread_mutex_init (mutex, NULL); +} + +static inline void gomp_mutex_lock (gomp_mutex_t *mutex) +{ + pthread_mutex_lock (mutex); +} + +static inline void gomp_mutex_unlock (gomp_mutex_t *mutex) +{ + pthread_mutex_unlock (mutex); +} + +static inline void gomp_mutex_destroy (gomp_mutex_t *mutex) +{ + pthread_mutex_destroy (mutex); +} + +#endif /* GOMP_MUTEX_H */ diff --git a/contrib/gcc-4.7/libgomp/config/posix/omp-lock.h b/contrib/gcc-4.7/libgomp/config/posix/omp-lock.h new file mode 100644 index 0000000000..e51dc271f8 --- /dev/null +++ b/contrib/gcc-4.7/libgomp/config/posix/omp-lock.h @@ -0,0 +1,23 @@ +/* This header is used during the build process to find the size and + alignment of the public OpenMP locks, so that we can export data + structures without polluting the namespace. 
+ + In this default POSIX implementation, we used to map the two locks to the + same PTHREADS primitive, but for OpenMP 3.0 sem_t needs to be used + instead, as pthread_mutex_unlock should not be called by different + thread than the one that called pthread_mutex_lock. */ + +#include +#include + +typedef pthread_mutex_t omp_lock_25_t; +typedef struct { pthread_mutex_t lock; int count; } omp_nest_lock_25_t; +#ifdef HAVE_BROKEN_POSIX_SEMAPHORES +/* If we don't have working semaphores, we'll make all explicit tasks + tied to the creating thread. */ +typedef pthread_mutex_t omp_lock_t; +typedef struct { pthread_mutex_t lock; int count; void *owner; } omp_nest_lock_t; +#else +typedef sem_t omp_lock_t; +typedef struct { sem_t lock; int count; void *owner; } omp_nest_lock_t; +#endif diff --git a/contrib/gcc-4.7/libgomp/config/posix/proc.c b/contrib/gcc-4.7/libgomp/config/posix/proc.c new file mode 100644 index 0000000000..aacf41e52a --- /dev/null +++ b/contrib/gcc-4.7/libgomp/config/posix/proc.c @@ -0,0 +1,101 @@ +/* Copyright (C) 2005, 2006, 2008, 2009 Free Software Foundation, Inc. + Contributed by Richard Henderson . + + This file is part of the GNU OpenMP Library (libgomp). + + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. 
+ + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* This file contains system specific routines related to counting + online processors and dynamic load balancing. It is expected that + a system may well want to write special versions of each of these. + + The following implementation uses a mix of POSIX and BSD routines. */ + +#include "libgomp.h" +#include +#include +#ifdef HAVE_GETLOADAVG +# ifdef HAVE_SYS_LOADAVG_H +# include +# endif +#endif + + +/* At startup, determine the default number of threads. It would seem + this should be related to the number of cpus online. */ + +void +gomp_init_num_threads (void) +{ +#ifdef _SC_NPROCESSORS_ONLN + gomp_global_icv.nthreads_var = sysconf (_SC_NPROCESSORS_ONLN); +#endif +} + +/* When OMP_DYNAMIC is set, at thread launch determine the number of + threads we should spawn for this team. */ +/* ??? I have no idea what best practice for this is. Surely some + function of the number of processors that are *still* online and + the load average. Here I use the number of processors online + minus the 15 minute load average. */ + +unsigned +gomp_dynamic_max_threads (void) +{ + unsigned n_onln, loadavg; + unsigned nthreads_var = gomp_icv (false)->nthreads_var; + +#ifdef _SC_NPROCESSORS_ONLN + n_onln = sysconf (_SC_NPROCESSORS_ONLN); + if (n_onln > nthreads_var) + n_onln = nthreads_var; +#else + n_onln = nthreads_var; +#endif + + loadavg = 0; +#ifdef HAVE_GETLOADAVG + { + double dloadavg[3]; + if (getloadavg (dloadavg, 3) == 3) + { + /* Add 0.1 to get a kind of biased rounding. 
*/ + loadavg = dloadavg[2] + 0.1; + } + } +#endif + + if (loadavg >= n_onln) + return 1; + else + return n_onln - loadavg; +} + +int +omp_get_num_procs (void) +{ +#ifdef _SC_NPROCESSORS_ONLN + return sysconf (_SC_NPROCESSORS_ONLN); +#else + return gomp_icv (false)->nthreads_var; +#endif +} + +ialias (omp_get_num_procs) diff --git a/contrib/gcc-4.7/libgomp/config/posix/ptrlock.c b/contrib/gcc-4.7/libgomp/config/posix/ptrlock.c new file mode 100644 index 0000000000..39bb64da0f --- /dev/null +++ b/contrib/gcc-4.7/libgomp/config/posix/ptrlock.c @@ -0,0 +1 @@ +/* Everything is in the header. */ diff --git a/contrib/gcc-4.7/libgomp/config/posix/ptrlock.h b/contrib/gcc-4.7/libgomp/config/posix/ptrlock.h new file mode 100644 index 0000000000..246e1caacd --- /dev/null +++ b/contrib/gcc-4.7/libgomp/config/posix/ptrlock.h @@ -0,0 +1,66 @@ +/* Copyright (C) 2008, 2009 Free Software Foundation, Inc. + Contributed by Jakub Jelinek . + + This file is part of the GNU OpenMP Library (libgomp). + + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* This is a Linux specific implementation of a mutex synchronization + mechanism for libgomp. 
This type is private to the library. This + implementation uses atomic instructions and the futex syscall. */ + +#ifndef GOMP_PTRLOCK_H +#define GOMP_PTRLOCK_H 1 + +typedef struct { void *ptr; gomp_mutex_t lock; } gomp_ptrlock_t; + +static inline void gomp_ptrlock_init (gomp_ptrlock_t *ptrlock, void *ptr) +{ + ptrlock->ptr = ptr; + gomp_mutex_init (&ptrlock->lock); +} + +static inline void *gomp_ptrlock_get (gomp_ptrlock_t *ptrlock) +{ + if (ptrlock->ptr != NULL) + return ptrlock->ptr; + + gomp_mutex_lock (&ptrlock->lock); + if (ptrlock->ptr != NULL) + { + gomp_mutex_unlock (&ptrlock->lock); + return ptrlock->ptr; + } + + return NULL; +} + +static inline void gomp_ptrlock_set (gomp_ptrlock_t *ptrlock, void *ptr) +{ + ptrlock->ptr = ptr; + gomp_mutex_unlock (&ptrlock->lock); +} + +static inline void gomp_ptrlock_destroy (gomp_ptrlock_t *ptrlock) +{ + gomp_mutex_destroy (&ptrlock->lock); +} + +#endif /* GOMP_PTRLOCK_H */ diff --git a/contrib/gcc-4.7/libgomp/config/posix/sem.c b/contrib/gcc-4.7/libgomp/config/posix/sem.c new file mode 100644 index 0000000000..e057675134 --- /dev/null +++ b/contrib/gcc-4.7/libgomp/config/posix/sem.c @@ -0,0 +1,123 @@ +/* Copyright (C) 2005, 2006, 2009 Free Software Foundation, Inc. + Contributed by Richard Henderson . + + This file is part of the GNU OpenMP Library (libgomp). + + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. 
+ + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* This is the default POSIX 1003.1b implementation of a semaphore + synchronization mechanism for libgomp. This type is private to + the library. + + This is a bit heavy weight for what we need, in that we're not + interested in sem_wait as a cancelation point, but it's not too + bad for a default. */ + +#include "libgomp.h" + +#ifdef HAVE_BROKEN_POSIX_SEMAPHORES +#include + +void gomp_sem_init (gomp_sem_t *sem, int value) +{ + int ret; + + ret = pthread_mutex_init (&sem->mutex, NULL); + if (ret) + return; + + ret = pthread_cond_init (&sem->cond, NULL); + if (ret) + return; + + sem->value = value; +} + +void gomp_sem_wait (gomp_sem_t *sem) +{ + int ret; + + ret = pthread_mutex_lock (&sem->mutex); + if (ret) + return; + + if (sem->value > 0) + { + sem->value--; + ret = pthread_mutex_unlock (&sem->mutex); + return; + } + + while (sem->value <= 0) + { + ret = pthread_cond_wait (&sem->cond, &sem->mutex); + if (ret) + { + pthread_mutex_unlock (&sem->mutex); + return; + } + } + + sem->value--; + ret = pthread_mutex_unlock (&sem->mutex); + return; +} + +void gomp_sem_post (gomp_sem_t *sem) +{ + int ret; + + ret = pthread_mutex_lock (&sem->mutex); + if (ret) + return; + + sem->value++; + + ret = pthread_mutex_unlock (&sem->mutex); + if (ret) + return; + + ret = pthread_cond_signal (&sem->cond); + + return; +} + +void gomp_sem_destroy (gomp_sem_t *sem) +{ + int ret; + + ret = pthread_mutex_destroy (&sem->mutex); + if (ret) + return; + + ret = pthread_cond_destroy (&sem->cond); + + return; +} +#else /* HAVE_BROKEN_POSIX_SEMAPHORES */ +void +gomp_sem_wait (gomp_sem_t *sem) +{ + /* With POSIX, the wait can be canceled by signals. We don't want that. + It is expected that the return value here is -1 and errno is EINTR. 
*/ + while (sem_wait (sem) != 0) + continue; +} +#endif diff --git a/contrib/gcc-4.7/libgomp/config/posix/sem.h b/contrib/gcc-4.7/libgomp/config/posix/sem.h new file mode 100644 index 0000000000..b68230de06 --- /dev/null +++ b/contrib/gcc-4.7/libgomp/config/posix/sem.h @@ -0,0 +1,87 @@ +/* Copyright (C) 2005, 2006, 2009 Free Software Foundation, Inc. + Contributed by Richard Henderson . + + This file is part of the GNU OpenMP Library (libgomp). + + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* This is the default POSIX 1003.1b implementation of a semaphore + synchronization mechanism for libgomp. This type is private to + the library. + + This is a bit heavy weight for what we need, in that we're not + interested in sem_wait as a cancelation point, but it's not too + bad for a default. 
*/ + +#ifndef GOMP_SEM_H +#define GOMP_SEM_H 1 + +#ifdef HAVE_ATTRIBUTE_VISIBILITY +# pragma GCC visibility push(default) +#endif + +#include + +#ifdef HAVE_ATTRIBUTE_VISIBILITY +# pragma GCC visibility pop +#endif + +#ifdef HAVE_BROKEN_POSIX_SEMAPHORES +#include + +struct gomp_sem +{ + pthread_mutex_t mutex; + pthread_cond_t cond; + int value; +}; + +typedef struct gomp_sem gomp_sem_t; + +extern void gomp_sem_init (gomp_sem_t *sem, int value); + +extern void gomp_sem_wait (gomp_sem_t *sem); + +extern void gomp_sem_post (gomp_sem_t *sem); + +extern void gomp_sem_destroy (gomp_sem_t *sem); + +#else /* HAVE_BROKEN_POSIX_SEMAPHORES */ + +typedef sem_t gomp_sem_t; + +static inline void gomp_sem_init (gomp_sem_t *sem, int value) +{ + sem_init (sem, 0, value); +} + +extern void gomp_sem_wait (gomp_sem_t *sem); + +static inline void gomp_sem_post (gomp_sem_t *sem) +{ + sem_post (sem); +} + +static inline void gomp_sem_destroy (gomp_sem_t *sem) +{ + sem_destroy (sem); +} +#endif /* doesn't HAVE_BROKEN_POSIX_SEMAPHORES */ +#endif /* GOMP_SEM_H */ diff --git a/contrib/gcc-4.7/libgomp/config/posix/time.c b/contrib/gcc-4.7/libgomp/config/posix/time.c new file mode 100644 index 0000000000..eb196f648e --- /dev/null +++ b/contrib/gcc-4.7/libgomp/config/posix/time.c @@ -0,0 +1,78 @@ +/* Copyright (C) 2005, 2009 Free Software Foundation, Inc. + Contributed by Richard Henderson . + + This file is part of the GNU OpenMP Library (libgomp). + + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* This file contains system specific timer routines. It is expected that + a system may well want to write special versions of each of these. + + The following implementation uses the most simple POSIX routines. + If present, POSIX 4 clocks should be used instead. */ + +#include "libgomp.h" +#include +#if TIME_WITH_SYS_TIME +# include +# include +#else +# if HAVE_SYS_TIME_H +# include +# else +# include +# endif +#endif + + +double +omp_get_wtime (void) +{ +#ifdef HAVE_CLOCK_GETTIME + struct timespec ts; +# ifdef CLOCK_MONOTONIC + if (clock_gettime (CLOCK_MONOTONIC, &ts) < 0) +# endif + clock_gettime (CLOCK_REALTIME, &ts); + return ts.tv_sec + ts.tv_nsec / 1e9; +#else + struct timeval tv; + gettimeofday (&tv, NULL); + return tv.tv_sec + tv.tv_usec / 1e6; +#endif +} + +double +omp_get_wtick (void) +{ +#ifdef HAVE_CLOCK_GETTIME + struct timespec ts; +# ifdef CLOCK_MONOTONIC + if (clock_getres (CLOCK_MONOTONIC, &ts) < 0) +# endif + clock_getres (CLOCK_REALTIME, &ts); + return ts.tv_sec + ts.tv_nsec / 1e9; +#else + return 1.0 / sysconf(_SC_CLK_TCK); +#endif +} + +ialias (omp_get_wtime) +ialias (omp_get_wtick) diff --git a/contrib/gcc-4.7/libgomp/critical.c b/contrib/gcc-4.7/libgomp/critical.c new file mode 100644 index 0000000000..414c422162 --- /dev/null +++ b/contrib/gcc-4.7/libgomp/critical.c @@ -0,0 +1,148 @@ +/* Copyright (C) 2005, 2009, 2011 Free Software Foundation, Inc. + Contributed by Richard Henderson . + + This file is part of the GNU OpenMP Library (libgomp). 
+ + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* This file handles the CRITICAL construct. */ + +#include "libgomp.h" +#include + + +static gomp_mutex_t default_lock; + +void +GOMP_critical_start (void) +{ + /* There is an implicit flush on entry to a critical region. */ + __atomic_thread_fence (MEMMODEL_RELEASE); + gomp_mutex_lock (&default_lock); +} + +void +GOMP_critical_end (void) +{ + gomp_mutex_unlock (&default_lock); +} + +#ifndef HAVE_SYNC_BUILTINS +static gomp_mutex_t create_lock_lock; +#endif + +void +GOMP_critical_name_start (void **pptr) +{ + gomp_mutex_t *plock; + + /* If a mutex fits within the space for a pointer, and is zero initialized, + then use the pointer space directly. */ + if (GOMP_MUTEX_INIT_0 + && sizeof (gomp_mutex_t) <= sizeof (void *) + && __alignof (gomp_mutex_t) <= sizeof (void *)) + plock = (gomp_mutex_t *)pptr; + + /* Otherwise we have to be prepared to malloc storage. 
*/ + else + { + plock = *pptr; + + if (plock == NULL) + { +#ifdef HAVE_SYNC_BUILTINS + gomp_mutex_t *nlock = gomp_malloc (sizeof (gomp_mutex_t)); + gomp_mutex_init (nlock); + + plock = __sync_val_compare_and_swap (pptr, NULL, nlock); + if (plock != NULL) + { + gomp_mutex_destroy (nlock); + free (nlock); + } + else + plock = nlock; +#else + gomp_mutex_lock (&create_lock_lock); + plock = *pptr; + if (plock == NULL) + { + plock = gomp_malloc (sizeof (gomp_mutex_t)); + gomp_mutex_init (plock); + __sync_synchronize (); + *pptr = plock; + } + gomp_mutex_unlock (&create_lock_lock); +#endif + } + } + + gomp_mutex_lock (plock); +} + +void +GOMP_critical_name_end (void **pptr) +{ + gomp_mutex_t *plock; + + /* If a mutex fits within the space for a pointer, and is zero initialized, + then use the pointer space directly. */ + if (GOMP_MUTEX_INIT_0 + && sizeof (gomp_mutex_t) <= sizeof (void *) + && __alignof (gomp_mutex_t) <= sizeof (void *)) + plock = (gomp_mutex_t *)pptr; + else + plock = *pptr; + + gomp_mutex_unlock (plock); +} + +/* This mutex is used when atomic operations don't exist for the target + in the mode requested. The result is not globally atomic, but works so + long as all parallel references are within #pragma omp atomic directives. + According to responses received from omp@openmp.org, appears to be within + spec. Which makes sense, since that's how several other compilers + handle this situation as well. 
*/ + +static gomp_mutex_t atomic_lock; + +void +GOMP_atomic_start (void) +{ + gomp_mutex_lock (&atomic_lock); +} + +void +GOMP_atomic_end (void) +{ + gomp_mutex_unlock (&atomic_lock); +} + +#if !GOMP_MUTEX_INIT_0 +static void __attribute__((constructor)) +initialize_critical (void) +{ + gomp_mutex_init (&default_lock); + gomp_mutex_init (&atomic_lock); +#ifndef HAVE_SYNC_BUILTINS + gomp_mutex_init (&create_lock_lock); +#endif +} +#endif diff --git a/contrib/gcc-4.7/libgomp/env.c b/contrib/gcc-4.7/libgomp/env.c new file mode 100644 index 0000000000..aff7490a8b --- /dev/null +++ b/contrib/gcc-4.7/libgomp/env.c @@ -0,0 +1,758 @@ +/* Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011 + Free Software Foundation, Inc. + Contributed by Richard Henderson . + + This file is part of the GNU OpenMP Library (libgomp). + + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* This file defines the OpenMP internal control variables, and arranges + for them to be initialized from environment variables at startup. 
*/ + +#include "libgomp.h" +#include "libgomp_f.h" +#include +#include +#ifdef STRING_WITH_STRINGS +# include +# include +#else +# ifdef HAVE_STRING_H +# include +# else +# ifdef HAVE_STRINGS_H +# include +# endif +# endif +#endif +#include +#include + +#ifndef HAVE_STRTOULL +# define strtoull(ptr, eptr, base) strtoul (ptr, eptr, base) +#endif + +struct gomp_task_icv gomp_global_icv = { + .nthreads_var = 1, + .run_sched_var = GFS_DYNAMIC, + .run_sched_modifier = 1, + .dyn_var = false, + .nest_var = false +}; + +unsigned short *gomp_cpu_affinity; +size_t gomp_cpu_affinity_len; +unsigned long gomp_max_active_levels_var = INT_MAX; +unsigned long gomp_thread_limit_var = ULONG_MAX; +unsigned long gomp_remaining_threads_count; +#ifndef HAVE_SYNC_BUILTINS +gomp_mutex_t gomp_remaining_threads_lock; +#endif +unsigned long gomp_available_cpus = 1, gomp_managed_threads = 1; +unsigned long long gomp_spin_count_var, gomp_throttled_spin_count_var; +unsigned long *gomp_nthreads_var_list, gomp_nthreads_var_list_len; + +/* Parse the OMP_SCHEDULE environment variable. 
*/ + +static void +parse_schedule (void) +{ + char *env, *end; + unsigned long value; + + env = getenv ("OMP_SCHEDULE"); + if (env == NULL) + return; + + while (isspace ((unsigned char) *env)) + ++env; + if (strncasecmp (env, "static", 6) == 0) + { + gomp_global_icv.run_sched_var = GFS_STATIC; + env += 6; + } + else if (strncasecmp (env, "dynamic", 7) == 0) + { + gomp_global_icv.run_sched_var = GFS_DYNAMIC; + env += 7; + } + else if (strncasecmp (env, "guided", 6) == 0) + { + gomp_global_icv.run_sched_var = GFS_GUIDED; + env += 6; + } + else if (strncasecmp (env, "auto", 4) == 0) + { + gomp_global_icv.run_sched_var = GFS_AUTO; + env += 4; + } + else + goto unknown; + + while (isspace ((unsigned char) *env)) + ++env; + if (*env == '\0') + { + gomp_global_icv.run_sched_modifier + = gomp_global_icv.run_sched_var != GFS_STATIC; + return; + } + if (*env++ != ',') + goto unknown; + while (isspace ((unsigned char) *env)) + ++env; + if (*env == '\0') + goto invalid; + + errno = 0; + value = strtoul (env, &end, 10); + if (errno) + goto invalid; + + while (isspace ((unsigned char) *end)) + ++end; + if (*end != '\0') + goto invalid; + + if ((int)value != value) + goto invalid; + + if (value == 0 && gomp_global_icv.run_sched_var != GFS_STATIC) + value = 1; + gomp_global_icv.run_sched_modifier = value; + return; + + unknown: + gomp_error ("Unknown value for environment variable OMP_SCHEDULE"); + return; + + invalid: + gomp_error ("Invalid value for chunk size in " + "environment variable OMP_SCHEDULE"); + return; +} + +/* Parse an unsigned long environment variable. Return true if one was + present and it was successfully parsed. 
*/ + +static bool +parse_unsigned_long (const char *name, unsigned long *pvalue, bool allow_zero) +{ + char *env, *end; + unsigned long value; + + env = getenv (name); + if (env == NULL) + return false; + + while (isspace ((unsigned char) *env)) + ++env; + if (*env == '\0') + goto invalid; + + errno = 0; + value = strtoul (env, &end, 10); + if (errno || (long) value <= 0 - allow_zero) + goto invalid; + + while (isspace ((unsigned char) *end)) + ++end; + if (*end != '\0') + goto invalid; + + *pvalue = value; + return true; + + invalid: + gomp_error ("Invalid value for environment variable %s", name); + return false; +} + +/* Parse an unsigned long list environment variable. Return true if one was + present and it was successfully parsed. */ + +static bool +parse_unsigned_long_list (const char *name, unsigned long *p1stvalue, + unsigned long **pvalues, + unsigned long *pnvalues) +{ + char *env, *end; + unsigned long value, *values = NULL; + + env = getenv (name); + if (env == NULL) + return false; + + while (isspace ((unsigned char) *env)) + ++env; + if (*env == '\0') + goto invalid; + + errno = 0; + value = strtoul (env, &end, 10); + if (errno || (long) value <= 0) + goto invalid; + + while (isspace ((unsigned char) *end)) + ++end; + if (*end != '\0') + { + if (*end == ',') + { + unsigned long nvalues = 0, nalloced = 0; + + do + { + env = end + 1; + if (nvalues == nalloced) + { + unsigned long *n; + nalloced = nalloced ? 
nalloced * 2 : 16; + n = realloc (values, nalloced * sizeof (unsigned long)); + if (n == NULL) + { + free (values); + gomp_error ("Out of memory while trying to parse" + " environment variable %s", name); + return false; + } + values = n; + if (nvalues == 0) + values[nvalues++] = value; + } + + while (isspace ((unsigned char) *env)) + ++env; + if (*env == '\0') + goto invalid; + + errno = 0; + value = strtoul (env, &end, 10); + if (errno || (long) value <= 0) + goto invalid; + + values[nvalues++] = value; + while (isspace ((unsigned char) *end)) + ++end; + if (*end == '\0') + break; + if (*end != ',') + goto invalid; + } + while (1); + *p1stvalue = values[0]; + *pvalues = values; + *pnvalues = nvalues; + return true; + } + goto invalid; + } + + *p1stvalue = value; + return true; + + invalid: + free (values); + gomp_error ("Invalid value for environment variable %s", name); + return false; +} + +/* Parse the OMP_STACKSIZE environment variable. Return true if one was + present and it was successfully parsed. 
*/ + +static bool +parse_stacksize (const char *name, unsigned long *pvalue) +{ + char *env, *end; + unsigned long value, shift = 10; + + env = getenv (name); + if (env == NULL) + return false; + + while (isspace ((unsigned char) *env)) + ++env; + if (*env == '\0') + goto invalid; + + errno = 0; + value = strtoul (env, &end, 10); + if (errno) + goto invalid; + + while (isspace ((unsigned char) *end)) + ++end; + if (*end != '\0') + { + switch (tolower ((unsigned char) *end)) + { + case 'b': + shift = 0; + break; + case 'k': + break; + case 'm': + shift = 20; + break; + case 'g': + shift = 30; + break; + default: + goto invalid; + } + ++end; + while (isspace ((unsigned char) *end)) + ++end; + if (*end != '\0') + goto invalid; + } + + if (((value << shift) >> shift) != value) + goto invalid; + + *pvalue = value << shift; + return true; + + invalid: + gomp_error ("Invalid value for environment variable %s", name); + return false; +} + +/* Parse the GOMP_SPINCOUNT environment variable. Return true if one was + present and it was successfully parsed. 
*/ + +static bool +parse_spincount (const char *name, unsigned long long *pvalue) +{ + char *env, *end; + unsigned long long value, mult = 1; + + env = getenv (name); + if (env == NULL) + return false; + + while (isspace ((unsigned char) *env)) + ++env; + if (*env == '\0') + goto invalid; + + if (strncasecmp (env, "infinite", 8) == 0 + || strncasecmp (env, "infinity", 8) == 0) + { + value = ~0ULL; + end = env + 8; + goto check_tail; + } + + errno = 0; + value = strtoull (env, &end, 10); + if (errno) + goto invalid; + + while (isspace ((unsigned char) *end)) + ++end; + if (*end != '\0') + { + switch (tolower ((unsigned char) *end)) + { + case 'k': + mult = 1000LL; + break; + case 'm': + mult = 1000LL * 1000LL; + break; + case 'g': + mult = 1000LL * 1000LL * 1000LL; + break; + case 't': + mult = 1000LL * 1000LL * 1000LL * 1000LL; + break; + default: + goto invalid; + } + ++end; + check_tail: + while (isspace ((unsigned char) *end)) + ++end; + if (*end != '\0') + goto invalid; + } + + if (value > ~0ULL / mult) + value = ~0ULL; + else + value *= mult; + + *pvalue = value; + return true; + + invalid: + gomp_error ("Invalid value for environment variable %s", name); + return false; +} + +/* Parse a boolean value for environment variable NAME and store the + result in VALUE. */ + +static void +parse_boolean (const char *name, bool *value) +{ + const char *env; + + env = getenv (name); + if (env == NULL) + return; + + while (isspace ((unsigned char) *env)) + ++env; + if (strncasecmp (env, "true", 4) == 0) + { + *value = true; + env += 4; + } + else if (strncasecmp (env, "false", 5) == 0) + { + *value = false; + env += 5; + } + else + env = "X"; + while (isspace ((unsigned char) *env)) + ++env; + if (*env != '\0') + gomp_error ("Invalid value for environment variable %s", name); +} + +/* Parse the OMP_WAIT_POLICY environment variable and store the + result in gomp_active_wait_policy. 
*/ + +static int +parse_wait_policy (void) +{ + const char *env; + int ret = -1; + + env = getenv ("OMP_WAIT_POLICY"); + if (env == NULL) + return -1; + + while (isspace ((unsigned char) *env)) + ++env; + if (strncasecmp (env, "active", 6) == 0) + { + ret = 1; + env += 6; + } + else if (strncasecmp (env, "passive", 7) == 0) + { + ret = 0; + env += 7; + } + else + env = "X"; + while (isspace ((unsigned char) *env)) + ++env; + if (*env == '\0') + return ret; + gomp_error ("Invalid value for environment variable OMP_WAIT_POLICY"); + return -1; +} + +/* Parse the GOMP_CPU_AFFINITY environment variable. Return true if one was + present and it was successfully parsed. */ + +static bool +parse_affinity (void) +{ + char *env, *end; + unsigned long cpu_beg, cpu_end, cpu_stride; + unsigned short *cpus = NULL; + size_t allocated = 0, used = 0, needed; + + env = getenv ("GOMP_CPU_AFFINITY"); + if (env == NULL) + return false; + + do + { + while (*env == ' ' || *env == '\t') + env++; + + cpu_beg = strtoul (env, &end, 0); + cpu_end = cpu_beg; + cpu_stride = 1; + if (env == end || cpu_beg >= 65536) + goto invalid; + + env = end; + if (*env == '-') + { + cpu_end = strtoul (++env, &end, 0); + if (env == end || cpu_end >= 65536 || cpu_end < cpu_beg) + goto invalid; + + env = end; + if (*env == ':') + { + cpu_stride = strtoul (++env, &end, 0); + if (env == end || cpu_stride == 0 || cpu_stride >= 65536) + goto invalid; + + env = end; + } + } + + needed = (cpu_end - cpu_beg) / cpu_stride + 1; + if (used + needed >= allocated) + { + unsigned short *new_cpus; + + if (allocated < 64) + allocated = 64; + if (allocated > needed) + allocated <<= 1; + else + allocated += 2 * needed; + new_cpus = realloc (cpus, allocated * sizeof (unsigned short)); + if (new_cpus == NULL) + { + free (cpus); + gomp_error ("not enough memory to store GOMP_CPU_AFFINITY list"); + return false; + } + + cpus = new_cpus; + } + + while (needed--) + { + cpus[used++] = cpu_beg; + cpu_beg += cpu_stride; + } + + while 
(*env == ' ' || *env == '\t') + env++; + + if (*env == ',') + env++; + else if (*env == '\0') + break; + } + while (1); + + gomp_cpu_affinity = cpus; + gomp_cpu_affinity_len = used; + return true; + + invalid: + gomp_error ("Invalid value for enviroment variable GOMP_CPU_AFFINITY"); + return false; +} + +static void __attribute__((constructor)) +initialize_env (void) +{ + unsigned long stacksize; + int wait_policy; + bool bind_var = false; + + /* Do a compile time check that mkomp_h.pl did good job. */ + omp_check_defines (); + + parse_schedule (); + parse_boolean ("OMP_DYNAMIC", &gomp_global_icv.dyn_var); + parse_boolean ("OMP_NESTED", &gomp_global_icv.nest_var); + parse_boolean ("OMP_PROC_BIND", &bind_var); + parse_unsigned_long ("OMP_MAX_ACTIVE_LEVELS", &gomp_max_active_levels_var, + true); + parse_unsigned_long ("OMP_THREAD_LIMIT", &gomp_thread_limit_var, false); + if (gomp_thread_limit_var != ULONG_MAX) + gomp_remaining_threads_count = gomp_thread_limit_var - 1; +#ifndef HAVE_SYNC_BUILTINS + gomp_mutex_init (&gomp_remaining_threads_lock); +#endif + gomp_init_num_threads (); + gomp_available_cpus = gomp_global_icv.nthreads_var; + if (!parse_unsigned_long_list ("OMP_NUM_THREADS", + &gomp_global_icv.nthreads_var, + &gomp_nthreads_var_list, + &gomp_nthreads_var_list_len)) + gomp_global_icv.nthreads_var = gomp_available_cpus; + if (parse_affinity () || bind_var) + gomp_init_affinity (); + wait_policy = parse_wait_policy (); + if (!parse_spincount ("GOMP_SPINCOUNT", &gomp_spin_count_var)) + { + /* Using a rough estimation of 100000 spins per msec, + use 5 min blocking for OMP_WAIT_POLICY=active, + 3 msec blocking when OMP_WAIT_POLICY is not specificed + and 0 when OMP_WAIT_POLICY=passive. + Depending on the CPU speed, this can be e.g. 5 times longer + or 5 times shorter. 
*/ + if (wait_policy > 0) + gomp_spin_count_var = 30000000000LL; + else if (wait_policy < 0) + gomp_spin_count_var = 300000LL; + } + /* gomp_throttled_spin_count_var is used when there are more libgomp + managed threads than available CPUs. Use very short spinning. */ + if (wait_policy > 0) + gomp_throttled_spin_count_var = 1000LL; + else if (wait_policy < 0) + gomp_throttled_spin_count_var = 100LL; + if (gomp_throttled_spin_count_var > gomp_spin_count_var) + gomp_throttled_spin_count_var = gomp_spin_count_var; + + /* Not strictly environment related, but ordering constructors is tricky. */ + pthread_attr_init (&gomp_thread_attr); + pthread_attr_setdetachstate (&gomp_thread_attr, PTHREAD_CREATE_DETACHED); + + if (parse_stacksize ("OMP_STACKSIZE", &stacksize) + || parse_stacksize ("GOMP_STACKSIZE", &stacksize)) + { + int err; + + err = pthread_attr_setstacksize (&gomp_thread_attr, stacksize); + +#ifdef PTHREAD_STACK_MIN + if (err == EINVAL) + { + if (stacksize < PTHREAD_STACK_MIN) + gomp_error ("Stack size less than minimum of %luk", + PTHREAD_STACK_MIN / 1024ul + + (PTHREAD_STACK_MIN % 1024 != 0)); + else + gomp_error ("Stack size larger than system limit"); + } + else +#endif + if (err != 0) + gomp_error ("Stack size change failed: %s", strerror (err)); + } +} + + +/* The public OpenMP API routines that access these variables. */ + +void +omp_set_num_threads (int n) +{ + struct gomp_task_icv *icv = gomp_icv (true); + icv->nthreads_var = (n > 0 ? 
n : 1); +} + +void +omp_set_dynamic (int val) +{ + struct gomp_task_icv *icv = gomp_icv (true); + icv->dyn_var = val; +} + +int +omp_get_dynamic (void) +{ + struct gomp_task_icv *icv = gomp_icv (false); + return icv->dyn_var; +} + +void +omp_set_nested (int val) +{ + struct gomp_task_icv *icv = gomp_icv (true); + icv->nest_var = val; +} + +int +omp_get_nested (void) +{ + struct gomp_task_icv *icv = gomp_icv (false); + return icv->nest_var; +} + +void +omp_set_schedule (omp_sched_t kind, int modifier) +{ + struct gomp_task_icv *icv = gomp_icv (true); + switch (kind) + { + case omp_sched_static: + if (modifier < 1) + modifier = 0; + icv->run_sched_modifier = modifier; + break; + case omp_sched_dynamic: + case omp_sched_guided: + if (modifier < 1) + modifier = 1; + icv->run_sched_modifier = modifier; + break; + case omp_sched_auto: + break; + default: + return; + } + icv->run_sched_var = kind; +} + +void +omp_get_schedule (omp_sched_t *kind, int *modifier) +{ + struct gomp_task_icv *icv = gomp_icv (false); + *kind = icv->run_sched_var; + *modifier = icv->run_sched_modifier; +} + +int +omp_get_max_threads (void) +{ + struct gomp_task_icv *icv = gomp_icv (false); + return icv->nthreads_var; +} + +int +omp_get_thread_limit (void) +{ + return gomp_thread_limit_var > INT_MAX ? 
INT_MAX : gomp_thread_limit_var; +} + +void +omp_set_max_active_levels (int max_levels) +{ + if (max_levels >= 0) + gomp_max_active_levels_var = max_levels; +} + +int +omp_get_max_active_levels (void) +{ + return gomp_max_active_levels_var; +} + +ialias (omp_set_dynamic) +ialias (omp_set_nested) +ialias (omp_set_num_threads) +ialias (omp_get_dynamic) +ialias (omp_get_nested) +ialias (omp_set_schedule) +ialias (omp_get_schedule) +ialias (omp_get_max_threads) +ialias (omp_get_thread_limit) +ialias (omp_set_max_active_levels) +ialias (omp_get_max_active_levels) diff --git a/contrib/gcc-4.7/libgomp/error.c b/contrib/gcc-4.7/libgomp/error.c new file mode 100644 index 0000000000..ad829b65c8 --- /dev/null +++ b/contrib/gcc-4.7/libgomp/error.c @@ -0,0 +1,66 @@ +/* Copyright (C) 2005, 2009 Free Software Foundation, Inc. + Contributed by Richard Henderson . + + This file is part of the GNU OpenMP Library (libgomp). + + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* This file contains routines used to signal errors. 
Most places in the + OpenMP API do not make any provision for failure, so we can't just + defer the decision on reporting the problem to the user; we must do it + ourselves or not at all. */ +/* ??? Is this about what other implementations do? Assume stderr hasn't + been pointed somewhere unsafe? */ + +#include "libgomp.h" +#include +#include +#include + + +static void +gomp_verror (const char *fmt, va_list list) +{ + fputs ("\nlibgomp: ", stderr); + vfprintf (stderr, fmt, list); + fputc ('\n', stderr); +} + +void +gomp_error (const char *fmt, ...) +{ + va_list list; + + va_start (list, fmt); + gomp_verror (fmt, list); + va_end (list); +} + +void +gomp_fatal (const char *fmt, ...) +{ + va_list list; + + va_start (list, fmt); + gomp_verror (fmt, list); + va_end (list); + + exit (EXIT_FAILURE); +} diff --git a/contrib/gcc-4.7/libgomp/fortran.c b/contrib/gcc-4.7/libgomp/fortran.c new file mode 100644 index 0000000000..de806f8aba --- /dev/null +++ b/contrib/gcc-4.7/libgomp/fortran.c @@ -0,0 +1,437 @@ +/* Copyright (C) 2005, 2007, 2008, 2009, 2011 Free Software Foundation, Inc. + Contributed by Jakub Jelinek . + + This file is part of the GNU OpenMP Library (libgomp). + + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. 
+ + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* This file contains Fortran wrapper routines. */ + +#include "libgomp.h" +#include "libgomp_f.h" +#include +#include + +#ifdef HAVE_ATTRIBUTE_ALIAS +/* Use internal aliases if possible. */ +# define ULP STR1(__USER_LABEL_PREFIX__) +# define STR1(x) STR2(x) +# define STR2(x) #x +# define ialias_redirect(fn) \ + extern __typeof (fn) fn __asm__ (ULP "gomp_ialias_" #fn) attribute_hidden; +# ifndef LIBGOMP_GNU_SYMBOL_VERSIONING +ialias_redirect (omp_init_lock) +ialias_redirect (omp_init_nest_lock) +ialias_redirect (omp_destroy_lock) +ialias_redirect (omp_destroy_nest_lock) +ialias_redirect (omp_set_lock) +ialias_redirect (omp_set_nest_lock) +ialias_redirect (omp_unset_lock) +ialias_redirect (omp_unset_nest_lock) +ialias_redirect (omp_test_lock) +ialias_redirect (omp_test_nest_lock) +# endif +ialias_redirect (omp_set_dynamic) +ialias_redirect (omp_set_nested) +ialias_redirect (omp_set_num_threads) +ialias_redirect (omp_get_dynamic) +ialias_redirect (omp_get_nested) +ialias_redirect (omp_in_parallel) +ialias_redirect (omp_get_max_threads) +ialias_redirect (omp_get_num_procs) +ialias_redirect (omp_get_num_threads) +ialias_redirect (omp_get_thread_num) +ialias_redirect (omp_get_wtick) +ialias_redirect (omp_get_wtime) +ialias_redirect (omp_set_schedule) +ialias_redirect (omp_get_schedule) +ialias_redirect (omp_get_thread_limit) +ialias_redirect (omp_set_max_active_levels) +ialias_redirect (omp_get_max_active_levels) +ialias_redirect (omp_get_level) +ialias_redirect (omp_get_ancestor_thread_num) +ialias_redirect (omp_get_team_size) +ialias_redirect (omp_get_active_level) +ialias_redirect (omp_in_final) +#endif + +#ifndef LIBGOMP_GNU_SYMBOL_VERSIONING +# define gomp_init_lock__30 omp_init_lock_ +# define gomp_destroy_lock__30 omp_destroy_lock_ +# define 
gomp_set_lock__30 omp_set_lock_ +# define gomp_unset_lock__30 omp_unset_lock_ +# define gomp_test_lock__30 omp_test_lock_ +# define gomp_init_nest_lock__30 omp_init_nest_lock_ +# define gomp_destroy_nest_lock__30 omp_destroy_nest_lock_ +# define gomp_set_nest_lock__30 omp_set_nest_lock_ +# define gomp_unset_nest_lock__30 omp_unset_nest_lock_ +# define gomp_test_nest_lock__30 omp_test_nest_lock_ +#endif + +void +gomp_init_lock__30 (omp_lock_arg_t lock) +{ +#ifndef OMP_LOCK_DIRECT + omp_lock_arg (lock) = malloc (sizeof (omp_lock_t)); +#endif + gomp_init_lock_30 (omp_lock_arg (lock)); +} + +void +gomp_init_nest_lock__30 (omp_nest_lock_arg_t lock) +{ +#ifndef OMP_NEST_LOCK_DIRECT + omp_nest_lock_arg (lock) = malloc (sizeof (omp_nest_lock_t)); +#endif + gomp_init_nest_lock_30 (omp_nest_lock_arg (lock)); +} + +void +gomp_destroy_lock__30 (omp_lock_arg_t lock) +{ + gomp_destroy_lock_30 (omp_lock_arg (lock)); +#ifndef OMP_LOCK_DIRECT + free (omp_lock_arg (lock)); + omp_lock_arg (lock) = NULL; +#endif +} + +void +gomp_destroy_nest_lock__30 (omp_nest_lock_arg_t lock) +{ + gomp_destroy_nest_lock_30 (omp_nest_lock_arg (lock)); +#ifndef OMP_NEST_LOCK_DIRECT + free (omp_nest_lock_arg (lock)); + omp_nest_lock_arg (lock) = NULL; +#endif +} + +void +gomp_set_lock__30 (omp_lock_arg_t lock) +{ + gomp_set_lock_30 (omp_lock_arg (lock)); +} + +void +gomp_set_nest_lock__30 (omp_nest_lock_arg_t lock) +{ + gomp_set_nest_lock_30 (omp_nest_lock_arg (lock)); +} + +void +gomp_unset_lock__30 (omp_lock_arg_t lock) +{ + gomp_unset_lock_30 (omp_lock_arg (lock)); +} + +void +gomp_unset_nest_lock__30 (omp_nest_lock_arg_t lock) +{ + gomp_unset_nest_lock_30 (omp_nest_lock_arg (lock)); +} + +int32_t +gomp_test_lock__30 (omp_lock_arg_t lock) +{ + return gomp_test_lock_30 (omp_lock_arg (lock)); +} + +int32_t +gomp_test_nest_lock__30 (omp_nest_lock_arg_t lock) +{ + return gomp_test_nest_lock_30 (omp_nest_lock_arg (lock)); +} + +#ifdef LIBGOMP_GNU_SYMBOL_VERSIONING +void +gomp_init_lock__25 
(omp_lock_25_arg_t lock) +{ +#ifndef OMP_LOCK_25_DIRECT + omp_lock_25_arg (lock) = malloc (sizeof (omp_lock_25_t)); +#endif + gomp_init_lock_25 (omp_lock_25_arg (lock)); +} + +void +gomp_init_nest_lock__25 (omp_nest_lock_25_arg_t lock) +{ +#ifndef OMP_NEST_LOCK_25_DIRECT + omp_nest_lock_25_arg (lock) = malloc (sizeof (omp_nest_lock_25_t)); +#endif + gomp_init_nest_lock_25 (omp_nest_lock_25_arg (lock)); +} + +void +gomp_destroy_lock__25 (omp_lock_25_arg_t lock) +{ + gomp_destroy_lock_25 (omp_lock_25_arg (lock)); +#ifndef OMP_LOCK_25_DIRECT + free (omp_lock_25_arg (lock)); + omp_lock_25_arg (lock) = NULL; +#endif +} + +void +gomp_destroy_nest_lock__25 (omp_nest_lock_25_arg_t lock) +{ + gomp_destroy_nest_lock_25 (omp_nest_lock_25_arg (lock)); +#ifndef OMP_NEST_LOCK_25_DIRECT + free (omp_nest_lock_25_arg (lock)); + omp_nest_lock_25_arg (lock) = NULL; +#endif +} + +void +gomp_set_lock__25 (omp_lock_25_arg_t lock) +{ + gomp_set_lock_25 (omp_lock_25_arg (lock)); +} + +void +gomp_set_nest_lock__25 (omp_nest_lock_25_arg_t lock) +{ + gomp_set_nest_lock_25 (omp_nest_lock_25_arg (lock)); +} + +void +gomp_unset_lock__25 (omp_lock_25_arg_t lock) +{ + gomp_unset_lock_25 (omp_lock_25_arg (lock)); +} + +void +gomp_unset_nest_lock__25 (omp_nest_lock_25_arg_t lock) +{ + gomp_unset_nest_lock_25 (omp_nest_lock_25_arg (lock)); +} + +int32_t +gomp_test_lock__25 (omp_lock_25_arg_t lock) +{ + return gomp_test_lock_25 (omp_lock_25_arg (lock)); +} + +int32_t +gomp_test_nest_lock__25 (omp_nest_lock_25_arg_t lock) +{ + return gomp_test_nest_lock_25 (omp_nest_lock_25_arg (lock)); +} + +omp_lock_symver (omp_init_lock_) +omp_lock_symver (omp_destroy_lock_) +omp_lock_symver (omp_set_lock_) +omp_lock_symver (omp_unset_lock_) +omp_lock_symver (omp_test_lock_) +omp_lock_symver (omp_init_nest_lock_) +omp_lock_symver (omp_destroy_nest_lock_) +omp_lock_symver (omp_set_nest_lock_) +omp_lock_symver (omp_unset_nest_lock_) +omp_lock_symver (omp_test_nest_lock_) +#endif + +#define TO_INT(x) ((x) > INT_MIN ? 
(x) < INT_MAX ? (x) : INT_MAX : INT_MIN) + +void +omp_set_dynamic_ (const int32_t *set) +{ + omp_set_dynamic (*set); +} + +void +omp_set_dynamic_8_ (const int64_t *set) +{ + omp_set_dynamic (!!*set); +} + +void +omp_set_nested_ (const int32_t *set) +{ + omp_set_nested (*set); +} + +void +omp_set_nested_8_ (const int64_t *set) +{ + omp_set_nested (!!*set); +} + +void +omp_set_num_threads_ (const int32_t *set) +{ + omp_set_num_threads (*set); +} + +void +omp_set_num_threads_8_ (const int64_t *set) +{ + omp_set_num_threads (TO_INT (*set)); +} + +int32_t +omp_get_dynamic_ (void) +{ + return omp_get_dynamic (); +} + +int32_t +omp_get_nested_ (void) +{ + return omp_get_nested (); +} + +int32_t +omp_in_parallel_ (void) +{ + return omp_in_parallel (); +} + +int32_t +omp_get_max_threads_ (void) +{ + return omp_get_max_threads (); +} + +int32_t +omp_get_num_procs_ (void) +{ + return omp_get_num_procs (); +} + +int32_t +omp_get_num_threads_ (void) +{ + return omp_get_num_threads (); +} + +int32_t +omp_get_thread_num_ (void) +{ + return omp_get_thread_num (); +} + +double +omp_get_wtick_ (void) +{ + return omp_get_wtick (); +} + +double +omp_get_wtime_ (void) +{ + return omp_get_wtime (); +} + +void +omp_set_schedule_ (const int32_t *kind, const int32_t *modifier) +{ + omp_set_schedule (*kind, *modifier); +} + +void +omp_set_schedule_8_ (const int32_t *kind, const int64_t *modifier) +{ + omp_set_schedule (*kind, TO_INT (*modifier)); +} + +void +omp_get_schedule_ (int32_t *kind, int32_t *modifier) +{ + omp_sched_t k; + int m; + omp_get_schedule (&k, &m); + *kind = k; + *modifier = m; +} + +void +omp_get_schedule_8_ (int32_t *kind, int64_t *modifier) +{ + omp_sched_t k; + int m; + omp_get_schedule (&k, &m); + *kind = k; + *modifier = m; +} + +int32_t +omp_get_thread_limit_ (void) +{ + return omp_get_thread_limit (); +} + +void +omp_set_max_active_levels_ (const int32_t *levels) +{ + omp_set_max_active_levels (*levels); +} + +void +omp_set_max_active_levels_8_ (const int64_t 
*levels) +{ + omp_set_max_active_levels (TO_INT (*levels)); +} + +int32_t +omp_get_max_active_levels_ (void) +{ + return omp_get_max_active_levels (); +} + +int32_t +omp_get_level_ (void) +{ + return omp_get_level (); +} + +int32_t +omp_get_ancestor_thread_num_ (const int32_t *level) +{ + return omp_get_ancestor_thread_num (*level); +} + +int32_t +omp_get_ancestor_thread_num_8_ (const int64_t *level) +{ + return omp_get_ancestor_thread_num (TO_INT (*level)); +} + +int32_t +omp_get_team_size_ (const int32_t *level) +{ + return omp_get_team_size (*level); +} + +int32_t +omp_get_team_size_8_ (const int64_t *level) +{ + return omp_get_team_size (TO_INT (*level)); +} + +int32_t +omp_get_active_level_ (void) +{ + return omp_get_active_level (); +} + +int32_t +omp_in_final_ (void) +{ + return omp_in_final (); +} diff --git a/contrib/gcc-4.7/libgomp/iter.c b/contrib/gcc-4.7/libgomp/iter.c new file mode 100644 index 0000000000..cd9484a1ea --- /dev/null +++ b/contrib/gcc-4.7/libgomp/iter.c @@ -0,0 +1,337 @@ +/* Copyright (C) 2005, 2008, 2009, 2011 Free Software Foundation, Inc. + Contributed by Richard Henderson . + + This file is part of the GNU OpenMP Library (libgomp). + + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. 
+ + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* This file contains routines for managing work-share iteration, both + for loops and sections. */ + +#include "libgomp.h" +#include + + +/* This function implements the STATIC scheduling method. The caller should + iterate *pstart <= x < *pend. Return zero if there are more iterations + to perform; nonzero if not. Return less than 0 if this thread had + received the absolutely last iteration. */ + +int +gomp_iter_static_next (long *pstart, long *pend) +{ + struct gomp_thread *thr = gomp_thread (); + struct gomp_team *team = thr->ts.team; + struct gomp_work_share *ws = thr->ts.work_share; + unsigned long nthreads = team ? team->nthreads : 1; + + if (thr->ts.static_trip == -1) + return -1; + + /* Quick test for degenerate teams and orphaned constructs. */ + if (nthreads == 1) + { + *pstart = ws->next; + *pend = ws->end; + thr->ts.static_trip = -1; + return ws->next == ws->end; + } + + /* We interpret chunk_size zero as "unspecified", which means that we + should break up the iterations such that each thread makes only one + trip through the outer loop. */ + if (ws->chunk_size == 0) + { + unsigned long n, q, i, t; + unsigned long s0, e0; + long s, e; + + if (thr->ts.static_trip > 0) + return 1; + + /* Compute the total number of iterations. */ + s = ws->incr + (ws->incr > 0 ? -1 : 1); + n = (ws->end - ws->next + s) / ws->incr; + i = thr->ts.team_id; + + /* Compute the "zero-based" start and end points. That is, as + if the loop began at zero and incremented by one. */ + q = n / nthreads; + t = n % nthreads; + if (i < t) + { + t = 0; + q++; + } + s0 = q * i + t; + e0 = s0 + q; + + /* Notice when no iterations allocated for this thread. 
*/ + if (s0 >= e0) + { + thr->ts.static_trip = 1; + return 1; + } + + /* Transform these to the actual start and end numbers. */ + s = (long)s0 * ws->incr + ws->next; + e = (long)e0 * ws->incr + ws->next; + + *pstart = s; + *pend = e; + thr->ts.static_trip = (e0 == n ? -1 : 1); + return 0; + } + else + { + unsigned long n, s0, e0, i, c; + long s, e; + + /* Otherwise, each thread gets exactly chunk_size iterations + (if available) each time through the loop. */ + + s = ws->incr + (ws->incr > 0 ? -1 : 1); + n = (ws->end - ws->next + s) / ws->incr; + i = thr->ts.team_id; + c = ws->chunk_size; + + /* Initial guess is a C sized chunk positioned nthreads iterations + in, offset by our thread number. */ + s0 = (thr->ts.static_trip * nthreads + i) * c; + e0 = s0 + c; + + /* Detect overflow. */ + if (s0 >= n) + return 1; + if (e0 > n) + e0 = n; + + /* Transform these to the actual start and end numbers. */ + s = (long)s0 * ws->incr + ws->next; + e = (long)e0 * ws->incr + ws->next; + + *pstart = s; + *pend = e; + + if (e0 == n) + thr->ts.static_trip = -1; + else + thr->ts.static_trip++; + return 0; + } +} + + +/* This function implements the DYNAMIC scheduling method. Arguments are + as for gomp_iter_static_next. This function must be called with ws->lock + held. */ + +bool +gomp_iter_dynamic_next_locked (long *pstart, long *pend) +{ + struct gomp_thread *thr = gomp_thread (); + struct gomp_work_share *ws = thr->ts.work_share; + long start, end, chunk, left; + + start = ws->next; + if (start == ws->end) + return false; + + chunk = ws->chunk_size; + left = ws->end - start; + if (ws->incr < 0) + { + if (chunk < left) + chunk = left; + } + else + { + if (chunk > left) + chunk = left; + } + end = start + chunk; + + ws->next = end; + *pstart = start; + *pend = end; + return true; +} + + +#ifdef HAVE_SYNC_BUILTINS +/* Similar, but doesn't require the lock held, and uses compare-and-swap + instead. Note that the only memory value that changes is ws->next. 
*/ + +bool +gomp_iter_dynamic_next (long *pstart, long *pend) +{ + struct gomp_thread *thr = gomp_thread (); + struct gomp_work_share *ws = thr->ts.work_share; + long start, end, nend, chunk, incr; + + end = ws->end; + incr = ws->incr; + chunk = ws->chunk_size; + + if (__builtin_expect (ws->mode, 1)) + { + long tmp = __sync_fetch_and_add (&ws->next, chunk); + if (incr > 0) + { + if (tmp >= end) + return false; + nend = tmp + chunk; + if (nend > end) + nend = end; + *pstart = tmp; + *pend = nend; + return true; + } + else + { + if (tmp <= end) + return false; + nend = tmp + chunk; + if (nend < end) + nend = end; + *pstart = tmp; + *pend = nend; + return true; + } + } + + start = ws->next; + while (1) + { + long left = end - start; + long tmp; + + if (start == end) + return false; + + if (incr < 0) + { + if (chunk < left) + chunk = left; + } + else + { + if (chunk > left) + chunk = left; + } + nend = start + chunk; + + tmp = __sync_val_compare_and_swap (&ws->next, start, nend); + if (__builtin_expect (tmp == start, 1)) + break; + + start = tmp; + } + + *pstart = start; + *pend = nend; + return true; +} +#endif /* HAVE_SYNC_BUILTINS */ + + +/* This function implements the GUIDED scheduling method. Arguments are + as for gomp_iter_static_next. This function must be called with the + work share lock held. */ + +bool +gomp_iter_guided_next_locked (long *pstart, long *pend) +{ + struct gomp_thread *thr = gomp_thread (); + struct gomp_work_share *ws = thr->ts.work_share; + struct gomp_team *team = thr->ts.team; + unsigned long nthreads = team ? 
team->nthreads : 1; + unsigned long n, q; + long start, end; + + if (ws->next == ws->end) + return false; + + start = ws->next; + n = (ws->end - start) / ws->incr; + q = (n + nthreads - 1) / nthreads; + + if (q < ws->chunk_size) + q = ws->chunk_size; + if (q <= n) + end = start + q * ws->incr; + else + end = ws->end; + + ws->next = end; + *pstart = start; + *pend = end; + return true; +} + +#ifdef HAVE_SYNC_BUILTINS +/* Similar, but doesn't require the lock held, and uses compare-and-swap + instead. Note that the only memory value that changes is ws->next. */ + +bool +gomp_iter_guided_next (long *pstart, long *pend) +{ + struct gomp_thread *thr = gomp_thread (); + struct gomp_work_share *ws = thr->ts.work_share; + struct gomp_team *team = thr->ts.team; + unsigned long nthreads = team ? team->nthreads : 1; + long start, end, nend, incr; + unsigned long chunk_size; + + start = ws->next; + end = ws->end; + incr = ws->incr; + chunk_size = ws->chunk_size; + + while (1) + { + unsigned long n, q; + long tmp; + + if (start == end) + return false; + + n = (end - start) / incr; + q = (n + nthreads - 1) / nthreads; + + if (q < chunk_size) + q = chunk_size; + if (__builtin_expect (q <= n, 1)) + nend = start + q * incr; + else + nend = end; + + tmp = __sync_val_compare_and_swap (&ws->next, start, nend); + if (__builtin_expect (tmp == start, 1)) + break; + + start = tmp; + } + + *pstart = start; + *pend = nend; + return true; +} +#endif /* HAVE_SYNC_BUILTINS */ diff --git a/contrib/gcc-4.7/libgomp/iter_ull.c b/contrib/gcc-4.7/libgomp/iter_ull.c new file mode 100644 index 0000000000..a393920b55 --- /dev/null +++ b/contrib/gcc-4.7/libgomp/iter_ull.c @@ -0,0 +1,344 @@ +/* Copyright (C) 2005, 2008, 2009, 2011 Free Software Foundation, Inc. + Contributed by Richard Henderson . + + This file is part of the GNU OpenMP Library (libgomp). 
+ + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* This file contains routines for managing work-share iteration, both + for loops and sections. */ + +#include "libgomp.h" +#include + +typedef unsigned long long gomp_ull; + +/* This function implements the STATIC scheduling method. The caller should + iterate *pstart <= x < *pend. Return zero if there are more iterations + to perform; nonzero if not. Return less than 0 if this thread had + received the absolutely last iteration. */ + +int +gomp_iter_ull_static_next (gomp_ull *pstart, gomp_ull *pend) +{ + struct gomp_thread *thr = gomp_thread (); + struct gomp_team *team = thr->ts.team; + struct gomp_work_share *ws = thr->ts.work_share; + unsigned long nthreads = team ? team->nthreads : 1; + + if (thr->ts.static_trip == -1) + return -1; + + /* Quick test for degenerate teams and orphaned constructs. 
*/ + if (nthreads == 1) + { + *pstart = ws->next_ull; + *pend = ws->end_ull; + thr->ts.static_trip = -1; + return ws->next_ull == ws->end_ull; + } + + /* We interpret chunk_size zero as "unspecified", which means that we + should break up the iterations such that each thread makes only one + trip through the outer loop. */ + if (ws->chunk_size_ull == 0) + { + gomp_ull n, q, i, t, s0, e0, s, e; + + if (thr->ts.static_trip > 0) + return 1; + + /* Compute the total number of iterations. */ + if (__builtin_expect (ws->mode, 0) == 0) + n = (ws->end_ull - ws->next_ull + ws->incr_ull - 1) / ws->incr_ull; + else + n = (ws->next_ull - ws->end_ull - ws->incr_ull - 1) / -ws->incr_ull; + i = thr->ts.team_id; + + /* Compute the "zero-based" start and end points. That is, as + if the loop began at zero and incremented by one. */ + q = n / nthreads; + t = n % nthreads; + if (i < t) + { + t = 0; + q++; + } + s0 = q * i + t; + e0 = s0 + q; + + /* Notice when no iterations allocated for this thread. */ + if (s0 >= e0) + { + thr->ts.static_trip = 1; + return 1; + } + + /* Transform these to the actual start and end numbers. */ + s = s0 * ws->incr_ull + ws->next_ull; + e = e0 * ws->incr_ull + ws->next_ull; + + *pstart = s; + *pend = e; + thr->ts.static_trip = (e0 == n ? -1 : 1); + return 0; + } + else + { + gomp_ull n, s0, e0, i, c, s, e; + + /* Otherwise, each thread gets exactly chunk_size iterations + (if available) each time through the loop. */ + + if (__builtin_expect (ws->mode, 0) == 0) + n = (ws->end_ull - ws->next_ull + ws->incr_ull - 1) / ws->incr_ull; + else + n = (ws->next_ull - ws->end_ull - ws->incr_ull - 1) / -ws->incr_ull; + i = thr->ts.team_id; + c = ws->chunk_size_ull; + + /* Initial guess is a C sized chunk positioned nthreads iterations + in, offset by our thread number. */ + s0 = (thr->ts.static_trip * (gomp_ull) nthreads + i) * c; + e0 = s0 + c; + + /* Detect overflow. 
*/ + if (s0 >= n) + return 1; + if (e0 > n) + e0 = n; + + /* Transform these to the actual start and end numbers. */ + s = s0 * ws->incr_ull + ws->next_ull; + e = e0 * ws->incr_ull + ws->next_ull; + + *pstart = s; + *pend = e; + + if (e0 == n) + thr->ts.static_trip = -1; + else + thr->ts.static_trip++; + return 0; + } +} + + +/* This function implements the DYNAMIC scheduling method. Arguments are + as for gomp_iter_ull_static_next. This function must be called with + ws->lock held. */ + +bool +gomp_iter_ull_dynamic_next_locked (gomp_ull *pstart, gomp_ull *pend) +{ + struct gomp_thread *thr = gomp_thread (); + struct gomp_work_share *ws = thr->ts.work_share; + gomp_ull start, end, chunk, left; + + start = ws->next_ull; + if (start == ws->end_ull) + return false; + + chunk = ws->chunk_size_ull; + left = ws->end_ull - start; + if (__builtin_expect (ws->mode & 2, 0)) + { + if (chunk < left) + chunk = left; + } + else + { + if (chunk > left) + chunk = left; + } + end = start + chunk; + + ws->next_ull = end; + *pstart = start; + *pend = end; + return true; +} + + +#if defined HAVE_SYNC_BUILTINS && defined __LP64__ +/* Similar, but doesn't require the lock held, and uses compare-and-swap + instead. Note that the only memory value that changes is ws->next_ull. 
*/ + +bool +gomp_iter_ull_dynamic_next (gomp_ull *pstart, gomp_ull *pend) +{ + struct gomp_thread *thr = gomp_thread (); + struct gomp_work_share *ws = thr->ts.work_share; + gomp_ull start, end, nend, chunk; + + end = ws->end_ull; + chunk = ws->chunk_size_ull; + + if (__builtin_expect (ws->mode & 1, 1)) + { + gomp_ull tmp = __sync_fetch_and_add (&ws->next_ull, chunk); + if (__builtin_expect (ws->mode & 2, 0) == 0) + { + if (tmp >= end) + return false; + nend = tmp + chunk; + if (nend > end) + nend = end; + *pstart = tmp; + *pend = nend; + return true; + } + else + { + if (tmp <= end) + return false; + nend = tmp + chunk; + if (nend < end) + nend = end; + *pstart = tmp; + *pend = nend; + return true; + } + } + + start = ws->next_ull; + while (1) + { + gomp_ull left = end - start; + gomp_ull tmp; + + if (start == end) + return false; + + if (__builtin_expect (ws->mode & 2, 0)) + { + if (chunk < left) + chunk = left; + } + else + { + if (chunk > left) + chunk = left; + } + nend = start + chunk; + + tmp = __sync_val_compare_and_swap (&ws->next_ull, start, nend); + if (__builtin_expect (tmp == start, 1)) + break; + + start = tmp; + } + + *pstart = start; + *pend = nend; + return true; +} +#endif /* HAVE_SYNC_BUILTINS */ + + +/* This function implements the GUIDED scheduling method. Arguments are + as for gomp_iter_ull_static_next. This function must be called with the + work share lock held. */ + +bool +gomp_iter_ull_guided_next_locked (gomp_ull *pstart, gomp_ull *pend) +{ + struct gomp_thread *thr = gomp_thread (); + struct gomp_work_share *ws = thr->ts.work_share; + struct gomp_team *team = thr->ts.team; + gomp_ull nthreads = team ? 
team->nthreads : 1; + gomp_ull n, q; + gomp_ull start, end; + + if (ws->next_ull == ws->end_ull) + return false; + + start = ws->next_ull; + if (__builtin_expect (ws->mode, 0) == 0) + n = (ws->end_ull - start) / ws->incr_ull; + else + n = (start - ws->end_ull) / -ws->incr_ull; + q = (n + nthreads - 1) / nthreads; + + if (q < ws->chunk_size_ull) + q = ws->chunk_size_ull; + if (q <= n) + end = start + q * ws->incr_ull; + else + end = ws->end_ull; + + ws->next_ull = end; + *pstart = start; + *pend = end; + return true; +} + +#if defined HAVE_SYNC_BUILTINS && defined __LP64__ +/* Similar, but doesn't require the lock held, and uses compare-and-swap + instead. Note that the only memory value that changes is ws->next_ull. */ + +bool +gomp_iter_ull_guided_next (gomp_ull *pstart, gomp_ull *pend) +{ + struct gomp_thread *thr = gomp_thread (); + struct gomp_work_share *ws = thr->ts.work_share; + struct gomp_team *team = thr->ts.team; + gomp_ull nthreads = team ? team->nthreads : 1; + gomp_ull start, end, nend, incr; + gomp_ull chunk_size; + + start = ws->next_ull; + end = ws->end_ull; + incr = ws->incr_ull; + chunk_size = ws->chunk_size_ull; + + while (1) + { + gomp_ull n, q; + gomp_ull tmp; + + if (start == end) + return false; + + if (__builtin_expect (ws->mode, 0) == 0) + n = (end - start) / incr; + else + n = (start - end) / -incr; + q = (n + nthreads - 1) / nthreads; + + if (q < chunk_size) + q = chunk_size; + if (__builtin_expect (q <= n, 1)) + nend = start + q * incr; + else + nend = end; + + tmp = __sync_val_compare_and_swap (&ws->next_ull, start, nend); + if (__builtin_expect (tmp == start, 1)) + break; + + start = tmp; + } + + *pstart = start; + *pend = nend; + return true; +} +#endif /* HAVE_SYNC_BUILTINS */ diff --git a/contrib/gcc-4.7/libgomp/libgomp.h b/contrib/gcc-4.7/libgomp/libgomp.h new file mode 100644 index 0000000000..2c9c0716cd --- /dev/null +++ b/contrib/gcc-4.7/libgomp/libgomp.h @@ -0,0 +1,591 @@ +/* Copyright (C) 2005, 2007, 2008, 2009, 2010, 2011 + 
Free Software Foundation, Inc. + Contributed by Richard Henderson . + + This file is part of the GNU OpenMP Library (libgomp). + + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* This file contains data types and function declarations that are not + part of the official OpenMP user interface. There are declarations + in here that are part of the GNU OpenMP ABI, in that the compiler is + required to know about them and use them. + + The convention is that the all caps prefix "GOMP" is used group items + that are part of the external ABI, and the lower case prefix "gomp" + is used group items that are completely private to the library. */ + +#ifndef LIBGOMP_H +#define LIBGOMP_H 1 + +#include "config.h" +#include "gstdint.h" + +#include +#include + +#ifdef HAVE_ATTRIBUTE_VISIBILITY +# pragma GCC visibility push(hidden) +#endif + +/* If we were a C++ library, we'd get this from . 
*/ +enum memmodel +{ + MEMMODEL_RELAXED = 0, + MEMMODEL_CONSUME = 1, + MEMMODEL_ACQUIRE = 2, + MEMMODEL_RELEASE = 3, + MEMMODEL_ACQ_REL = 4, + MEMMODEL_SEQ_CST = 5 +}; + +#include "sem.h" +#include "mutex.h" +#include "bar.h" +#include "ptrlock.h" + + +/* This structure contains the data to control one work-sharing construct, + either a LOOP (FOR/DO) or a SECTIONS. */ + +enum gomp_schedule_type +{ + GFS_RUNTIME, + GFS_STATIC, + GFS_DYNAMIC, + GFS_GUIDED, + GFS_AUTO +}; + +struct gomp_work_share +{ + /* This member records the SCHEDULE clause to be used for this construct. + The user specification of "runtime" will already have been resolved. + If this is a SECTIONS construct, this value will always be DYNAMIC. */ + enum gomp_schedule_type sched; + + int mode; + + union { + struct { + /* This is the chunk_size argument to the SCHEDULE clause. */ + long chunk_size; + + /* This is the iteration end point. If this is a SECTIONS construct, + this is the number of contained sections. */ + long end; + + /* This is the iteration step. If this is a SECTIONS construct, this + is always 1. */ + long incr; + }; + + struct { + /* The same as above, but for the unsigned long long loop variants. */ + unsigned long long chunk_size_ull; + unsigned long long end_ull; + unsigned long long incr_ull; + }; + }; + + /* This is a circular queue that details which threads will be allowed + into the ordered region and in which order. When a thread allocates + iterations on which it is going to work, it also registers itself at + the end of the array. When a thread reaches the ordered region, it + checks to see if it is the one at the head of the queue. If not, it + blocks on its RELEASE semaphore. */ + unsigned *ordered_team_ids; + + /* This is the number of threads that have registered themselves in + the circular queue ordered_team_ids. 
*/ + unsigned ordered_num_used; + + /* This is the team_id of the currently acknowledged owner of the ordered + section, or -1u if the ordered section has not been acknowledged by + any thread. This is distinguished from the thread that is *allowed* + to take the section next. */ + unsigned ordered_owner; + + /* This is the index into the circular queue ordered_team_ids of the + current thread that's allowed into the ordered reason. */ + unsigned ordered_cur; + + /* This is a chain of allocated gomp_work_share blocks, valid only + in the first gomp_work_share struct in the block. */ + struct gomp_work_share *next_alloc; + + /* The above fields are written once during workshare initialization, + or related to ordered worksharing. Make sure the following fields + are in a different cache line. */ + + /* This lock protects the update of the following members. */ + gomp_mutex_t lock __attribute__((aligned (64))); + + /* This is the count of the number of threads that have exited the work + share construct. If the construct was marked nowait, they have moved on + to other work; otherwise they're blocked on a barrier. The last member + of the team to exit the work share construct must deallocate it. */ + unsigned threads_completed; + + union { + /* This is the next iteration value to be allocated. In the case of + GFS_STATIC loops, this the iteration start point and never changes. */ + long next; + + /* The same, but with unsigned long long type. */ + unsigned long long next_ull; + + /* This is the returned data structure for SINGLE COPYPRIVATE. */ + void *copyprivate; + }; + + union { + /* Link to gomp_work_share struct for next work sharing construct + encountered after this one. */ + gomp_ptrlock_t next_ws; + + /* gomp_work_share structs are chained in the free work share cache + through this. 
*/ + struct gomp_work_share *next_free; + }; + + /* If only few threads are in the team, ordered_team_ids can point + to this array which fills the padding at the end of this struct. */ + unsigned inline_ordered_team_ids[0]; +}; + +/* This structure contains all of the thread-local data associated with + a thread team. This is the data that must be saved when a thread + encounters a nested PARALLEL construct. */ + +struct gomp_team_state +{ + /* This is the team of which the thread is currently a member. */ + struct gomp_team *team; + + /* This is the work share construct which this thread is currently + processing. Recall that with NOWAIT, not all threads may be + processing the same construct. */ + struct gomp_work_share *work_share; + + /* This is the previous work share construct or NULL if there wasn't any. + When all threads are done with the current work sharing construct, + the previous one can be freed. The current one can't, as its + next_ws field is used. */ + struct gomp_work_share *last_work_share; + + /* This is the ID of this thread within the team. This value is + guaranteed to be between 0 and N-1, where N is the number of + threads in the team. */ + unsigned team_id; + + /* Nesting level. */ + unsigned level; + + /* Active nesting level. Only active parallel regions are counted. */ + unsigned active_level; + +#ifdef HAVE_SYNC_BUILTINS + /* Number of single stmts encountered. */ + unsigned long single_count; +#endif + + /* For GFS_RUNTIME loops that resolved to GFS_STATIC, this is the + trip number through the loop. So first time a particular loop + is encountered this number is 0, the second time through the loop + is 1, etc. This is unused when the compiler knows in advance that + the loop is statically scheduled. */ + unsigned long static_trip; +}; + +/* These are the OpenMP 3.0 Internal Control Variables described in + section 2.3.1. 
Those described as having one copy per task are + stored within the structure; those described as having one copy + for the whole program are (naturally) global variables. */ + +struct gomp_task_icv +{ + unsigned long nthreads_var; + enum gomp_schedule_type run_sched_var; + int run_sched_modifier; + bool dyn_var; + bool nest_var; +}; + +extern struct gomp_task_icv gomp_global_icv; +extern unsigned long gomp_thread_limit_var; +extern unsigned long gomp_remaining_threads_count; +#ifndef HAVE_SYNC_BUILTINS +extern gomp_mutex_t gomp_remaining_threads_lock; +#endif +extern unsigned long gomp_max_active_levels_var; +extern unsigned long long gomp_spin_count_var, gomp_throttled_spin_count_var; +extern unsigned long gomp_available_cpus, gomp_managed_threads; +extern unsigned long *gomp_nthreads_var_list, gomp_nthreads_var_list_len; + +enum gomp_task_kind +{ + GOMP_TASK_IMPLICIT, + GOMP_TASK_IFFALSE, + GOMP_TASK_WAITING, + GOMP_TASK_TIED +}; + +/* This structure describes a "task" to be run by a thread. */ + +struct gomp_task +{ + struct gomp_task *parent; + struct gomp_task *children; + struct gomp_task *next_child; + struct gomp_task *prev_child; + struct gomp_task *next_queue; + struct gomp_task *prev_queue; + struct gomp_task_icv icv; + void (*fn) (void *); + void *fn_data; + enum gomp_task_kind kind; + bool in_taskwait; + bool in_tied_task; + bool final_task; + gomp_sem_t taskwait_sem; +}; + +/* This structure describes a "team" of threads. These are the threads + that are spawned by a PARALLEL constructs, as well as the work sharing + constructs that the team encounters. */ + +struct gomp_team +{ + /* This is the number of threads in the current team. */ + unsigned nthreads; + + /* This is number of gomp_work_share structs that have been allocated + as a block last time. */ + unsigned work_share_chunk; + + /* This is the saved team state that applied to a master thread before + the current thread was created. 
*/ + struct gomp_team_state prev_ts; + + /* This semaphore should be used by the master thread instead of its + "native" semaphore in the thread structure. Required for nested + parallels, as the master is a member of two teams. */ + gomp_sem_t master_release; + + /* This points to an array with pointers to the release semaphore + of the threads in the team. */ + gomp_sem_t **ordered_release; + + /* List of gomp_work_share structs chained through next_free fields. + This is populated and taken off only by the first thread in the + team encountering a new work sharing construct, in a critical + section. */ + struct gomp_work_share *work_share_list_alloc; + + /* List of gomp_work_share structs freed by free_work_share. New + entries are atomically added to the start of the list, and + alloc_work_share can safely only move all but the first entry + to work_share_list alloc, as free_work_share can happen concurrently + with alloc_work_share. */ + struct gomp_work_share *work_share_list_free; + +#ifdef HAVE_SYNC_BUILTINS + /* Number of simple single regions encountered by threads in this + team. */ + unsigned long single_count; +#else + /* Mutex protecting addition of workshares to work_share_list_free. */ + gomp_mutex_t work_share_list_free_lock; +#endif + + /* This barrier is used for most synchronization of the team. */ + gomp_barrier_t barrier; + + /* Initial work shares, to avoid allocating any gomp_work_share + structs in the common case. */ + struct gomp_work_share work_shares[8]; + + gomp_mutex_t task_lock; + struct gomp_task *task_queue; + int task_count; + int task_running_count; + + /* This array contains structures for implicit tasks. */ + struct gomp_task implicit_task[]; +}; + +/* This structure contains all data that is private to libgomp and is + allocated per thread. */ + +struct gomp_thread +{ + /* This is the function that the thread should run upon launch. 
*/ + void (*fn) (void *data); + void *data; + + /* This is the current team state for this thread. The ts.team member + is NULL only if the thread is idle. */ + struct gomp_team_state ts; + + /* This is the task that the thread is currently executing. */ + struct gomp_task *task; + + /* This semaphore is used for ordered loops. */ + gomp_sem_t release; + + /* user pthread thread pool */ + struct gomp_thread_pool *thread_pool; +}; + + +struct gomp_thread_pool +{ + /* This array manages threads spawned from the top level, which will + return to the idle loop once the current PARALLEL construct ends. */ + struct gomp_thread **threads; + unsigned threads_size; + unsigned threads_used; + struct gomp_team *last_team; + + /* This barrier holds and releases threads waiting in threads. */ + gomp_barrier_t threads_dock; +}; + +/* ... and here is that TLS data. */ + +#ifdef HAVE_TLS +extern __thread struct gomp_thread gomp_tls_data; +static inline struct gomp_thread *gomp_thread (void) +{ + return &gomp_tls_data; +} +#else +extern pthread_key_t gomp_tls_key; +static inline struct gomp_thread *gomp_thread (void) +{ + return pthread_getspecific (gomp_tls_key); +} +#endif + +extern struct gomp_task_icv *gomp_new_icv (void); + +/* Here's how to access the current copy of the ICVs. */ + +static inline struct gomp_task_icv *gomp_icv (bool write) +{ + struct gomp_task *task = gomp_thread ()->task; + if (task) + return &task->icv; + else if (write) + return gomp_new_icv (); + else + return &gomp_global_icv; +} + +/* The attributes to be used during thread creation. */ +extern pthread_attr_t gomp_thread_attr; + +/* Other variables. */ + +extern unsigned short *gomp_cpu_affinity; +extern size_t gomp_cpu_affinity_len; + +/* Function prototypes. 
*/ + +/* affinity.c */ + +extern void gomp_init_affinity (void); +extern void gomp_init_thread_affinity (pthread_attr_t *); + +/* alloc.c */ + +extern void *gomp_malloc (size_t) __attribute__((malloc)); +extern void *gomp_malloc_cleared (size_t) __attribute__((malloc)); +extern void *gomp_realloc (void *, size_t); + +/* Avoid conflicting prototypes of alloca() in system headers by using + GCC's builtin alloca(). */ +#define gomp_alloca(x) __builtin_alloca(x) + +/* error.c */ + +extern void gomp_error (const char *, ...) + __attribute__((format (printf, 1, 2))); +extern void gomp_fatal (const char *, ...) + __attribute__((noreturn, format (printf, 1, 2))); + +/* iter.c */ + +extern int gomp_iter_static_next (long *, long *); +extern bool gomp_iter_dynamic_next_locked (long *, long *); +extern bool gomp_iter_guided_next_locked (long *, long *); + +#ifdef HAVE_SYNC_BUILTINS +extern bool gomp_iter_dynamic_next (long *, long *); +extern bool gomp_iter_guided_next (long *, long *); +#endif + +/* iter_ull.c */ + +extern int gomp_iter_ull_static_next (unsigned long long *, + unsigned long long *); +extern bool gomp_iter_ull_dynamic_next_locked (unsigned long long *, + unsigned long long *); +extern bool gomp_iter_ull_guided_next_locked (unsigned long long *, + unsigned long long *); + +#if defined HAVE_SYNC_BUILTINS && defined __LP64__ +extern bool gomp_iter_ull_dynamic_next (unsigned long long *, + unsigned long long *); +extern bool gomp_iter_ull_guided_next (unsigned long long *, + unsigned long long *); +#endif + +/* ordered.c */ + +extern void gomp_ordered_first (void); +extern void gomp_ordered_last (void); +extern void gomp_ordered_next (void); +extern void gomp_ordered_static_init (void); +extern void gomp_ordered_static_next (void); +extern void gomp_ordered_sync (void); + +/* parallel.c */ + +extern unsigned gomp_resolve_num_threads (unsigned, unsigned); + +/* proc.c (in config/) */ + +extern void gomp_init_num_threads (void); +extern unsigned 
gomp_dynamic_max_threads (void); + +/* task.c */ + +extern void gomp_init_task (struct gomp_task *, struct gomp_task *, + struct gomp_task_icv *); +extern void gomp_end_task (void); +extern void gomp_barrier_handle_tasks (gomp_barrier_state_t); + +static void inline +gomp_finish_task (struct gomp_task *task) +{ + gomp_sem_destroy (&task->taskwait_sem); +} + +/* team.c */ + +extern struct gomp_team *gomp_new_team (unsigned); +extern void gomp_team_start (void (*) (void *), void *, unsigned, + struct gomp_team *); +extern void gomp_team_end (void); + +/* work.c */ + +extern void gomp_init_work_share (struct gomp_work_share *, bool, unsigned); +extern void gomp_fini_work_share (struct gomp_work_share *); +extern bool gomp_work_share_start (bool); +extern void gomp_work_share_end (void); +extern void gomp_work_share_end_nowait (void); + +static inline void +gomp_work_share_init_done (void) +{ + struct gomp_thread *thr = gomp_thread (); + if (__builtin_expect (thr->ts.last_work_share != NULL, 1)) + gomp_ptrlock_set (&thr->ts.last_work_share->next_ws, thr->ts.work_share); +} + +#ifdef HAVE_ATTRIBUTE_VISIBILITY +# pragma GCC visibility pop +#endif + +/* Now that we're back to default visibility, include the globals. */ +#include "libgomp_g.h" + +/* Include omp.h by parts. 
*/ +#include "omp-lock.h" +#define _LIBGOMP_OMP_LOCK_DEFINED 1 +#include "omp.h.in" + +#if !defined (HAVE_ATTRIBUTE_VISIBILITY) \ + || !defined (HAVE_ATTRIBUTE_ALIAS) \ + || !defined (HAVE_AS_SYMVER_DIRECTIVE) \ + || !defined (PIC) \ + || !defined (HAVE_SYMVER_SYMBOL_RENAMING_RUNTIME_SUPPORT) +# undef LIBGOMP_GNU_SYMBOL_VERSIONING +#endif + +#ifdef LIBGOMP_GNU_SYMBOL_VERSIONING +extern void gomp_init_lock_30 (omp_lock_t *) __GOMP_NOTHROW; +extern void gomp_destroy_lock_30 (omp_lock_t *) __GOMP_NOTHROW; +extern void gomp_set_lock_30 (omp_lock_t *) __GOMP_NOTHROW; +extern void gomp_unset_lock_30 (omp_lock_t *) __GOMP_NOTHROW; +extern int gomp_test_lock_30 (omp_lock_t *) __GOMP_NOTHROW; +extern void gomp_init_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW; +extern void gomp_destroy_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW; +extern void gomp_set_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW; +extern void gomp_unset_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW; +extern int gomp_test_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW; + +extern void gomp_init_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW; +extern void gomp_destroy_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW; +extern void gomp_set_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW; +extern void gomp_unset_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW; +extern int gomp_test_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW; +extern void gomp_init_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW; +extern void gomp_destroy_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW; +extern void gomp_set_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW; +extern void gomp_unset_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW; +extern int gomp_test_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW; + +# define strong_alias(fn, al) \ + extern __typeof (fn) al __attribute__ ((alias (#fn))); +# define omp_lock_symver(fn) \ + __asm (".symver g" #fn "_30, " #fn "@@OMP_3.0"); \ + __asm (".symver g" #fn "_25, " #fn "@OMP_1.0"); +#else +# 
define gomp_init_lock_30 omp_init_lock +# define gomp_destroy_lock_30 omp_destroy_lock +# define gomp_set_lock_30 omp_set_lock +# define gomp_unset_lock_30 omp_unset_lock +# define gomp_test_lock_30 omp_test_lock +# define gomp_init_nest_lock_30 omp_init_nest_lock +# define gomp_destroy_nest_lock_30 omp_destroy_nest_lock +# define gomp_set_nest_lock_30 omp_set_nest_lock +# define gomp_unset_nest_lock_30 omp_unset_nest_lock +# define gomp_test_nest_lock_30 omp_test_nest_lock +#endif + +#ifdef HAVE_ATTRIBUTE_VISIBILITY +# define attribute_hidden __attribute__ ((visibility ("hidden"))) +#else +# define attribute_hidden +#endif + +#ifdef HAVE_ATTRIBUTE_ALIAS +# define ialias(fn) \ + extern __typeof (fn) gomp_ialias_##fn \ + __attribute__ ((alias (#fn))) attribute_hidden; +#else +# define ialias(fn) +#endif + +#endif /* LIBGOMP_H */ diff --git a/contrib/gcc-4.7/libgomp/libgomp.map b/contrib/gcc-4.7/libgomp/libgomp.map new file mode 100644 index 0000000000..7b051f96aa --- /dev/null +++ b/contrib/gcc-4.7/libgomp/libgomp.map @@ -0,0 +1,186 @@ +OMP_1.0 { + global: + omp_set_num_threads; + omp_get_num_threads; + omp_get_max_threads; + omp_get_thread_num; + omp_get_num_procs; + omp_in_parallel; + omp_set_dynamic; + omp_get_dynamic; + omp_set_nested; + omp_get_nested; +#ifdef HAVE_SYMVER_SYMBOL_RENAMING_RUNTIME_SUPPORT + # If the assembler used lacks the .symver directive or the linker + # doesn't support GNU symbol versioning, we have the same symbol in + # two versions, which Sun ld chokes on. 
+ omp_init_lock; + omp_init_nest_lock; + omp_destroy_lock; + omp_destroy_nest_lock; + omp_set_lock; + omp_set_nest_lock; + omp_unset_lock; + omp_unset_nest_lock; + omp_test_lock; + omp_test_nest_lock; + omp_destroy_lock_; + omp_destroy_nest_lock_; + omp_init_lock_; + omp_init_nest_lock_; + omp_set_lock_; + omp_set_nest_lock_; + omp_test_lock_; + omp_test_nest_lock_; + omp_unset_lock_; + omp_unset_nest_lock_; +#endif + omp_get_dynamic_; + omp_get_max_threads_; + omp_get_nested_; + omp_get_num_procs_; + omp_get_num_threads_; + omp_get_thread_num_; + omp_in_parallel_; + omp_set_dynamic_; + omp_set_dynamic_8_; + omp_set_nested_; + omp_set_nested_8_; + omp_set_num_threads_; + omp_set_num_threads_8_; + local: + *; +}; + +OMP_2.0 { + global: + omp_get_wtick; + omp_get_wtime; + omp_get_wtick_; + omp_get_wtime_; +} OMP_1.0; + +OMP_3.0 { + global: + omp_set_schedule; + omp_set_schedule_; + omp_set_schedule_8_; + omp_get_schedule; + omp_get_schedule_; + omp_get_schedule_8_; + omp_get_thread_limit; + omp_get_thread_limit_; + omp_set_max_active_levels; + omp_set_max_active_levels_; + omp_set_max_active_levels_8_; + omp_get_max_active_levels; + omp_get_max_active_levels_; + omp_get_level; + omp_get_level_; + omp_get_ancestor_thread_num; + omp_get_ancestor_thread_num_; + omp_get_ancestor_thread_num_8_; + omp_get_team_size; + omp_get_team_size_; + omp_get_team_size_8_; + omp_get_active_level; + omp_get_active_level_; + omp_init_lock; + omp_init_nest_lock; + omp_destroy_lock; + omp_destroy_nest_lock; + omp_set_lock; + omp_set_nest_lock; + omp_unset_lock; + omp_unset_nest_lock; + omp_test_lock; + omp_test_nest_lock; + omp_destroy_lock_; + omp_destroy_nest_lock_; + omp_init_lock_; + omp_init_nest_lock_; + omp_set_lock_; + omp_set_nest_lock_; + omp_test_lock_; + omp_test_nest_lock_; + omp_unset_lock_; + omp_unset_nest_lock_; +} OMP_2.0; + +OMP_3.1 { + global: + omp_in_final; + omp_in_final_; +} OMP_3.0; + +GOMP_1.0 { + global: + GOMP_atomic_end; + GOMP_atomic_start; + GOMP_barrier; + 
GOMP_critical_end; + GOMP_critical_name_end; + GOMP_critical_name_start; + GOMP_critical_start; + GOMP_loop_dynamic_next; + GOMP_loop_dynamic_start; + GOMP_loop_end; + GOMP_loop_end_nowait; + GOMP_loop_guided_next; + GOMP_loop_guided_start; + GOMP_loop_ordered_dynamic_next; + GOMP_loop_ordered_dynamic_start; + GOMP_loop_ordered_guided_next; + GOMP_loop_ordered_guided_start; + GOMP_loop_ordered_runtime_next; + GOMP_loop_ordered_runtime_start; + GOMP_loop_ordered_static_next; + GOMP_loop_ordered_static_start; + GOMP_loop_runtime_next; + GOMP_loop_runtime_start; + GOMP_loop_static_next; + GOMP_loop_static_start; + GOMP_ordered_end; + GOMP_ordered_start; + GOMP_parallel_end; + GOMP_parallel_loop_dynamic_start; + GOMP_parallel_loop_guided_start; + GOMP_parallel_loop_runtime_start; + GOMP_parallel_loop_static_start; + GOMP_parallel_sections_start; + GOMP_parallel_start; + GOMP_sections_end; + GOMP_sections_end_nowait; + GOMP_sections_next; + GOMP_sections_start; + GOMP_single_copy_end; + GOMP_single_copy_start; + GOMP_single_start; +}; + +GOMP_2.0 { + global: + GOMP_task; + GOMP_taskwait; + GOMP_loop_ull_dynamic_next; + GOMP_loop_ull_dynamic_start; + GOMP_loop_ull_guided_next; + GOMP_loop_ull_guided_start; + GOMP_loop_ull_ordered_dynamic_next; + GOMP_loop_ull_ordered_dynamic_start; + GOMP_loop_ull_ordered_guided_next; + GOMP_loop_ull_ordered_guided_start; + GOMP_loop_ull_ordered_runtime_next; + GOMP_loop_ull_ordered_runtime_start; + GOMP_loop_ull_ordered_static_next; + GOMP_loop_ull_ordered_static_start; + GOMP_loop_ull_runtime_next; + GOMP_loop_ull_runtime_start; + GOMP_loop_ull_static_next; + GOMP_loop_ull_static_start; +} GOMP_1.0; + +GOMP_3.0 { + global: + GOMP_taskyield; +} GOMP_2.0; diff --git a/contrib/gcc-4.7/libgomp/libgomp.spec.in b/contrib/gcc-4.7/libgomp/libgomp.spec.in new file mode 100644 index 0000000000..b7319f33a5 --- /dev/null +++ b/contrib/gcc-4.7/libgomp/libgomp.spec.in @@ -0,0 +1,3 @@ +# This spec file is read by gcc when linking. 
It is used to specify the +# standard libraries we need in order to link with -fopenmp. +*link_gomp: @link_gomp@ diff --git a/contrib/gcc-4.7/libgomp/libgomp_f.h.in b/contrib/gcc-4.7/libgomp/libgomp_f.h.in new file mode 100644 index 0000000000..ccb7e83947 --- /dev/null +++ b/contrib/gcc-4.7/libgomp/libgomp_f.h.in @@ -0,0 +1,93 @@ +/* Copyright (C) 2005, 2008, 2009 Free Software Foundation, Inc. + Contributed by Jakub Jelinek . + + This file is part of the GNU OpenMP Library (libgomp). + + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* This file contains prototypes of functions in the external ABI. + This file is included by files in the testsuite. 
*/ + +#ifndef LIBGOMP_F_H +#define LIBGOMP_F_H 1 + +#include "libgomp.h" + +#if (@OMP_LOCK_SIZE@ == @OMP_LOCK_KIND@) \ + && (@OMP_LOCK_ALIGN@ <= @OMP_LOCK_SIZE@) +# define OMP_LOCK_DIRECT +typedef omp_lock_t *omp_lock_arg_t; +# define omp_lock_arg(arg) (arg) +#else +typedef union { omp_lock_t *lock; uint64_t u; } *omp_lock_arg_t; +# define omp_lock_arg(arg) ((arg)->lock) +# endif + +#if (@OMP_NEST_LOCK_SIZE@ == @OMP_NEST_LOCK_KIND@) \ + && (@OMP_NEST_LOCK_ALIGN@ <= @OMP_NEST_LOCK_SIZE@) +# define OMP_NEST_LOCK_DIRECT +typedef omp_nest_lock_t *omp_nest_lock_arg_t; +# define omp_nest_lock_arg(arg) (arg) +#else +typedef union { omp_nest_lock_t *lock; uint64_t u; } *omp_nest_lock_arg_t; +# define omp_nest_lock_arg(arg) ((arg)->lock) +# endif + +#if (@OMP_LOCK_25_SIZE@ == @OMP_LOCK_25_KIND@) \ + && (@OMP_LOCK_25_ALIGN@ <= @OMP_LOCK_25_SIZE@) +# define OMP_LOCK_25_DIRECT +typedef omp_lock_25_t *omp_lock_25_arg_t; +# define omp_lock_25_arg(arg) (arg) +#else +typedef union { omp_lock_25_t *lock; uint64_t u; } *omp_lock_25_arg_t; +# define omp_lock_25_arg(arg) ((arg)->lock) +# endif + +#if (@OMP_NEST_LOCK_25_SIZE@ == @OMP_NEST_LOCK_25_KIND@) \ + && (@OMP_NEST_LOCK_25_ALIGN@ <= @OMP_NEST_LOCK_25_SIZE@) +# define OMP_NEST_LOCK_25_DIRECT +typedef omp_nest_lock_25_t *omp_nest_lock_25_arg_t; +# define omp_nest_lock_25_arg(arg) (arg) +#else +typedef union { omp_nest_lock_25_t *lock; uint64_t u; } *omp_nest_lock_25_arg_t; +# define omp_nest_lock_25_arg(arg) ((arg)->lock) +# endif + +static inline void +omp_check_defines (void) +{ + char test[(@OMP_LOCK_SIZE@ != sizeof (omp_lock_t) + || @OMP_LOCK_ALIGN@ != __alignof (omp_lock_t) + || @OMP_NEST_LOCK_SIZE@ != sizeof (omp_nest_lock_t) + || @OMP_NEST_LOCK_ALIGN@ != __alignof (omp_nest_lock_t) + || @OMP_LOCK_KIND@ != sizeof (*(omp_lock_arg_t) 0) + || @OMP_NEST_LOCK_KIND@ != sizeof (*(omp_nest_lock_arg_t) 0)) + ? 
-1 : 1] __attribute__ ((__unused__)); + char test2[(@OMP_LOCK_25_SIZE@ != sizeof (omp_lock_25_t) + || @OMP_LOCK_25_ALIGN@ != __alignof (omp_lock_25_t) + || @OMP_NEST_LOCK_25_SIZE@ != sizeof (omp_nest_lock_25_t) + || @OMP_NEST_LOCK_25_ALIGN@ != __alignof (omp_nest_lock_25_t) + || @OMP_LOCK_25_KIND@ != sizeof (*(omp_lock_25_arg_t) 0) + || @OMP_NEST_LOCK_25_KIND@ + != sizeof (*(omp_nest_lock_25_arg_t) 0)) + ? -1 : 1] __attribute__ ((__unused__)); +} + +#endif /* LIBGOMP_F_H */ diff --git a/contrib/gcc-4.7/libgomp/libgomp_g.h b/contrib/gcc-4.7/libgomp/libgomp_g.h new file mode 100644 index 0000000000..8a7c31f0d2 --- /dev/null +++ b/contrib/gcc-4.7/libgomp/libgomp_g.h @@ -0,0 +1,183 @@ +/* Copyright (C) 2005, 2007, 2008, 2009 Free Software Foundation, Inc. + Contributed by Richard Henderson . + + This file is part of the GNU OpenMP Library (libgomp). + + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* This file contains prototypes of functions in the external ABI. + This file is included by files in the testsuite. 
*/ + +#ifndef LIBGOMP_G_H +#define LIBGOMP_G_H 1 + +#include + +/* barrier.c */ + +extern void GOMP_barrier (void); + +/* critical.c */ + +extern void GOMP_critical_start (void); +extern void GOMP_critical_end (void); +extern void GOMP_critical_name_start (void **); +extern void GOMP_critical_name_end (void **); +extern void GOMP_atomic_start (void); +extern void GOMP_atomic_end (void); + +/* loop.c */ + +extern bool GOMP_loop_static_start (long, long, long, long, long *, long *); +extern bool GOMP_loop_dynamic_start (long, long, long, long, long *, long *); +extern bool GOMP_loop_guided_start (long, long, long, long, long *, long *); +extern bool GOMP_loop_runtime_start (long, long, long, long *, long *); + +extern bool GOMP_loop_ordered_static_start (long, long, long, long, + long *, long *); +extern bool GOMP_loop_ordered_dynamic_start (long, long, long, long, + long *, long *); +extern bool GOMP_loop_ordered_guided_start (long, long, long, long, + long *, long *); +extern bool GOMP_loop_ordered_runtime_start (long, long, long, long *, long *); + +extern bool GOMP_loop_static_next (long *, long *); +extern bool GOMP_loop_dynamic_next (long *, long *); +extern bool GOMP_loop_guided_next (long *, long *); +extern bool GOMP_loop_runtime_next (long *, long *); + +extern bool GOMP_loop_ordered_static_next (long *, long *); +extern bool GOMP_loop_ordered_dynamic_next (long *, long *); +extern bool GOMP_loop_ordered_guided_next (long *, long *); +extern bool GOMP_loop_ordered_runtime_next (long *, long *); + +extern void GOMP_parallel_loop_static_start (void (*)(void *), void *, + unsigned, long, long, long, long); +extern void GOMP_parallel_loop_dynamic_start (void (*)(void *), void *, + unsigned, long, long, long, long); +extern void GOMP_parallel_loop_guided_start (void (*)(void *), void *, + unsigned, long, long, long, long); +extern void GOMP_parallel_loop_runtime_start (void (*)(void *), void *, + unsigned, long, long, long); + +extern void GOMP_loop_end (void); 
+extern void GOMP_loop_end_nowait (void); + +/* loop_ull.c */ + +extern bool GOMP_loop_ull_static_start (bool, unsigned long long, + unsigned long long, + unsigned long long, + unsigned long long, + unsigned long long *, + unsigned long long *); +extern bool GOMP_loop_ull_dynamic_start (bool, unsigned long long, + unsigned long long, + unsigned long long, + unsigned long long, + unsigned long long *, + unsigned long long *); +extern bool GOMP_loop_ull_guided_start (bool, unsigned long long, + unsigned long long, + unsigned long long, + unsigned long long, + unsigned long long *, + unsigned long long *); +extern bool GOMP_loop_ull_runtime_start (bool, unsigned long long, + unsigned long long, + unsigned long long, + unsigned long long *, + unsigned long long *); + +extern bool GOMP_loop_ull_ordered_static_start (bool, unsigned long long, + unsigned long long, + unsigned long long, + unsigned long long, + unsigned long long *, + unsigned long long *); +extern bool GOMP_loop_ull_ordered_dynamic_start (bool, unsigned long long, + unsigned long long, + unsigned long long, + unsigned long long, + unsigned long long *, + unsigned long long *); +extern bool GOMP_loop_ull_ordered_guided_start (bool, unsigned long long, + unsigned long long, + unsigned long long, + unsigned long long, + unsigned long long *, + unsigned long long *); +extern bool GOMP_loop_ull_ordered_runtime_start (bool, unsigned long long, + unsigned long long, + unsigned long long, + unsigned long long *, + unsigned long long *); + +extern bool GOMP_loop_ull_static_next (unsigned long long *, + unsigned long long *); +extern bool GOMP_loop_ull_dynamic_next (unsigned long long *, + unsigned long long *); +extern bool GOMP_loop_ull_guided_next (unsigned long long *, + unsigned long long *); +extern bool GOMP_loop_ull_runtime_next (unsigned long long *, + unsigned long long *); + +extern bool GOMP_loop_ull_ordered_static_next (unsigned long long *, + unsigned long long *); +extern bool 
GOMP_loop_ull_ordered_dynamic_next (unsigned long long *, + unsigned long long *); +extern bool GOMP_loop_ull_ordered_guided_next (unsigned long long *, + unsigned long long *); +extern bool GOMP_loop_ull_ordered_runtime_next (unsigned long long *, + unsigned long long *); + +/* ordered.c */ + +extern void GOMP_ordered_start (void); +extern void GOMP_ordered_end (void); + +/* parallel.c */ + +extern void GOMP_parallel_start (void (*) (void *), void *, unsigned); +extern void GOMP_parallel_end (void); + +/* task.c */ + +extern void GOMP_task (void (*) (void *), void *, void (*) (void *, void *), + long, long, bool, unsigned); +extern void GOMP_taskwait (void); +extern void GOMP_taskyield (void); + +/* sections.c */ + +extern unsigned GOMP_sections_start (unsigned); +extern unsigned GOMP_sections_next (void); +extern void GOMP_parallel_sections_start (void (*) (void *), void *, + unsigned, unsigned); +extern void GOMP_sections_end (void); +extern void GOMP_sections_end_nowait (void); + +/* single.c */ + +extern bool GOMP_single_start (void); +extern void *GOMP_single_copy_start (void); +extern void GOMP_single_copy_end (void *); + +#endif /* LIBGOMP_G_H */ diff --git a/contrib/gcc-4.7/libgomp/loop.c b/contrib/gcc-4.7/libgomp/loop.c new file mode 100644 index 0000000000..ca389214c2 --- /dev/null +++ b/contrib/gcc-4.7/libgomp/loop.c @@ -0,0 +1,620 @@ +/* Copyright (C) 2005, 2008, 2009 Free Software Foundation, Inc. + Contributed by Richard Henderson . + + This file is part of the GNU OpenMP Library (libgomp). + + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* This file handles the LOOP (FOR/DO) construct. */ + +#include +#include +#include "libgomp.h" + + +/* Initialize the given work share construct from the given arguments. */ + +static inline void +gomp_loop_init (struct gomp_work_share *ws, long start, long end, long incr, + enum gomp_schedule_type sched, long chunk_size) +{ + ws->sched = sched; + ws->chunk_size = chunk_size; + /* Canonicalize loops that have zero iterations to ->next == ->end. */ + ws->end = ((incr > 0 && start > end) || (incr < 0 && start < end)) + ? start : end; + ws->incr = incr; + ws->next = start; + if (sched == GFS_DYNAMIC) + { + ws->chunk_size *= incr; + +#ifdef HAVE_SYNC_BUILTINS + { + /* For dynamic scheduling prepare things to make each iteration + faster. */ + struct gomp_thread *thr = gomp_thread (); + struct gomp_team *team = thr->ts.team; + long nthreads = team ? team->nthreads : 1; + + if (__builtin_expect (incr > 0, 1)) + { + /* Cheap overflow protection. */ + if (__builtin_expect ((nthreads | ws->chunk_size) + >= 1UL << (sizeof (long) + * __CHAR_BIT__ / 2 - 1), 0)) + ws->mode = 0; + else + ws->mode = ws->end < (LONG_MAX + - (nthreads + 1) * ws->chunk_size); + } + /* Cheap overflow protection. */ + else if (__builtin_expect ((nthreads | -ws->chunk_size) + >= 1UL << (sizeof (long) + * __CHAR_BIT__ / 2 - 1), 0)) + ws->mode = 0; + else + ws->mode = ws->end > (nthreads + 1) * -ws->chunk_size - LONG_MAX; + } +#endif + } +} + +/* The *_start routines are called when first encountering a loop construct + that is not bound directly to a parallel construct. 
The first thread + that arrives will create the work-share construct; subsequent threads + will see the construct exists and allocate work from it. + + START, END, INCR are the bounds of the loop; due to the restrictions of + OpenMP, these values must be the same in every thread. This is not + verified (nor is it entirely verifiable, since START is not necessarily + retained intact in the work-share data structure). CHUNK_SIZE is the + scheduling parameter; again this must be identical in all threads. + + Returns true if there's any work for this thread to perform. If so, + *ISTART and *IEND are filled with the bounds of the iteration block + allocated to this thread. Returns false if all work was assigned to + other threads prior to this thread's arrival. */ + +static bool +gomp_loop_static_start (long start, long end, long incr, long chunk_size, + long *istart, long *iend) +{ + struct gomp_thread *thr = gomp_thread (); + + thr->ts.static_trip = 0; + if (gomp_work_share_start (false)) + { + gomp_loop_init (thr->ts.work_share, start, end, incr, + GFS_STATIC, chunk_size); + gomp_work_share_init_done (); + } + + return !gomp_iter_static_next (istart, iend); +} + +static bool +gomp_loop_dynamic_start (long start, long end, long incr, long chunk_size, + long *istart, long *iend) +{ + struct gomp_thread *thr = gomp_thread (); + bool ret; + + if (gomp_work_share_start (false)) + { + gomp_loop_init (thr->ts.work_share, start, end, incr, + GFS_DYNAMIC, chunk_size); + gomp_work_share_init_done (); + } + +#ifdef HAVE_SYNC_BUILTINS + ret = gomp_iter_dynamic_next (istart, iend); +#else + gomp_mutex_lock (&thr->ts.work_share->lock); + ret = gomp_iter_dynamic_next_locked (istart, iend); + gomp_mutex_unlock (&thr->ts.work_share->lock); +#endif + + return ret; +} + +static bool +gomp_loop_guided_start (long start, long end, long incr, long chunk_size, + long *istart, long *iend) +{ + struct gomp_thread *thr = gomp_thread (); + bool ret; + + if (gomp_work_share_start (false)) + { + 
gomp_loop_init (thr->ts.work_share, start, end, incr, + GFS_GUIDED, chunk_size); + gomp_work_share_init_done (); + } + +#ifdef HAVE_SYNC_BUILTINS + ret = gomp_iter_guided_next (istart, iend); +#else + gomp_mutex_lock (&thr->ts.work_share->lock); + ret = gomp_iter_guided_next_locked (istart, iend); + gomp_mutex_unlock (&thr->ts.work_share->lock); +#endif + + return ret; +} + +bool +GOMP_loop_runtime_start (long start, long end, long incr, + long *istart, long *iend) +{ + struct gomp_task_icv *icv = gomp_icv (false); + switch (icv->run_sched_var) + { + case GFS_STATIC: + return gomp_loop_static_start (start, end, incr, icv->run_sched_modifier, + istart, iend); + case GFS_DYNAMIC: + return gomp_loop_dynamic_start (start, end, incr, icv->run_sched_modifier, + istart, iend); + case GFS_GUIDED: + return gomp_loop_guided_start (start, end, incr, icv->run_sched_modifier, + istart, iend); + case GFS_AUTO: + /* For now map to schedule(static), later on we could play with feedback + driven choice. */ + return gomp_loop_static_start (start, end, incr, 0, istart, iend); + default: + abort (); + } +} + +/* The *_ordered_*_start routines are similar. The only difference is that + this work-share construct is initialized to expect an ORDERED section. 
*/ + +static bool +gomp_loop_ordered_static_start (long start, long end, long incr, + long chunk_size, long *istart, long *iend) +{ + struct gomp_thread *thr = gomp_thread (); + + thr->ts.static_trip = 0; + if (gomp_work_share_start (true)) + { + gomp_loop_init (thr->ts.work_share, start, end, incr, + GFS_STATIC, chunk_size); + gomp_ordered_static_init (); + gomp_work_share_init_done (); + } + + return !gomp_iter_static_next (istart, iend); +} + +static bool +gomp_loop_ordered_dynamic_start (long start, long end, long incr, + long chunk_size, long *istart, long *iend) +{ + struct gomp_thread *thr = gomp_thread (); + bool ret; + + if (gomp_work_share_start (true)) + { + gomp_loop_init (thr->ts.work_share, start, end, incr, + GFS_DYNAMIC, chunk_size); + gomp_mutex_lock (&thr->ts.work_share->lock); + gomp_work_share_init_done (); + } + else + gomp_mutex_lock (&thr->ts.work_share->lock); + + ret = gomp_iter_dynamic_next_locked (istart, iend); + if (ret) + gomp_ordered_first (); + gomp_mutex_unlock (&thr->ts.work_share->lock); + + return ret; +} + +static bool +gomp_loop_ordered_guided_start (long start, long end, long incr, + long chunk_size, long *istart, long *iend) +{ + struct gomp_thread *thr = gomp_thread (); + bool ret; + + if (gomp_work_share_start (true)) + { + gomp_loop_init (thr->ts.work_share, start, end, incr, + GFS_GUIDED, chunk_size); + gomp_mutex_lock (&thr->ts.work_share->lock); + gomp_work_share_init_done (); + } + else + gomp_mutex_lock (&thr->ts.work_share->lock); + + ret = gomp_iter_guided_next_locked (istart, iend); + if (ret) + gomp_ordered_first (); + gomp_mutex_unlock (&thr->ts.work_share->lock); + + return ret; +} + +bool +GOMP_loop_ordered_runtime_start (long start, long end, long incr, + long *istart, long *iend) +{ + struct gomp_task_icv *icv = gomp_icv (false); + switch (icv->run_sched_var) + { + case GFS_STATIC: + return gomp_loop_ordered_static_start (start, end, incr, + icv->run_sched_modifier, + istart, iend); + case GFS_DYNAMIC: + 
return gomp_loop_ordered_dynamic_start (start, end, incr, + icv->run_sched_modifier, + istart, iend); + case GFS_GUIDED: + return gomp_loop_ordered_guided_start (start, end, incr, + icv->run_sched_modifier, + istart, iend); + case GFS_AUTO: + /* For now map to schedule(static), later on we could play with feedback + driven choice. */ + return gomp_loop_ordered_static_start (start, end, incr, + 0, istart, iend); + default: + abort (); + } +} + +/* The *_next routines are called when the thread completes processing of + the iteration block currently assigned to it. If the work-share + construct is bound directly to a parallel construct, then the iteration + bounds may have been set up before the parallel. In which case, this + may be the first iteration for the thread. + + Returns true if there is work remaining to be performed; *ISTART and + *IEND are filled with a new iteration block. Returns false if all work + has been assigned. */ + +static bool +gomp_loop_static_next (long *istart, long *iend) +{ + return !gomp_iter_static_next (istart, iend); +} + +static bool +gomp_loop_dynamic_next (long *istart, long *iend) +{ + bool ret; + +#ifdef HAVE_SYNC_BUILTINS + ret = gomp_iter_dynamic_next (istart, iend); +#else + struct gomp_thread *thr = gomp_thread (); + gomp_mutex_lock (&thr->ts.work_share->lock); + ret = gomp_iter_dynamic_next_locked (istart, iend); + gomp_mutex_unlock (&thr->ts.work_share->lock); +#endif + + return ret; +} + +static bool +gomp_loop_guided_next (long *istart, long *iend) +{ + bool ret; + +#ifdef HAVE_SYNC_BUILTINS + ret = gomp_iter_guided_next (istart, iend); +#else + struct gomp_thread *thr = gomp_thread (); + gomp_mutex_lock (&thr->ts.work_share->lock); + ret = gomp_iter_guided_next_locked (istart, iend); + gomp_mutex_unlock (&thr->ts.work_share->lock); +#endif + + return ret; +} + +bool +GOMP_loop_runtime_next (long *istart, long *iend) +{ + struct gomp_thread *thr = gomp_thread (); + + switch (thr->ts.work_share->sched) + { + case 
GFS_STATIC: + case GFS_AUTO: + return gomp_loop_static_next (istart, iend); + case GFS_DYNAMIC: + return gomp_loop_dynamic_next (istart, iend); + case GFS_GUIDED: + return gomp_loop_guided_next (istart, iend); + default: + abort (); + } +} + +/* The *_ordered_*_next routines are called when the thread completes + processing of the iteration block currently assigned to it. + + Returns true if there is work remaining to be performed; *ISTART and + *IEND are filled with a new iteration block. Returns false if all work + has been assigned. */ + +static bool +gomp_loop_ordered_static_next (long *istart, long *iend) +{ + struct gomp_thread *thr = gomp_thread (); + int test; + + gomp_ordered_sync (); + gomp_mutex_lock (&thr->ts.work_share->lock); + test = gomp_iter_static_next (istart, iend); + if (test >= 0) + gomp_ordered_static_next (); + gomp_mutex_unlock (&thr->ts.work_share->lock); + + return test == 0; +} + +static bool +gomp_loop_ordered_dynamic_next (long *istart, long *iend) +{ + struct gomp_thread *thr = gomp_thread (); + bool ret; + + gomp_ordered_sync (); + gomp_mutex_lock (&thr->ts.work_share->lock); + ret = gomp_iter_dynamic_next_locked (istart, iend); + if (ret) + gomp_ordered_next (); + else + gomp_ordered_last (); + gomp_mutex_unlock (&thr->ts.work_share->lock); + + return ret; +} + +static bool +gomp_loop_ordered_guided_next (long *istart, long *iend) +{ + struct gomp_thread *thr = gomp_thread (); + bool ret; + + gomp_ordered_sync (); + gomp_mutex_lock (&thr->ts.work_share->lock); + ret = gomp_iter_guided_next_locked (istart, iend); + if (ret) + gomp_ordered_next (); + else + gomp_ordered_last (); + gomp_mutex_unlock (&thr->ts.work_share->lock); + + return ret; +} + +bool +GOMP_loop_ordered_runtime_next (long *istart, long *iend) +{ + struct gomp_thread *thr = gomp_thread (); + + switch (thr->ts.work_share->sched) + { + case GFS_STATIC: + case GFS_AUTO: + return gomp_loop_ordered_static_next (istart, iend); + case GFS_DYNAMIC: + return 
gomp_loop_ordered_dynamic_next (istart, iend); + case GFS_GUIDED: + return gomp_loop_ordered_guided_next (istart, iend); + default: + abort (); + } +} + +/* The GOMP_parallel_loop_* routines pre-initialize a work-share construct + to avoid one synchronization once we get into the loop. */ + +static void +gomp_parallel_loop_start (void (*fn) (void *), void *data, + unsigned num_threads, long start, long end, + long incr, enum gomp_schedule_type sched, + long chunk_size) +{ + struct gomp_team *team; + + num_threads = gomp_resolve_num_threads (num_threads, 0); + team = gomp_new_team (num_threads); + gomp_loop_init (&team->work_shares[0], start, end, incr, sched, chunk_size); + gomp_team_start (fn, data, num_threads, team); +} + +void +GOMP_parallel_loop_static_start (void (*fn) (void *), void *data, + unsigned num_threads, long start, long end, + long incr, long chunk_size) +{ + gomp_parallel_loop_start (fn, data, num_threads, start, end, incr, + GFS_STATIC, chunk_size); +} + +void +GOMP_parallel_loop_dynamic_start (void (*fn) (void *), void *data, + unsigned num_threads, long start, long end, + long incr, long chunk_size) +{ + gomp_parallel_loop_start (fn, data, num_threads, start, end, incr, + GFS_DYNAMIC, chunk_size); +} + +void +GOMP_parallel_loop_guided_start (void (*fn) (void *), void *data, + unsigned num_threads, long start, long end, + long incr, long chunk_size) +{ + gomp_parallel_loop_start (fn, data, num_threads, start, end, incr, + GFS_GUIDED, chunk_size); +} + +void +GOMP_parallel_loop_runtime_start (void (*fn) (void *), void *data, + unsigned num_threads, long start, long end, + long incr) +{ + struct gomp_task_icv *icv = gomp_icv (false); + gomp_parallel_loop_start (fn, data, num_threads, start, end, incr, + icv->run_sched_var, icv->run_sched_modifier); +} + +/* The GOMP_loop_end* routines are called after the thread is told that + all loop iterations are complete. This first version synchronizes + all threads; the nowait version does not. 
*/ + +void +GOMP_loop_end (void) +{ + gomp_work_share_end (); +} + +void +GOMP_loop_end_nowait (void) +{ + gomp_work_share_end_nowait (); +} + + +/* We use static functions above so that we're sure that the "runtime" + function can defer to the proper routine without interposition. We + export the static function with a strong alias when possible, or with + a wrapper function otherwise. */ + +#ifdef HAVE_ATTRIBUTE_ALIAS +extern __typeof(gomp_loop_static_start) GOMP_loop_static_start + __attribute__((alias ("gomp_loop_static_start"))); +extern __typeof(gomp_loop_dynamic_start) GOMP_loop_dynamic_start + __attribute__((alias ("gomp_loop_dynamic_start"))); +extern __typeof(gomp_loop_guided_start) GOMP_loop_guided_start + __attribute__((alias ("gomp_loop_guided_start"))); + +extern __typeof(gomp_loop_ordered_static_start) GOMP_loop_ordered_static_start + __attribute__((alias ("gomp_loop_ordered_static_start"))); +extern __typeof(gomp_loop_ordered_dynamic_start) GOMP_loop_ordered_dynamic_start + __attribute__((alias ("gomp_loop_ordered_dynamic_start"))); +extern __typeof(gomp_loop_ordered_guided_start) GOMP_loop_ordered_guided_start + __attribute__((alias ("gomp_loop_ordered_guided_start"))); + +extern __typeof(gomp_loop_static_next) GOMP_loop_static_next + __attribute__((alias ("gomp_loop_static_next"))); +extern __typeof(gomp_loop_dynamic_next) GOMP_loop_dynamic_next + __attribute__((alias ("gomp_loop_dynamic_next"))); +extern __typeof(gomp_loop_guided_next) GOMP_loop_guided_next + __attribute__((alias ("gomp_loop_guided_next"))); + +extern __typeof(gomp_loop_ordered_static_next) GOMP_loop_ordered_static_next + __attribute__((alias ("gomp_loop_ordered_static_next"))); +extern __typeof(gomp_loop_ordered_dynamic_next) GOMP_loop_ordered_dynamic_next + __attribute__((alias ("gomp_loop_ordered_dynamic_next"))); +extern __typeof(gomp_loop_ordered_guided_next) GOMP_loop_ordered_guided_next + __attribute__((alias ("gomp_loop_ordered_guided_next"))); +#else +bool 
+GOMP_loop_static_start (long start, long end, long incr, long chunk_size, + long *istart, long *iend) +{ + return gomp_loop_static_start (start, end, incr, chunk_size, istart, iend); +} + +bool +GOMP_loop_dynamic_start (long start, long end, long incr, long chunk_size, + long *istart, long *iend) +{ + return gomp_loop_dynamic_start (start, end, incr, chunk_size, istart, iend); +} + +bool +GOMP_loop_guided_start (long start, long end, long incr, long chunk_size, + long *istart, long *iend) +{ + return gomp_loop_guided_start (start, end, incr, chunk_size, istart, iend); +} + +bool +GOMP_loop_ordered_static_start (long start, long end, long incr, + long chunk_size, long *istart, long *iend) +{ + return gomp_loop_ordered_static_start (start, end, incr, chunk_size, + istart, iend); +} + +bool +GOMP_loop_ordered_dynamic_start (long start, long end, long incr, + long chunk_size, long *istart, long *iend) +{ + return gomp_loop_ordered_dynamic_start (start, end, incr, chunk_size, + istart, iend); +} + +bool +GOMP_loop_ordered_guided_start (long start, long end, long incr, + long chunk_size, long *istart, long *iend) +{ + return gomp_loop_ordered_guided_start (start, end, incr, chunk_size, + istart, iend); +} + +bool +GOMP_loop_static_next (long *istart, long *iend) +{ + return gomp_loop_static_next (istart, iend); +} + +bool +GOMP_loop_dynamic_next (long *istart, long *iend) +{ + return gomp_loop_dynamic_next (istart, iend); +} + +bool +GOMP_loop_guided_next (long *istart, long *iend) +{ + return gomp_loop_guided_next (istart, iend); +} + +bool +GOMP_loop_ordered_static_next (long *istart, long *iend) +{ + return gomp_loop_ordered_static_next (istart, iend); +} + +bool +GOMP_loop_ordered_dynamic_next (long *istart, long *iend) +{ + return gomp_loop_ordered_dynamic_next (istart, iend); +} + +bool +GOMP_loop_ordered_guided_next (long *istart, long *iend) +{ + return gomp_loop_ordered_guided_next (istart, iend); +} +#endif diff --git a/contrib/gcc-4.7/libgomp/loop_ull.c 
b/contrib/gcc-4.7/libgomp/loop_ull.c new file mode 100644 index 0000000000..82da2d56ec --- /dev/null +++ b/contrib/gcc-4.7/libgomp/loop_ull.c @@ -0,0 +1,571 @@ +/* Copyright (C) 2005, 2008, 2009 Free Software Foundation, Inc. + Contributed by Richard Henderson . + + This file is part of the GNU OpenMP Library (libgomp). + + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* This file handles the LOOP (FOR/DO) construct. */ + +#include +#include +#include "libgomp.h" + +typedef unsigned long long gomp_ull; + +/* Initialize the given work share construct from the given arguments. */ + +static inline void +gomp_loop_ull_init (struct gomp_work_share *ws, bool up, gomp_ull start, + gomp_ull end, gomp_ull incr, enum gomp_schedule_type sched, + gomp_ull chunk_size) +{ + ws->sched = sched; + ws->chunk_size_ull = chunk_size; + /* Canonicalize loops that have zero iterations to ->next == ->end. */ + ws->end_ull = ((up && start > end) || (!up && start < end)) + ? 
start : end; + ws->incr_ull = incr; + ws->next_ull = start; + ws->mode = 0; + if (sched == GFS_DYNAMIC) + { + ws->chunk_size_ull *= incr; + +#if defined HAVE_SYNC_BUILTINS && defined __LP64__ + { + /* For dynamic scheduling prepare things to make each iteration + faster. */ + struct gomp_thread *thr = gomp_thread (); + struct gomp_team *team = thr->ts.team; + long nthreads = team ? team->nthreads : 1; + + if (__builtin_expect (up, 1)) + { + /* Cheap overflow protection. */ + if (__builtin_expect ((nthreads | ws->chunk_size_ull) + < 1ULL << (sizeof (gomp_ull) + * __CHAR_BIT__ / 2 - 1), 1)) + ws->mode = ws->end_ull < (__LONG_LONG_MAX__ * 2ULL + 1 + - (nthreads + 1) * ws->chunk_size_ull); + } + /* Cheap overflow protection. */ + else if (__builtin_expect ((nthreads | -ws->chunk_size_ull) + < 1ULL << (sizeof (gomp_ull) + * __CHAR_BIT__ / 2 - 1), 1)) + ws->mode = ws->end_ull > ((nthreads + 1) * -ws->chunk_size_ull + - (__LONG_LONG_MAX__ * 2ULL + 1)); + } +#endif + } + if (!up) + ws->mode |= 2; +} + +/* The *_start routines are called when first encountering a loop construct + that is not bound directly to a parallel construct. The first thread + that arrives will create the work-share construct; subsequent threads + will see the construct exists and allocate work from it. + + START, END, INCR are the bounds of the loop; due to the restrictions of + OpenMP, these values must be the same in every thread. This is not + verified (nor is it entirely verifiable, since START is not necessarily + retained intact in the work-share data structure). CHUNK_SIZE is the + scheduling parameter; again this must be identical in all threads. + + Returns true if there's any work for this thread to perform. If so, + *ISTART and *IEND are filled with the bounds of the iteration block + allocated to this thread. Returns false if all work was assigned to + other threads prior to this thread's arrival. 
*/ + +static bool +gomp_loop_ull_static_start (bool up, gomp_ull start, gomp_ull end, + gomp_ull incr, gomp_ull chunk_size, + gomp_ull *istart, gomp_ull *iend) +{ + struct gomp_thread *thr = gomp_thread (); + + thr->ts.static_trip = 0; + if (gomp_work_share_start (false)) + { + gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr, + GFS_STATIC, chunk_size); + gomp_work_share_init_done (); + } + + return !gomp_iter_ull_static_next (istart, iend); +} + +static bool +gomp_loop_ull_dynamic_start (bool up, gomp_ull start, gomp_ull end, + gomp_ull incr, gomp_ull chunk_size, + gomp_ull *istart, gomp_ull *iend) +{ + struct gomp_thread *thr = gomp_thread (); + bool ret; + + if (gomp_work_share_start (false)) + { + gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr, + GFS_DYNAMIC, chunk_size); + gomp_work_share_init_done (); + } + +#if defined HAVE_SYNC_BUILTINS && defined __LP64__ + ret = gomp_iter_ull_dynamic_next (istart, iend); +#else + gomp_mutex_lock (&thr->ts.work_share->lock); + ret = gomp_iter_ull_dynamic_next_locked (istart, iend); + gomp_mutex_unlock (&thr->ts.work_share->lock); +#endif + + return ret; +} + +static bool +gomp_loop_ull_guided_start (bool up, gomp_ull start, gomp_ull end, + gomp_ull incr, gomp_ull chunk_size, + gomp_ull *istart, gomp_ull *iend) +{ + struct gomp_thread *thr = gomp_thread (); + bool ret; + + if (gomp_work_share_start (false)) + { + gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr, + GFS_GUIDED, chunk_size); + gomp_work_share_init_done (); + } + +#if defined HAVE_SYNC_BUILTINS && defined __LP64__ + ret = gomp_iter_ull_guided_next (istart, iend); +#else + gomp_mutex_lock (&thr->ts.work_share->lock); + ret = gomp_iter_ull_guided_next_locked (istart, iend); + gomp_mutex_unlock (&thr->ts.work_share->lock); +#endif + + return ret; +} + +bool +GOMP_loop_ull_runtime_start (bool up, gomp_ull start, gomp_ull end, + gomp_ull incr, gomp_ull *istart, gomp_ull *iend) +{ + struct gomp_task_icv *icv = gomp_icv (false); + 
switch (icv->run_sched_var) + { + case GFS_STATIC: + return gomp_loop_ull_static_start (up, start, end, incr, + icv->run_sched_modifier, + istart, iend); + case GFS_DYNAMIC: + return gomp_loop_ull_dynamic_start (up, start, end, incr, + icv->run_sched_modifier, + istart, iend); + case GFS_GUIDED: + return gomp_loop_ull_guided_start (up, start, end, incr, + icv->run_sched_modifier, + istart, iend); + case GFS_AUTO: + /* For now map to schedule(static), later on we could play with feedback + driven choice. */ + return gomp_loop_ull_static_start (up, start, end, incr, + 0, istart, iend); + default: + abort (); + } +} + +/* The *_ordered_*_start routines are similar. The only difference is that + this work-share construct is initialized to expect an ORDERED section. */ + +static bool +gomp_loop_ull_ordered_static_start (bool up, gomp_ull start, gomp_ull end, + gomp_ull incr, gomp_ull chunk_size, + gomp_ull *istart, gomp_ull *iend) +{ + struct gomp_thread *thr = gomp_thread (); + + thr->ts.static_trip = 0; + if (gomp_work_share_start (true)) + { + gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr, + GFS_STATIC, chunk_size); + gomp_ordered_static_init (); + gomp_work_share_init_done (); + } + + return !gomp_iter_ull_static_next (istart, iend); +} + +static bool +gomp_loop_ull_ordered_dynamic_start (bool up, gomp_ull start, gomp_ull end, + gomp_ull incr, gomp_ull chunk_size, + gomp_ull *istart, gomp_ull *iend) +{ + struct gomp_thread *thr = gomp_thread (); + bool ret; + + if (gomp_work_share_start (true)) + { + gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr, + GFS_DYNAMIC, chunk_size); + gomp_mutex_lock (&thr->ts.work_share->lock); + gomp_work_share_init_done (); + } + else + gomp_mutex_lock (&thr->ts.work_share->lock); + + ret = gomp_iter_ull_dynamic_next_locked (istart, iend); + if (ret) + gomp_ordered_first (); + gomp_mutex_unlock (&thr->ts.work_share->lock); + + return ret; +} + +static bool +gomp_loop_ull_ordered_guided_start (bool up, 
gomp_ull start, gomp_ull end, + gomp_ull incr, gomp_ull chunk_size, + gomp_ull *istart, gomp_ull *iend) +{ + struct gomp_thread *thr = gomp_thread (); + bool ret; + + if (gomp_work_share_start (true)) + { + gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr, + GFS_GUIDED, chunk_size); + gomp_mutex_lock (&thr->ts.work_share->lock); + gomp_work_share_init_done (); + } + else + gomp_mutex_lock (&thr->ts.work_share->lock); + + ret = gomp_iter_ull_guided_next_locked (istart, iend); + if (ret) + gomp_ordered_first (); + gomp_mutex_unlock (&thr->ts.work_share->lock); + + return ret; +} + +bool +GOMP_loop_ull_ordered_runtime_start (bool up, gomp_ull start, gomp_ull end, + gomp_ull incr, gomp_ull *istart, + gomp_ull *iend) +{ + struct gomp_task_icv *icv = gomp_icv (false); + switch (icv->run_sched_var) + { + case GFS_STATIC: + return gomp_loop_ull_ordered_static_start (up, start, end, incr, + icv->run_sched_modifier, + istart, iend); + case GFS_DYNAMIC: + return gomp_loop_ull_ordered_dynamic_start (up, start, end, incr, + icv->run_sched_modifier, + istart, iend); + case GFS_GUIDED: + return gomp_loop_ull_ordered_guided_start (up, start, end, incr, + icv->run_sched_modifier, + istart, iend); + case GFS_AUTO: + /* For now map to schedule(static), later on we could play with feedback + driven choice. */ + return gomp_loop_ull_ordered_static_start (up, start, end, incr, + 0, istart, iend); + default: + abort (); + } +} + +/* The *_next routines are called when the thread completes processing of + the iteration block currently assigned to it. If the work-share + construct is bound directly to a parallel construct, then the iteration + bounds may have been set up before the parallel. In which case, this + may be the first iteration for the thread. + + Returns true if there is work remaining to be performed; *ISTART and + *IEND are filled with a new iteration block. Returns false if all work + has been assigned. 
*/ + +static bool +gomp_loop_ull_static_next (gomp_ull *istart, gomp_ull *iend) +{ + return !gomp_iter_ull_static_next (istart, iend); +} + +static bool +gomp_loop_ull_dynamic_next (gomp_ull *istart, gomp_ull *iend) +{ + bool ret; + +#if defined HAVE_SYNC_BUILTINS && defined __LP64__ + ret = gomp_iter_ull_dynamic_next (istart, iend); +#else + struct gomp_thread *thr = gomp_thread (); + gomp_mutex_lock (&thr->ts.work_share->lock); + ret = gomp_iter_ull_dynamic_next_locked (istart, iend); + gomp_mutex_unlock (&thr->ts.work_share->lock); +#endif + + return ret; +} + +static bool +gomp_loop_ull_guided_next (gomp_ull *istart, gomp_ull *iend) +{ + bool ret; + +#if defined HAVE_SYNC_BUILTINS && defined __LP64__ + ret = gomp_iter_ull_guided_next (istart, iend); +#else + struct gomp_thread *thr = gomp_thread (); + gomp_mutex_lock (&thr->ts.work_share->lock); + ret = gomp_iter_ull_guided_next_locked (istart, iend); + gomp_mutex_unlock (&thr->ts.work_share->lock); +#endif + + return ret; +} + +bool +GOMP_loop_ull_runtime_next (gomp_ull *istart, gomp_ull *iend) +{ + struct gomp_thread *thr = gomp_thread (); + + switch (thr->ts.work_share->sched) + { + case GFS_STATIC: + case GFS_AUTO: + return gomp_loop_ull_static_next (istart, iend); + case GFS_DYNAMIC: + return gomp_loop_ull_dynamic_next (istart, iend); + case GFS_GUIDED: + return gomp_loop_ull_guided_next (istart, iend); + default: + abort (); + } +} + +/* The *_ordered_*_next routines are called when the thread completes + processing of the iteration block currently assigned to it. + + Returns true if there is work remaining to be performed; *ISTART and + *IEND are filled with a new iteration block. Returns false if all work + has been assigned. 
*/ + +static bool +gomp_loop_ull_ordered_static_next (gomp_ull *istart, gomp_ull *iend) +{ + struct gomp_thread *thr = gomp_thread (); + int test; + + gomp_ordered_sync (); + gomp_mutex_lock (&thr->ts.work_share->lock); + test = gomp_iter_ull_static_next (istart, iend); + if (test >= 0) + gomp_ordered_static_next (); + gomp_mutex_unlock (&thr->ts.work_share->lock); + + return test == 0; +} + +static bool +gomp_loop_ull_ordered_dynamic_next (gomp_ull *istart, gomp_ull *iend) +{ + struct gomp_thread *thr = gomp_thread (); + bool ret; + + gomp_ordered_sync (); + gomp_mutex_lock (&thr->ts.work_share->lock); + ret = gomp_iter_ull_dynamic_next_locked (istart, iend); + if (ret) + gomp_ordered_next (); + else + gomp_ordered_last (); + gomp_mutex_unlock (&thr->ts.work_share->lock); + + return ret; +} + +static bool +gomp_loop_ull_ordered_guided_next (gomp_ull *istart, gomp_ull *iend) +{ + struct gomp_thread *thr = gomp_thread (); + bool ret; + + gomp_ordered_sync (); + gomp_mutex_lock (&thr->ts.work_share->lock); + ret = gomp_iter_ull_guided_next_locked (istart, iend); + if (ret) + gomp_ordered_next (); + else + gomp_ordered_last (); + gomp_mutex_unlock (&thr->ts.work_share->lock); + + return ret; +} + +bool +GOMP_loop_ull_ordered_runtime_next (gomp_ull *istart, gomp_ull *iend) +{ + struct gomp_thread *thr = gomp_thread (); + + switch (thr->ts.work_share->sched) + { + case GFS_STATIC: + case GFS_AUTO: + return gomp_loop_ull_ordered_static_next (istart, iend); + case GFS_DYNAMIC: + return gomp_loop_ull_ordered_dynamic_next (istart, iend); + case GFS_GUIDED: + return gomp_loop_ull_ordered_guided_next (istart, iend); + default: + abort (); + } +} + +/* We use static functions above so that we're sure that the "runtime" + function can defer to the proper routine without interposition. We + export the static function with a strong alias when possible, or with + a wrapper function otherwise. 
*/ + +#ifdef HAVE_ATTRIBUTE_ALIAS +extern __typeof(gomp_loop_ull_static_start) GOMP_loop_ull_static_start + __attribute__((alias ("gomp_loop_ull_static_start"))); +extern __typeof(gomp_loop_ull_dynamic_start) GOMP_loop_ull_dynamic_start + __attribute__((alias ("gomp_loop_ull_dynamic_start"))); +extern __typeof(gomp_loop_ull_guided_start) GOMP_loop_ull_guided_start + __attribute__((alias ("gomp_loop_ull_guided_start"))); + +extern __typeof(gomp_loop_ull_ordered_static_start) GOMP_loop_ull_ordered_static_start + __attribute__((alias ("gomp_loop_ull_ordered_static_start"))); +extern __typeof(gomp_loop_ull_ordered_dynamic_start) GOMP_loop_ull_ordered_dynamic_start + __attribute__((alias ("gomp_loop_ull_ordered_dynamic_start"))); +extern __typeof(gomp_loop_ull_ordered_guided_start) GOMP_loop_ull_ordered_guided_start + __attribute__((alias ("gomp_loop_ull_ordered_guided_start"))); + +extern __typeof(gomp_loop_ull_static_next) GOMP_loop_ull_static_next + __attribute__((alias ("gomp_loop_ull_static_next"))); +extern __typeof(gomp_loop_ull_dynamic_next) GOMP_loop_ull_dynamic_next + __attribute__((alias ("gomp_loop_ull_dynamic_next"))); +extern __typeof(gomp_loop_ull_guided_next) GOMP_loop_ull_guided_next + __attribute__((alias ("gomp_loop_ull_guided_next"))); + +extern __typeof(gomp_loop_ull_ordered_static_next) GOMP_loop_ull_ordered_static_next + __attribute__((alias ("gomp_loop_ull_ordered_static_next"))); +extern __typeof(gomp_loop_ull_ordered_dynamic_next) GOMP_loop_ull_ordered_dynamic_next + __attribute__((alias ("gomp_loop_ull_ordered_dynamic_next"))); +extern __typeof(gomp_loop_ull_ordered_guided_next) GOMP_loop_ull_ordered_guided_next + __attribute__((alias ("gomp_loop_ull_ordered_guided_next"))); +#else +bool +GOMP_loop_ull_static_start (bool up, gomp_ull start, gomp_ull end, + gomp_ull incr, gomp_ull chunk_size, + gomp_ull *istart, gomp_ull *iend) +{ + return gomp_loop_ull_static_start (up, start, end, incr, chunk_size, istart, + iend); +} + +bool 
+GOMP_loop_ull_dynamic_start (bool up, gomp_ull start, gomp_ull end, + gomp_ull incr, gomp_ull chunk_size, + gomp_ull *istart, gomp_ull *iend) +{ + return gomp_loop_ull_dynamic_start (up, start, end, incr, chunk_size, istart, + iend); +} + +bool +GOMP_loop_ull_guided_start (bool up, gomp_ull start, gomp_ull end, + gomp_ull incr, gomp_ull chunk_size, + gomp_ull *istart, gomp_ull *iend) +{ + return gomp_loop_ull_guided_start (up, start, end, incr, chunk_size, istart, + iend); +} + +bool +GOMP_loop_ull_ordered_static_start (bool up, gomp_ull start, gomp_ull end, + gomp_ull incr, gomp_ull chunk_size, + gomp_ull *istart, gomp_ull *iend) +{ + return gomp_loop_ull_ordered_static_start (up, start, end, incr, chunk_size, + istart, iend); +} + +bool +GOMP_loop_ull_ordered_dynamic_start (bool up, gomp_ull start, gomp_ull end, + gomp_ull incr, gomp_ull chunk_size, + gomp_ull *istart, gomp_ull *iend) +{ + return gomp_loop_ull_ordered_dynamic_start (up, start, end, incr, chunk_size, + istart, iend); +} + +bool +GOMP_loop_ull_ordered_guided_start (bool up, gomp_ull start, gomp_ull end, + gomp_ull incr, gomp_ull chunk_size, + gomp_ull *istart, gomp_ull *iend) +{ + return gomp_loop_ull_ordered_guided_start (up, start, end, incr, chunk_size, + istart, iend); +} + +bool +GOMP_loop_ull_static_next (gomp_ull *istart, gomp_ull *iend) +{ + return gomp_loop_ull_static_next (istart, iend); +} + +bool +GOMP_loop_ull_dynamic_next (gomp_ull *istart, gomp_ull *iend) +{ + return gomp_loop_ull_dynamic_next (istart, iend); +} + +bool +GOMP_loop_ull_guided_next (gomp_ull *istart, gomp_ull *iend) +{ + return gomp_loop_ull_guided_next (istart, iend); +} + +bool +GOMP_loop_ull_ordered_static_next (gomp_ull *istart, gomp_ull *iend) +{ + return gomp_loop_ull_ordered_static_next (istart, iend); +} + +bool +GOMP_loop_ull_ordered_dynamic_next (gomp_ull *istart, gomp_ull *iend) +{ + return gomp_loop_ull_ordered_dynamic_next (istart, iend); +} + +bool +GOMP_loop_ull_ordered_guided_next (gomp_ull *istart, 
gomp_ull *iend) +{ + return gomp_loop_ull_ordered_guided_next (istart, iend); +} +#endif diff --git a/contrib/gcc-4.7/libgomp/omp.h.in b/contrib/gcc-4.7/libgomp/omp.h.in new file mode 100644 index 0000000000..f2d7ba4e11 --- /dev/null +++ b/contrib/gcc-4.7/libgomp/omp.h.in @@ -0,0 +1,107 @@ +/* Copyright (C) 2005, 2007, 2008, 2009, 2011 Free Software Foundation, Inc. + Contributed by Richard Henderson . + + This file is part of the GNU OpenMP Library (libgomp). + + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef OMP_H +#define OMP_H 1 + +#ifndef _LIBGOMP_OMP_LOCK_DEFINED +#define _LIBGOMP_OMP_LOCK_DEFINED 1 +/* These two structures get edited by the libgomp build process to + reflect the shape of the two types. Their internals are private + to the library. 
*/ + +typedef struct +{ + unsigned char _x[@OMP_LOCK_SIZE@] + __attribute__((__aligned__(@OMP_LOCK_ALIGN@))); +} omp_lock_t; + +typedef struct +{ + unsigned char _x[@OMP_NEST_LOCK_SIZE@] + __attribute__((__aligned__(@OMP_NEST_LOCK_ALIGN@))); +} omp_nest_lock_t; +#endif + +typedef enum omp_sched_t +{ + omp_sched_static = 1, + omp_sched_dynamic = 2, + omp_sched_guided = 3, + omp_sched_auto = 4 +} omp_sched_t; + +#ifdef __cplusplus +extern "C" { +# define __GOMP_NOTHROW throw () +#else +# define __GOMP_NOTHROW __attribute__((__nothrow__)) +#endif + +extern void omp_set_num_threads (int) __GOMP_NOTHROW; +extern int omp_get_num_threads (void) __GOMP_NOTHROW; +extern int omp_get_max_threads (void) __GOMP_NOTHROW; +extern int omp_get_thread_num (void) __GOMP_NOTHROW; +extern int omp_get_num_procs (void) __GOMP_NOTHROW; + +extern int omp_in_parallel (void) __GOMP_NOTHROW; + +extern void omp_set_dynamic (int) __GOMP_NOTHROW; +extern int omp_get_dynamic (void) __GOMP_NOTHROW; + +extern void omp_set_nested (int) __GOMP_NOTHROW; +extern int omp_get_nested (void) __GOMP_NOTHROW; + +extern void omp_init_lock (omp_lock_t *) __GOMP_NOTHROW; +extern void omp_destroy_lock (omp_lock_t *) __GOMP_NOTHROW; +extern void omp_set_lock (omp_lock_t *) __GOMP_NOTHROW; +extern void omp_unset_lock (omp_lock_t *) __GOMP_NOTHROW; +extern int omp_test_lock (omp_lock_t *) __GOMP_NOTHROW; + +extern void omp_init_nest_lock (omp_nest_lock_t *) __GOMP_NOTHROW; +extern void omp_destroy_nest_lock (omp_nest_lock_t *) __GOMP_NOTHROW; +extern void omp_set_nest_lock (omp_nest_lock_t *) __GOMP_NOTHROW; +extern void omp_unset_nest_lock (omp_nest_lock_t *) __GOMP_NOTHROW; +extern int omp_test_nest_lock (omp_nest_lock_t *) __GOMP_NOTHROW; + +extern double omp_get_wtime (void) __GOMP_NOTHROW; +extern double omp_get_wtick (void) __GOMP_NOTHROW; + +void omp_set_schedule (omp_sched_t, int) __GOMP_NOTHROW; +void omp_get_schedule (omp_sched_t *, int *) __GOMP_NOTHROW; +int omp_get_thread_limit (void) __GOMP_NOTHROW; 
+void omp_set_max_active_levels (int) __GOMP_NOTHROW; +int omp_get_max_active_levels (void) __GOMP_NOTHROW; +int omp_get_level (void) __GOMP_NOTHROW; +int omp_get_ancestor_thread_num (int) __GOMP_NOTHROW; +int omp_get_team_size (int) __GOMP_NOTHROW; +int omp_get_active_level (void) __GOMP_NOTHROW; + +int omp_in_final (void) __GOMP_NOTHROW; + +#ifdef __cplusplus +} +#endif + +#endif /* OMP_H */ diff --git a/contrib/gcc-4.7/libgomp/omp_lib.f90.in b/contrib/gcc-4.7/libgomp/omp_lib.f90.in new file mode 100644 index 0000000000..d00fa0551f --- /dev/null +++ b/contrib/gcc-4.7/libgomp/omp_lib.f90.in @@ -0,0 +1,299 @@ +! Copyright (C) 2005, 2007, 2008, 2009, 2011 Free Software Foundation, Inc. +! Contributed by Jakub Jelinek . + +! This file is part of the GNU OpenMP Library (libgomp). + +! Libgomp is free software; you can redistribute it and/or modify it +! under the terms of the GNU General Public License as published by +! the Free Software Foundation; either version 3, or (at your option) +! any later version. + +! Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY +! WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +! FOR A PARTICULAR PURPOSE. See the GNU General Public License for +! more details. + +! Under Section 7 of GPL version 3, you are granted additional +! permissions described in the GCC Runtime Library Exception, version +! 3.1, as published by the Free Software Foundation. + +! You should have received a copy of the GNU General Public License and +! a copy of the GCC Runtime Library Exception along with this program; +! see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +! . 
+ + module omp_lib_kinds + implicit none + integer, parameter :: omp_lock_kind = @OMP_LOCK_KIND@ + integer, parameter :: omp_nest_lock_kind = @OMP_NEST_LOCK_KIND@ + integer, parameter :: omp_sched_kind = 4 + end module + + module omp_lib + use omp_lib_kinds + implicit none + integer, parameter :: openmp_version = 201107 + integer (omp_sched_kind), parameter :: omp_sched_static = 1 + integer (omp_sched_kind), parameter :: omp_sched_dynamic = 2 + integer (omp_sched_kind), parameter :: omp_sched_guided = 3 + integer (omp_sched_kind), parameter :: omp_sched_auto = 4 + + interface + subroutine omp_init_lock (lock) + use omp_lib_kinds + integer (omp_lock_kind), intent (out) :: lock + end subroutine omp_init_lock + end interface + + interface + subroutine omp_init_nest_lock (lock) + use omp_lib_kinds + integer (omp_nest_lock_kind), intent (out) :: lock + end subroutine omp_init_nest_lock + end interface + + interface + subroutine omp_destroy_lock (lock) + use omp_lib_kinds + integer (omp_lock_kind), intent (inout) :: lock + end subroutine omp_destroy_lock + end interface + + interface + subroutine omp_destroy_nest_lock (lock) + use omp_lib_kinds + integer (omp_nest_lock_kind), intent (inout) :: lock + end subroutine omp_destroy_nest_lock + end interface + + interface + subroutine omp_set_lock (lock) + use omp_lib_kinds + integer (omp_lock_kind), intent (inout) :: lock + end subroutine omp_set_lock + end interface + + interface + subroutine omp_set_nest_lock (lock) + use omp_lib_kinds + integer (omp_nest_lock_kind), intent (inout) :: lock + end subroutine omp_set_nest_lock + end interface + + interface + subroutine omp_unset_lock (lock) + use omp_lib_kinds + integer (omp_lock_kind), intent (inout) :: lock + end subroutine omp_unset_lock + end interface + + interface + subroutine omp_unset_nest_lock (lock) + use omp_lib_kinds + integer (omp_nest_lock_kind), intent (inout) :: lock + end subroutine omp_unset_nest_lock + end interface + + interface omp_set_dynamic + subroutine 
omp_set_dynamic (set) + logical (4), intent (in) :: set + end subroutine omp_set_dynamic + subroutine omp_set_dynamic_8 (set) + logical (8), intent (in) :: set + end subroutine omp_set_dynamic_8 + end interface + + interface omp_set_nested + subroutine omp_set_nested (set) + logical (4), intent (in) :: set + end subroutine omp_set_nested + subroutine omp_set_nested_8 (set) + logical (8), intent (in) :: set + end subroutine omp_set_nested_8 + end interface + + interface omp_set_num_threads + subroutine omp_set_num_threads (set) + integer (4), intent (in) :: set + end subroutine omp_set_num_threads + subroutine omp_set_num_threads_8 (set) + integer (8), intent (in) :: set + end subroutine omp_set_num_threads_8 + end interface + + interface + function omp_get_dynamic () + use omp_lib_kinds + logical (4) :: omp_get_dynamic + end function omp_get_dynamic + end interface + + interface + function omp_get_nested () + use omp_lib_kinds + logical (4) :: omp_get_nested + end function omp_get_nested + end interface + + interface + function omp_in_parallel () + use omp_lib_kinds + logical (4) :: omp_in_parallel + end function omp_in_parallel + end interface + + interface + function omp_test_lock (lock) + use omp_lib_kinds + logical (4) :: omp_test_lock + integer (omp_lock_kind), intent (inout) :: lock + end function omp_test_lock + end interface + + interface + function omp_get_max_threads () + use omp_lib_kinds + integer (4) :: omp_get_max_threads + end function omp_get_max_threads + end interface + + interface + function omp_get_num_procs () + use omp_lib_kinds + integer (4) :: omp_get_num_procs + end function omp_get_num_procs + end interface + + interface + function omp_get_num_threads () + use omp_lib_kinds + integer (4) :: omp_get_num_threads + end function omp_get_num_threads + end interface + + interface + function omp_get_thread_num () + use omp_lib_kinds + integer (4) :: omp_get_thread_num + end function omp_get_thread_num + end interface + + interface + function 
omp_test_nest_lock (lock) + use omp_lib_kinds + integer (4) :: omp_test_nest_lock + integer (omp_nest_lock_kind), intent (inout) :: lock + end function omp_test_nest_lock + end interface + + interface + function omp_get_wtick () + double precision :: omp_get_wtick + end function omp_get_wtick + end interface + + interface + function omp_get_wtime () + double precision :: omp_get_wtime + end function omp_get_wtime + end interface + + interface omp_set_schedule + subroutine omp_set_schedule (kind, modifier) + use omp_lib_kinds + integer (omp_sched_kind), intent (in) :: kind + integer (4), intent (in) :: modifier + end subroutine omp_set_schedule + subroutine omp_set_schedule_8 (kind, modifier) + use omp_lib_kinds + integer (omp_sched_kind), intent (in) :: kind + integer (8), intent (in) :: modifier + end subroutine omp_set_schedule_8 + end interface + + interface omp_get_schedule + subroutine omp_get_schedule (kind, modifier) + use omp_lib_kinds + integer (omp_sched_kind), intent (out) :: kind + integer (4), intent (out) :: modifier + end subroutine omp_get_schedule + subroutine omp_get_schedule_8 (kind, modifier) + use omp_lib_kinds + integer (omp_sched_kind), intent (out) :: kind + integer (8), intent (out) :: modifier + end subroutine omp_get_schedule_8 + end interface + + interface + function omp_get_thread_limit () + use omp_lib_kinds + integer (4) :: omp_get_thread_limit + end function omp_get_thread_limit + end interface + + interface omp_set_max_active_levels + subroutine omp_set_max_active_levels (max_levels) + use omp_lib_kinds + integer (4), intent (in) :: max_levels + end subroutine omp_set_max_active_levels + subroutine omp_set_max_active_levels_8 (max_levels) + use omp_lib_kinds + integer (8), intent (in) :: max_levels + end subroutine omp_set_max_active_levels_8 + end interface + + interface + function omp_get_max_active_levels () + use omp_lib_kinds + integer (4) :: omp_get_max_active_levels + end function omp_get_max_active_levels + end interface + + 
interface + function omp_get_level () + use omp_lib_kinds + integer (4) :: omp_get_level + end function omp_get_level + end interface + + interface omp_get_ancestor_thread_num + function omp_get_ancestor_thread_num (level) + use omp_lib_kinds + integer (4), intent (in) :: level + integer (4) :: omp_get_ancestor_thread_num + end function omp_get_ancestor_thread_num + function omp_get_ancestor_thread_num_8 (level) + use omp_lib_kinds + integer (8), intent (in) :: level + integer (4) :: omp_get_ancestor_thread_num_8 + end function omp_get_ancestor_thread_num_8 + end interface + + interface omp_get_team_size + function omp_get_team_size (level) + use omp_lib_kinds + integer (4), intent (in) :: level + integer (4) :: omp_get_team_size + end function omp_get_team_size + function omp_get_team_size_8 (level) + use omp_lib_kinds + integer (8), intent (in) :: level + integer (4) :: omp_get_team_size_8 + end function omp_get_team_size_8 + end interface + + interface + function omp_get_active_level () + use omp_lib_kinds + integer (4) :: omp_get_active_level + end function omp_get_active_level + end interface + + interface + function omp_in_final () + use omp_lib_kinds + logical (4) :: omp_in_final + end function omp_in_final + end interface + + end module omp_lib diff --git a/contrib/gcc-4.7/libgomp/omp_lib.h.in b/contrib/gcc-4.7/libgomp/omp_lib.h.in new file mode 100644 index 0000000000..c583ba3d24 --- /dev/null +++ b/contrib/gcc-4.7/libgomp/omp_lib.h.in @@ -0,0 +1,70 @@ +! Copyright (C) 2005, 2007, 2008, 2009, 2011 Free Software Foundation, Inc. +! Contributed by Jakub Jelinek . + +! This file is part of the GNU OpenMP Library (libgomp). + +! Libgomp is free software; you can redistribute it and/or modify it +! under the terms of the GNU General Public License as published by +! the Free Software Foundation; either version 3, or (at your option) +! any later version. + +! Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY +! 
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +! FOR A PARTICULAR PURPOSE. See the GNU General Public License for +! more details. + +! Under Section 7 of GPL version 3, you are granted additional +! permissions described in the GCC Runtime Library Exception, version +! 3.1, as published by the Free Software Foundation. + +! You should have received a copy of the GNU General Public License and +! a copy of the GCC Runtime Library Exception along with this program; +! see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +! . + + integer omp_lock_kind, omp_nest_lock_kind, openmp_version + parameter (omp_lock_kind = @OMP_LOCK_KIND@) + parameter (omp_nest_lock_kind = @OMP_NEST_LOCK_KIND@) + integer omp_sched_kind + parameter (omp_sched_kind = 4) + integer (omp_sched_kind) omp_sched_static, omp_sched_dynamic + integer (omp_sched_kind) omp_sched_guided, omp_sched_auto + parameter (omp_sched_static = 1) + parameter (omp_sched_dynamic = 2) + parameter (omp_sched_guided = 3) + parameter (omp_sched_auto = 4) + parameter (openmp_version = 201107) + + external omp_init_lock, omp_init_nest_lock + external omp_destroy_lock, omp_destroy_nest_lock + external omp_set_lock, omp_set_nest_lock + external omp_unset_lock, omp_unset_nest_lock + external omp_set_dynamic, omp_set_nested + external omp_set_num_threads + + external omp_get_dynamic, omp_get_nested + logical(4) omp_get_dynamic, omp_get_nested + external omp_test_lock, omp_in_parallel + logical(4) omp_test_lock, omp_in_parallel + + external omp_get_max_threads, omp_get_num_procs + integer(4) omp_get_max_threads, omp_get_num_procs + external omp_get_num_threads, omp_get_thread_num + integer(4) omp_get_num_threads, omp_get_thread_num + external omp_test_nest_lock + integer(4) omp_test_nest_lock + + external omp_get_wtick, omp_get_wtime + double precision omp_get_wtick, omp_get_wtime + + external omp_set_schedule, omp_get_schedule + external omp_get_thread_limit, 
omp_set_max_active_levels + external omp_get_max_active_levels, omp_get_level + external omp_get_ancestor_thread_num, omp_get_team_size + external omp_get_active_level + integer(4) omp_get_thread_limit, omp_get_max_active_levels + integer(4) omp_get_level, omp_get_ancestor_thread_num + integer(4) omp_get_team_size, omp_get_active_level + + external omp_in_final + logical(4) omp_in_final diff --git a/contrib/gcc-4.7/libgomp/ordered.c b/contrib/gcc-4.7/libgomp/ordered.c new file mode 100644 index 0000000000..f84d52eb9f --- /dev/null +++ b/contrib/gcc-4.7/libgomp/ordered.c @@ -0,0 +1,251 @@ +/* Copyright (C) 2005, 2009 Free Software Foundation, Inc. + Contributed by Richard Henderson . + + This file is part of the GNU OpenMP Library (libgomp). + + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* This file handles the ORDERED construct. */ + +#include "libgomp.h" + + +/* This function is called when first allocating an iteration block. That + is, the thread is not currently on the queue. The work-share lock must + be held on entry. 
*/ + +void +gomp_ordered_first (void) +{ + struct gomp_thread *thr = gomp_thread (); + struct gomp_team *team = thr->ts.team; + struct gomp_work_share *ws = thr->ts.work_share; + unsigned index; + + /* Work share constructs can be orphaned. */ + if (team == NULL || team->nthreads == 1) + return; + + index = ws->ordered_cur + ws->ordered_num_used; + if (index >= team->nthreads) + index -= team->nthreads; + ws->ordered_team_ids[index] = thr->ts.team_id; + + /* If this is the first and only thread in the queue, then there is + no one to release us when we get to our ordered section. Post to + our own release queue now so that we won't block later. */ + if (ws->ordered_num_used++ == 0) + gomp_sem_post (team->ordered_release[thr->ts.team_id]); +} + +/* This function is called when completing the last iteration block. That + is, there are no more iterations to perform and so the thread should be + removed from the queue entirely. Because of the way ORDERED blocks are + managed, it follows that we currently own access to the ORDERED block, + and should now pass it on to the next thread. The work-share lock must + be held on entry. */ + +void +gomp_ordered_last (void) +{ + struct gomp_thread *thr = gomp_thread (); + struct gomp_team *team = thr->ts.team; + struct gomp_work_share *ws = thr->ts.work_share; + unsigned next_id; + + /* Work share constructs can be orphaned. */ + if (team == NULL || team->nthreads == 1) + return; + + /* We're no longer the owner. */ + ws->ordered_owner = -1; + + /* If we're not the last thread in the queue, then wake the next. */ + if (--ws->ordered_num_used > 0) + { + unsigned next = ws->ordered_cur + 1; + if (next == team->nthreads) + next = 0; + ws->ordered_cur = next; + + next_id = ws->ordered_team_ids[next]; + gomp_sem_post (team->ordered_release[next_id]); + } +} + + +/* This function is called when allocating a subsequent allocation block. + That is, we're done with the current iteration block and we're allocating + another. 
This is the logical combination of a call to gomp_ordered_last + followed by a call to gomp_ordered_first. The work-share lock must be + held on entry. */ + +void +gomp_ordered_next (void) +{ + struct gomp_thread *thr = gomp_thread (); + struct gomp_team *team = thr->ts.team; + struct gomp_work_share *ws = thr->ts.work_share; + unsigned index, next_id; + + /* Work share constructs can be orphaned. */ + if (team == NULL || team->nthreads == 1) + return; + + /* We're no longer the owner. */ + ws->ordered_owner = -1; + + /* If there's only one thread in the queue, that must be us. */ + if (ws->ordered_num_used == 1) + { + /* We have a similar situation as in gomp_ordered_first + where we need to post to our own release semaphore. */ + gomp_sem_post (team->ordered_release[thr->ts.team_id]); + return; + } + + /* If the queue is entirely full, then we move ourself to the end of + the queue merely by incrementing ordered_cur. Only if it's not + full do we have to write our id. */ + if (ws->ordered_num_used < team->nthreads) + { + index = ws->ordered_cur + ws->ordered_num_used; + if (index >= team->nthreads) + index -= team->nthreads; + ws->ordered_team_ids[index] = thr->ts.team_id; + } + + index = ws->ordered_cur + 1; + if (index == team->nthreads) + index = 0; + ws->ordered_cur = index; + + next_id = ws->ordered_team_ids[index]; + gomp_sem_post (team->ordered_release[next_id]); +} + + +/* This function is called when a statically scheduled loop is first + being created. */ + +void +gomp_ordered_static_init (void) +{ + struct gomp_thread *thr = gomp_thread (); + struct gomp_team *team = thr->ts.team; + + if (team == NULL || team->nthreads == 1) + return; + + gomp_sem_post (team->ordered_release[0]); +} + +/* This function is called when a statically scheduled loop is moving to + the next allocation block. Static schedules are not first come first + served like the others, so we're to move to the numerically next thread, + not the next thread on a list. 
The work-share lock should *not* be held + on entry. */ + +void +gomp_ordered_static_next (void) +{ + struct gomp_thread *thr = gomp_thread (); + struct gomp_team *team = thr->ts.team; + struct gomp_work_share *ws = thr->ts.work_share; + unsigned id = thr->ts.team_id; + + if (team == NULL || team->nthreads == 1) + return; + + ws->ordered_owner = -1; + + /* This thread currently owns the lock. Increment the owner. */ + if (++id == team->nthreads) + id = 0; + ws->ordered_team_ids[0] = id; + gomp_sem_post (team->ordered_release[id]); +} + +/* This function is called when we need to assert that the thread owns the + ordered section. Due to the problem of posted-but-not-waited semaphores, + this needs to happen before completing a loop iteration. */ + +void +gomp_ordered_sync (void) +{ + struct gomp_thread *thr = gomp_thread (); + struct gomp_team *team = thr->ts.team; + struct gomp_work_share *ws = thr->ts.work_share; + + /* Work share constructs can be orphaned. But this clearly means that + we are the only thread, and so we automatically own the section. */ + if (team == NULL || team->nthreads == 1) + return; + + /* ??? I believe it to be safe to access this data without taking the + ws->lock. The only presumed race condition is with the previous + thread on the queue incrementing ordered_cur such that it points + to us, concurrently with our check below. But our team_id is + already present in the queue, and the other thread will always + post to our release semaphore. So the two cases are that we will + either win the race an momentarily block on the semaphore, or lose + the race and find the semaphore already unlocked and so not block. + Either way we get correct results. + However, there is an implicit flush on entry to an ordered region, + so we do need to have a barrier here. If we were taking a lock + this could be MEMMODEL_RELEASE since the acquire would be coverd + by the lock. 
*/ + + __atomic_thread_fence (MEMMODEL_ACQ_REL); + if (ws->ordered_owner != thr->ts.team_id) + { + gomp_sem_wait (team->ordered_release[thr->ts.team_id]); + ws->ordered_owner = thr->ts.team_id; + } +} + +/* This function is called by user code when encountering the start of an + ORDERED block. We must check to see if the current thread is at the + head of the queue, and if not, block. */ + +#ifdef HAVE_ATTRIBUTE_ALIAS +extern void GOMP_ordered_start (void) + __attribute__((alias ("gomp_ordered_sync"))); +#else +void +GOMP_ordered_start (void) +{ + gomp_ordered_sync (); +} +#endif + +/* This function is called by user code when encountering the end of an + ORDERED block. With the current ORDERED implementation there's nothing + for us to do. + + However, the current implementation has a flaw in that it does not allow + the next thread into the ORDERED section immediately after the current + thread exits the ORDERED section in its last iteration. The existance + of this function allows the implementation to change. */ + +void +GOMP_ordered_end (void) +{ +} diff --git a/contrib/gcc-4.7/libgomp/parallel.c b/contrib/gcc-4.7/libgomp/parallel.c new file mode 100644 index 0000000000..c0966df9f9 --- /dev/null +++ b/contrib/gcc-4.7/libgomp/parallel.c @@ -0,0 +1,202 @@ +/* Copyright (C) 2005, 2007, 2008, 2009, 2010 Free Software Foundation, Inc. + Contributed by Richard Henderson . + + This file is part of the GNU OpenMP Library (libgomp). + + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* This file handles the (bare) PARALLEL construct. */ + +#include "libgomp.h" +#include + + +/* Determine the number of threads to be launched for a PARALLEL construct. + This algorithm is explicitly described in OpenMP 3.0 section 2.4.1. + SPECIFIED is a combination of the NUM_THREADS clause and the IF clause. + If the IF clause is false, SPECIFIED is forced to 1. When NUM_THREADS + is not present, SPECIFIED is 0. */ + +unsigned +gomp_resolve_num_threads (unsigned specified, unsigned count) +{ + struct gomp_thread *thread = gomp_thread(); + struct gomp_task_icv *icv; + unsigned threads_requested, max_num_threads, num_threads; + unsigned long remaining; + + icv = gomp_icv (false); + + if (specified == 1) + return 1; + else if (thread->ts.active_level >= 1 && !icv->nest_var) + return 1; + else if (thread->ts.active_level >= gomp_max_active_levels_var) + return 1; + + /* If NUM_THREADS not specified, use nthreads_var. */ + if (specified == 0) + threads_requested = icv->nthreads_var; + else + threads_requested = specified; + + max_num_threads = threads_requested; + + /* If dynamic threads are enabled, bound the number of threads + that we launch. */ + if (icv->dyn_var) + { + unsigned dyn = gomp_dynamic_max_threads (); + if (dyn < max_num_threads) + max_num_threads = dyn; + + /* Optimization for parallel sections. */ + if (count && count < max_num_threads) + max_num_threads = count; + } + + /* ULONG_MAX stands for infinity. 
*/ + if (__builtin_expect (gomp_thread_limit_var == ULONG_MAX, 1) + || max_num_threads == 1) + return max_num_threads; + +#ifdef HAVE_SYNC_BUILTINS + do + { + remaining = gomp_remaining_threads_count; + num_threads = max_num_threads; + if (num_threads > remaining) + num_threads = remaining + 1; + } + while (__sync_val_compare_and_swap (&gomp_remaining_threads_count, + remaining, remaining - num_threads + 1) + != remaining); +#else + gomp_mutex_lock (&gomp_remaining_threads_lock); + num_threads = max_num_threads; + remaining = gomp_remaining_threads_count; + if (num_threads > remaining) + num_threads = remaining + 1; + gomp_remaining_threads_count -= num_threads - 1; + gomp_mutex_unlock (&gomp_remaining_threads_lock); +#endif + + return num_threads; +} + +void +GOMP_parallel_start (void (*fn) (void *), void *data, unsigned num_threads) +{ + num_threads = gomp_resolve_num_threads (num_threads, 0); + gomp_team_start (fn, data, num_threads, gomp_new_team (num_threads)); +} + +void +GOMP_parallel_end (void) +{ + if (__builtin_expect (gomp_thread_limit_var != ULONG_MAX, 0)) + { + struct gomp_thread *thr = gomp_thread (); + struct gomp_team *team = thr->ts.team; + if (team && team->nthreads > 1) + { +#ifdef HAVE_SYNC_BUILTINS + __sync_fetch_and_add (&gomp_remaining_threads_count, + 1UL - team->nthreads); +#else + gomp_mutex_lock (&gomp_remaining_threads_lock); + gomp_remaining_threads_count -= team->nthreads - 1; + gomp_mutex_unlock (&gomp_remaining_threads_lock); +#endif + } + } + gomp_team_end (); +} + + +/* The public OpenMP API for thread and team related inquiries. */ + +int +omp_get_num_threads (void) +{ + struct gomp_team *team = gomp_thread ()->ts.team; + return team ? team->nthreads : 1; +} + +int +omp_get_thread_num (void) +{ + return gomp_thread ()->ts.team_id; +} + +/* This wasn't right for OpenMP 2.5. 
Active region used to be non-zero + when the IF clause doesn't evaluate to false, starting with OpenMP 3.0 + it is non-zero with more than one thread in the team. */ + +int +omp_in_parallel (void) +{ + return gomp_thread ()->ts.active_level > 0; +} + +int +omp_get_level (void) +{ + return gomp_thread ()->ts.level; +} + +int +omp_get_ancestor_thread_num (int level) +{ + struct gomp_team_state *ts = &gomp_thread ()->ts; + if (level < 0 || level > ts->level) + return -1; + for (level = ts->level - level; level > 0; --level) + ts = &ts->team->prev_ts; + return ts->team_id; +} + +int +omp_get_team_size (int level) +{ + struct gomp_team_state *ts = &gomp_thread ()->ts; + if (level < 0 || level > ts->level) + return -1; + for (level = ts->level - level; level > 0; --level) + ts = &ts->team->prev_ts; + if (ts->team == NULL) + return 1; + else + return ts->team->nthreads; +} + +int +omp_get_active_level (void) +{ + return gomp_thread ()->ts.active_level; +} + +ialias (omp_get_num_threads) +ialias (omp_get_thread_num) +ialias (omp_in_parallel) +ialias (omp_get_level) +ialias (omp_get_ancestor_thread_num) +ialias (omp_get_team_size) +ialias (omp_get_active_level) diff --git a/contrib/gcc-4.7/libgomp/sections.c b/contrib/gcc-4.7/libgomp/sections.c new file mode 100644 index 0000000000..c7f49b7c32 --- /dev/null +++ b/contrib/gcc-4.7/libgomp/sections.c @@ -0,0 +1,159 @@ +/* Copyright (C) 2005, 2007, 2008, 2009, 2010 Free Software Foundation, Inc. + Contributed by Richard Henderson . + + This file is part of the GNU OpenMP Library (libgomp). + + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* This file handles the SECTIONS construct. */ + +#include "libgomp.h" + + +/* Initialize the given work share construct from the given arguments. */ + +static inline void +gomp_sections_init (struct gomp_work_share *ws, unsigned count) +{ + ws->sched = GFS_DYNAMIC; + ws->chunk_size = 1; + ws->end = count + 1L; + ws->incr = 1; + ws->next = 1; +#ifdef HAVE_SYNC_BUILTINS + /* Prepare things to make each iteration faster. */ + if (sizeof (long) > sizeof (unsigned)) + ws->mode = 1; + else + { + struct gomp_thread *thr = gomp_thread (); + struct gomp_team *team = thr->ts.team; + long nthreads = team ? team->nthreads : 1; + + ws->mode = ((nthreads | ws->end) + < 1UL << (sizeof (long) * __CHAR_BIT__ / 2 - 1)); + } +#else + ws->mode = 0; +#endif +} + +/* This routine is called when first encountering a sections construct + that is not bound directly to a parallel construct. The first thread + that arrives will create the work-share construct; subsequent threads + will see the construct exists and allocate work from it. + + COUNT is the number of sections in this construct. + + Returns the 1-based section number for this thread to perform, or 0 if + all work was assigned to other threads prior to this thread's arrival. 
*/ + +unsigned +GOMP_sections_start (unsigned count) +{ + struct gomp_thread *thr = gomp_thread (); + long s, e, ret; + + if (gomp_work_share_start (false)) + { + gomp_sections_init (thr->ts.work_share, count); + gomp_work_share_init_done (); + } + +#ifdef HAVE_SYNC_BUILTINS + if (gomp_iter_dynamic_next (&s, &e)) + ret = s; + else + ret = 0; +#else + gomp_mutex_lock (&thr->ts.work_share->lock); + if (gomp_iter_dynamic_next_locked (&s, &e)) + ret = s; + else + ret = 0; + gomp_mutex_unlock (&thr->ts.work_share->lock); +#endif + + return ret; +} + +/* This routine is called when the thread completes processing of the + section currently assigned to it. If the work-share construct is + bound directly to a parallel construct, then the construct may have + been set up before the parallel. In which case, this may be the + first iteration for the thread. + + Returns the 1-based section number for this thread to perform, or 0 if + all work was assigned to other threads prior to this thread's arrival. */ + +unsigned +GOMP_sections_next (void) +{ + long s, e, ret; + +#ifdef HAVE_SYNC_BUILTINS + if (gomp_iter_dynamic_next (&s, &e)) + ret = s; + else + ret = 0; +#else + struct gomp_thread *thr = gomp_thread (); + + gomp_mutex_lock (&thr->ts.work_share->lock); + if (gomp_iter_dynamic_next_locked (&s, &e)) + ret = s; + else + ret = 0; + gomp_mutex_unlock (&thr->ts.work_share->lock); +#endif + + return ret; +} + +/* This routine pre-initializes a work-share construct to avoid one + synchronization once we get into the loop. */ + +void +GOMP_parallel_sections_start (void (*fn) (void *), void *data, + unsigned num_threads, unsigned count) +{ + struct gomp_team *team; + + num_threads = gomp_resolve_num_threads (num_threads, count); + team = gomp_new_team (num_threads); + gomp_sections_init (&team->work_shares[0], count); + gomp_team_start (fn, data, num_threads, team); +} + +/* The GOMP_section_end* routines are called after the thread is told + that all sections are complete. 
This first version synchronizes + all threads; the nowait version does not. */ + +void +GOMP_sections_end (void) +{ + gomp_work_share_end (); +} + +void +GOMP_sections_end_nowait (void) +{ + gomp_work_share_end_nowait (); +} diff --git a/contrib/gcc-4.7/libgomp/single.c b/contrib/gcc-4.7/libgomp/single.c new file mode 100644 index 0000000000..8c5ade11f4 --- /dev/null +++ b/contrib/gcc-4.7/libgomp/single.c @@ -0,0 +1,104 @@ +/* Copyright (C) 2005, 2008, 2009 Free Software Foundation, Inc. + Contributed by Richard Henderson . + + This file is part of the GNU OpenMP Library (libgomp). + + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* This file handles the SINGLE construct. */ + +#include "libgomp.h" + + +/* This routine is called when first encountering a SINGLE construct that + doesn't have a COPYPRIVATE clause. Returns true if this is the thread + that should execute the clause. 
*/ + +bool +GOMP_single_start (void) +{ +#ifdef HAVE_SYNC_BUILTINS + struct gomp_thread *thr = gomp_thread (); + struct gomp_team *team = thr->ts.team; + unsigned long single_count; + + if (__builtin_expect (team == NULL, 0)) + return true; + + single_count = thr->ts.single_count++; + return __sync_bool_compare_and_swap (&team->single_count, single_count, + single_count + 1L); +#else + bool ret = gomp_work_share_start (false); + if (ret) + gomp_work_share_init_done (); + gomp_work_share_end_nowait (); + return ret; +#endif +} + +/* This routine is called when first encountering a SINGLE construct that + does have a COPYPRIVATE clause. Returns NULL if this is the thread + that should execute the clause; otherwise the return value is pointer + given to GOMP_single_copy_end by the thread that did execute the clause. */ + +void * +GOMP_single_copy_start (void) +{ + struct gomp_thread *thr = gomp_thread (); + + bool first; + void *ret; + + first = gomp_work_share_start (false); + + if (first) + { + gomp_work_share_init_done (); + ret = NULL; + } + else + { + gomp_team_barrier_wait (&thr->ts.team->barrier); + + ret = thr->ts.work_share->copyprivate; + gomp_work_share_end_nowait (); + } + + return ret; +} + +/* This routine is called when the thread that entered a SINGLE construct + with a COPYPRIVATE clause gets to the end of the construct. */ + +void +GOMP_single_copy_end (void *data) +{ + struct gomp_thread *thr = gomp_thread (); + struct gomp_team *team = thr->ts.team; + + if (team != NULL) + { + thr->ts.work_share->copyprivate = data; + gomp_team_barrier_wait (&team->barrier); + } + + gomp_work_share_end_nowait (); +} diff --git a/contrib/gcc-4.7/libgomp/task.c b/contrib/gcc-4.7/libgomp/task.c new file mode 100644 index 0000000000..4b75850072 --- /dev/null +++ b/contrib/gcc-4.7/libgomp/task.c @@ -0,0 +1,387 @@ +/* Copyright (C) 2007, 2008, 2009, 2011 Free Software Foundation, Inc. + Contributed by Richard Henderson . 
+ + This file is part of the GNU OpenMP Library (libgomp). + + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* This file handles the maintainence of tasks in response to task + creation and termination. */ + +#include "libgomp.h" +#include +#include + + +/* Create a new task data structure. */ + +void +gomp_init_task (struct gomp_task *task, struct gomp_task *parent_task, + struct gomp_task_icv *prev_icv) +{ + task->parent = parent_task; + task->icv = *prev_icv; + task->kind = GOMP_TASK_IMPLICIT; + task->in_taskwait = false; + task->in_tied_task = false; + task->final_task = false; + task->children = NULL; + gomp_sem_init (&task->taskwait_sem, 0); +} + +/* Clean up a task, after completing it. */ + +void +gomp_end_task (void) +{ + struct gomp_thread *thr = gomp_thread (); + struct gomp_task *task = thr->task; + + gomp_finish_task (task); + thr->task = task->parent; +} + +static inline void +gomp_clear_parent (struct gomp_task *children) +{ + struct gomp_task *task = children; + + if (task) + do + { + task->parent = NULL; + task = task->next_child; + } + while (task != children); +} + +/* Called when encountering an explicit task directive. 
If IF_CLAUSE is + false, then we must not delay in executing the task. If UNTIED is true, + then the task may be executed by any member of the team. */ + +void +GOMP_task (void (*fn) (void *), void *data, void (*cpyfn) (void *, void *), + long arg_size, long arg_align, bool if_clause, unsigned flags) +{ + struct gomp_thread *thr = gomp_thread (); + struct gomp_team *team = thr->ts.team; + +#ifdef HAVE_BROKEN_POSIX_SEMAPHORES + /* If pthread_mutex_* is used for omp_*lock*, then each task must be + tied to one thread all the time. This means UNTIED tasks must be + tied and if CPYFN is non-NULL IF(0) must be forced, as CPYFN + might be running on different thread than FN. */ + if (cpyfn) + if_clause = false; + if (flags & 1) + flags &= ~1; +#endif + + if (!if_clause || team == NULL + || (thr->task && thr->task->final_task) + || team->task_count > 64 * team->nthreads) + { + struct gomp_task task; + + gomp_init_task (&task, thr->task, gomp_icv (false)); + task.kind = GOMP_TASK_IFFALSE; + task.final_task = (thr->task && thr->task->final_task) || (flags & 2); + if (thr->task) + task.in_tied_task = thr->task->in_tied_task; + thr->task = &task; + if (__builtin_expect (cpyfn != NULL, 0)) + { + char buf[arg_size + arg_align - 1]; + char *arg = (char *) (((uintptr_t) buf + arg_align - 1) + & ~(uintptr_t) (arg_align - 1)); + cpyfn (arg, data); + fn (arg); + } + else + fn (data); + if (team != NULL) + { + gomp_mutex_lock (&team->task_lock); + if (task.children != NULL) + gomp_clear_parent (task.children); + gomp_mutex_unlock (&team->task_lock); + } + gomp_end_task (); + } + else + { + struct gomp_task *task; + struct gomp_task *parent = thr->task; + char *arg; + bool do_wake; + + task = gomp_malloc (sizeof (*task) + arg_size + arg_align - 1); + arg = (char *) (((uintptr_t) (task + 1) + arg_align - 1) + & ~(uintptr_t) (arg_align - 1)); + gomp_init_task (task, parent, gomp_icv (false)); + task->kind = GOMP_TASK_IFFALSE; + task->in_tied_task = parent->in_tied_task; + thr->task = 
task; + if (cpyfn) + cpyfn (arg, data); + else + memcpy (arg, data, arg_size); + thr->task = parent; + task->kind = GOMP_TASK_WAITING; + task->fn = fn; + task->fn_data = arg; + task->in_tied_task = true; + task->final_task = (flags & 2) >> 1; + gomp_mutex_lock (&team->task_lock); + if (parent->children) + { + task->next_child = parent->children; + task->prev_child = parent->children->prev_child; + task->next_child->prev_child = task; + task->prev_child->next_child = task; + } + else + { + task->next_child = task; + task->prev_child = task; + } + parent->children = task; + if (team->task_queue) + { + task->next_queue = team->task_queue; + task->prev_queue = team->task_queue->prev_queue; + task->next_queue->prev_queue = task; + task->prev_queue->next_queue = task; + } + else + { + task->next_queue = task; + task->prev_queue = task; + team->task_queue = task; + } + ++team->task_count; + gomp_team_barrier_set_task_pending (&team->barrier); + do_wake = team->task_running_count + !parent->in_tied_task + < team->nthreads; + gomp_mutex_unlock (&team->task_lock); + if (do_wake) + gomp_team_barrier_wake (&team->barrier, 1); + } +} + +void +gomp_barrier_handle_tasks (gomp_barrier_state_t state) +{ + struct gomp_thread *thr = gomp_thread (); + struct gomp_team *team = thr->ts.team; + struct gomp_task *task = thr->task; + struct gomp_task *child_task = NULL; + struct gomp_task *to_free = NULL; + + gomp_mutex_lock (&team->task_lock); + if (gomp_barrier_last_thread (state)) + { + if (team->task_count == 0) + { + gomp_team_barrier_done (&team->barrier, state); + gomp_mutex_unlock (&team->task_lock); + gomp_team_barrier_wake (&team->barrier, 0); + return; + } + gomp_team_barrier_set_waiting_for_tasks (&team->barrier); + } + + while (1) + { + if (team->task_queue != NULL) + { + struct gomp_task *parent; + + child_task = team->task_queue; + parent = child_task->parent; + if (parent && parent->children == child_task) + parent->children = child_task->next_child; + 
child_task->prev_queue->next_queue = child_task->next_queue; + child_task->next_queue->prev_queue = child_task->prev_queue; + if (child_task->next_queue != child_task) + team->task_queue = child_task->next_queue; + else + team->task_queue = NULL; + child_task->kind = GOMP_TASK_TIED; + team->task_running_count++; + if (team->task_count == team->task_running_count) + gomp_team_barrier_clear_task_pending (&team->barrier); + } + gomp_mutex_unlock (&team->task_lock); + if (to_free) + { + gomp_finish_task (to_free); + free (to_free); + to_free = NULL; + } + if (child_task) + { + thr->task = child_task; + child_task->fn (child_task->fn_data); + thr->task = task; + } + else + return; + gomp_mutex_lock (&team->task_lock); + if (child_task) + { + struct gomp_task *parent = child_task->parent; + if (parent) + { + child_task->prev_child->next_child = child_task->next_child; + child_task->next_child->prev_child = child_task->prev_child; + if (parent->children == child_task) + { + if (child_task->next_child != child_task) + parent->children = child_task->next_child; + else + { + parent->children = NULL; + if (parent->in_taskwait) + gomp_sem_post (&parent->taskwait_sem); + } + } + } + gomp_clear_parent (child_task->children); + to_free = child_task; + child_task = NULL; + team->task_running_count--; + if (--team->task_count == 0 + && gomp_team_barrier_waiting_for_tasks (&team->barrier)) + { + gomp_team_barrier_done (&team->barrier, state); + gomp_mutex_unlock (&team->task_lock); + gomp_team_barrier_wake (&team->barrier, 0); + gomp_mutex_lock (&team->task_lock); + } + } + } +} + +/* Called when encountering a taskwait directive. 
*/ + +void +GOMP_taskwait (void) +{ + struct gomp_thread *thr = gomp_thread (); + struct gomp_team *team = thr->ts.team; + struct gomp_task *task = thr->task; + struct gomp_task *child_task = NULL; + struct gomp_task *to_free = NULL; + + if (task == NULL || team == NULL) + return; + + gomp_mutex_lock (&team->task_lock); + while (1) + { + if (task->children == NULL) + { + gomp_mutex_unlock (&team->task_lock); + if (to_free) + { + gomp_finish_task (to_free); + free (to_free); + } + return; + } + if (task->children->kind == GOMP_TASK_WAITING) + { + child_task = task->children; + task->children = child_task->next_child; + child_task->prev_queue->next_queue = child_task->next_queue; + child_task->next_queue->prev_queue = child_task->prev_queue; + if (team->task_queue == child_task) + { + if (child_task->next_queue != child_task) + team->task_queue = child_task->next_queue; + else + team->task_queue = NULL; + } + child_task->kind = GOMP_TASK_TIED; + team->task_running_count++; + if (team->task_count == team->task_running_count) + gomp_team_barrier_clear_task_pending (&team->barrier); + } + else + /* All tasks we are waiting for are already running + in other threads. Wait for them. 
*/ + task->in_taskwait = true; + gomp_mutex_unlock (&team->task_lock); + if (to_free) + { + gomp_finish_task (to_free); + free (to_free); + to_free = NULL; + } + if (child_task) + { + thr->task = child_task; + child_task->fn (child_task->fn_data); + thr->task = task; + } + else + { + gomp_sem_wait (&task->taskwait_sem); + task->in_taskwait = false; + return; + } + gomp_mutex_lock (&team->task_lock); + if (child_task) + { + child_task->prev_child->next_child = child_task->next_child; + child_task->next_child->prev_child = child_task->prev_child; + if (task->children == child_task) + { + if (child_task->next_child != child_task) + task->children = child_task->next_child; + else + task->children = NULL; + } + gomp_clear_parent (child_task->children); + to_free = child_task; + child_task = NULL; + team->task_count--; + team->task_running_count--; + } + } +} + +/* Called when encountering a taskyield directive. */ + +void +GOMP_taskyield (void) +{ + /* Nothing at the moment. */ +} + +int +omp_in_final (void) +{ + struct gomp_thread *thr = gomp_thread (); + return thr->task && thr->task->final_task; +} + +ialias (omp_in_final) diff --git a/contrib/gcc-4.7/libgomp/team.c b/contrib/gcc-4.7/libgomp/team.c new file mode 100644 index 0000000000..633902ca56 --- /dev/null +++ b/contrib/gcc-4.7/libgomp/team.c @@ -0,0 +1,564 @@ +/* Copyright (C) 2005, 2006, 2007, 2008, 2009, 2011 + Free Software Foundation, Inc. + Contributed by Richard Henderson . + + This file is part of the GNU OpenMP Library (libgomp). + + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* This file handles the maintainence of threads in response to team + creation and termination. */ + +#include "libgomp.h" +#include +#include + +/* This attribute contains PTHREAD_CREATE_DETACHED. */ +pthread_attr_t gomp_thread_attr; + +/* This key is for the thread destructor. */ +pthread_key_t gomp_thread_destructor; + + +/* This is the libgomp per-thread data structure. */ +#ifdef HAVE_TLS +__thread struct gomp_thread gomp_tls_data; +#else +pthread_key_t gomp_tls_key; +#endif + + +/* This structure is used to communicate across pthread_create. */ + +struct gomp_thread_start_data +{ + void (*fn) (void *); + void *fn_data; + struct gomp_team_state ts; + struct gomp_task *task; + struct gomp_thread_pool *thread_pool; + bool nested; +}; + + +/* This function is a pthread_create entry point. This contains the idle + loop in which a thread waits to be called up to become part of a team. */ + +static void * +gomp_thread_start (void *xdata) +{ + struct gomp_thread_start_data *data = xdata; + struct gomp_thread *thr; + struct gomp_thread_pool *pool; + void (*local_fn) (void *); + void *local_data; + +#ifdef HAVE_TLS + thr = &gomp_tls_data; +#else + struct gomp_thread local_thr; + thr = &local_thr; + pthread_setspecific (gomp_tls_key, thr); +#endif + gomp_sem_init (&thr->release, 0); + + /* Extract what we need from data. */ + local_fn = data->fn; + local_data = data->fn_data; + thr->thread_pool = data->thread_pool; + thr->ts = data->ts; + thr->task = data->task; + + thr->ts.team->ordered_release[thr->ts.team_id] = &thr->release; + + /* Make thread pool local. 
*/ + pool = thr->thread_pool; + + if (data->nested) + { + struct gomp_team *team = thr->ts.team; + struct gomp_task *task = thr->task; + + gomp_barrier_wait (&team->barrier); + + local_fn (local_data); + gomp_team_barrier_wait (&team->barrier); + gomp_finish_task (task); + gomp_barrier_wait_last (&team->barrier); + } + else + { + pool->threads[thr->ts.team_id] = thr; + + gomp_barrier_wait (&pool->threads_dock); + do + { + struct gomp_team *team = thr->ts.team; + struct gomp_task *task = thr->task; + + local_fn (local_data); + gomp_team_barrier_wait (&team->barrier); + gomp_finish_task (task); + + gomp_barrier_wait (&pool->threads_dock); + + local_fn = thr->fn; + local_data = thr->data; + thr->fn = NULL; + } + while (local_fn); + } + + gomp_sem_destroy (&thr->release); + return NULL; +} + + +/* Create a new team data structure. */ + +struct gomp_team * +gomp_new_team (unsigned nthreads) +{ + struct gomp_team *team; + size_t size; + int i; + + size = sizeof (*team) + nthreads * (sizeof (team->ordered_release[0]) + + sizeof (team->implicit_task[0])); + team = gomp_malloc (size); + + team->work_share_chunk = 8; +#ifdef HAVE_SYNC_BUILTINS + team->single_count = 0; +#else + gomp_mutex_init (&team->work_share_list_free_lock); +#endif + gomp_init_work_share (&team->work_shares[0], false, nthreads); + team->work_shares[0].next_alloc = NULL; + team->work_share_list_free = NULL; + team->work_share_list_alloc = &team->work_shares[1]; + for (i = 1; i < 7; i++) + team->work_shares[i].next_free = &team->work_shares[i + 1]; + team->work_shares[i].next_free = NULL; + + team->nthreads = nthreads; + gomp_barrier_init (&team->barrier, nthreads); + + gomp_sem_init (&team->master_release, 0); + team->ordered_release = (void *) &team->implicit_task[nthreads]; + team->ordered_release[0] = &team->master_release; + + gomp_mutex_init (&team->task_lock); + team->task_queue = NULL; + team->task_count = 0; + team->task_running_count = 0; + + return team; +} + + +/* Free a team data structure. 
*/ + +static void +free_team (struct gomp_team *team) +{ + gomp_barrier_destroy (&team->barrier); + gomp_mutex_destroy (&team->task_lock); + free (team); +} + +/* Allocate and initialize a thread pool. */ + +static struct gomp_thread_pool *gomp_new_thread_pool (void) +{ + struct gomp_thread_pool *pool + = gomp_malloc (sizeof(struct gomp_thread_pool)); + pool->threads = NULL; + pool->threads_size = 0; + pool->threads_used = 0; + pool->last_team = NULL; + return pool; +} + +static void +gomp_free_pool_helper (void *thread_pool) +{ + struct gomp_thread_pool *pool + = (struct gomp_thread_pool *) thread_pool; + gomp_barrier_wait_last (&pool->threads_dock); + gomp_sem_destroy (&gomp_thread ()->release); + pthread_exit (NULL); +} + +/* Free a thread pool and release its threads. */ + +static void +gomp_free_thread (void *arg __attribute__((unused))) +{ + struct gomp_thread *thr = gomp_thread (); + struct gomp_thread_pool *pool = thr->thread_pool; + if (pool) + { + if (pool->threads_used > 0) + { + int i; + for (i = 1; i < pool->threads_used; i++) + { + struct gomp_thread *nthr = pool->threads[i]; + nthr->fn = gomp_free_pool_helper; + nthr->data = pool; + } + /* This barrier undocks threads docked on pool->threads_dock. */ + gomp_barrier_wait (&pool->threads_dock); + /* And this waits till all threads have called gomp_barrier_wait_last + in gomp_free_pool_helper. */ + gomp_barrier_wait (&pool->threads_dock); + /* Now it is safe to destroy the barrier and free the pool. */ + gomp_barrier_destroy (&pool->threads_dock); + } + free (pool->threads); + if (pool->last_team) + free_team (pool->last_team); + free (pool); + thr->thread_pool = NULL; + } + if (thr->task != NULL) + { + struct gomp_task *task = thr->task; + gomp_end_task (); + free (task); + } +} + +/* Launch a team. 
*/ + +void +gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads, + struct gomp_team *team) +{ + struct gomp_thread_start_data *start_data; + struct gomp_thread *thr, *nthr; + struct gomp_task *task; + struct gomp_task_icv *icv; + bool nested; + struct gomp_thread_pool *pool; + unsigned i, n, old_threads_used = 0; + pthread_attr_t thread_attr, *attr; + unsigned long nthreads_var; + + thr = gomp_thread (); + nested = thr->ts.team != NULL; + if (__builtin_expect (thr->thread_pool == NULL, 0)) + { + thr->thread_pool = gomp_new_thread_pool (); + pthread_setspecific (gomp_thread_destructor, thr); + } + pool = thr->thread_pool; + task = thr->task; + icv = task ? &task->icv : &gomp_global_icv; + + /* Always save the previous state, even if this isn't a nested team. + In particular, we should save any work share state from an outer + orphaned work share construct. */ + team->prev_ts = thr->ts; + + thr->ts.team = team; + thr->ts.team_id = 0; + ++thr->ts.level; + if (nthreads > 1) + ++thr->ts.active_level; + thr->ts.work_share = &team->work_shares[0]; + thr->ts.last_work_share = NULL; +#ifdef HAVE_SYNC_BUILTINS + thr->ts.single_count = 0; +#endif + thr->ts.static_trip = 0; + thr->task = &team->implicit_task[0]; + nthreads_var = icv->nthreads_var; + if (__builtin_expect (gomp_nthreads_var_list != NULL, 0) + && thr->ts.level < gomp_nthreads_var_list_len) + nthreads_var = gomp_nthreads_var_list[thr->ts.level]; + gomp_init_task (thr->task, task, icv); + team->implicit_task[0].icv.nthreads_var = nthreads_var; + + if (nthreads == 1) + return; + + i = 1; + + /* We only allow the reuse of idle threads for non-nested PARALLEL + regions. This appears to be implied by the semantics of + threadprivate variables, but perhaps that's reading too much into + things. Certainly it does prevent any locking problems, since + only the initial program thread will modify gomp_threads. 
*/ + if (!nested) + { + old_threads_used = pool->threads_used; + + if (nthreads <= old_threads_used) + n = nthreads; + else if (old_threads_used == 0) + { + n = 0; + gomp_barrier_init (&pool->threads_dock, nthreads); + } + else + { + n = old_threads_used; + + /* Increase the barrier threshold to make sure all new + threads arrive before the team is released. */ + gomp_barrier_reinit (&pool->threads_dock, nthreads); + } + + /* Not true yet, but soon will be. We're going to release all + threads from the dock, and those that aren't part of the + team will exit. */ + pool->threads_used = nthreads; + + /* Release existing idle threads. */ + for (; i < n; ++i) + { + nthr = pool->threads[i]; + nthr->ts.team = team; + nthr->ts.work_share = &team->work_shares[0]; + nthr->ts.last_work_share = NULL; + nthr->ts.team_id = i; + nthr->ts.level = team->prev_ts.level + 1; + nthr->ts.active_level = thr->ts.active_level; +#ifdef HAVE_SYNC_BUILTINS + nthr->ts.single_count = 0; +#endif + nthr->ts.static_trip = 0; + nthr->task = &team->implicit_task[i]; + gomp_init_task (nthr->task, task, icv); + team->implicit_task[i].icv.nthreads_var = nthreads_var; + nthr->fn = fn; + nthr->data = data; + team->ordered_release[i] = &nthr->release; + } + + if (i == nthreads) + goto do_release; + + /* If necessary, expand the size of the gomp_threads array. It is + expected that changes in the number of threads are rare, thus we + make no effort to expand gomp_threads_size geometrically. 
*/ + if (nthreads >= pool->threads_size) + { + pool->threads_size = nthreads + 1; + pool->threads + = gomp_realloc (pool->threads, + pool->threads_size + * sizeof (struct gomp_thread_data *)); + } + } + + if (__builtin_expect (nthreads > old_threads_used, 0)) + { + long diff = (long) nthreads - (long) old_threads_used; + + if (old_threads_used == 0) + --diff; + +#ifdef HAVE_SYNC_BUILTINS + __sync_fetch_and_add (&gomp_managed_threads, diff); +#else + gomp_mutex_lock (&gomp_remaining_threads_lock); + gomp_managed_threads += diff; + gomp_mutex_unlock (&gomp_remaining_threads_lock); +#endif + } + + attr = &gomp_thread_attr; + if (__builtin_expect (gomp_cpu_affinity != NULL, 0)) + { + size_t stacksize; + pthread_attr_init (&thread_attr); + pthread_attr_setdetachstate (&thread_attr, PTHREAD_CREATE_DETACHED); + if (! pthread_attr_getstacksize (&gomp_thread_attr, &stacksize)) + pthread_attr_setstacksize (&thread_attr, stacksize); + attr = &thread_attr; + } + + start_data = gomp_alloca (sizeof (struct gomp_thread_start_data) + * (nthreads-i)); + + /* Launch new threads. 
*/ + for (; i < nthreads; ++i, ++start_data) + { + pthread_t pt; + int err; + + start_data->fn = fn; + start_data->fn_data = data; + start_data->ts.team = team; + start_data->ts.work_share = &team->work_shares[0]; + start_data->ts.last_work_share = NULL; + start_data->ts.team_id = i; + start_data->ts.level = team->prev_ts.level + 1; + start_data->ts.active_level = thr->ts.active_level; +#ifdef HAVE_SYNC_BUILTINS + start_data->ts.single_count = 0; +#endif + start_data->ts.static_trip = 0; + start_data->task = &team->implicit_task[i]; + gomp_init_task (start_data->task, task, icv); + team->implicit_task[i].icv.nthreads_var = nthreads_var; + start_data->thread_pool = pool; + start_data->nested = nested; + + if (gomp_cpu_affinity != NULL) + gomp_init_thread_affinity (attr); + + err = pthread_create (&pt, attr, gomp_thread_start, start_data); + if (err != 0) + gomp_fatal ("Thread creation failed: %s", strerror (err)); + } + + if (__builtin_expect (gomp_cpu_affinity != NULL, 0)) + pthread_attr_destroy (&thread_attr); + + do_release: + gomp_barrier_wait (nested ? &team->barrier : &pool->threads_dock); + + /* Decrease the barrier threshold to match the number of threads + that should arrive back at the end of this team. The extra + threads should be exiting. Note that we arrange for this test + to never be true for nested teams. */ + if (__builtin_expect (nthreads < old_threads_used, 0)) + { + long diff = (long) nthreads - (long) old_threads_used; + + gomp_barrier_reinit (&pool->threads_dock, nthreads); + +#ifdef HAVE_SYNC_BUILTINS + __sync_fetch_and_add (&gomp_managed_threads, diff); +#else + gomp_mutex_lock (&gomp_remaining_threads_lock); + gomp_managed_threads += diff; + gomp_mutex_unlock (&gomp_remaining_threads_lock); +#endif + } +} + + +/* Terminate the current team. This is only to be called by the master + thread. We assume that we must wait for the other threads. 
*/ + +void +gomp_team_end (void) +{ + struct gomp_thread *thr = gomp_thread (); + struct gomp_team *team = thr->ts.team; + + /* This barrier handles all pending explicit threads. */ + gomp_team_barrier_wait (&team->barrier); + gomp_fini_work_share (thr->ts.work_share); + + gomp_end_task (); + thr->ts = team->prev_ts; + + if (__builtin_expect (thr->ts.team != NULL, 0)) + { +#ifdef HAVE_SYNC_BUILTINS + __sync_fetch_and_add (&gomp_managed_threads, 1L - team->nthreads); +#else + gomp_mutex_lock (&gomp_remaining_threads_lock); + gomp_managed_threads -= team->nthreads - 1L; + gomp_mutex_unlock (&gomp_remaining_threads_lock); +#endif + /* This barrier has gomp_barrier_wait_last counterparts + and ensures the team can be safely destroyed. */ + gomp_barrier_wait (&team->barrier); + } + + if (__builtin_expect (team->work_shares[0].next_alloc != NULL, 0)) + { + struct gomp_work_share *ws = team->work_shares[0].next_alloc; + do + { + struct gomp_work_share *next_ws = ws->next_alloc; + free (ws); + ws = next_ws; + } + while (ws != NULL); + } + gomp_sem_destroy (&team->master_release); +#ifndef HAVE_SYNC_BUILTINS + gomp_mutex_destroy (&team->work_share_list_free_lock); +#endif + + if (__builtin_expect (thr->ts.team != NULL, 0) + || __builtin_expect (team->nthreads == 1, 0)) + free_team (team); + else + { + struct gomp_thread_pool *pool = thr->thread_pool; + if (pool->last_team) + free_team (pool->last_team); + pool->last_team = team; + } +} + + +/* Constructors for this file. 
*/ + +static void __attribute__((constructor)) +initialize_team (void) +{ + struct gomp_thread *thr; + +#ifndef HAVE_TLS + static struct gomp_thread initial_thread_tls_data; + + pthread_key_create (&gomp_tls_key, NULL); + pthread_setspecific (gomp_tls_key, &initial_thread_tls_data); +#endif + + if (pthread_key_create (&gomp_thread_destructor, gomp_free_thread) != 0) + gomp_fatal ("could not create thread pool destructor."); + +#ifdef HAVE_TLS + thr = &gomp_tls_data; +#else + thr = &initial_thread_tls_data; +#endif + gomp_sem_init (&thr->release, 0); +} + +static void __attribute__((destructor)) +team_destructor (void) +{ + /* Without this dlclose on libgomp could lead to subsequent + crashes. */ + pthread_key_delete (gomp_thread_destructor); +} + +struct gomp_task_icv * +gomp_new_icv (void) +{ + struct gomp_thread *thr = gomp_thread (); + struct gomp_task *task = gomp_malloc (sizeof (struct gomp_task)); + gomp_init_task (task, NULL, &gomp_global_icv); + thr->task = task; + pthread_setspecific (gomp_thread_destructor, thr); + return &task->icv; +} diff --git a/contrib/gcc-4.7/libgomp/work.c b/contrib/gcc-4.7/libgomp/work.c new file mode 100644 index 0000000000..6bd9c245b7 --- /dev/null +++ b/contrib/gcc-4.7/libgomp/work.c @@ -0,0 +1,264 @@ +/* Copyright (C) 2005, 2008, 2009 Free Software Foundation, Inc. + Contributed by Richard Henderson . + + This file is part of the GNU OpenMP Library (libgomp). + + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* This file contains routines to manage the work-share queue for a team + of threads. */ + +#include "libgomp.h" +#include +#include +#include + + +/* Allocate a new work share structure, preferably from current team's + free gomp_work_share cache. */ + +static struct gomp_work_share * +alloc_work_share (struct gomp_team *team) +{ + struct gomp_work_share *ws; + unsigned int i; + + /* This is called in a critical section. */ + if (team->work_share_list_alloc != NULL) + { + ws = team->work_share_list_alloc; + team->work_share_list_alloc = ws->next_free; + return ws; + } + +#ifdef HAVE_SYNC_BUILTINS + ws = team->work_share_list_free; + /* We need atomic read from work_share_list_free, + as free_work_share can be called concurrently. 
*/ + __asm ("" : "+r" (ws)); + + if (ws && ws->next_free) + { + struct gomp_work_share *next = ws->next_free; + ws->next_free = NULL; + team->work_share_list_alloc = next->next_free; + return next; + } +#else + gomp_mutex_lock (&team->work_share_list_free_lock); + ws = team->work_share_list_free; + if (ws) + { + team->work_share_list_alloc = ws->next_free; + team->work_share_list_free = NULL; + gomp_mutex_unlock (&team->work_share_list_free_lock); + return ws; + } + gomp_mutex_unlock (&team->work_share_list_free_lock); +#endif + + team->work_share_chunk *= 2; + ws = gomp_malloc (team->work_share_chunk * sizeof (struct gomp_work_share)); + ws->next_alloc = team->work_shares[0].next_alloc; + team->work_shares[0].next_alloc = ws; + team->work_share_list_alloc = &ws[1]; + for (i = 1; i < team->work_share_chunk - 1; i++) + ws[i].next_free = &ws[i + 1]; + ws[i].next_free = NULL; + return ws; +} + +/* Initialize an already allocated struct gomp_work_share. + This shouldn't touch the next_alloc field. */ + +void +gomp_init_work_share (struct gomp_work_share *ws, bool ordered, + unsigned nthreads) +{ + gomp_mutex_init (&ws->lock); + if (__builtin_expect (ordered, 0)) + { +#define INLINE_ORDERED_TEAM_IDS_CNT \ + ((sizeof (struct gomp_work_share) \ + - offsetof (struct gomp_work_share, inline_ordered_team_ids)) \ + / sizeof (((struct gomp_work_share *) 0)->inline_ordered_team_ids[0])) + + if (nthreads > INLINE_ORDERED_TEAM_IDS_CNT) + ws->ordered_team_ids + = gomp_malloc (nthreads * sizeof (*ws->ordered_team_ids)); + else + ws->ordered_team_ids = ws->inline_ordered_team_ids; + memset (ws->ordered_team_ids, '\0', + nthreads * sizeof (*ws->ordered_team_ids)); + ws->ordered_num_used = 0; + ws->ordered_owner = -1; + ws->ordered_cur = 0; + } + else + ws->ordered_team_ids = NULL; + gomp_ptrlock_init (&ws->next_ws, NULL); + ws->threads_completed = 0; +} + +/* Do any needed destruction of gomp_work_share fields before it + is put back into free gomp_work_share cache or freed. 
*/ + +void +gomp_fini_work_share (struct gomp_work_share *ws) +{ + gomp_mutex_destroy (&ws->lock); + if (ws->ordered_team_ids != ws->inline_ordered_team_ids) + free (ws->ordered_team_ids); + gomp_ptrlock_destroy (&ws->next_ws); +} + +/* Free a work share struct, if not orphaned, put it into current + team's free gomp_work_share cache. */ + +static inline void +free_work_share (struct gomp_team *team, struct gomp_work_share *ws) +{ + gomp_fini_work_share (ws); + if (__builtin_expect (team == NULL, 0)) + free (ws); + else + { + struct gomp_work_share *next_ws; +#ifdef HAVE_SYNC_BUILTINS + do + { + next_ws = team->work_share_list_free; + ws->next_free = next_ws; + } + while (!__sync_bool_compare_and_swap (&team->work_share_list_free, + next_ws, ws)); +#else + gomp_mutex_lock (&team->work_share_list_free_lock); + next_ws = team->work_share_list_free; + ws->next_free = next_ws; + team->work_share_list_free = ws; + gomp_mutex_unlock (&team->work_share_list_free_lock); +#endif + } +} + +/* The current thread is ready to begin the next work sharing construct. + In all cases, thr->ts.work_share is updated to point to the new + structure. In all cases the work_share lock is locked. Return true + if this was the first thread to reach this point. */ + +bool +gomp_work_share_start (bool ordered) +{ + struct gomp_thread *thr = gomp_thread (); + struct gomp_team *team = thr->ts.team; + struct gomp_work_share *ws; + + /* Work sharing constructs can be orphaned. */ + if (team == NULL) + { + ws = gomp_malloc (sizeof (*ws)); + gomp_init_work_share (ws, ordered, 1); + thr->ts.work_share = ws; + return ws; + } + + ws = thr->ts.work_share; + thr->ts.last_work_share = ws; + ws = gomp_ptrlock_get (&ws->next_ws); + if (ws == NULL) + { + /* This thread encountered a new ws first. 
*/ + struct gomp_work_share *ws = alloc_work_share (team); + gomp_init_work_share (ws, ordered, team->nthreads); + thr->ts.work_share = ws; + return true; + } + else + { + thr->ts.work_share = ws; + return false; + } +} + +/* The current thread is done with its current work sharing construct. + This version does imply a barrier at the end of the work-share. */ + +void +gomp_work_share_end (void) +{ + struct gomp_thread *thr = gomp_thread (); + struct gomp_team *team = thr->ts.team; + gomp_barrier_state_t bstate; + + /* Work sharing constructs can be orphaned. */ + if (team == NULL) + { + free_work_share (NULL, thr->ts.work_share); + thr->ts.work_share = NULL; + return; + } + + bstate = gomp_barrier_wait_start (&team->barrier); + + if (gomp_barrier_last_thread (bstate)) + { + if (__builtin_expect (thr->ts.last_work_share != NULL, 1)) + free_work_share (team, thr->ts.last_work_share); + } + + gomp_team_barrier_wait_end (&team->barrier, bstate); + thr->ts.last_work_share = NULL; +} + +/* The current thread is done with its current work sharing construct. + This version does NOT imply a barrier at the end of the work-share. */ + +void +gomp_work_share_end_nowait (void) +{ + struct gomp_thread *thr = gomp_thread (); + struct gomp_team *team = thr->ts.team; + struct gomp_work_share *ws = thr->ts.work_share; + unsigned completed; + + /* Work sharing constructs can be orphaned. 
*/ + if (team == NULL) + { + free_work_share (NULL, ws); + thr->ts.work_share = NULL; + return; + } + + if (__builtin_expect (thr->ts.last_work_share == NULL, 0)) + return; + +#ifdef HAVE_SYNC_BUILTINS + completed = __sync_add_and_fetch (&ws->threads_completed, 1); +#else + gomp_mutex_lock (&ws->lock); + completed = ++ws->threads_completed; + gomp_mutex_unlock (&ws->lock); +#endif + + if (completed == team->nthreads) + free_work_share (team, thr->ts.last_work_share); + thr->ts.last_work_share = NULL; +} -- 2.41.0 From 585a13d64f44fa9fa8b1775265d69885270e9664 Mon Sep 17 00:00:00 2001 From: John Marino Date: Sat, 20 Oct 2012 08:37:48 +0200 Subject: [PATCH 04/16] gcc47: Add libgomp This library was not part of the gcc 4.7 system compiler set. By request for the purpose of supporting OpenMP with system compiler, libgomp is being added to the gcc 4.7 library set. --- contrib/gcc-4.7/README.DELETED | 16 ++- gnu/lib/gcc47/Makefile | 1 + gnu/lib/gcc47/libgomp/Makefile | 47 +++++++++ gnu/lib/gcc47/libgomp/Makefile.i386 | 15 +++ gnu/lib/gcc47/libgomp/Makefile.x86_64 | 15 +++ gnu/lib/gcc47/libgomp/config.h | 136 ++++++++++++++++++++++++++ gnu/lib/gcc47/libgomp/libgomp.spec | 3 + 7 files changed, 232 insertions(+), 1 deletion(-) create mode 100644 gnu/lib/gcc47/libgomp/Makefile create mode 100644 gnu/lib/gcc47/libgomp/Makefile.i386 create mode 100644 gnu/lib/gcc47/libgomp/Makefile.x86_64 create mode 100644 gnu/lib/gcc47/libgomp/config.h create mode 100644 gnu/lib/gcc47/libgomp/libgomp.spec diff --git a/contrib/gcc-4.7/README.DELETED b/contrib/gcc-4.7/README.DELETED index 0009c70c74..af8bea1620 100644 --- a/contrib/gcc-4.7/README.DELETED +++ b/contrib/gcc-4.7/README.DELETED @@ -526,7 +526,21 @@ libgcc/siditi-object.mk libgcc/static-object.mk libgfortran/ libgo/ -libgomp/ +libgomp/ChangeLog +libgomp/ChangeLog.graphite +libgomp/Makefile.am +libgomp/Makefile.in +libgomp/acinclude.m4 +libgomp/aclocal.m4 +libgomp/config/linux/ +libgomp/config/mingw32/ +libgomp/config/osf/ 
+libgomp/configure +libgomp/configure.ac +libgomp/configure.tgt +libgomp/libgomp.info +libgomp/libgomp.texi +libgomp/testsuite/ libiberty/.gitignore libiberty/COPYING.LIB libiberty/ChangeLog diff --git a/gnu/lib/gcc47/Makefile b/gnu/lib/gcc47/Makefile index e575e9e1f6..d684f1db9a 100644 --- a/gnu/lib/gcc47/Makefile +++ b/gnu/lib/gcc47/Makefile @@ -5,6 +5,7 @@ SUBDIR+= libgcc_pic SUBDIR+= libgcov SUBDIR+= libssp SUBDIR+= libssp_nonshared +SUBDIR+= libgomp .if !defined(NO_CXX) SUBDIR+= libstdc++ diff --git a/gnu/lib/gcc47/libgomp/Makefile b/gnu/lib/gcc47/libgomp/Makefile new file mode 100644 index 0000000000..5301403e85 --- /dev/null +++ b/gnu/lib/gcc47/libgomp/Makefile @@ -0,0 +1,47 @@ +CFLAGS+= -I${.CURDIR} +CFLAGS+= -I${.OBJDIR} +.include "../Makefile.inc" +.include "Makefile.${TARGET_ARCH}" + +.PATH: ${GCCDIR}/libgomp +.PATH: ${GCCDIR}/libgomp/config/bsd +.PATH: ${GCCDIR}/libgomp/config/posix + +CFLAGS+= -DHAVE_CONFIG_H +CFLAGS+= -I${GCCDIR}/libgomp +CFLAGS+= -I${GCCDIR}/libgomp/config/posix +CFLAGS+= -I${GCCDIR}/libgcc +LDFLAGS+= -Wl,--version-script=${GCCDIR}/libgomp/libgomp.map + +LIB= gomp +SHLIB_MAJOR= 1 + +# From libgomp Makefile +libgomp_la_SOURCES = alloc.c barrier.c critical.c env.c error.c iter.c \ + iter_ull.c loop.c loop_ull.c ordered.c parallel.c sections.c single.c \ + task.c team.c work.c lock.c mutex.c proc.c sem.c bar.c ptrlock.c \ + time.c fortran.c affinity.c + +SRCS= ${libgomp_la_SOURCES} + +# generated +SRCS+= libgomp_f.h + +libgomp_f.h: ${GCCDIR}/libgomp/libgomp_f.h.in + sed -e 's/@OMP_LOCK_25_ALIGN@/${OMP_LOCK_25_ALIGN}/g' \ + -e 's/@OMP_LOCK_25_KIND@/${OMP_LOCK_25_KIND}/g' \ + -e 's/@OMP_LOCK_25_SIZE@/${OMP_LOCK_25_SIZE}/g' \ + -e 's/@OMP_LOCK_ALIGN@/${OMP_LOCK_ALIGN}/g' \ + -e 's/@OMP_LOCK_KIND@/${OMP_LOCK_KIND}/g' \ + -e 's/@OMP_LOCK_SIZE@/${OMP_LOCK_SIZE}/g' \ + -e 's/@OMP_NEST_LOCK_25_ALIGN@/${OMP_NEST_LOCK_25_ALIGN}/g' \ + -e 's/@OMP_NEST_LOCK_25_KIND@/${OMP_NEST_LOCK_25_KIND}/g' \ + -e 
's/@OMP_NEST_LOCK_25_SIZE@/${OMP_NEST_LOCK_25_SIZE}/g' \ + -e 's/@OMP_NEST_LOCK_ALIGN@/${OMP_NEST_LOCK_ALIGN}/g' \ + -e 's/@OMP_NEST_LOCK_KIND@/${OMP_NEST_LOCK_KIND}/g' \ + -e 's/@OMP_NEST_LOCK_SIZE@/${OMP_NEST_LOCK_SIZE}/g' \ + < ${.ALLSRC} > ${.TARGET} + +CLEANFILES+= libgomp_f.h + +.include diff --git a/gnu/lib/gcc47/libgomp/Makefile.i386 b/gnu/lib/gcc47/libgomp/Makefile.i386 new file mode 100644 index 0000000000..66a49bbdaf --- /dev/null +++ b/gnu/lib/gcc47/libgomp/Makefile.i386 @@ -0,0 +1,15 @@ +# Values recorded in /libgomp/config.log +# i386 platform + +OMP_LOCK_25_ALIGN= 4 +OMP_LOCK_25_KIND= 4 +OMP_LOCK_25_SIZE= 4 +OMP_LOCK_ALIGN= 4 +OMP_LOCK_KIND= 4 +OMP_LOCK_SIZE= 4 +OMP_NEST_LOCK_25_ALIGN= 4 +OMP_NEST_LOCK_25_KIND= 8 +OMP_NEST_LOCK_25_SIZE= 8 +OMP_NEST_LOCK_ALIGN= 4 +OMP_NEST_LOCK_KIND= 8 +OMP_NEST_LOCK_SIZE= 12 diff --git a/gnu/lib/gcc47/libgomp/Makefile.x86_64 b/gnu/lib/gcc47/libgomp/Makefile.x86_64 new file mode 100644 index 0000000000..e64e7ffed0 --- /dev/null +++ b/gnu/lib/gcc47/libgomp/Makefile.x86_64 @@ -0,0 +1,15 @@ +# Values recorded in /libgomp/config.log +# x86-64 platform + +OMP_LOCK_25_ALIGN= 8 +OMP_LOCK_25_KIND= 8 +OMP_LOCK_25_SIZE= 8 +OMP_LOCK_ALIGN= 8 +OMP_LOCK_KIND= 8 +OMP_LOCK_SIZE= 8 +OMP_NEST_LOCK_25_ALIGN= 8 +OMP_NEST_LOCK_25_KIND= 8 +OMP_NEST_LOCK_25_SIZE= 16 +OMP_NEST_LOCK_ALIGN= 8 +OMP_NEST_LOCK_KIND= 8 +OMP_NEST_LOCK_SIZE= 24 diff --git a/gnu/lib/gcc47/libgomp/config.h b/gnu/lib/gcc47/libgomp/config.h new file mode 100644 index 0000000000..c553a296cc --- /dev/null +++ b/gnu/lib/gcc47/libgomp/config.h @@ -0,0 +1,136 @@ +/* config.h. Generated from config.h.in by configure. */ +/* config.h.in. Generated from configure.ac by autoheader. */ + +/* Define to 1 if the target assembler supports .symver directive. */ +#define HAVE_AS_SYMVER_DIRECTIVE 1 + +/* Define to 1 if the target supports __attribute__((alias(...))). */ +#define HAVE_ATTRIBUTE_ALIAS 1 + +/* Define to 1 if the target supports __attribute__((dllexport)). 
*/ +/* #undef HAVE_ATTRIBUTE_DLLEXPORT */ + +/* Define to 1 if the target supports __attribute__((visibility(...))). */ +#define HAVE_ATTRIBUTE_VISIBILITY 1 + +/* Define if the POSIX Semaphores do not work on your system. */ +/* #undef HAVE_BROKEN_POSIX_SEMAPHORES */ + +/* Define to 1 if the target assembler supports thread-local storage. */ +/* #undef HAVE_CC_TLS */ + +/* Define to 1 if you have the `clock_gettime' function. */ +#define HAVE_CLOCK_GETTIME 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_DLFCN_H 1 + +/* Define to 1 if you have the `getloadavg' function. */ +#define HAVE_GETLOADAVG 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_INTTYPES_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MEMORY_H 1 + +/* Define if pthread_{,attr_}{g,s}etaffinity_np is supported. */ +/* #undef HAVE_PTHREAD_AFFINITY_NP */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SEMAPHORE_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the `strtoull' function. */ +#define HAVE_STRTOULL 1 + +/* Define to 1 if the target runtime linker supports binding the same symbol + to different versions. */ +#define HAVE_SYMVER_SYMBOL_RENAMING_RUNTIME_SUPPORT 1 + +/* Define to 1 if the target supports __sync_*_compare_and_swap */ +#define HAVE_SYNC_BUILTINS 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_LOADAVG_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TIME_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if the target supports thread-local storage. 
*/ +#define HAVE_TLS 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UNISTD_H 1 + +/* Define to 1 if GNU symbol versioning is used for libgomp. */ +#define LIBGOMP_GNU_SYMBOL_VERSIONING 1 + +/* Define to the sub-directory in which libtool stores uninstalled libraries. + */ +#define LT_OBJDIR ".libs/" + +/* Name of package */ +#define PACKAGE "libgomp" + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "GNU OpenMP Runtime Library" + +/* Define to the full name and version of this package. */ +#define PACKAGE_STRING "GNU OpenMP Runtime Library 1.0" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "libgomp" + +/* Define to the home page for this package. */ +#define PACKAGE_URL "http://www.gnu.org/software/libgomp/" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "1.0" + +/* The size of `char', as computed by sizeof. */ +/* #undef SIZEOF_CHAR */ + +/* The size of `int', as computed by sizeof. */ +/* #undef SIZEOF_INT */ + +/* The size of `long', as computed by sizeof. */ +/* #undef SIZEOF_LONG */ + +/* The size of `short', as computed by sizeof. */ +/* #undef SIZEOF_SHORT */ + +/* The size of `void *', as computed by sizeof. */ +/* #undef SIZEOF_VOID_P */ + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Define if you can safely include both and . */ +#define STRING_WITH_STRINGS 1 + +/* Define to 1 if you can safely include both and . */ +#define TIME_WITH_SYS_TIME 1 + +/* Version number of package */ +#define VERSION "1.0" diff --git a/gnu/lib/gcc47/libgomp/libgomp.spec b/gnu/lib/gcc47/libgomp/libgomp.spec new file mode 100644 index 0000000000..7102255429 --- /dev/null +++ b/gnu/lib/gcc47/libgomp/libgomp.spec @@ -0,0 +1,3 @@ +# This spec file is read by gcc when linking. 
It is used to specify the +# standard libraries we need in order to link with -fopenmp. +*link_gomp: -lgomp %{static: } -- 2.41.0 From 0c7fdccd449859edbb19b30ec96423a0bd184e4b Mon Sep 17 00:00:00 2001 From: Sepherosa Ziehau Date: Sat, 20 Oct 2012 20:12:38 +0800 Subject: [PATCH 05/16] ifpoll: Make status fraction and TX fraction easier to read --- sys/dev/netif/bnx/if_bnx.c | 8 ++++---- sys/net/if_poll.c | 24 ++++++++++++------------ 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/sys/dev/netif/bnx/if_bnx.c b/sys/dev/netif/bnx/if_bnx.c index 71cb526430..dbd8d74337 100644 --- a/sys/dev/netif/bnx/if_bnx.c +++ b/sys/dev/netif/bnx/if_bnx.c @@ -2102,7 +2102,7 @@ bnx_attach(device_t dev) } #ifdef IFPOLL_ENABLE - sc->bnx_npoll_stfrac = 39; /* 1/40 polling freq */ + sc->bnx_npoll_stfrac = 40 - 1; /* 1/40 polling freq */ sc->bnx_npoll_cpuid = device_get_unit(dev) % ncpus2; #endif @@ -3946,13 +3946,13 @@ bnx_sysctl_npoll_stfrac(SYSCTL_HANDLER_ARGS) lwkt_serialize_enter(ifp->if_serializer); - stfrac = sc->bnx_npoll_stfrac; + stfrac = sc->bnx_npoll_stfrac + 1; error = sysctl_handle_int(oidp, &stfrac, 0, req); if (!error && req->newptr != NULL) { - if (stfrac < 0) { + if (stfrac < 1) { error = EINVAL; } else { - sc->bnx_npoll_stfrac = stfrac; + sc->bnx_npoll_stfrac = stfrac - 1; if (sc->bnx_npoll_stcount > sc->bnx_npoll_stfrac) sc->bnx_npoll_stcount = sc->bnx_npoll_stfrac; } diff --git a/sys/net/if_poll.c b/sys/net/if_poll.c index b0117aa81f..189fe195be 100644 --- a/sys/net/if_poll.c +++ b/sys/net/if_poll.c @@ -113,8 +113,8 @@ #define IFPOLL_FREQ_DEFAULT 4000 -#define IFPOLL_TXFRAC_DEFAULT 0 /* 1/1 of the pollhz */ -#define IFPOLL_STFRAC_DEFAULT 39 /* 1/40 of the pollhz */ +#define IFPOLL_TXFRAC_DEFAULT 1 /* 1/1 of the pollhz */ +#define IFPOLL_STFRAC_DEFAULT 40 /* 1/40 of the pollhz */ #define IFPOLL_RX 0x1 #define IFPOLL_TX 0x2 @@ -1222,15 +1222,15 @@ poll_comm_init(int cpuid) comm = kmalloc_cachealign(sizeof(*comm), M_DEVBUF, M_WAITOK | M_ZERO); - if 
(ifpoll_stfrac < 0) + if (ifpoll_stfrac < 1) ifpoll_stfrac = IFPOLL_STFRAC_DEFAULT; - if (ifpoll_txfrac < 0) + if (ifpoll_txfrac < 1) ifpoll_txfrac = IFPOLL_TXFRAC_DEFAULT; comm->pollhz = ifpoll_pollhz; comm->poll_cpuid = cpuid; - comm->poll_stfrac = ifpoll_stfrac; - comm->poll_txfrac = ifpoll_txfrac; + comm->poll_stfrac = ifpoll_stfrac - 1; + comm->poll_txfrac = ifpoll_txfrac - 1; ksnprintf(cpuid_str, sizeof(cpuid_str), "%d", cpuid); @@ -1400,16 +1400,16 @@ sysctl_stfrac(SYSCTL_HANDLER_ARGS) KKASSERT(comm->poll_cpuid == 0); - stfrac = comm->poll_stfrac; + stfrac = comm->poll_stfrac + 1; error = sysctl_handle_int(oidp, &stfrac, 0, req); if (error || req->newptr == NULL) return error; - if (stfrac < 0) + if (stfrac < 1) return EINVAL; netmsg_init(&nmsg, NULL, &curthread->td_msgport, 0, sysctl_stfrac_handler); - nmsg.lmsg.u.ms_result = stfrac; + nmsg.lmsg.u.ms_result = stfrac - 1; return lwkt_domsg(netisr_portfn(comm->poll_cpuid), &nmsg.lmsg, 0); } @@ -1438,16 +1438,16 @@ sysctl_txfrac(SYSCTL_HANDLER_ARGS) struct netmsg_base nmsg; int error, txfrac; - txfrac = comm->poll_txfrac; + txfrac = comm->poll_txfrac + 1; error = sysctl_handle_int(oidp, &txfrac, 0, req); if (error || req->newptr == NULL) return error; - if (txfrac < 0) + if (txfrac < 1) return EINVAL; netmsg_init(&nmsg, NULL, &curthread->td_msgport, 0, sysctl_txfrac_handler); - nmsg.lmsg.u.ms_result = txfrac; + nmsg.lmsg.u.ms_result = txfrac - 1; return lwkt_domsg(netisr_portfn(comm->poll_cpuid), &nmsg.lmsg, 0); } -- 2.41.0 From f85247aee5239760523de09ad12e899ec4d10f55 Mon Sep 17 00:00:00 2001 From: Sepherosa Ziehau Date: Sat, 20 Oct 2012 20:17:24 +0800 Subject: [PATCH 06/16] config: Enable ifpoll (IFPOLL_ENABLE) by default --- sys/config/GENERIC | 2 ++ sys/config/X86_64_GENERIC | 2 ++ 2 files changed, 4 insertions(+) diff --git a/sys/config/GENERIC b/sys/config/GENERIC index 626b4fb092..7beab925c1 100644 --- a/sys/config/GENERIC +++ b/sys/config/GENERIC @@ -37,6 +37,8 @@ options COMPAT_DF12 #Compatible with 
DragonFly up to 1.2 options SCSI_DELAY=5000 #Delay (in ms) before probing SCSI options DEVICE_POLLING # Support mixed interrupt-polling # handling of network device drivers +options IFPOLL_ENABLE # Support mixed interrupt-polling + # handling of network device drivers options UCONSOLE #Allow users to grab the console options USERCONFIG #boot -c editor options VISUAL_USERCONFIG #visual boot -c editor diff --git a/sys/config/X86_64_GENERIC b/sys/config/X86_64_GENERIC index 032911685e..9f27754e5d 100644 --- a/sys/config/X86_64_GENERIC +++ b/sys/config/X86_64_GENERIC @@ -33,6 +33,8 @@ options PROCFS #Process filesystem options SCSI_DELAY=5000 #Delay (in ms) before probing SCSI options DEVICE_POLLING # Support mixed interrupt-polling # handling of network device drivers +options IFPOLL_ENABLE # Support mixed interrupt-polling + # handling of network device drivers options UCONSOLE #Allow users to grab the console options KTRACE #ktrace(1) support options SYSVSHM #SYSV-style shared memory -- 2.41.0 From 48d6c00e601b14da1f4e93187f0436495e83b6d1 Mon Sep 17 00:00:00 2001 From: John Marino Date: Sat, 20 Oct 2012 15:19:06 +0200 Subject: [PATCH 07/16] libgomp47: link pthread library --- gnu/lib/gcc47/libgomp/Makefile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/gnu/lib/gcc47/libgomp/Makefile b/gnu/lib/gcc47/libgomp/Makefile index 5301403e85..e7547a4f0d 100644 --- a/gnu/lib/gcc47/libgomp/Makefile +++ b/gnu/lib/gcc47/libgomp/Makefile @@ -16,6 +16,10 @@ LDFLAGS+= -Wl,--version-script=${GCCDIR}/libgomp/libgomp.map LIB= gomp SHLIB_MAJOR= 1 +THRLIB= ${.OBJDIR}/../../../../lib/libpthread/libpthread.so +LDADD+= ${THRLIB} +DPADD+= ${THRLIB} + # From libgomp Makefile libgomp_la_SOURCES = alloc.c barrier.c critical.c env.c error.c iter.c \ iter_ull.c loop.c loop_ull.c ordered.c parallel.c sections.c single.c \ -- 2.41.0 From dc43b724957730a2aec1f4314b64f3554e60b46c Mon Sep 17 00:00:00 2001 From: Sepherosa Ziehau Date: Sat, 20 Oct 2012 21:33:06 +0800 Subject: [PATCH 08/16] netif: 
Enable ifpoll in module building --- sys/dev/netif/bnx/Makefile | 2 +- sys/dev/netif/emx/Makefile | 2 +- sys/dev/netif/igb/Makefile | 5 ++++- sys/dev/netif/jme/Makefile | 2 +- 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/sys/dev/netif/bnx/Makefile b/sys/dev/netif/bnx/Makefile index 44aae6bfc6..3878ec2bb5 100644 --- a/sys/dev/netif/bnx/Makefile +++ b/sys/dev/netif/bnx/Makefile @@ -6,7 +6,7 @@ SRCS+= opt_ifpoll.h opt_bnx.h .ifndef BUILDING_WITH_KERNEL opt_ifpoll.h: - touch ${.OBJDIR}/${.TARGET} + echo '#define IFPOLL_ENABLE 1' > ${.OBJDIR}/${.TARGET} opt_bnx.h: touch ${.OBJDIR}/${.TARGET} diff --git a/sys/dev/netif/emx/Makefile b/sys/dev/netif/emx/Makefile index a7e4dbbd51..a298105cfb 100644 --- a/sys/dev/netif/emx/Makefile +++ b/sys/dev/netif/emx/Makefile @@ -6,7 +6,7 @@ SRCS+= opt_ifpoll.h opt_ktr.h opt_emx.h .ifndef BUILDING_WITH_KERNEL opt_ifpoll.h: - touch ${.OBJDIR}/${.TARGET} + echo '#define IFPOLL_ENABLE 1' > ${.OBJDIR}/${.TARGET} opt_emx.h: touch ${.OBJDIR}/${.TARGET} diff --git a/sys/dev/netif/igb/Makefile b/sys/dev/netif/igb/Makefile index 845ce8f116..7339bc303b 100644 --- a/sys/dev/netif/igb/Makefile +++ b/sys/dev/netif/igb/Makefile @@ -4,10 +4,13 @@ SRCS+= device_if.h bus_if.h pci_if.h SRCS+= opt_ifpoll.h opt_igb.h .ifndef BUILDING_WITH_KERNEL + opt_ifpoll.h: - touch ${.OBJDIR}/${.TARGET} + echo '#define IFPOLL_ENABLE 1' > ${.OBJDIR}/${.TARGET} + opt_igb.h: touch ${.OBJDIR}/${.TARGET} + .endif .include diff --git a/sys/dev/netif/jme/Makefile b/sys/dev/netif/jme/Makefile index 7d832b3738..44a763a903 100644 --- a/sys/dev/netif/jme/Makefile +++ b/sys/dev/netif/jme/Makefile @@ -8,7 +8,7 @@ SRCS+= opt_ifpoll.h opt_jme.h .ifndef BUILDING_WITH_KERNEL opt_ifpoll.h: - touch ${.OBJDIR}/${.TARGET} + echo '#define IFPOLL_ENABLE 1' > ${.OBJDIR}/${.TARGET} opt_jme.h: touch ${.OBJDIR}/${.TARGET} -- 2.41.0 From 5127ef5e25d3b45982df720ecb2de4c4b95ff39a Mon Sep 17 00:00:00 2001 From: Markus Pfeiffer Date: Sat, 20 Oct 2012 20:50:17 +0000 Subject: [PATCH 
09/16] condvar(9): adjust manpage to reflect reality also fix a typo while I am here --- share/man/man9/condvar.9 | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/share/man/man9/condvar.9 b/share/man/man9/condvar.9 index bb9aeb52e7..599301636c 100644 --- a/share/man/man9/condvar.9 +++ b/share/man/man9/condvar.9 @@ -129,8 +129,8 @@ unblock, their calling threads are made runnable. and .Fn cv_timedwait_sig wait for at most -.Fa timo -seconds before being unblocked and returning +.Fa timo +/ hz seconds before being unblocked and returning .Er EWOULDBLOCK ; otherwise, they return 0. .Fn cv_wait_sig @@ -148,7 +148,8 @@ or Condition variables exist primarily for code imported from other systems; for .Dx code, the -.Fn tsleep / +.Fn tsleep +/ .Fn wakeup family of functions should be used instead. .Pp -- 2.41.0 From a1882035f10072463b3a184e978e5bdedf0253dd Mon Sep 17 00:00:00 2001 From: Sepherosa Ziehau Date: Sun, 21 Oct 2012 16:36:37 +0800 Subject: [PATCH 10/16] objcache: Cache align magazinedepot and percpu_objcache --- sys/kern/kern_objcache.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sys/kern/kern_objcache.c b/sys/kern/kern_objcache.c index 1a5fbeca53..d314888583 100644 --- a/sys/kern/kern_objcache.c +++ b/sys/kern/kern_objcache.c @@ -85,7 +85,7 @@ struct magazinedepot { * return a full magazine to * the depot */ int contested; /* depot contention count */ -}; +} __cachealign; /* * per-cpu object cache */ @@ -104,7 +104,7 @@ struct percpu_objcache { /* infrequently used fields */ int waiting; /* waiting for a thread on this cpu to * return an obj to the per-cpu cache */ -}; +} __cachealign; /* only until we have NUMA cluster topology information XXX */ #define MAXCLUSTERS 1 -- 2.41.0 From 2fce25797650849338d38de57731a5efad1d5158 Mon Sep 17 00:00:00 2001 From: Sepherosa Ziehau Date: Sun, 21 Oct 2012 17:00:54 +0800 Subject: [PATCH 11/16] objcache: objcache_create no longer changes cluster_limit It is no longer needed to
pass the pointer to cluster_limit. --- sys/kern/kern_exec.c | 2 +- sys/kern/kern_objcache.c | 14 +++----------- sys/kern/kern_syslink.c | 6 +++--- sys/kern/kern_sysref.c | 2 +- sys/kern/lwkt_thread.c | 2 +- sys/kern/uipc_mbuf.c | 16 ++++++++-------- sys/sys/objcache.h | 4 ++-- sys/vfs/puffs/puffs_msgif.c | 2 +- 8 files changed, 20 insertions(+), 28 deletions(-) diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c index 74059f39b1..4aa7cb8344 100644 --- a/sys/kern/kern_exec.c +++ b/sys/kern/kern_exec.c @@ -128,7 +128,7 @@ exec_objcache_init(void *arg __unused) exec_objcache = objcache_create_mbacked( M_EXECARGS, PATH_MAX + ARG_MAX, - &cluster_limit, 8, + cluster_limit, 8, NULL, NULL, NULL); } SYSINIT(exec_objcache, SI_BOOT2_MACHDEP, SI_ORDER_ANY, exec_objcache_init, 0); diff --git a/sys/kern/kern_objcache.c b/sys/kern/kern_objcache.c index d314888583..0a36ee63c6 100644 --- a/sys/kern/kern_objcache.c +++ b/sys/kern/kern_objcache.c @@ -173,7 +173,7 @@ null_ctor(void *obj, void *privdata, int ocflags) * Create an object cache. 
*/ struct objcache * -objcache_create(const char *name, int *cluster_limit0, int nom_cache, +objcache_create(const char *name, int cluster_limit, int nom_cache, objcache_ctor_fn *ctor, objcache_dtor_fn *dtor, void *privdata, objcache_alloc_fn *alloc, objcache_free_fn *free, void *allocator_args) @@ -184,12 +184,6 @@ objcache_create(const char *name, int *cluster_limit0, int nom_cache, int nmagdepot; int mag_capacity; int i; - int cluster_limit; - - if (cluster_limit0 == NULL) - cluster_limit = 0; - else - cluster_limit = *cluster_limit0; /* * Allocate object cache structure @@ -291,8 +285,6 @@ objcache_create(const char *name, int *cluster_limit0, int nom_cache, LIST_INSERT_HEAD(&allobjcaches, oc, oc_next); spin_unlock(&objcachelist_spin); - if (cluster_limit0 != NULL) - *cluster_limit0 = cluster_limit; return (oc); } @@ -305,7 +297,7 @@ objcache_create_simple(malloc_type_t mtype, size_t objsize) margs = kmalloc(sizeof(*margs), M_OBJCACHE, M_WAITOK|M_ZERO); margs->objsize = objsize; margs->mtype = mtype; - oc = objcache_create(mtype->ks_shortdesc, NULL, 0, + oc = objcache_create(mtype->ks_shortdesc, 0, 0, NULL, NULL, NULL, objcache_malloc_alloc, objcache_malloc_free, margs); @@ -314,7 +306,7 @@ objcache_create_simple(malloc_type_t mtype, size_t objsize) struct objcache * objcache_create_mbacked(malloc_type_t mtype, size_t objsize, - int *cluster_limit, int nom_cache, + int cluster_limit, int nom_cache, objcache_ctor_fn *ctor, objcache_dtor_fn *dtor, void *privdata) { diff --git a/sys/kern/kern_syslink.c b/sys/kern/kern_syslink.c index b93844f7ae..1e70a2ee62 100644 --- a/sys/kern/kern_syslink.c +++ b/sys/kern/kern_syslink.c @@ -148,13 +148,13 @@ syslinkinit(void *dummy __unused) { size_t n = sizeof(struct slmsg); - sl_objcache_none = objcache_create_mbacked(M_SYSLINK, n, NULL, 64, + sl_objcache_none = objcache_create_mbacked(M_SYSLINK, n, 0, 64, slmsg_ctor, slmsg_dtor, &sl_objcache_none); - sl_objcache_small= objcache_create_mbacked(M_SYSLINK, n, NULL, 64, + 
sl_objcache_small= objcache_create_mbacked(M_SYSLINK, n, 0, 64, slmsg_ctor, slmsg_dtor, &sl_objcache_small); - sl_objcache_big = objcache_create_mbacked(M_SYSLINK, n, NULL, 16, + sl_objcache_big = objcache_create_mbacked(M_SYSLINK, n, 0, 16, slmsg_ctor, slmsg_dtor, &sl_objcache_big); } diff --git a/sys/kern/kern_sysref.c b/sys/kern/kern_sysref.c index 0cffa99bac..97aed420ad 100644 --- a/sys/kern/kern_sysref.c +++ b/sys/kern/kern_sysref.c @@ -144,7 +144,7 @@ sysref_alloc(struct sysref_class *srclass) KKASSERT(srclass->mtype != NULL); srclass->oc = objcache_create_mbacked( srclass->mtype, srclass->objsize, - NULL, srclass->nom_cache, + 0, srclass->nom_cache, sysref_ctor, sysref_dtor, srclass); } diff --git a/sys/kern/lwkt_thread.c b/sys/kern/lwkt_thread.c index 6dd8537097..9962b4b475 100644 --- a/sys/kern/lwkt_thread.c +++ b/sys/kern/lwkt_thread.c @@ -283,7 +283,7 @@ lwkt_init(void) } thread_cache = objcache_create_mbacked( M_THREAD, sizeof(struct thread), - NULL, lwkt_cache_threads, + 0, lwkt_cache_threads, _lwkt_thread_ctor, _lwkt_thread_dtor, NULL); } diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c index 7c79fba3f9..c1b4d00bad 100644 --- a/sys/kern/uipc_mbuf.c +++ b/sys/kern/uipc_mbuf.c @@ -665,56 +665,56 @@ mbinit(void *dummy) limit = nmbufs; mbuf_cache = objcache_create("mbuf", - &limit, 0, + limit, 0, mbuf_ctor, NULL, NULL, objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args); mb_limit += limit; limit = nmbufs; mbufphdr_cache = objcache_create("mbuf pkt hdr", - &limit, nmbufs / 4, + limit, nmbufs / 4, mbufphdr_ctor, NULL, NULL, objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args); mb_limit += limit; ncl_limit = nmbclusters; mclmeta_cache = objcache_create("cluster mbuf", - &ncl_limit, 0, + ncl_limit, 0, mclmeta_ctor, mclmeta_dtor, NULL, objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args); cl_limit += ncl_limit; jcl_limit = nmbjclusters; mjclmeta_cache = objcache_create("jcluster mbuf", - &jcl_limit, 0, + jcl_limit, 
0, mjclmeta_ctor, mclmeta_dtor, NULL, objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args); cl_limit += jcl_limit; limit = nmbclusters; mbufcluster_cache = objcache_create("mbuf + cluster", - &limit, nmbclusters / mcl_cachefrac, + limit, nmbclusters / mcl_cachefrac, mbufcluster_ctor, mbufcluster_dtor, NULL, objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args); mb_limit += limit; limit = nmbclusters; mbufphdrcluster_cache = objcache_create("mbuf pkt hdr + cluster", - &limit, nmbclusters / mclph_cachefrac, + limit, nmbclusters / mclph_cachefrac, mbufphdrcluster_ctor, mbufcluster_dtor, NULL, objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args); mb_limit += limit; limit = nmbjclusters / 4; /* XXX really rarely used */ mbufjcluster_cache = objcache_create("mbuf + jcluster", - &limit, 0, + limit, 0, mbufjcluster_ctor, mbufcluster_dtor, NULL, objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args); mb_limit += limit; limit = nmbjclusters; mbufphdrjcluster_cache = objcache_create("mbuf pkt hdr + jcluster", - &limit, nmbjclusters / 16, + limit, nmbjclusters / 16, mbufphdrjcluster_ctor, mbufcluster_dtor, NULL, objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args); mb_limit += limit; diff --git a/sys/sys/objcache.h b/sys/sys/objcache.h index 2ac48cbf52..2188dc8073 100644 --- a/sys/sys/objcache.h +++ b/sys/sys/objcache.h @@ -60,7 +60,7 @@ typedef void (objcache_free_fn)(void *obj, void *allocator_args); struct objcache; struct objcache - *objcache_create(const char *name, int *cluster_limit, int nom_cache, + *objcache_create(const char *name, int cluster_limit, int nom_cache, objcache_ctor_fn *ctor, objcache_dtor_fn *dtor, void *privdata, objcache_alloc_fn *alloc, objcache_free_fn *free, @@ -69,7 +69,7 @@ struct objcache *objcache_create_simple(malloc_type_t mtype, size_t objsize); struct objcache *objcache_create_mbacked(malloc_type_t mtype, size_t objsize, - int *cluster_limit, int nom_cache, + int cluster_limit, int 
nom_cache, objcache_ctor_fn *ctor, objcache_dtor_fn *dtor, void *privdata); void *objcache_get(struct objcache *oc, int ocflags); diff --git a/sys/vfs/puffs/puffs_msgif.c b/sys/vfs/puffs/puffs_msgif.c index f2b78cc55c..54e7cee1b4 100644 --- a/sys/vfs/puffs/puffs_msgif.c +++ b/sys/vfs/puffs/puffs_msgif.c @@ -111,7 +111,7 @@ puffs_msgif_init(void) { parkpc = objcache_create_mbacked(M_PUFFS, sizeof(struct puffs_msgpark), - NULL, 0, makepark, nukepark, NULL); + 0, 0, makepark, nukepark, NULL); } void -- 2.41.0 From 38fdaecd10a60cf3772deb0a4f6863796e159571 Mon Sep 17 00:00:00 2001 From: John Marino Date: Sun, 21 Oct 2012 11:08:28 +0200 Subject: [PATCH 12/16] gcc4X man pages: Introduce generic MLINK handling A few months ago, the MLINK handling for binutils was updated to make it easier to switch the primary version. For binutils, every manpage has a suffix based on the binutils version and the "real" man page is linked to the primary version. The two base compilers had the same organization, but the manual system was not converted when binutils system was. When gcc47 was brought in, the manual organization wasn't implemented correctly and the gcc47 pages ended up getting linked to the "real" man page instead of the primary compiler gcc44. While fixing this, bring in the binutils MLINK handling to prepare for switching gcc47 to the primary compiler in the future. 
--- gnu/usr.bin/Makefile.cco | 21 ++++++++++++++++++++ gnu/usr.bin/cc44/Makefile.inc | 2 -- gnu/usr.bin/cc44/Makefile.langs | 2 ++ gnu/usr.bin/cc44/c++/Makefile | 16 +++++++++------- gnu/usr.bin/cc44/cc/Makefile | 34 +++++++++++++++++---------------- gnu/usr.bin/cc44/cpp/Makefile | 17 +++++++++-------- gnu/usr.bin/cc44/doc/Makefile | 5 +---- gnu/usr.bin/cc44/gcov/Makefile | 17 +++++++++-------- gnu/usr.bin/cc47/Makefile.inc | 3 --- gnu/usr.bin/cc47/Makefile.langs | 2 ++ gnu/usr.bin/cc47/c++/Makefile | 17 ++++++++++------- gnu/usr.bin/cc47/cc/Makefile | 34 +++++++++++++++++---------------- gnu/usr.bin/cc47/cpp/Makefile | 17 +++++++++-------- gnu/usr.bin/cc47/doc/Makefile | 5 +---- gnu/usr.bin/cc47/gcov/Makefile | 17 +++++++++-------- 15 files changed, 118 insertions(+), 91 deletions(-) create mode 100644 gnu/usr.bin/Makefile.cco diff --git a/gnu/usr.bin/Makefile.cco b/gnu/usr.bin/Makefile.cco new file mode 100644 index 0000000000..815a2db91c --- /dev/null +++ b/gnu/usr.bin/Makefile.cco @@ -0,0 +1,21 @@ +# DragonFly maintains two sets of compilers. +# In order to keep man page generation simple when the primary compiler +# changes, this makefile will direct the makefile that included it how +# to name the man pages and if it should hardlink a generic man page to it. 
+# The same approach is used for the two sets of binutils + +CC_PRIMARY= gcc44 +CC_BACKUP= gcc47 + +.if defined(COMPVERSION) +.if (${COMPVERSION} == ${CC_PRIMARY}) +IS_PRIMARY= 1 +MANPAGEVER= 44 +.else +.endif + +.if (${COMPVERSION} == ${CC_BACKUP}) +MANPAGEVER= 47 +.else +.endif +.endif diff --git a/gnu/usr.bin/cc44/Makefile.inc b/gnu/usr.bin/cc44/Makefile.inc index b1f622bc6b..406044e685 100644 --- a/gnu/usr.bin/cc44/Makefile.inc +++ b/gnu/usr.bin/cc44/Makefile.inc @@ -6,8 +6,6 @@ GCCDATESTAMP= 2012.03.13 GCCPOINTVER= ${GCCCOMPLETEVER:R} GCCSHORTVER= ${GCCPOINTVER:S/.//} -GCCDOCSUFFIX= ${GCCSHORTVER} - CSTD?= gnu89 CFLAGS+= -DGCCPOINTVER=\"${GCCPOINTVER}\" diff --git a/gnu/usr.bin/cc44/Makefile.langs b/gnu/usr.bin/cc44/Makefile.langs index 7b2b137aa7..e1f56eeecd 100644 --- a/gnu/usr.bin/cc44/Makefile.langs +++ b/gnu/usr.bin/cc44/Makefile.langs @@ -1,5 +1,7 @@ .include "Makefile.inc" +COMPVERSION= gcc44 + .if !defined(NO_CXX) lang_tree_files= cp/cp-tree.def .endif diff --git a/gnu/usr.bin/cc44/c++/Makefile b/gnu/usr.bin/cc44/c++/Makefile index d0ce03465d..00116e7a42 100644 --- a/gnu/usr.bin/cc44/c++/Makefile +++ b/gnu/usr.bin/cc44/c++/Makefile @@ -2,24 +2,26 @@ GCC_LANG_DIR= gcc/cp .include "../Makefile.inc" .include "../Makefile.langs" +.include "../../Makefile.cco" PROG= c++ LINKS= ${BINDIR}/c++ ${BINDIR}/g++ LINKS+= ${BINDIR}/c++ ${BINDIR}/CC -MAN= g++${GCCSHORTVER}.1 - -.if ${GCCDOCSUFFIX} != ${GCCSHORTVER} -MLINKS= g++${GCCSHORTVER}.1 g++${GCCDOCSUFFIX}.1 -.endif +MFILE= g++${MANPAGEVER}.1 +MAN= ${MFILE} SRCS= ${GCC_SRCS} g++spec.c intl.c prefix.c version.c SRCS+= ${EXTRA_GCC_SRCS} CFLAGS+= -DGCC_DRIVER -g++${GCCSHORTVER}.1: ${GCCDIR}/gcc/doc/g++.1 +${MFILE}: ${GCCDIR}/gcc/doc/g++.1 cp ${.ALLSRC} ${.TARGET} -CLEANFILES+= g++${GCCSHORTVER}.1 +CLEANFILES+= ${MFILE} + +.if defined(IS_PRIMARY) +MLINKS+= ${MFILE} g++.1 +.endif .include diff --git a/gnu/usr.bin/cc44/cc/Makefile b/gnu/usr.bin/cc44/cc/Makefile index 3a8f0f7129..975294ba7b 100644 --- 
a/gnu/usr.bin/cc44/cc/Makefile +++ b/gnu/usr.bin/cc44/cc/Makefile @@ -1,31 +1,33 @@ .include "../Makefile.inc" .include "../Makefile.langs" +.include "../../Makefile.cco" PROG= cc LINKS= ${BINDIR}/cc ${BINDIR}/gcc -MAN= gcc${GCCSHORTVER}.1 -MLINKS= gcc${GCCSHORTVER}.1 cc.1 \ - gcc${GCCSHORTVER}.1 c++.1 \ - gcc${GCCSHORTVER}.1 g++.1 \ - gcc${GCCSHORTVER}.1 gcc.1 \ - gcc${GCCSHORTVER}.1 CC.1 - -.if ${GCCDOCSUFFIX} != ${GCCSHORTVER} -MLINKS+=gcc${GCCSHORTVER}.1 cc${GCCDOCSUFFIX}.1 -MLINKS+=gcc${GCCSHORTVER}.1 c++${GCCDOCSUFFIX}.1 -MLINKS+=gcc${GCCSHORTVER}.1 g++${GCCDOCSUFFIX}.1 -MLINKS+=gcc${GCCSHORTVER}.1 gcc${GCCDOCSUFFIX}.1 -MLINKS+=gcc${GCCSHORTVER}.1 CC${GCCDOCSUFFIX}.1 -.endif +MFILE= gcc${MANPAGEVER}.1 +MAN= ${MFILE} SRCS= ${GCC_SRCS} gccspec.c intl.c prefix.c version.c SRCS+= ${EXTRA_GCC_SRCS} CFLAGS+= -DGCC_DRIVER -gcc${GCCSHORTVER}.1: ${GCCDIR}/gcc/doc/gcc.1 +${MFILE}: ${GCCDIR}/gcc/doc/gcc.1 cp ${.ALLSRC} ${.TARGET} -CLEANFILES+= gcc${GCCSHORTVER}.1 +CLEANFILES+= ${MFILE} + +MLINKS+= ${MFILE} cc${MANPAGEVER}.1 +MLINKS+= ${MFILE} CC${MANPAGEVER}.1 +MLINKS+= ${MFILE} c++${MANPAGEVER}.1 +MLINKS+= ${MFILE} g++${MANPAGEVER}.1 + +.if defined(IS_PRIMARY) +MLINKS+= ${MFILE} cc.1 +MLINKS+= ${MFILE} CC.1 +MLINKS+= ${MFILE} gcc.1 +MLINKS+= ${MFILE} c++.1 +MLINKS+= ${MFILE} g++.1 +.endif .include diff --git a/gnu/usr.bin/cc44/cpp/Makefile b/gnu/usr.bin/cc44/cpp/Makefile index fbe2f10a88..e96e380af7 100644 --- a/gnu/usr.bin/cc44/cpp/Makefile +++ b/gnu/usr.bin/cc44/cpp/Makefile @@ -1,22 +1,23 @@ .include "../Makefile.inc" .include "../Makefile.langs" +.include "../../Makefile.cco" PROG= cpp -MAN= cpp${GCCSHORTVER}.1 -MLINKS= cpp${GCCSHORTVER}.1 cpp.1 - -.if ${GCCSHORTVER} != ${GCCDOCSUFFIX} -MLINKS+=cpp${GCCSHORTVER}.1 cpp${GCCDOCSUFFIX}.1 -.endif +MFILE= cpp${GCCSHORTVER}.1 +MAN= ${MFILE} SRCS= ${GCC_SRCS} cppspec.c intl.c prefix.c version.c SRCS+= ${EXTRA_GCC_SRCS} CFLAGS+= -DGCC_DRIVER -cpp${GCCSHORTVER}.1: ${GCCDIR}/gcc/doc/cpp.1 +${MFILE}: ${GCCDIR}/gcc/doc/cpp.1 
cp ${.ALLSRC} ${.TARGET} -CLEANFILES+= cpp${GCCSHORTVER}.1 +CLEANFILES+= ${MFILE} + +.if defined(IS_PRIMARY) +MLINKS+= ${MFILE} cpp.1 +.endif .include diff --git a/gnu/usr.bin/cc44/doc/Makefile b/gnu/usr.bin/cc44/doc/Makefile index 43b99e6164..5348364ef8 100644 --- a/gnu/usr.bin/cc44/doc/Makefile +++ b/gnu/usr.bin/cc44/doc/Makefile @@ -15,7 +15,7 @@ INFOENTRY_gcint= "* GCC-internals. The GNU Compiler Collection's internals." # Install as nameVER for the time being .for _i in ${ORIGINFO} -.for docsfx in ${GCCSHORTVER} ${GCCDOCSUFFIX} +.for docsfx in ${GCCSHORTVER} .if empty(INFO:M${_i}${docsfx}) INFO+= ${_i}${docsfx} INFOENTRY_${_i}${docsfx}= ${INFOENTRY_${_i}:C/\./${docsfx}./1} @@ -30,9 +30,6 @@ CLEANFILES+= ${_i}${docsufx}.info .endfor .endfor # End magic rewriting -.if ${GCCDOCSUFFIX} == "" -INFO+= ${ORIGINFO} -.endif cpp.info: cpp.texi fdl.texi cppenv.texi cppopts.texi \ gcc-common.texi gcc-vers.texi diff --git a/gnu/usr.bin/cc44/gcov/Makefile b/gnu/usr.bin/cc44/gcov/Makefile index 1cbd18c03a..774e35c7c7 100644 --- a/gnu/usr.bin/cc44/gcov/Makefile +++ b/gnu/usr.bin/cc44/gcov/Makefile @@ -1,19 +1,20 @@ .include "../Makefile.inc" .include "../Makefile.langs" +.include "../../Makefile.cco" PROG= gcov -MAN= gcov${GCCSHORTVER}.1 -MLINKS= gcov${GCCSHORTVER}.1 gcov.1 - -.if ${GCCDOCSUFFIX} != ${GCCSHORTVER} -MLINKS+=gcov${GCCSHORTVER}.1 gcov${GCCDOCSUFFIX}.1 -.endif +MFILE= gcov${GCCSHORTVER}.1 +MAN= ${MFILE} SRCS= gcov.c intl.c errors.c version.c -gcov${GCCSHORTVER}.1: ${GCCDIR}/gcc/doc/gcov.1 +${MFILE}: ${GCCDIR}/gcc/doc/gcov.1 cp ${.ALLSRC} ${.TARGET} -CLEANFILES+= gcov${GCCSHORTVER}.1 +CLEANFILES+= ${MFILE} + +.if defined(IS_PRIMARY) +MLINKS+= ${MFILE} gcov.1 +.endif .include diff --git a/gnu/usr.bin/cc47/Makefile.inc b/gnu/usr.bin/cc47/Makefile.inc index 966d674fd4..cc8275cc1c 100644 --- a/gnu/usr.bin/cc47/Makefile.inc +++ b/gnu/usr.bin/cc47/Makefile.inc @@ -6,9 +6,6 @@ GCCDATESTAMP= 2012-09-20 GCCPOINTVER= ${GCCCOMPLETEVER:R} GCCSHORTVER= ${GCCPOINTVER:S/.//} 
-GCCDOCSUFFIX= ${GCCSHORTVER} - - CFLAGS+= -DGCCPOINTVER=\"${GCCPOINTVER}\" CFLAGS+= -DGCCSHORTVER=\"${GCCSHORTVER}\" CFLAGS+= -DBASEVER=\"${GCCCOMPLETEVER}\" diff --git a/gnu/usr.bin/cc47/Makefile.langs b/gnu/usr.bin/cc47/Makefile.langs index 8cef1d1bcc..0ed188b7b4 100644 --- a/gnu/usr.bin/cc47/Makefile.langs +++ b/gnu/usr.bin/cc47/Makefile.langs @@ -1,5 +1,7 @@ .include "Makefile.inc" +COMPVERSION= gcc47 + .if !defined(NO_CXX) lang_tree_files+= cp/cp-tree.def .endif diff --git a/gnu/usr.bin/cc47/c++/Makefile b/gnu/usr.bin/cc47/c++/Makefile index 83e607f6d2..cc71758faf 100644 --- a/gnu/usr.bin/cc47/c++/Makefile +++ b/gnu/usr.bin/cc47/c++/Makefile @@ -1,14 +1,13 @@ GCC_LANG_DIR= gcc/cp .include "../Makefile.langs" +.include "../../Makefile.cco" PROG= c++ +MFILE= g++${MANPAGEVER}.1 +MAN= ${MFILE} + LINKS= ${BINDIR}/c++ ${BINDIR}/g++ LINKS+= ${BINDIR}/c++ ${BINDIR}/CC -MAN= g++${GCCSHORTVER}.1 - -.if ${GCCDOCSUFFIX} != ${GCCSHORTVER} -MLINKS= g++${GCCSHORTVER}.1 g++${GCCDOCSUFFIX}.1 -.endif OBJS+= ${GCC_SRCS:S/^/..\/cc\//:.c=.o} OBJS+= ${EXTRA_GCC_SRCS:S/^/..\/cc\//:.c=.o} @@ -16,9 +15,13 @@ SRCS= g++spec.c CFLAGS+= -DCONFIGURE_SPECS="\"\"" -g++${GCCSHORTVER}.1: ${GCCDIR}/gcc/doc/g++.1 +${MFILE}: ${GCCDIR}/gcc/doc/g++.1 cp ${.ALLSRC} ${.TARGET} -CLEANFILES+= g++${GCCSHORTVER}.1 +CLEANFILES+= ${MFILE} + +.if defined(IS_PRIMARY) +MLINKS+= ${MFILE} g++.1 +.endif .include diff --git a/gnu/usr.bin/cc47/cc/Makefile b/gnu/usr.bin/cc47/cc/Makefile index fe401718d0..7b9667b9d0 100644 --- a/gnu/usr.bin/cc47/cc/Makefile +++ b/gnu/usr.bin/cc47/cc/Makefile @@ -1,30 +1,32 @@ .include "../Makefile.langs" +.include "../../Makefile.cco" PROG= cc LINKS= ${BINDIR}/cc ${BINDIR}/gcc -MAN= gcc${GCCSHORTVER}.1 -MLINKS= gcc${GCCSHORTVER}.1 cc.1 \ - gcc${GCCSHORTVER}.1 c++.1 \ - gcc${GCCSHORTVER}.1 g++.1 \ - gcc${GCCSHORTVER}.1 gcc.1 \ - gcc${GCCSHORTVER}.1 CC.1 - -.if ${GCCDOCSUFFIX} != ${GCCSHORTVER} -MLINKS+=gcc${GCCSHORTVER}.1 cc${GCCDOCSUFFIX}.1 -MLINKS+=gcc${GCCSHORTVER}.1 
c++${GCCDOCSUFFIX}.1 -MLINKS+=gcc${GCCSHORTVER}.1 g++${GCCDOCSUFFIX}.1 -MLINKS+=gcc${GCCSHORTVER}.1 gcc${GCCDOCSUFFIX}.1 -MLINKS+=gcc${GCCSHORTVER}.1 CC${GCCDOCSUFFIX}.1 -.endif +MFILE= gcc${MANPAGEVER}.1 +MAN= ${MFILE} SRCS= ${GCC_SRCS} gccspec.c SRCS+= ${EXTRA_GCC_SRCS} CFLAGS+= -DCONFIGURE_SPECS="\"\"" -gcc${GCCSHORTVER}.1: ${GCCDIR}/gcc/doc/gcc.1 +${MFILE}: ${GCCDIR}/gcc/doc/gcc.1 cp ${.ALLSRC} ${.TARGET} -CLEANFILES+= gcc${GCCSHORTVER}.1 +CLEANFILES+= ${MFILE} + +MLINKS+= ${MFILE} cc${MANPAGEVER}.1 +MLINKS+= ${MFILE} CC${MANPAGEVER}.1 +MLINKS+= ${MFILE} c++${MANPAGEVER}.1 +MLINKS+= ${MFILE} g++${MANPAGEVER}.1 + +.if defined(IS_PRIMARY) +MLINKS+= ${MFILE} cc.1 +MLINKS+= ${MFILE} CC.1 +MLINKS+= ${MFILE} gcc.1 +MLINKS+= ${MFILE} c++.1 +MLINKS+= ${MFILE} g++.1 +.endif .include diff --git a/gnu/usr.bin/cc47/cpp/Makefile b/gnu/usr.bin/cc47/cpp/Makefile index cf012d4d20..abf6bf1e88 100644 --- a/gnu/usr.bin/cc47/cpp/Makefile +++ b/gnu/usr.bin/cc47/cpp/Makefile @@ -1,13 +1,10 @@ .include "../Makefile.inc" .include "../Makefile.langs" +.include "../../Makefile.cco" PROG= cpp -MAN= cpp${GCCSHORTVER}.1 -MLINKS= cpp${GCCSHORTVER}.1 cpp.1 - -.if ${GCCSHORTVER} != ${GCCDOCSUFFIX} -MLINKS+=cpp${GCCSHORTVER}.1 cpp${GCCDOCSUFFIX}.1 -.endif +MFILE= cpp${MANPAGEVER}.1 +MAN= ${MFILE} OBJS+= ${GCC_SRCS:S/^/..\/cc\//:.c=.o} OBJS+= ${EXTRA_GCC_SRCS:S/^/..\/cc\//:.c=.o} @@ -15,9 +12,13 @@ SRCS= cppspec.c CFLAGS+= -DCONFIGURE_SPECS="\"\"" -cpp${GCCSHORTVER}.1: ${GCCDIR}/gcc/doc/cpp.1 +${MFILE}: ${GCCDIR}/gcc/doc/cpp.1 cp ${.ALLSRC} ${.TARGET} -CLEANFILES+= cpp${GCCSHORTVER}.1 +CLEANFILES+= ${MFILE} + +.if defined(IS_PRIMARY) +MLINKS+= ${MFILE} cpp.1 +.endif .include diff --git a/gnu/usr.bin/cc47/doc/Makefile b/gnu/usr.bin/cc47/doc/Makefile index 5ebec27ea2..0baabee35f 100644 --- a/gnu/usr.bin/cc47/doc/Makefile +++ b/gnu/usr.bin/cc47/doc/Makefile @@ -15,7 +15,7 @@ INFOENTRY_gcint= "* GCC-internals. The GNU Compiler Collection's internals." 
# Install as nameVER for the time being .for _i in ${ORIGINFO} -.for docsfx in ${GCCSHORTVER} ${GCCDOCSUFFIX} +.for docsfx in ${GCCSHORTVER} .if empty(INFO:M${_i}${docsfx}) INFO+= ${_i}${docsfx} INFOENTRY_${_i}${docsfx}= ${INFOENTRY_${_i}:C/\./${docsfx}./1} @@ -30,9 +30,6 @@ CLEANFILES+= ${_i}${docsufx}.info .endfor .endfor # End magic rewriting -.if ${GCCDOCSUFFIX} == "" -INFO+= ${ORIGINFO} -.endif cpp.info: cpp.texi fdl.texi cppenv.texi cppopts.texi \ gcc-common.texi gcc-vers.texi diff --git a/gnu/usr.bin/cc47/gcov/Makefile b/gnu/usr.bin/cc47/gcov/Makefile index 71b22a50ef..c063d3b548 100644 --- a/gnu/usr.bin/cc47/gcov/Makefile +++ b/gnu/usr.bin/cc47/gcov/Makefile @@ -1,18 +1,19 @@ .include "../Makefile.langs" +.include "../../Makefile.cco" PROG= gcov -MAN= gcov${GCCSHORTVER}.1 -MLINKS= gcov${GCCSHORTVER}.1 gcov.1 - -.if ${GCCDOCSUFFIX} != ${GCCSHORTVER} -MLINKS+=gcov${GCCSHORTVER}.1 gcov${GCCDOCSUFFIX}.1 -.endif +MFILE= gcov${MANPAGEVER}.1 +MAN= ${MFILE} SRCS= gcov.c -gcov${GCCSHORTVER}.1: ${GCCDIR}/gcc/doc/gcov.1 +${MFILE}: ${GCCDIR}/gcc/doc/gcov.1 cp ${.ALLSRC} ${.TARGET} -CLEANFILES+= gcov${GCCSHORTVER}.1 +CLEANFILES+= ${MFILE} + +.if defined(IS_PRIMARY) +MLINKS+= ${MFILE} gcov.1 +.endif .include -- 2.41.0 From f2013535956a7e117cad47f86aa45ff6809efccb Mon Sep 17 00:00:00 2001 From: Sepherosa Ziehau Date: Sun, 21 Oct 2012 18:42:05 +0800 Subject: [PATCH 13/16] objcache: Make sure that percpu_objcache and depot are cache line aligned Since percpu_objcache and depot are embedded in the objcache, the objcache itself must be cache line aligned.
--- sys/kern/kern_objcache.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/sys/kern/kern_objcache.c b/sys/kern/kern_objcache.c index 0a36ee63c6..ba6dbc9cdd 100644 --- a/sys/kern/kern_objcache.c +++ b/sys/kern/kern_objcache.c @@ -188,8 +188,9 @@ objcache_create(const char *name, int cluster_limit, int nom_cache, /* * Allocate object cache structure */ - oc = kmalloc(__offsetof(struct objcache, cache_percpu[ncpus]), - M_OBJCACHE, M_WAITOK | M_ZERO); + oc = kmalloc_cachealign( + __offsetof(struct objcache, cache_percpu[ncpus]), + M_OBJCACHE, M_WAITOK | M_ZERO); oc->name = kstrdup(name, M_TEMP); oc->ctor = ctor ? ctor : null_ctor; oc->dtor = dtor ? dtor : null_dtor; -- 2.41.0 From 1ad7b4a725493968707b42fff28915984028b0d6 Mon Sep 17 00:00:00 2001 From: Sepherosa Ziehau Date: Sun, 21 Oct 2012 20:23:04 +0800 Subject: [PATCH 14/16] objcache: Make sure magazines are cache line aligned This also makes the magazines' size a multiple of the cache line size. --- sys/kern/kern_objcache.c | 46 +++++++++++++++++++++++++++++++++------- 1 file changed, 38 insertions(+), 8 deletions(-) diff --git a/sys/kern/kern_objcache.c b/sys/kern/kern_objcache.c index ba6dbc9cdd..f96898bcb7 100644 --- a/sys/kern/kern_objcache.c +++ b/sys/kern/kern_objcache.c @@ -59,6 +59,10 @@ struct magazine { SLIST_HEAD(magazinelist, magazine); +#define MAGAZINE_HDRSIZE __offsetof(struct magazine, objects[0]) +#define MAGAZINE_CAPACITY_MAX 128 +#define MAGAZINE_CAPACITY_MIN 4 + /* * per-cluster cache of magazines * @@ -140,19 +144,37 @@ struct objcache { static struct spinlock objcachelist_spin; static LIST_HEAD(objcachelist, objcache) allobjcaches; +static int magazine_capmin; +static int magazine_capmax; static struct magazine * mag_alloc(int capacity) { struct magazine *mag; + int size; + + size = __offsetof(struct magazine, objects[capacity]); + KASSERT(size > 0 && (size & __VM_CACHELINE_MASK) == 0, + ("magazine size is not multiple cache line size")); - mag = kmalloc(__offsetof(struct magazine,
objects[capacity]), - M_OBJMAG, M_INTWAIT | M_ZERO); + mag = kmalloc_cachealign(size, M_OBJMAG, M_INTWAIT | M_ZERO); mag->capacity = capacity; mag->rounds = 0; return (mag); } +static int +mag_capacity_align(int mag_capacity) +{ + int mag_size; + + mag_size = __VM_CACHELINE_ALIGN( + __offsetof(struct magazine, objects[mag_capacity])); + mag_capacity = (mag_size - MAGAZINE_HDRSIZE) / sizeof(void *); + + return mag_capacity; +} + /* * Utility routine for objects that don't require any de-construction. */ @@ -223,13 +245,13 @@ objcache_create(const char *name, int cluster_limit, int nom_cache, /* * Magazine capacity for 2 active magazines per cpu plus 2 - * magazines in the depot. Minimum capacity is 4 objects. + * magazines in the depot. */ - mag_capacity = nom_cache / (ncpus + 1) / 2 + 1; - if (mag_capacity > 128) - mag_capacity = 128; - if (mag_capacity < 4) - mag_capacity = 4; + mag_capacity = mag_capacity_align(nom_cache / (ncpus + 1) / 2 + 1); + if (mag_capacity > magazine_capmax) + mag_capacity = magazine_capmax; + else if (mag_capacity < magazine_capmin) + mag_capacity = magazine_capmin; depot->magcapacity = mag_capacity; /* @@ -972,6 +994,14 @@ static void objcache_init(void) { spin_init(&objcachelist_spin); + + magazine_capmin = mag_capacity_align(MAGAZINE_CAPACITY_MIN); + magazine_capmax = mag_capacity_align(MAGAZINE_CAPACITY_MAX); + if (bootverbose) { + kprintf("objcache: magazine cap [%d, %d]\n", + magazine_capmin, magazine_capmax); + } + #if 0 callout_init_mp(&objcache_callout); objcache_rebalance_period = 60 * hz; -- 2.41.0 From 2508820fcbdbd2efa584a8895c62e87f1fc1e925 Mon Sep 17 00:00:00 2001 From: Sascha Wildner Date: Sun, 21 Oct 2012 15:52:36 +0200 Subject: [PATCH 15/16] UPDATING: Add some words about usb4bsd. --- UPDATING | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/UPDATING b/UPDATING index 0fb26eb938..51014ceb69 100644 --- a/UPDATING +++ b/UPDATING @@ -41,6 +41,21 @@ default compiler in base (default is still GCC 4.4). 
Users who wish to build only GCC 4.4 have to replace NO_GCC41 with NO_GCC47 in /etc/make.conf. +USB4BSD +------- + +A new USB stack (from FreeBSD) has been brought in. The following +modules have been ported so far: usb, uhci, ohci, ehci, xhci, umass, +usfs, uether, if_axe, if_udav, ukbd, ums, uep, uhid, usb_quirk, +and uaudio. + +It is not yet the default. To activate it, WANT_USB4BSD=yes has to +be put in loader.conf and device "usb4bsd" (quotes needed) has to +replace device usb in the kernel config. + +Note that this is experimental and incomplete, but we are interested +in hearing about issues with it, of course. + +-----------------------------------------------------------------------+ + UPGRADING DRAGONFLY FROM 2.10 to later versions + +-----------------------------------------------------------------------+ -- 2.41.0 From c72002214e51f25c61a97b7138ff788c51cafde4 Mon Sep 17 00:00:00 2001 From: Sascha Wildner Date: Sun, 21 Oct 2012 18:45:32 +0200 Subject: [PATCH 16/16] UPDATING: Fix a typo and mention that for usb4bsd, w&k need rebuilding. Sigh, I had written 'loader.conf' instead of 'make.conf'. It will need a mention on the release page/errata. --- UPDATING | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/UPDATING b/UPDATING index 51014ceb69..016e02fcac 100644 --- a/UPDATING +++ b/UPDATING @@ -50,8 +50,9 @@ usfs, uether, if_axe, if_udav, ukbd, ums, uep, uhid, usb_quirk, and uaudio. It is not yet the default. To activate it, WANT_USB4BSD=yes has to -be put in loader.conf and device "usb4bsd" (quotes needed) has to -replace device usb in the kernel config. +be put in make.conf and device "usb4bsd" (quotes needed) has to +replace device usb in the kernel config. After that, a full +build/install/upgrade cycle is needed. Note that this is experimental and incomplete, but we are interested in hearing about issues with it, of course. -- 2.41.0