/* Copyright (C) 2005-2015 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file handles the (bare) PARALLEL construct.  */

#include "libgomp.h"
#include <limits.h>


/* Determine the number of threads to be launched for a PARALLEL construct.
   This algorithm is explicitly described in OpenMP 3.0 section 2.4.1.
   SPECIFIED is a combination of the NUM_THREADS clause and the IF clause.
   If the IF clause is false, SPECIFIED is forced to 1.  When NUM_THREADS
   is not present, SPECIFIED is 0.  */

unsigned
gomp_resolve_num_threads (unsigned specified, unsigned count)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_task_icv *icv;
  unsigned threads_requested, max_num_threads, num_threads;
  unsigned long busy;
  struct gomp_thread_pool *pool;

  icv = gomp_icv (false);

  if (specified == 1)
    return 1;
  else if (thr->ts.active_level >= 1 && !icv->nest_var)
    return 1;
  else if (thr->ts.active_level >= gomp_max_active_levels_var)
    return 1;

  /* If NUM_THREADS not specified, use nthreads_var.  */
  if (specified == 0)
    threads_requested = icv->nthreads_var;
  else
    threads_requested = specified;

  max_num_threads = threads_requested;

  /* If dynamic threads are enabled, bound the number of threads
     that we launch.  */
  if (icv->dyn_var)
    {
      unsigned dyn = gomp_dynamic_max_threads ();
      if (dyn < max_num_threads)
	max_num_threads = dyn;

      /* Optimization for parallel sections.  */
      if (count && count < max_num_threads)
	max_num_threads = count;
    }

  /* UINT_MAX stands for infinity.  */
  if (__builtin_expect (icv->thread_limit_var == UINT_MAX, 1)
      || max_num_threads == 1)
    return max_num_threads;

  /* The threads_busy counter lives in thread_pool.  If there
     isn't a thread_pool yet, there must be just one thread
     in the contention group.  If thr->ts.team is NULL, this isn't
     a nested parallel region, so there is just one thread in the
     contention group as well, and there is no need to handle it
     atomically.  */
  pool = thr->thread_pool;
  if (thr->ts.team == NULL)
    {
      num_threads = max_num_threads;
      if (num_threads > icv->thread_limit_var)
	num_threads = icv->thread_limit_var;
      if (pool)
	pool->threads_busy = num_threads;
      return num_threads;
    }

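  /* Otherwise this is a nested parallel region.  Reserve NUM_THREADS - 1
     additional threads against the contention group's thread limit; the
     current thread is already counted in THREADS_BUSY, hence the +1/-1
     adjustments below.  */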
#ifdef HAVE_SYNC_BUILTINS
  do
    {
      busy = pool->threads_busy;
      num_threads = max_num_threads;
      if (icv->thread_limit_var - busy + 1 < num_threads)
	num_threads = icv->thread_limit_var - busy + 1;
    }
  while (__sync_val_compare_and_swap (&pool->threads_busy,
				      busy, busy + num_threads - 1)
	 != busy);
#else
  gomp_mutex_lock (&gomp_managed_threads_lock);
  num_threads = max_num_threads;
  busy = pool->threads_busy;
  if (icv->thread_limit_var - busy + 1 < num_threads)
    num_threads = icv->thread_limit_var - busy + 1;
  pool->threads_busy += num_threads - 1;
  gomp_mutex_unlock (&gomp_managed_threads_lock);
#endif

  return num_threads;
}

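/* Start a team for a PARALLEL construct.  The compiler expands

       #pragma omp parallel num_threads (2)
       body;

   roughly into

       void subfn (void *data) { body; }
       GOMP_parallel_start (subfn, &data, 2);
       subfn (&data);
       GOMP_parallel_end ();

   so the calling thread becomes the master of the new team and runs the
   region body itself.  NUM_THREADS is 0 if no NUM_THREADS clause was
   present.  */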
void
GOMP_parallel_start (void (*fn) (void *), void *data, unsigned num_threads)
{
  num_threads = gomp_resolve_num_threads (num_threads, 0);
  gomp_team_start (fn, data, num_threads, 0, gomp_new_team (num_threads));
}

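/* End the innermost parallel region, releasing the team and, when a
   thread limit is in effect, returning the team's extra threads to the
   contention group's THREADS_BUSY count.  */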
void
GOMP_parallel_end (void)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  if (__builtin_expect (icv->thread_limit_var != UINT_MAX, 0))
    {
      struct gomp_thread *thr = gomp_thread ();
      struct gomp_team *team = thr->ts.team;
      unsigned int nthreads = team ? team->nthreads : 1;
      gomp_team_end ();
      if (nthreads > 1)
	{
	  /* If not nested, there is just one thread left in the
	     contention group, so there is no need for atomicity.  */
	  if (thr->ts.team == NULL)
	    thr->thread_pool->threads_busy = 1;
	  else
	    {
#ifdef HAVE_SYNC_BUILTINS
	      __sync_fetch_and_add (&thr->thread_pool->threads_busy,
				    1UL - nthreads);
#else
	      gomp_mutex_lock (&gomp_managed_threads_lock);
	      thr->thread_pool->threads_busy -= nthreads - 1;
	      gomp_mutex_unlock (&gomp_managed_threads_lock);
#endif
	    }
	}
    }
  else
    gomp_team_end ();
}
ialias (GOMP_parallel_end)

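/* Combined entry point for #pragma omp parallel: resolve the number of
   threads, start the team, run FN (DATA) in the master thread, then end
   the region.  FLAGS carries the construct's flags, such as the
   proc_bind policy.  */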
void
GOMP_parallel (void (*fn) (void *), void *data, unsigned num_threads,
	       unsigned int flags)
{
  num_threads = gomp_resolve_num_threads (num_threads, 0);
  gomp_team_start (fn, data, num_threads, flags, gomp_new_team (num_threads));
  fn (data);
  ialias_call (GOMP_parallel_end) ();
}

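/* Implement #pragma omp cancellation point.  WHICH is a mask of
   GOMP_CANCEL_* bits selecting the construct kind.  Returns true if
   cancellation has been observed and the encountering thread should
   branch to the end of the cancelled region.  */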
bool
GOMP_cancellation_point (int which)
{
  if (!gomp_cancel_var)
    return false;

  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  if (which & (GOMP_CANCEL_LOOP | GOMP_CANCEL_SECTIONS))
    {
      if (team == NULL)
	return false;
      return team->work_share_cancelled != 0;
    }
  else if (which & GOMP_CANCEL_TASKGROUP)
    {
      if (thr->task->taskgroup && thr->task->taskgroup->cancelled)
	return true;
      /* FALLTHRU into the GOMP_CANCEL_PARALLEL case,
	 as #pragma omp cancel parallel also cancels all explicit
	 tasks.  */
    }
  if (team)
    return gomp_team_barrier_cancelled (&team->barrier);
  return false;
}
ialias (GOMP_cancellation_point)

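/* Implement #pragma omp cancel.  DO_CANCEL is the value of the
   directive's IF clause; when false, this degenerates to a mere
   cancellation point.  Returns true if the encountering thread should
   branch to the end of the cancelled region.  */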
bool
GOMP_cancel (int which, bool do_cancel)
{
  if (!gomp_cancel_var)
    return false;

  if (!do_cancel)
    return ialias_call (GOMP_cancellation_point) (which);

  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  if (which & (GOMP_CANCEL_LOOP | GOMP_CANCEL_SECTIONS))
    {
      /* In an orphaned worksharing region, all we want to cancel
	 is the current thread.  */
      if (team != NULL)
	team->work_share_cancelled = 1;
      return true;
    }
  else if (which & GOMP_CANCEL_TASKGROUP)
    {
      if (thr->task->taskgroup && !thr->task->taskgroup->cancelled)
	{
	  gomp_mutex_lock (&team->task_lock);
	  thr->task->taskgroup->cancelled = true;
	  gomp_mutex_unlock (&team->task_lock);
	}
      return true;
    }
  team->team_cancelled = 1;
  gomp_team_barrier_cancel (team);
  return true;
}

/* The public OpenMP API for thread and team related inquiries.  */

int
omp_get_num_threads (void)
{
  struct gomp_team *team = gomp_thread ()->ts.team;
  return team ? team->nthreads : 1;
}

int
omp_get_thread_num (void)
{
  return gomp_thread ()->ts.team_id;
}

/* This wasn't right for OpenMP 2.5: an active region used to be any
   region whose IF clause did not evaluate to false.  Starting with
   OpenMP 3.0, a region is active only when more than one thread is in
   the team.  */

int
omp_in_parallel (void)
{
  return gomp_thread ()->ts.active_level > 0;
}

int
omp_get_level (void)
{
  return gomp_thread ()->ts.level;
}

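/* Walk up the chain of enclosing teams to find this thread's ancestor
   thread number at nesting depth LEVEL; -1 if LEVEL is out of range.  */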
int
omp_get_ancestor_thread_num (int level)
{
  struct gomp_team_state *ts = &gomp_thread ()->ts;
  if (level < 0 || level > ts->level)
    return -1;
  for (level = ts->level - level; level > 0; --level)
    ts = &ts->team->prev_ts;
  return ts->team_id;
}

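/* Likewise, but return the size of the team at nesting depth LEVEL;
   -1 if LEVEL is out of range.  */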
int
omp_get_team_size (int level)
{
  struct gomp_team_state *ts = &gomp_thread ()->ts;
  if (level < 0 || level > ts->level)
    return -1;
  for (level = ts->level - level; level > 0; --level)
    ts = &ts->team->prev_ts;
  if (ts->team == NULL)
    return 1;
  else
    return ts->team->nthreads;
}

int
omp_get_active_level (void)
{
  return gomp_thread ()->ts.active_level;
}

ialias (omp_get_num_threads)
ialias (omp_get_thread_num)
ialias (omp_in_parallel)
ialias (omp_get_level)
ialias (omp_get_ancestor_thread_num)
ialias (omp_get_team_size)
ialias (omp_get_active_level)