/*************************************************************************************/
/*      Copyright 2009-2018 Barcelona Supercomputing Center                          */
/*                                                                                   */
/*      This file is part of the NANOS++ library.                                    */
/*                                                                                   */
/*      NANOS++ is free software: you can redistribute it and/or modify              */
/*      it under the terms of the GNU Lesser General Public License as published by  */
/*      the Free Software Foundation, either version 3 of the License, or            */
/*      (at your option) any later version.                                          */
/*                                                                                   */
/*      NANOS++ is distributed in the hope that it will be useful,                   */
/*      but WITHOUT ANY WARRANTY; without even the implied warranty of               */
/*      MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the                */
/*      GNU Lesser General Public License for more details.                          */
/*                                                                                   */
/*      You should have received a copy of the GNU Lesser General Public License     */
/*      along with NANOS++.  If not, see <https://www.gnu.org/licenses/>.            */
/*************************************************************************************/

#include "basethread.hpp"
#include "threadteam.hpp"
#include "system.hpp"
#include "omp_wd_data.hpp"
#include "omp_threadteam_data.hpp"
#include "nanos_omp.h"

using namespace nanos;
using namespace nanos::OpenMP;
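
/*
 * These entry points implement the OpenMP user-level runtime routines on
 * top of Nanos++. An orientation note (inferred from how the symbols are
 * used below, not from their definitions):
 *  - myThread / getMyThreadSafe() yield the calling thread's BaseThread,
 *    from which the current team and WorkDescriptor are reached.
 *  - sys is the runtime System singleton; globalState presumably holds
 *    global ICVs such as thread-limit-var and max-active-levels-var.
 *  - Per-task ICVs (dyn-var, nest-var, run-sched-var, nthreads-var) live
 *    in the OmpData attached to the current WorkDescriptor.
 */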

extern "C"
{
   NANOS_API_DEF(int, omp_get_num_threads, ( void ))
   {
      return myThread->getTeam()->getFinalSize();
   }
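
   /* The GCC alias attribute below makes the nanos_omp_* symbols alternate
    * entry points for the same definition; the trailing-underscore variants
    * follow the usual Fortran name-mangling convention. The same pattern is
    * repeated for the rest of the API. */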

   int nanos_omp_get_num_threads ( void ) __attribute__ ((alias ("omp_get_num_threads")));
   int nanos_omp_get_num_threads_ ( void ) __attribute__ ((alias ("omp_get_num_threads")));

   NANOS_API_DEF(int, omp_get_max_threads, ( void ))
   {
      return sys.getPMInterface().getMaxThreads();
   }

   int nanos_omp_get_max_threads ( void ) __attribute__ ((alias ("omp_get_max_threads")));
   int nanos_omp_get_max_threads_ ( void ) __attribute__ ((alias ("omp_get_max_threads")));

   void omp_set_num_threads( int nthreads )
   {
      sys.getPMInterface().setNumThreads( nthreads );
   }
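
   /* Fortran passes arguments by reference, so the Fortran-callable wrapper
    * takes a pointer and dereferences it before delegating to the C entry
    * point. The same wrapper shape recurs below for every setter. */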

   void omp_set_num_threads_(int *nthreads);
   void omp_set_num_threads_(int *nthreads)
   {
      omp_set_num_threads(*nthreads);
   }

   void nanos_omp_set_num_threads ( int nthreads ) __attribute__ ((alias ("omp_set_num_threads")));
   void nanos_omp_set_num_threads_ ( int nthreads ) __attribute__ ((alias ("omp_set_num_threads")));

   NANOS_API_DEF(int, omp_get_thread_num, ( void ))
   {
      //! \todo Check whether the master thread is always assigned id 0; if so, enforce it with an ensure() condition.
      if (myThread && myThread->getTeamData()) {
         return myThread->getTeamData()->getId();
      } else {
         return -1;
      }
   }

   int nanos_omp_get_thread_num ( void ) __attribute__ ((alias ("omp_get_thread_num")));
   int nanos_omp_get_thread_num_ ( void ) __attribute__ ((alias ("omp_get_thread_num")));

   NANOS_API_DEF(int, omp_get_num_procs, ( void ))
   {
      return sys.getSMPPlugin()->getCpuCount();
   }

   NANOS_API_DEF(int, omp_in_parallel, ( void ))
   {
      return myThread->getTeam()->getFinalSize() > 1;
   }
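
   /* The following setters and getters operate on per-task ICVs: each one
    * fetches the OmpData stored as internal data of the current
    * WorkDescriptor and reads or updates the corresponding ICV
    * (dyn-var, nest-var, run-sched-var). */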

   void omp_set_dynamic( int dynamic_threads )
   {
      OmpData *data = (OmpData *) myThread->getCurrentWD()->getInternalData();
      data->icvs()->setDynamic((bool) dynamic_threads);
   }

   void omp_set_dynamic_( int* dynamic_threads );
   void omp_set_dynamic_( int* dynamic_threads )
   {
      omp_set_dynamic(*dynamic_threads);
   }

   NANOS_API_DEF(int, omp_get_dynamic, ( void ))
   {
      OmpData *data = (OmpData *) myThread->getCurrentWD()->getInternalData();
      return (int) data->icvs()->getDynamic();
   }

   void omp_set_nested ( int nested )
   {
      OmpData *data = (OmpData *) myThread->getCurrentWD()->getInternalData();
      data->icvs()->setNested((bool) nested);
   }

   void omp_set_nested_ ( int* nested );
   void omp_set_nested_ ( int* nested )
   {
      omp_set_nested(*nested);
   }

   NANOS_API_DEF(int, omp_get_nested, ( void ))
   {
      OmpData *data = (OmpData *) myThread->getCurrentWD()->getInternalData();
      return (int) data->icvs()->getNested();
   }

   void omp_set_schedule ( omp_sched_t kind, int modifier )
   {
      OmpData *data = (OmpData *) myThread->getCurrentWD()->getInternalData();
      data->icvs()->setSchedule( LoopSchedule(kind,modifier) );
   }

   void omp_set_schedule_ ( omp_sched_t *kind, int *modifier );
   void omp_set_schedule_ ( omp_sched_t *kind, int *modifier )
   {
      omp_set_schedule(*kind, *modifier);
   }

   NANOS_API_DEF(void, omp_get_schedule, ( omp_sched_t *kind, int *modifier ))
   {
      OmpData *data = (OmpData *) myThread->getCurrentWD()->getInternalData();
      const LoopSchedule &schedule = data->icvs()->getSchedule();

      *kind = schedule._kind;
      *modifier = schedule._modifier;
   }
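
   /* A minimal usage sketch of the schedule round trip, from user code
    * (omp_sched_dynamic is the standard OpenMP enumerator, presumably
    * defined for this runtime in nanos_omp.h):
    *
    *    omp_sched_t kind;
    *    int chunk;
    *    omp_set_schedule( omp_sched_dynamic, 4 ); // run-sched-var := dynamic,4
    *    omp_get_schedule( &kind, &chunk );        // kind == omp_sched_dynamic, chunk == 4
    */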

   NANOS_API_DEF(int, omp_get_thread_limit, ( void ))
   {
      return globalState->getThreadLimit();
   }
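
   /* Per the OpenMP spec, the behavior of omp_set_max_active_levels when
    * called from within a parallel region is implementation defined; this
    * implementation simply ignores such calls. */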

   void omp_set_max_active_levels( int max_active_levels )
   {
      if (!omp_in_parallel() )
         globalState->setMaxActiveLevels(max_active_levels);
   }

   void omp_set_max_active_levels_( int *max_active_levels );
   void omp_set_max_active_levels_( int *max_active_levels )
   {
      omp_set_max_active_levels(*max_active_levels);
   }

   NANOS_API_DEF(int, omp_get_max_active_levels, ( void ))
   {
      return globalState->getMaxActiveLevels();
   }

   NANOS_API_DEF(int, omp_get_level, ( void ))
   {
      return getMyThreadSafe()->getTeam()->getLevel();
   }
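
   /* omp_get_ancestor_thread_num and omp_get_team_size walk up the team
    * hierarchy via getParent() until the requested nesting level is reached.
    * As the spec requires, both return -1 when level is negative or greater
    * than the current nesting level. */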

   int omp_get_ancestor_thread_num ( int level )
   {
      ThreadTeam* ancestor = getMyThreadSafe()->getTeam();
      int currentLevel = ancestor->getLevel();

      if ( level >= 0 && level <= currentLevel ) {
         while ( level != currentLevel ) {
            ancestor = ancestor->getParent();
            currentLevel = ancestor->getLevel();
         }
         int id = ancestor->getCreatorId();
         ensure ( id != -1, "Error in OpenMP Team initialization, team creator id was not set" );
         return id;
      }
      return -1;
   }

   int omp_get_ancestor_thread_num_(int* level);
   int omp_get_ancestor_thread_num_(int* level)
   {
      return omp_get_ancestor_thread_num(*level);
   }

   int omp_get_team_size( int level )
   {
      ThreadTeam* ancestor = getMyThreadSafe()->getTeam();
      int currentLevel = ancestor->getLevel();

      if ( level >= 0 && level <= currentLevel ) {
         while ( level != currentLevel ) {
            ancestor = ancestor->getParent();
            currentLevel = ancestor->getLevel();
         }
         return ancestor->size();
      }
      return -1;
   }

   int omp_get_team_size_ ( int *level );
   int omp_get_team_size_ ( int *level )
   {
      return omp_get_team_size(*level);
   }

   NANOS_API_DEF(int, omp_get_active_level, ( void ))
   {
      return ((OmpThreadTeamData &)getMyThreadSafe()->getTeam()->getThreadTeamData()).getActiveLevel();
   }

   NANOS_API_DEF(int, omp_in_final, ( void ))
   {
      OmpData *data = (OmpData *) myThread->getCurrentWD()->getInternalData();
      return (int)data->isFinal();
   }
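
   /* Nanos++-specific entry point: computes how many threads the next
    * parallel region would receive. The cascade below appears to mirror the
    * spec's "determine the number of threads for a parallel region"
    * algorithm: a request of <= 0 falls back to nthreads-var (or to the CPUs
    * lent by the ThreadManager when it is enabled), and the result is then
    * clamped by nesting, max-active-levels-var and thread-limit-var. */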

   NANOS_API_DEF(int, nanos_omp_get_num_threads_next_parallel, ( int threads_requested ))
   {
      OmpData *data = (OmpData *) myThread->getCurrentWD()->getInternalData();
      if ( threads_requested <= 0 ) {
         int avail_cpus = sys.getThreadManager()->borrowResources();
         if ( avail_cpus <= 0 ) {
            // If ThreadManager is disabled (default) and the user did not specify nthreads:
            threads_requested = data->icvs()->getNumThreads();
         } else {
            // ThreadManager is enabled:
            threads_requested = avail_cpus;
         }
      }

      int num_threads = 0;
      int threads_busy = 1; // FIXME: Should we keep track of it?
      int active_parallel_regions = getMyThreadSafe()->getTeam()->getLevel();
      int threads_available = globalState->getThreadLimit() - threads_busy + 1;

      if ( active_parallel_regions >= 1 && !data->icvs()->getNested() ) {
         num_threads = 1;
      }
      else if ( active_parallel_regions == globalState->getMaxActiveLevels() ) {
         num_threads = 1;
      }
      else if ( threads_requested > threads_available ) {
         num_threads = threads_available;
      }
      else {
         num_threads = threads_requested;
      }

      return num_threads;
   }
}