#include "kmp.h"
#include "kmp_error.h"
#include "kmp_i18n.h"
#include "kmp_itt.h"
#include "kmp_stats.h"
#include "kmp_str.h"

#if OMPT_SUPPORT
#include "ompt-specific.h"
#endif
#ifdef KMP_DEBUG
// template for debug prints specification ( d, u, lld, llu )
char const *traits_t<int>::spec = "d";
char const *traits_t<unsigned int>::spec = "u";
char const *traits_t<long long>::spec = "lld";
char const *traits_t<unsigned long long>::spec = "llu";
char const *traits_t<long>::spec = "ld";
#endif
#if KMP_STATS_ENABLED
#define KMP_STATS_LOOP_END(stat)                                               \
  {                                                                            \
    kmp_int64 t;                                                               \
    kmp_int64 u = (kmp_int64)(*pupper);                                        \
    kmp_int64 l = (kmp_int64)(*plower);                                        \
    kmp_int64 i = (kmp_int64)incr;                                             \
    if (i == 1) {                                                              \
      t = u - l + 1;                                                           \
    } else if (i == -1) {                                                      \
      t = l - u + 1;                                                           \
    } else if (i > 0) {                                                        \
      t = (u - l) / i + 1;                                                     \
    } else {                                                                   \
      t = (l - u) / (-i) + 1;                                                  \
    }                                                                          \
    KMP_COUNT_VALUE(stat, t);                                                  \
    KMP_POP_PARTITIONED_TIMER();                                               \
  }
#else
#define KMP_STATS_LOOP_END(stat) /* Nothing */
#endif
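// Worked example (illustrative, not from the source): a loop with
// *plower == 10, *pupper == 1, incr == -2 takes the (l - u) / (-i) + 1
// branch above, giving t = (10 - 1) / 2 + 1 == 5, which matches the
// executed iterations {10, 8, 6, 4, 2}.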
static ident_t loc_stub = {0, KMP_IDENT_KMPC, 0, 0, ";unknown;unknown;0;0;;"};
static inline void check_loc(ident_t *&loc) {
  if (loc == NULL)
    loc = &loc_stub; // may need to report location info to ittnotify
}
template <typename T>
static void __kmp_for_static_init(ident_t *loc, kmp_int32 global_tid,
                                  kmp_int32 schedtype, kmp_int32 *plastiter,
                                  T *plower, T *pupper,
                                  typename traits_t<T>::signed_t *pstride,
                                  typename traits_t<T>::signed_t incr,
                                  typename traits_t<T>::signed_t chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                  ,
                                  void *codeptr
#endif
) {
  KMP_COUNT_BLOCK(OMP_LOOP_STATIC);
  KMP_PUSH_PARTITIONED_TIMER(OMP_loop_static);
  KMP_PUSH_PARTITIONED_TIMER(OMP_loop_static_scheduling);
  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  kmp_int32 gtid = global_tid;
  kmp_uint32 tid;
  kmp_uint32 nth;
  UT trip_count;
  kmp_team_t *team;
  __kmp_assert_valid_gtid(gtid);
  kmp_info_t *th = __kmp_threads[gtid];
#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_team_info_t *team_info = NULL;
  ompt_task_info_t *task_info = NULL;
  ompt_work_t ompt_work_type = ompt_work_loop;

  static kmp_int8 warn = 0;

  if (ompt_enabled.ompt_callback_work) {
    // Only fully initialize variables needed by OMPT if OMPT is enabled.
    team_info = __ompt_get_teaminfo(0, NULL);
    task_info = __ompt_get_task_info_object(0);
    // Determine workshare type from the location flags.
    if (loc != NULL) {
      if ((loc->flags & KMP_IDENT_WORK_LOOP) != 0) {
        ompt_work_type = ompt_work_loop;
      } else if ((loc->flags & KMP_IDENT_WORK_SECTIONS) != 0) {
        ompt_work_type = ompt_work_sections;
      } else if ((loc->flags & KMP_IDENT_WORK_DISTRIBUTE) != 0) {
        ompt_work_type = ompt_work_distribute;
      } else {
        // Use compare-and-store so the warning is issued only once.
        kmp_int8 bool_res =
            KMP_COMPARE_AND_STORE_ACQ8(&warn, (kmp_int8)0, (kmp_int8)1);
        if (bool_res)
          KMP_WARNING(OmptOutdatedWorkshare);
      }
      KMP_DEBUG_ASSERT(ompt_work_type);
    }
  }
#endif
  KMP_DEBUG_ASSERT(plastiter && plower && pupper && pstride);
  KE_TRACE(10, ("__kmpc_for_static_init called (%d)\n", global_tid));
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format(
        "__kmpc_for_static_init: T#%%d sched=%%d liter=%%d iter=(%%%s,"
        " %%%s, %%%s) incr=%%%s chunk=%%%s signed?<%s>\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<ST>::spec,
        traits_t<ST>::spec, traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100, (buff, global_tid, schedtype, *plastiter, *plower, *pupper,
                   *pstride, incr, chunk));
    __kmp_str_free(&buff);
  }
#endif
  if (__kmp_env_consistency_check) {
    __kmp_push_workshare(global_tid, ct_pdo, loc);
    if (incr == 0) {
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
                            loc);
    }
  }
  /* special handling for zero-trip loops */
  if (incr > 0 ? (*pupper < *plower) : (*plower < *pupper)) {
    if (plastiter != NULL)
      *plastiter = FALSE;
    /* leave pupper and plower set to entire iteration space */
    *pstride = incr; /* value should never be used */
#ifdef KMP_DEBUG
    {
      char *buff;
      // create format specifiers before the debug output
      buff = __kmp_str_format("__kmpc_for_static_init:(ZERO TRIP) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s "
                              "signed?<%s>, loc = %%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec, traits_t<T>::spec);
      check_loc(loc);
      KD_TRACE(100,
               (buff, *plastiter, *plower, *pupper, *pstride, loc->psource));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), 0, codeptr);
    }
#endif
    KMP_STATS_LOOP_END(OMP_loop_static_iterations);
    return;
  }
  if (schedtype > kmp_ord_upper) {
    // we are in DISTRIBUTE construct
    schedtype += kmp_sch_static -
                 kmp_distribute_static; // AC: convert to usual schedule type
    if (th->th.th_team->t.t_serialized > 1) {
      tid = 0;
      team = th->th.th_team;
    } else {
      tid = th->th.th_team->t.t_master_tid;
      team = th->th.th_team->t.t_parent;
    }
  } else {
    tid = __kmp_tid_from_gtid(global_tid);
    team = th->th.th_team;
  }
  /* determine if "for" loop is an active worksharing construct */
  if (team->t.t_serialized) {
    /* serialized parallel: each thread executes the whole iteration space */
    if (plastiter != NULL)
      *plastiter = TRUE;
    /* leave pupper and plower set to entire iteration space */
    *pstride =
        (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
#ifdef KMP_DEBUG
    {
      char *buff;
      // create format specifiers before the debug output
      buff = __kmp_str_format("__kmpc_for_static_init: (serial) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec);
      KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), *pstride, codeptr);
    }
#endif
    KMP_STATS_LOOP_END(OMP_loop_static_iterations);
    return;
  }
  nth = team->t.t_nproc;
  if (nth == 1) {
    if (plastiter != NULL)
      *plastiter = TRUE;
    *pstride =
        (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
#ifdef KMP_DEBUG
    {
      char *buff;
      // create format specifiers before the debug output
      buff = __kmp_str_format("__kmpc_for_static_init: (serial) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec);
      KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), *pstride, codeptr);
    }
#endif
    KMP_STATS_LOOP_END(OMP_loop_static_iterations);
    return;
  }
  /* compute trip count */
  if (incr == 1) {
    trip_count = *pupper - *plower + 1;
  } else if (incr == -1) {
    trip_count = *plower - *pupper + 1;
  } else if (incr > 0) {
    // upper-lower can exceed the limit of signed type
    trip_count = (UT)(*pupper - *plower) / incr + 1;
  } else {
    trip_count = (UT)(*plower - *pupper) / (-incr) + 1;
  }
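  // Worked example (illustrative): *plower == 5, *pupper == 25, incr == 4
  // falls into the incr > 0 branch: trip_count = (25 - 5) / 4 + 1 == 6,
  // matching the iterations {5, 9, 13, 17, 21, 25}.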
#if KMP_STATS_ENABLED
  if (KMP_MASTER_GTID(gtid)) {
    KMP_COUNT_VALUE(OMP_loop_static_total_iterations, trip_count);
  }
#endif
  if (__kmp_env_consistency_check) {
    /* tripcount overflow? */
    if (trip_count == 0 && *pupper != *plower) {
      __kmp_error_construct(kmp_i18n_msg_CnsIterationRangeTooLarge, ct_pdo,
                            loc);
    }
  }
  /* compute remaining parameters */
  switch (schedtype) {
  case kmp_sch_static: {
    if (trip_count < nth) {
      KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy ||
                       __kmp_static == kmp_sch_static_balanced);
      if (tid < trip_count) {
        *pupper = *plower = *plower + tid * incr;
      } else {
        // set bounds so non-active threads execute no iterations
        *plower = *pupper + (incr > 0 ? 1 : -1);
      }
      if (plastiter != NULL)
        *plastiter = (tid == trip_count - 1);
    } else {
      if (__kmp_static == kmp_sch_static_balanced) {
        UT small_chunk = trip_count / nth;
        UT extras = trip_count % nth;
        *plower += incr * (tid * small_chunk + (tid < extras ? tid : extras));
        *pupper = *plower + small_chunk * incr - (tid < extras ? 0 : incr);
        if (plastiter != NULL)
          *plastiter = (tid == nth - 1);
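        // Worked example (illustrative): trip_count == 10, nth == 4 gives
        // small_chunk == 2, extras == 2, so threads 0-1 each take three
        // iterations and threads 2-3 take two; the imbalance is at most one
        // iteration per thread.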
      } else {
        T big_chunk_inc_count =
            (trip_count / nth + ((trip_count % nth) ? 1 : 0)) * incr;
        T old_upper = *pupper;

        KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);

        *plower += tid * big_chunk_inc_count;
        *pupper = *plower + big_chunk_inc_count - incr;
        if (incr > 0) {
          if (*pupper < *plower) // clamp on overflow of the computed bound
            *pupper = traits_t<T>::max_value;
          if (plastiter != NULL)
            *plastiter = *plower <= old_upper && *pupper > old_upper - incr;
          if (*pupper > old_upper)
            *pupper = old_upper; // tracker C73258
        } else {
          if (*pupper > *plower)
            *pupper = traits_t<T>::min_value;
          if (plastiter != NULL)
            *plastiter = *plower >= old_upper && *pupper < old_upper - incr;
          if (*pupper < old_upper)
            *pupper = old_upper; // tracker C73258
        }
      }
    }
    *pstride = trip_count;
    break;
  }
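  // Worked example for the greedy branch above (illustrative): trip_count ==
  // 10, nth == 4, incr == 1 gives big_chunk_inc_count == 3, so the threads
  // take [l, l+2], [l+3, l+5], [l+6, l+8], and [l+9, l+9] after the final
  // chunk is clamped to the original upper bound.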
  case kmp_sch_static_chunked: {
    ST span;
    UT nchunks;
    if (chunk < 1)
      chunk = 1;
    else if ((UT)chunk > trip_count)
      chunk = trip_count;
    nchunks = (trip_count) / (UT)chunk + (trip_count % (UT)chunk ? 1 : 0);
    span = chunk * incr;
    if (nchunks < nth) {
      *pstride = span * nchunks;
      if (tid < nchunks) {
        *plower = *plower + (span * tid);
        *pupper = *plower + span - incr;
      } else {
        *plower = *pupper + (incr > 0 ? 1 : -1);
      }
    } else {
      *pstride = span * nth;
      *plower = *plower + (span * tid);
      *pupper = *plower + span - incr;
    }
    if (plastiter != NULL)
      *plastiter = (tid == (nchunks - 1) % nth);
    break;
  }
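  // Worked example (illustrative): trip_count == 100, chunk == 10, nth == 4,
  // incr == 1 gives nchunks == 10 and span == 10; thread 2 owns [l+20, l+29]
  // and then advances by *pstride == 40 to [l+60, l+69], and
  // tid == (10 - 1) % 4 == 1 marks thread 1 as executing the last iteration.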
  case kmp_sch_static_balanced_chunked: {
    T old_upper = *pupper;
    // round up to make sure the chunk is enough to cover all iterations
    UT span = (trip_count + nth - 1) / nth;

    // perform chunk adjustment
    chunk = (span + chunk - 1) & ~(chunk - 1);

    span = chunk * incr;
    *plower = *plower + (span * tid);
    *pupper = *plower + span - incr;
    if (incr > 0) {
      if (*pupper > old_upper)
        *pupper = old_upper;
    } else if (*pupper < old_upper)
      *pupper = old_upper;

    if (plastiter != NULL)
      *plastiter = (tid == ((trip_count - 1) / (UT)chunk));
    break;
  }
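  // Worked example (illustrative): the mask-based rounding above assumes
  // chunk is a power of two. With trip_count == 100, nth == 8, chunk == 16,
  // span is first ceil(100 / 8) == 13, and (13 + 15) & ~15 rounds it up to
  // 16, so every thread covers a 16-iteration span before clamping.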
  default:
    KMP_ASSERT2(0, "__kmpc_for_static_init: unknown scheduling type");
    break;
  }
#if USE_ITT_BUILD
  // Report loop metadata
  if (KMP_MASTER_TID(tid) && __itt_metadata_add_ptr &&
      __kmp_forkjoin_frames_mode == 3 && th->th.th_teams_microtask == NULL &&
      team->t.t_active_level == 1) {
    kmp_uint64 cur_chunk = chunk;
    check_loc(loc);
    // calculate chunk in case it was not specified; it is specified for
    // kmp_sch_static_chunked
    if (schedtype == kmp_sch_static) {
      cur_chunk = trip_count / nth + ((trip_count % nth) ? 1 : 0);
    }
    // 0 - "static" schedule
    __kmp_itt_metadata_loop(loc, 0, trip_count, cur_chunk);
  }
#endif
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format("__kmpc_for_static_init: liter=%%d lower=%%%s "
                            "upper=%%%s stride = %%%s signed?<%s>\n",
                            traits_t<T>::spec, traits_t<T>::spec,
                            traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
    __kmp_str_free(&buff);
  }
#endif
  KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.ompt_callback_work) {
    ompt_callbacks.ompt_callback(ompt_callback_work)(
        ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
        &(task_info->task_data), trip_count, codeptr);
  }
#endif

  KMP_STATS_LOOP_END(OMP_loop_static_iterations);
  return;
}
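// Note on the contract implemented above: on return each thread iterates
// from *plower to *pupper inclusive, stepping by incr; for chunked schedules
// the thread then advances both bounds by *pstride and repeats while the
// bounds still fall inside the original iteration space.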
template <typename T>
static void __kmp_dist_for_static_init(ident_t *loc, kmp_int32 gtid,
                                       kmp_int32 schedule, kmp_int32 *plastiter,
                                       T *plower, T *pupper, T *pupperDist,
                                       typename traits_t<T>::signed_t *pstride,
                                       typename traits_t<T>::signed_t incr,
                                       typename traits_t<T>::signed_t chunk) {
  KMP_COUNT_BLOCK(OMP_DISTRIBUTE);
  KMP_PUSH_PARTITIONED_TIMER(OMP_distribute);
  KMP_PUSH_PARTITIONED_TIMER(OMP_distribute_scheduling);
  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  kmp_uint32 tid;
  kmp_uint32 nth;
  kmp_uint32 team_id;
  kmp_uint32 nteams;
  UT trip_count;
  kmp_team_t *team;
  kmp_info_t *th;
  KMP_DEBUG_ASSERT(plastiter && plower && pupper && pupperDist && pstride);
  KE_TRACE(10, ("__kmpc_dist_for_static_init called (%d)\n", gtid));
  __kmp_assert_valid_gtid(gtid);
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format(
        "__kmpc_dist_for_static_init: T#%%d schedLoop=%%d liter=%%d "
        "iter=(%%%s, %%%s, %%%s) chunk=%%%s signed?<%s>\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<ST>::spec,
        traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100,
             (buff, gtid, schedule, *plastiter, *plower, *pupper, incr, chunk));
    __kmp_str_free(&buff);
  }
#endif
  if (__kmp_env_consistency_check) {
    __kmp_push_workshare(gtid, ct_pdo, loc);
    if (incr == 0) {
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
                            loc);
    }
    if (incr > 0 ? (*pupper < *plower) : (*plower < *pupper)) {
      // The loop is illegal; zero-trip loops should have been filtered out
      // by the compiler's compile-time or run-time checks.
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc);
    }
  }
  tid = __kmp_tid_from_gtid(gtid);
  th = __kmp_threads[gtid];
  nth = th->th.th_team_nproc;
  team = th->th.th_team;
  KMP_DEBUG_ASSERT(th->th.th_teams_microtask); // we are in the teams construct
  nteams = th->th.th_teams_size.nteams;
  team_id = team->t.t_master_tid;
  KMP_DEBUG_ASSERT(nteams == (kmp_uint32)team->t.t_parent->t.t_nproc);
  // compute global trip count
  if (incr == 1) {
    trip_count = *pupper - *plower + 1;
  } else if (incr == -1) {
    trip_count = *plower - *pupper + 1;
  } else if (incr > 0) {
    // upper-lower can exceed the limit of signed type
    trip_count = (UT)(*pupper - *plower) / incr + 1;
  } else {
    trip_count = (UT)(*plower - *pupper) / (-incr) + 1;
  }
  *pstride = *pupper - *plower; // just in case (can be unused)
  if (trip_count <= nteams) {
    KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy ||
                     __kmp_static == kmp_sch_static_balanced);
    // only primary threads of some teams get single iteration,
    // other threads get nothing
    if (team_id < trip_count && tid == 0) {
      *pupper = *pupperDist = *plower = *plower + team_id * incr;
    } else {
      *pupperDist = *pupper;
      *plower = *pupper + incr; // compiler should skip the loop body
    }
    if (plastiter != NULL)
      *plastiter = (tid == 0 && team_id == trip_count - 1);
  } else {
    // Get the team's chunk first (each team gets at most one chunk)
    if (__kmp_static == kmp_sch_static_balanced) {
      UT chunkD = trip_count / nteams;
      UT extras = trip_count % nteams;
      *plower +=
          incr * (team_id * chunkD + (team_id < extras ? team_id : extras));
      *pupperDist = *plower + chunkD * incr - (team_id < extras ? 0 : incr);
      if (plastiter != NULL)
        *plastiter = (team_id == nteams - 1);
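      // Worked example (illustrative): trip_count == 103, nteams == 4 gives
      // chunkD == 25 and extras == 3, so teams 0-2 receive 26 iterations
      // each and team 3 receives 25.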
    } else { // kmp_sch_static_greedy
      T chunk_inc_count =
          (trip_count / nteams + ((trip_count % nteams) ? 1 : 0)) * incr;
      T upper = *pupper;
      KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
      *plower += team_id * chunk_inc_count;
      *pupperDist = *plower + chunk_inc_count - incr;
      // Check/correct bounds if needed
      if (incr > 0) {
        if (*pupperDist < *plower)
          *pupperDist = traits_t<T>::max_value;
        if (plastiter != NULL)
          *plastiter = *plower <= upper && *pupperDist > upper - incr;
        if (*pupperDist > upper)
          *pupperDist = upper; // tracker C73258
        if (*plower > *pupperDist) {
          *pupper = *pupperDist; // no iterations available for the team
          goto end;
        }
      } else {
        if (*pupperDist > *plower)
          *pupperDist = traits_t<T>::min_value;
        if (plastiter != NULL)
          *plastiter = *plower >= upper && *pupperDist < upper - incr;
        if (*pupperDist < upper)
          *pupperDist = upper; // tracker C73258
        if (*plower < *pupperDist) {
          *pupper = *pupperDist; // no iterations available for the team
          goto end;
        }
      }
    }
    // Get the parallel loop chunk now (for thread)
    // compute trip count for the team's chunk
    if (incr == 1) {
      trip_count = *pupperDist - *plower + 1;
    } else if (incr == -1) {
      trip_count = *plower - *pupperDist + 1;
    } else if (incr > 1) {
      // upper-lower can exceed the limit of signed type
      trip_count = (UT)(*pupperDist - *plower) / incr + 1;
    } else {
      trip_count = (UT)(*plower - *pupperDist) / (-incr) + 1;
    }
    KMP_DEBUG_ASSERT(trip_count);
    switch (schedule) {
    case kmp_sch_static: {
      if (trip_count <= nth) {
        KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy ||
                         __kmp_static == kmp_sch_static_balanced);
        if (tid < trip_count)
          *pupper = *plower = *plower + tid * incr;
        else
          *plower = *pupper + incr; // no iterations for this thread
        if (plastiter != NULL)
          if (*plastiter != 0 && !(tid == trip_count - 1))
            *plastiter = 0;
      } else {
        if (__kmp_static == kmp_sch_static_balanced) {
          UT chunkL = trip_count / nth;
          UT extras = trip_count % nth;
          *plower += incr * (tid * chunkL + (tid < extras ? tid : extras));
          *pupper = *plower + chunkL * incr - (tid < extras ? 0 : incr);
          if (plastiter != NULL)
            if (*plastiter != 0 && !(tid == nth - 1))
              *plastiter = 0;
        } else { // kmp_sch_static_greedy
          T chunk_inc_count =
              (trip_count / nth + ((trip_count % nth) ? 1 : 0)) * incr;
          T upper = *pupperDist;
          KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
          *plower += tid * chunk_inc_count;
          *pupper = *plower + chunk_inc_count - incr;
          if (incr > 0) {
            if (*pupper < *plower)
              *pupper = traits_t<T>::max_value;
            if (plastiter != NULL)
              if (*plastiter != 0 &&
                  !(*plower <= upper && *pupper > upper - incr))
                *plastiter = 0;
            if (*pupper > upper)
              *pupper = upper; // tracker C73258
          } else {
            if (*pupper > *plower)
              *pupper = traits_t<T>::min_value;
            if (plastiter != NULL)
              if (*plastiter != 0 &&
                  !(*plower >= upper && *pupper < upper - incr))
                *plastiter = 0;
            if (*pupper < upper)
              *pupper = upper; // tracker C73258
          }
        }
      }
      break;
    }
    case kmp_sch_static_chunked: {
      ST span;
      if (chunk < 1)
        chunk = 1;
      span = chunk * incr;
      *pstride = span * nth;
      *plower = *plower + (span * tid);
      *pupper = *plower + span - incr;
      if (plastiter != NULL)
        if (*plastiter != 0 && !(tid == ((trip_count - 1) / (UT)chunk) % nth))
          *plastiter = 0;
      break;
    }
    default:
      KMP_ASSERT2(0,
                  "__kmpc_dist_for_static_init: unknown loop scheduling type");
      break;
    }
  }
end:;
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format(
        "__kmpc_dist_for_static_init: last=%%d lo=%%%s up=%%%s upDist=%%%s "
        "stride=%%%s signed?<%s>\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<T>::spec,
        traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pupperDist, *pstride));
    __kmp_str_free(&buff);
  }
#endif
  KE_TRACE(10, ("__kmpc_dist_for_static_init: T#%d return\n", gtid));
  KMP_STATS_LOOP_END(OMP_distribute_iterations);
  return;
}
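// Worked example of the two-level split above (illustrative): with
// nteams == 2, nth == 4, and a loop over 0..15 (incr == 1), the balanced
// path gives team 0 the range [0, 7] in *pupperDist and team 1 the range
// [8, 15]; each team's four threads then split their 8-iteration range into
// per-thread bounds of two iterations each.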
template <typename T>
static void __kmp_team_static_init(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 *p_last, T *p_lb, T *p_ub,
                                   typename traits_t<T>::signed_t *p_st,
                                   typename traits_t<T>::signed_t incr,
                                   typename traits_t<T>::signed_t chunk) {
  // The routine returns the first chunk distributed to the team and the
  // stride for computing subsequent chunks. The last-iteration flag is set
  // for the team that will execute the last iteration of the loop.
  // The routine is called for dist_schedule(static, chunk) only.
  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  kmp_uint32 team_id;
  kmp_uint32 nteams;
  UT trip_count;
  T lower;
  T upper;
  ST span;
  kmp_team_t *team;
  kmp_info_t *th;
  KMP_DEBUG_ASSERT(p_last && p_lb && p_ub && p_st);
  KE_TRACE(10, ("__kmp_team_static_init called (%d)\n", gtid));
  __kmp_assert_valid_gtid(gtid);
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format("__kmp_team_static_init enter: T#%%d liter=%%d "
                            "iter=(%%%s, %%%s, %%%s) chunk %%%s; signed?<%s>\n",
                            traits_t<T>::spec, traits_t<T>::spec,
                            traits_t<ST>::spec, traits_t<ST>::spec,
                            traits_t<T>::spec);
    KD_TRACE(100, (buff, gtid, *p_last, *p_lb, *p_ub, *p_st, chunk));
    __kmp_str_free(&buff);
  }
#endif
  lower = *p_lb;
  upper = *p_ub;
  if (__kmp_env_consistency_check) {
    if (incr == 0) {
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
                            loc);
    }
    if (incr > 0 ? (upper < lower) : (lower < upper)) {
      // The loop is illegal; report it.
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc);
    }
  }
  th = __kmp_threads[gtid];
  team = th->th.th_team;
  KMP_DEBUG_ASSERT(th->th.th_teams_microtask); // we are in the teams construct
  nteams = th->th.th_teams_size.nteams;
  team_id = team->t.t_master_tid;
  KMP_DEBUG_ASSERT(nteams == (kmp_uint32)team->t.t_parent->t.t_nproc);
  // compute trip count
  if (incr == 1) {
    trip_count = upper - lower + 1;
  } else if (incr == -1) {
    trip_count = lower - upper + 1;
  } else if (incr > 0) {
    // upper-lower can exceed the limit of signed type
    trip_count = (UT)(upper - lower) / incr + 1;
  } else {
    trip_count = (UT)(lower - upper) / (-incr) + 1;
  }
  if (chunk < 1)
    chunk = 1;
  span = chunk * incr;
  *p_st = span * nteams;
  *p_lb = lower + (span * team_id);
  *p_ub = *p_lb + span - incr;
  if (p_last != NULL)
    *p_last = (team_id == ((trip_count - 1) / (UT)chunk) % nteams);
  // Correct upper bound if needed
  if (incr > 0) {
    if (*p_ub < *p_lb) // overflow?
      *p_ub = traits_t<T>::max_value;
    if (*p_ub > upper)
      *p_ub = upper; // tracker C73258
  } else { // incr < 0
    if (*p_ub > *p_lb) // overflow?
      *p_ub = traits_t<T>::min_value;
    if (*p_ub < upper)
      *p_ub = upper; // tracker C73258
  }
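  // Worked example (illustrative): lower == 0, upper == 99, incr == 1,
  // chunk == 10, nteams == 4 gives span == 10 and *p_st == 40; team 2 gets
  // its first chunk [20, 29] and advances by 40 to [60, 69], while
  // ((100 - 1) / 10) % 4 == 1 marks team 1 as owning the last iteration.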
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff =
        __kmp_str_format("__kmp_team_static_init exit: T#%%d team%%u liter=%%d "
                         "iter=(%%%s, %%%s, %%%s) chunk %%%s\n",
                         traits_t<T>::spec, traits_t<T>::spec,
                         traits_t<ST>::spec, traits_t<ST>::spec);
    KD_TRACE(100, (buff, gtid, team_id, *p_last, *p_lb, *p_ub, *p_st, chunk));
    __kmp_str_free(&buff);
  }
#endif
}
//------------------------------------------------------------------------------
// Entry points: compilers call the type-specialized wrappers below.
extern "C" {

void __kmpc_for_static_init_4(ident_t *loc, kmp_int32 gtid,
                              kmp_int32 schedtype, kmp_int32 *plastiter,
                              kmp_int32 *plower, kmp_int32 *pupper,
                              kmp_int32 *pstride, kmp_int32 incr,
                              kmp_int32 chunk) {
  __kmp_for_static_init<kmp_int32>(loc, gtid, schedtype, plastiter, plower,
                                   pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                   ,
                                   OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
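// A hedged usage sketch (illustrative, not part of this file): for
// "#pragma omp for schedule(static)" over i = 0..99, compiler-generated code
// calls the 32-bit entry point roughly as follows ("loop_body" is a
// hypothetical placeholder for the outlined loop body):
//
//   kmp_int32 last = 0, lower = 0, upper = 99, stride = 1;
//   __kmpc_for_static_init_4(loc, gtid, kmp_sch_static, &last, &lower,
//                            &upper, &stride, /*incr=*/1, /*chunk=*/1);
//   for (kmp_int32 i = lower; i <= upper; ++i)
//     loop_body(i);
//   __kmpc_for_static_fini(loc, gtid);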
void __kmpc_for_static_init_4u(ident_t *loc, kmp_int32 gtid,
                               kmp_int32 schedtype, kmp_int32 *plastiter,
                               kmp_uint32 *plower, kmp_uint32 *pupper,
                               kmp_int32 *pstride, kmp_int32 incr,
                               kmp_int32 chunk) {
  __kmp_for_static_init<kmp_uint32>(loc, gtid, schedtype, plastiter, plower,
                                    pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                    ,
                                    OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
void __kmpc_for_static_init_8(ident_t *loc, kmp_int32 gtid,
                              kmp_int32 schedtype, kmp_int32 *plastiter,
                              kmp_int64 *plower, kmp_int64 *pupper,
                              kmp_int64 *pstride, kmp_int64 incr,
                              kmp_int64 chunk) {
  __kmp_for_static_init<kmp_int64>(loc, gtid, schedtype, plastiter, plower,
                                   pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                   ,
                                   OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
void __kmpc_for_static_init_8u(ident_t *loc, kmp_int32 gtid,
                               kmp_int32 schedtype, kmp_int32 *plastiter,
                               kmp_uint64 *plower, kmp_uint64 *pupper,
                               kmp_int64 *pstride, kmp_int64 incr,
                               kmp_int64 chunk) {
  __kmp_for_static_init<kmp_uint64>(loc, gtid, schedtype, plastiter, plower,
                                    pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                    ,
                                    OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
void __kmpc_dist_for_static_init_4(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 schedule, kmp_int32 *plastiter,
                                   kmp_int32 *plower, kmp_int32 *pupper,
                                   kmp_int32 *pupperD, kmp_int32 *pstride,
                                   kmp_int32 incr, kmp_int32 chunk) {
  __kmp_dist_for_static_init<kmp_int32>(loc, gtid, schedule, plastiter, plower,
                                        pupper, pupperD, pstride, incr, chunk);
}
void __kmpc_dist_for_static_init_4u(ident_t *loc, kmp_int32 gtid,
                                    kmp_int32 schedule, kmp_int32 *plastiter,
                                    kmp_uint32 *plower, kmp_uint32 *pupper,
                                    kmp_uint32 *pupperD, kmp_int32 *pstride,
                                    kmp_int32 incr, kmp_int32 chunk) {
  __kmp_dist_for_static_init<kmp_uint32>(loc, gtid, schedule, plastiter,
                                         plower, pupper, pupperD, pstride,
                                         incr, chunk);
}
void __kmpc_dist_for_static_init_8(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 schedule, kmp_int32 *plastiter,
                                   kmp_int64 *plower, kmp_int64 *pupper,
                                   kmp_int64 *pupperD, kmp_int64 *pstride,
                                   kmp_int64 incr, kmp_int64 chunk) {
  __kmp_dist_for_static_init<kmp_int64>(loc, gtid, schedule, plastiter, plower,
                                        pupper, pupperD, pstride, incr, chunk);
}
void __kmpc_dist_for_static_init_8u(ident_t *loc, kmp_int32 gtid,
                                    kmp_int32 schedule, kmp_int32 *plastiter,
                                    kmp_uint64 *plower, kmp_uint64 *pupper,
                                    kmp_uint64 *pupperD, kmp_int64 *pstride,
                                    kmp_int64 incr, kmp_int64 chunk) {
  __kmp_dist_for_static_init<kmp_uint64>(loc, gtid, schedule, plastiter,
                                         plower, pupper, pupperD, pstride,
                                         incr, chunk);
}
void __kmpc_team_static_init_4(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
                               kmp_int32 *p_lb, kmp_int32 *p_ub,
                               kmp_int32 *p_st, kmp_int32 incr,
                               kmp_int32 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_int32>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                    chunk);
}
void __kmpc_team_static_init_4u(ident_t *loc, kmp_int32 gtid,
                                kmp_int32 *p_last, kmp_uint32 *p_lb,
                                kmp_uint32 *p_ub, kmp_int32 *p_st,
                                kmp_int32 incr, kmp_int32 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_uint32>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                     chunk);
}
void __kmpc_team_static_init_8(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
                               kmp_int64 *p_lb, kmp_int64 *p_ub,
                               kmp_int64 *p_st, kmp_int64 incr,
                               kmp_int64 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_int64>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                    chunk);
}
void __kmpc_team_static_init_8u(ident_t *loc, kmp_int32 gtid,
                                kmp_int32 *p_last, kmp_uint64 *p_lb,
                                kmp_uint64 *p_ub, kmp_int64 *p_st,
                                kmp_int64 incr, kmp_int64 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_uint64>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                     chunk);
}

} // extern "C"