Hi!

This patch fixes three issues:
1) the distribute construct doesn't support the lastprivate clause, so
   gimplification shouldn't add one; doing so causes ICEs
2) for shared clauses on a teams construct we need to record at least
   something in decl_map, otherwise lookup_decl ICEs
3) c_omp_split_clauses ICEd on a couple of combined constructs with a
   firstprivate clause
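
For reference, the kind of combined construct these ICEs showed up on is
essentially the new pr61486-1.c test further down; a minimal sketch:

  int
  foo (int *a)
  {
    int i, j = 0;
    /* Gimplification of the linear clause on this combined construct used
       to add a lastprivate clause to the distribute part, which distribute
       does not allow, and ICE.  */
    #pragma omp target teams distribute simd linear(i, j) map(a[:10])
    for (i = 0; i < 10; i++)
      a[i] = j++;
    return i + j;
  }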

Bootstrapped/regtested on x86_64-linux and i686-linux, committed to trunk
and 4.9 branch.

2014-06-12  Jakub Jelinek  <ja...@redhat.com>

        PR middle-end/61486
        * gimplify.c (struct gimplify_omp_ctx): Add distribute field.
        (gimplify_adjust_omp_clauses): Don't or in GOVD_LASTPRIVATE
        if outer combined construct is distribute.
        (gimplify_omp_for): For OMP_DISTRIBUTE set
        gimplify_omp_ctxp->distribute.
        * omp-low.c (scan_sharing_clauses) <case OMP_CLAUSE_SHARED>: For
        GIMPLE_OMP_TEAMS, if decl isn't global in outer context, record
        mapping into decl map.
c-family/
        * c-omp.c (c_omp_split_clauses): Don't crash on firstprivate in
        #pragma omp target teams or
        #pragma omp {,target }teams distribute simd.
testsuite/
        * c-c++-common/gomp/pr61486-1.c: New test.
        * c-c++-common/gomp/pr61486-2.c: New test.

--- gcc/gimplify.c.jj   2014-06-06 09:19:23.000000000 +0200
+++ gcc/gimplify.c      2014-06-12 16:06:07.992997628 +0200
@@ -139,6 +139,7 @@ struct gimplify_omp_ctx
   enum omp_clause_default_kind default_kind;
   enum omp_region_type region_type;
   bool combined_loop;
+  bool distribute;
 };
 
 static struct gimplify_ctx *gimplify_ctxp;
@@ -6359,7 +6360,11 @@ gimplify_adjust_omp_clauses (tree *list_
                      if (n == NULL
                          || (n->value & GOVD_DATA_SHARE_CLASS) == 0)
                        {
-                         int flags = GOVD_FIRSTPRIVATE | GOVD_LASTPRIVATE;
+                         int flags = GOVD_FIRSTPRIVATE;
+                         /* #pragma omp distribute does not allow
+                            lastprivate clause.  */
+                         if (!ctx->outer_context->distribute)
+                           flags |= GOVD_LASTPRIVATE;
                          if (n == NULL)
                            omp_add_variable (ctx->outer_context, decl,
                                              flags | GOVD_SEEN);
@@ -6640,6 +6645,8 @@ gimplify_omp_for (tree *expr_p, gimple_s
          || TREE_CODE (for_stmt) == CILK_SIMD);
   gimplify_scan_omp_clauses (&OMP_FOR_CLAUSES (for_stmt), pre_p,
                             simd ? ORT_SIMD : ORT_WORKSHARE);
+  if (TREE_CODE (for_stmt) == OMP_DISTRIBUTE)
+    gimplify_omp_ctxp->distribute = true;
 
   /* Handle OMP_FOR_INIT.  */
   for_pre_body = NULL;
--- gcc/omp-low.c.jj    2014-06-10 08:02:49.000000000 +0200
+++ gcc/omp-low.c       2014-06-12 16:41:09.438849948 +0200
@@ -1509,11 +1509,19 @@ scan_sharing_clauses (tree clauses, omp_
          break;
 
        case OMP_CLAUSE_SHARED:
+         decl = OMP_CLAUSE_DECL (c);
          /* Ignore shared directives in teams construct.  */
          if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
-           break;
+           {
+             /* Global variables don't need to be copied,
+                the receiver side will use them directly.  */
+             tree odecl = maybe_lookup_decl_in_outer_ctx (decl, ctx);
+             if (is_global_var (odecl))
+               break;
+             insert_decl_map (&ctx->cb, decl, odecl);
+             break;
+           }
          gcc_assert (is_taskreg_ctx (ctx));
-         decl = OMP_CLAUSE_DECL (c);
          gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
                      || !is_variable_sized (decl));
          /* Global variables don't need to be copied,
--- gcc/c-family/c-omp.c.jj     2014-05-11 22:20:26.000000000 +0200
+++ gcc/c-family/c-omp.c        2014-06-12 17:11:49.507948417 +0200
@@ -789,8 +789,13 @@ c_omp_split_clauses (location_t loc, enu
          else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
                   != 0)
            {
-             /* This must be #pragma omp {,target }teams distribute.  */
-             gcc_assert (code == OMP_DISTRIBUTE);
+             /* This must be one of
+                #pragma omp {,target }teams distribute
+                #pragma omp target teams
+                #pragma omp {,target }teams distribute simd.  */
+             gcc_assert (code == OMP_DISTRIBUTE
+                         || code == OMP_TEAMS
+                         || code == OMP_SIMD);
              s = C_OMP_CLAUSE_SPLIT_TEAMS;
            }
          else if ((mask & (OMP_CLAUSE_MASK_1
--- gcc/testsuite/c-c++-common/gomp/pr61486-1.c.jj      2014-06-12 19:11:52.029213158 +0200
+++ gcc/testsuite/c-c++-common/gomp/pr61486-1.c 2014-06-12 19:12:22.427069749 +0200
@@ -0,0 +1,13 @@
+/* PR middle-end/61486 */
+/* { dg-do compile } */
+/* { dg-options "-fopenmp" } */
+
+int
+foo (int *a)
+{
+  int i, j = 0;
+  #pragma omp target teams distribute simd linear(i, j) map(a[:10])
+  for (i = 0; i < 10; i++)
+    a[i] = j++;
+  return i + j;
+}
--- gcc/testsuite/c-c++-common/gomp/pr61486-2.c.jj      2014-06-12 19:12:44.620965191 +0200
+++ gcc/testsuite/c-c++-common/gomp/pr61486-2.c 2014-06-12 19:13:03.291874573 +0200
@@ -0,0 +1,458 @@
+/* PR middle-end/61486 */
+/* { dg-do compile } */
+/* { dg-options "-fopenmp" } */
+
+#pragma omp declare target
+void dosomething (int *a, int n, int m);
+#pragma omp end declare target
+
+void
+test (int n, int o, int p, int q, int r, int s, int *pp)
+{
+  int a[o], i, j;
+  #pragma omp target data device (n + 1) if (n != 6) map (tofrom: n, r)
+  {
+    #pragma omp target device (n + 1) if (n != 6) map (from: n) map (alloc: a[2:o-2])
+      dosomething (a, n, 0);
+    #pragma omp target teams device (n + 1) num_teams (n + 4) thread_limit (n * 2) \
+       if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
+       private (p) firstprivate (q) shared (n) reduction (+: r)
+    {
+      r = r + 1;
+      p = q;
+      dosomething (a, n, p + q);
+    }
+    #pragma omp target teams distribute device (n + 1) num_teams (n + 4) collapse (2) \
+       if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
+       private (p) firstprivate (q) shared (n) reduction (+: r) \
+       thread_limit (n * 2) dist_schedule (static, 4)
+      for (i = 0; i < 10; i++)
+       for (j = 0; j < 10; j++)
+         {
+           r = r + 1;
+           p = q;
+           dosomething (a, n, p + q);
+         }
+    #pragma omp target teams distribute device (n + 1) num_teams (n + 4) \
+       if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
+       private (p) firstprivate (q) shared (n) reduction (+: r) \
+       thread_limit (n * 2) dist_schedule (static, 4)
+      for (i = 0; i < 10; i++)
+       for (j = 0; j < 10; j++)
+         {
+           r = r + 1;
+           p = q;
+           dosomething (a, n, p + q);
+         }
+    #pragma omp target teams distribute parallel for device (n + 1) num_teams (n + 4) \
+       if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
+       private (p) firstprivate (q) shared (n) reduction (+: r) \
+       thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
+       num_threads (n + 4) proc_bind (spread) lastprivate (s) \
+       ordered schedule (static, 8)
+      for (i = 0; i < 10; i++)
+       for (j = 0; j < 10; j++)
+         {
+           r = r + 1;
+           p = q;
+           dosomething (a, n, p + q);
+           #pragma omp ordered
+             p = q;
+           s = i * 10 + j;
+         }
+    #pragma omp target teams distribute parallel for device (n + 1) num_teams (n + 4) \
+       if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
+       private (p) firstprivate (q) shared (n) reduction (+: r) \
+       thread_limit (n * 2) dist_schedule (static, 4) num_threads (n + 4) \
+       proc_bind (master) lastprivate (s) ordered schedule (static, 8)
+      for (i = 0; i < 10; i++)
+       {
+         for (j = 0; j < 10; j++)
+           {
+             r = r + 1;
+             p = q;
+             dosomething (a, n, p + q);
+           }
+         #pragma omp ordered
+           p = q;
+         s = i * 10;
+       }
+    #pragma omp target teams distribute parallel for simd device (n + 1) \
+       if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
+       private (p) firstprivate (q) shared (n) reduction (+: r) \
+       thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
+       num_threads (n + 4) proc_bind (spread) lastprivate (s) \
+       schedule (static, 8) num_teams (n + 4) safelen(8)
+      for (i = 0; i < 10; i++)
+       for (j = 0; j < 10; j++)
+         {
+           r = r + 1;
+           p = q;
+           a[2+i*10+j] = p + q;
+           s = i * 10 + j;
+         }
+    #pragma omp target teams distribute parallel for simd device (n + 1) \
+       if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
+       private (p) firstprivate (q) shared (n) reduction (+: r) \
+       thread_limit (n * 2) dist_schedule (static, 4) num_threads (n + 4) \
+       proc_bind (master) lastprivate (s) schedule (static, 8) \
+       num_teams (n + 4) safelen(16) linear(i:1) aligned (pp:4)
+      for (i = 0; i < 10; i++)
+       {
+         r = r + 1;
+         p = q;
+         a[2+i] = p + q;
+         s = i * 10;
+       }
+    #pragma omp target teams distribute simd device (n + 1) \
+       if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
+       private (p) firstprivate (q) shared (n) reduction (+: r) \
+       thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
+       lastprivate (s) num_teams (n + 4) safelen(8)
+      for (i = 0; i < 10; i++)
+       for (j = 0; j < 10; j++)
+         {
+           r = r + 1;
+           p = q;
+           a[2+i*10+j] = p + q;
+           s = i * 10 + j;
+         }
+    #pragma omp target teams distribute simd device (n + 1) \
+       if (n != 6)map (from: n) map (alloc: a[2:o-2]) default(shared) \
+       private (p) firstprivate (q) shared (n) reduction (+: r) \
+       thread_limit (n * 2) dist_schedule (static, 4) lastprivate (s) \
+       num_teams (n + 4) safelen(16) linear(i:1) aligned (pp:4)
+      for (i = 0; i < 10; i++)
+       {
+         r = r + 1;
+         p = q;
+         a[2+i] = p + q;
+         s = i * 10;
+       }
+    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
+    #pragma omp teams num_teams (n + 4) thread_limit (n * 2) default(shared) \
+       private (p) firstprivate (q) shared (n) reduction (+: r)
+    {
+      r = r + 1;
+      p = q;
+      dosomething (a, n, p + q);
+    }
+    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
+    #pragma omp teams distribute num_teams (n + 4) collapse (2) default(shared) \
+       private (p) firstprivate (q) shared (n) reduction (+: r) \
+       thread_limit (n * 2) dist_schedule (static, 4)
+      for (i = 0; i < 10; i++)
+       for (j = 0; j < 10; j++)
+         {
+           r = r + 1;
+           p = q;
+           dosomething (a, n, p + q);
+         }
+    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
+    #pragma omp teams distribute num_teams (n + 4) default(shared) \
+       private (p) firstprivate (q) shared (n) reduction (+: r) \
+       thread_limit (n * 2) dist_schedule (static, 4)
+      for (i = 0; i < 10; i++)
+       for (j = 0; j < 10; j++)
+         {
+           r = r + 1;
+           p = q;
+           dosomething (a, n, p + q);
+         }
+    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
+    #pragma omp teams distribute parallel for num_teams (n + 4) if (n != 6) \
+       default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
+       thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
+       num_threads (n + 4) proc_bind (spread) lastprivate (s) \
+       ordered schedule (static, 8)
+      for (i = 0; i < 10; i++)
+       for (j = 0; j < 10; j++)
+         {
+           r = r + 1;
+           p = q;
+           dosomething (a, n, p + q);
+           #pragma omp ordered
+             p = q;
+           s = i * 10 + j;
+         }
+    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
+    #pragma omp teams distribute parallel for num_teams (n + 4) if (n != 6) \
+       default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
+       thread_limit (n * 2) dist_schedule (static, 4) num_threads (n + 4) \
+       proc_bind (master) lastprivate (s) ordered schedule (static, 8)
+      for (i = 0; i < 10; i++)
+       {
+         for (j = 0; j < 10; j++)
+           {
+             r = r + 1;
+             p = q;
+             dosomething (a, n, p + q);
+           }
+         #pragma omp ordered
+           p = q;
+         s = i * 10;
+       }
+    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
+    #pragma omp teams distribute parallel for simd if (n != 6)default(shared) \
+       private (p) firstprivate (q) shared (n) reduction (+: r) \
+       thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
+       num_threads (n + 4) proc_bind (spread) lastprivate (s) \
+       schedule (static, 8) num_teams (n + 4) safelen(8)
+      for (i = 0; i < 10; i++)
+       for (j = 0; j < 10; j++)
+         {
+           r = r + 1;
+           p = q;
+           a[2+i*10+j] = p + q;
+           s = i * 10 + j;
+         }
+    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
+    #pragma omp teams distribute parallel for simd if (n != 6)default(shared) \
+       private (p) firstprivate (q) shared (n) reduction (+: r) \
+       thread_limit (n * 2) dist_schedule (static, 4) num_threads (n + 4) \
+       proc_bind (master) lastprivate (s) schedule (static, 8) \
+       num_teams (n + 4) safelen(16) linear(i:1) aligned (pp:4)
+      for (i = 0; i < 10; i++)
+       {
+         r = r + 1;
+         p = q;
+         a[2+i] = p + q;
+         s = i * 10;
+       }
+    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
+    #pragma omp teams distribute simd default(shared) \
+       private (p) firstprivate (q) shared (n) reduction (+: r) \
+       thread_limit (n * 2) dist_schedule (static, 4) collapse (2) \
+       lastprivate (s) num_teams (n + 4) safelen(8)
+      for (i = 0; i < 10; i++)
+       for (j = 0; j < 10; j++)
+         {
+           r = r + 1;
+           p = q;
+           a[2+i*10+j] = p + q;
+           s = i * 10 + j;
+         }
+    #pragma omp target device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2])
+    #pragma omp teams distribute simd default(shared) \
+       private (p) firstprivate (q) shared (n) reduction (+: r) \
+       thread_limit (n * 2) dist_schedule (static, 4) lastprivate (s) \
+       num_teams (n + 4) safelen(16) linear(i:1) aligned (pp:4)
+      for (i = 0; i < 10; i++)
+       {
+         r = r + 1;
+         p = q;
+         a[2+i] = p + q;
+         s = i * 10;
+       }
+    #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
+       num_teams (n + 4) thread_limit (n * 2)default(shared) shared(n) \
+       private (p) reduction (+: r)
+    #pragma omp distribute collapse (2) dist_schedule (static, 4) firstprivate (q)
+      for (i = 0; i < 10; i++)
+       for (j = 0; j < 10; j++)
+         {
+           r = r + 1;
+           p = q;
+           dosomething (a, n, p + q);
+         }
+    #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
+       num_teams (n + 4) thread_limit (n * 2) shared(n) private(p) reduction (+ : r) \
+       default(shared)
+    #pragma omp distribute dist_schedule (static, 4) firstprivate (q)
+      for (i = 0; i < 10; i++)
+       for (j = 0; j < 10; j++)
+         {
+           r = r + 1;
+           p = q;
+           dosomething (a, n, p + q);
+         }
+    #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
+       num_teams (n + 4) thread_limit (n * 2)
+    #pragma omp distribute parallel for if (n != 6) \
+       default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
+       collapse (2) dist_schedule (static, 4) \
+       num_threads (n + 4) proc_bind (spread) lastprivate (s) \
+       ordered schedule (static, 8)
+      for (i = 0; i < 10; i++)
+       for (j = 0; j < 10; j++)
+         {
+           r = r + 1;
+           p = q;
+           dosomething (a, n, p + q);
+           #pragma omp ordered
+             p = q;
+           s = i * 10 + j;
+         }
+    #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
+       num_teams (n + 4) thread_limit (n * 2)
+    #pragma omp distribute parallel for if (n != 6) \
+       default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
+       num_threads (n + 4) dist_schedule (static, 4) \
+       proc_bind (master) lastprivate (s) ordered schedule (static, 8)
+      for (i = 0; i < 10; i++)
+       {
+         for (j = 0; j < 10; j++)
+           {
+             r = r + 1;
+             p = q;
+             dosomething (a, n, p + q);
+           }
+         #pragma omp ordered
+           p = q;
+         s = i * 10;
+       }
+    #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
+       num_teams (n + 4) thread_limit (n * 2)
+    #pragma omp distribute parallel for simd if (n != 6)default(shared) \
+       private (p) firstprivate (q) shared (n) reduction (+: r) \
+       collapse (2) dist_schedule (static, 4) \
+       num_threads (n + 4) proc_bind (spread) lastprivate (s) \
+       schedule (static, 8) safelen(8)
+      for (i = 0; i < 10; i++)
+       for (j = 0; j < 10; j++)
+         {
+           r = r + 1;
+           p = q;
+           a[2+i*10+j] = p + q;
+           s = i * 10 + j;
+         }
+    #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
+       num_teams (n + 4) thread_limit (n * 2)
+    #pragma omp distribute parallel for simd if (n != 6)default(shared) \
+       private (p) firstprivate (q) shared (n) reduction (+: r) \
+       num_threads (n + 4) dist_schedule (static, 4) \
+       proc_bind (master) lastprivate (s) schedule (static, 8) \
+       safelen(16) linear(i:1) aligned (pp:4)
+      for (i = 0; i < 10; i++)
+       {
+         r = r + 1;
+         p = q;
+         a[2+i] = p + q;
+         s = i * 10;
+       }
+    #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
+       num_teams (n + 4) thread_limit (n * 2) default(shared) shared(n) private(p) \
+       reduction(+:r)
+    #pragma omp distribute simd private (p) firstprivate (q) reduction (+: r) \
+       collapse (2) dist_schedule (static, 4) lastprivate (s) safelen(8)
+      for (i = 0; i < 10; i++)
+       for (j = 0; j < 10; j++)
+         {
+           r = r + 1;
+           p = q;
+           a[2+i*10+j] = p + q;
+           s = i * 10 + j;
+         }
+    #pragma omp target teams device (n + 1) if (n != 6)map(from:n) map(alloc:a[2:o-2]) \
+       num_teams (n + 4) thread_limit (n * 2) default(shared) shared(n) private(p) \
+       reduction(+:r)
+    #pragma omp distribute simd private (p) firstprivate (q) reduction (+: r) \
+       lastprivate (s) dist_schedule (static, 4) safelen(16) linear(i:1) aligned (pp:4)
+      for (i = 0; i < 10; i++)
+       {
+         r = r + 1;
+         p = q;
+         a[2+i] = p + q;
+         s = i * 10;
+       }
+  }
+}
+
+int q, i, j;
+
+void
+test2 (int n, int o, int p, int r, int s, int *pp)
+{
+  int a[o];
+    #pragma omp distribute collapse (2) dist_schedule (static, 4) firstprivate (q)
+      for (i = 0; i < 10; i++)
+       for (j = 0; j < 10; j++)
+         {
+           r = r + 1;
+           p = q;
+           dosomething (a, n, p + q);
+         }
+    #pragma omp distribute dist_schedule (static, 4) firstprivate (q)
+      for (i = 0; i < 10; i++)
+       for (j = 0; j < 10; j++)
+         {
+           r = r + 1;
+           p = q;
+           dosomething (a, n, p + q);
+         }
+    #pragma omp distribute parallel for if (n != 6) \
+       default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
+       collapse (2) dist_schedule (static, 4) \
+       num_threads (n + 4) proc_bind (spread) lastprivate (s) \
+       ordered schedule (static, 8)
+      for (i = 0; i < 10; i++)
+       for (j = 0; j < 10; j++)
+         {
+           r = r + 1;
+           p = q;
+           dosomething (a, n, p + q);
+           #pragma omp ordered
+             p = q;
+           s = i * 10 + j;
+         }
+    #pragma omp distribute parallel for if (n != 6) \
+       default(shared) private (p) firstprivate (q) shared (n) reduction (+: r) \
+       num_threads (n + 4) dist_schedule (static, 4) \
+       proc_bind (master) lastprivate (s) ordered schedule (static, 8)
+      for (i = 0; i < 10; i++)
+       {
+         for (j = 0; j < 10; j++)
+           {
+             r = r + 1;
+             p = q;
+             dosomething (a, n, p + q);
+           }
+         #pragma omp ordered
+           p = q;
+         s = i * 10;
+       }
+    #pragma omp distribute parallel for simd if (n != 6)default(shared) \
+       private (p) firstprivate (q) shared (n) reduction (+: r) \
+       collapse (2) dist_schedule (static, 4) \
+       num_threads (n + 4) proc_bind (spread) lastprivate (s) \
+       schedule (static, 8) safelen(8)
+      for (i = 0; i < 10; i++)
+       for (j = 0; j < 10; j++)
+         {
+           r = r + 1;
+           p = q;
+           a[2+i*10+j] = p + q;
+           s = i * 10 + j;
+         }
+    #pragma omp distribute parallel for simd if (n != 6)default(shared) \
+       private (p) firstprivate (q) shared (n) reduction (+: r) \
+       num_threads (n + 4) dist_schedule (static, 4) \
+       proc_bind (master) lastprivate (s) schedule (static, 8) \
+       safelen(16) linear(i:1) aligned (pp:4)
+      for (i = 0; i < 10; i++)
+       {
+         r = r + 1;
+         p = q;
+         a[2+i] = p + q;
+         s = i * 10;
+       }
+    #pragma omp distribute simd private (p) firstprivate (q) reduction (+: r) \
+       collapse (2) dist_schedule (static, 4) lastprivate (s) safelen(8)
+      for (i = 0; i < 10; i++)
+       for (j = 0; j < 10; j++)
+         {
+           r = r + 1;
+           p = q;
+           a[2+i*10+j] = p + q;
+           s = i * 10 + j;
+         }
+    #pragma omp distribute simd private (p) firstprivate (q) reduction (+: r) \
+       lastprivate (s) dist_schedule (static, 4) safelen(16) linear(i:1) aligned (pp:4)
+      for (i = 0; i < 10; i++)
+       {
+         r = r + 1;
+         p = q;
+         a[2+i] = p + q;
+         s = i * 10;
+       }
+}

        Jakub
