This fixes the corresponding SJLJ part of preserving loops over
EH lowering.  The unfortunate thing is, of course, that factoring
the SJLJ breaks the loop structure quite badly because we create
multiple entry loops all over the place.

Well.  That's a pre-existing missed optimization.

Kai bootstrapped and tested this on mingw; I'm including it in an
x86_64 bootstrap & regtest cycle.

Richard.

2012-04-11  Richard Guenther  <rguent...@suse.de>

        PR middle-end/52918
        * except.c (sjlj_emit_dispatch_table): Properly update loop
        structure.

        * g++.dg/torture/pr52918-1.C: New testcase.
        * g++.dg/torture/pr52918-2.C: Likewise.

Index: gcc/except.c
===================================================================
--- gcc/except.c        (revision 186302)
+++ gcc/except.c        (working copy)
@@ -1344,6 +1344,28 @@ sjlj_emit_dispatch_table (rtx dispatch_l
        e = make_edge (bb, bb->next_bb, EDGE_FALLTHRU);
        e->count = bb->count;
        e->probability = REG_BR_PROB_BASE;
+       if (current_loops)
+         {
+           struct loop *loop = bb->next_bb->loop_father;
+           /* If we created a pre-header block, add the new block to the
+              outer loop, otherwise to the loop itself.  */
+           if (bb->next_bb == loop->header)
+             add_bb_to_loop (bb, loop_outer (loop));
+           else
+             add_bb_to_loop (bb, loop);
+           /* ???  For multiple dispatches we will end up with edges
+              from the loop tree root into this loop, making it a
+              multiple-entry loop.  Discard all affected loops.  */
+           if (num_dispatch > 1)
+             {
+               for (loop = bb->loop_father;
+                    loop_outer (loop); loop = loop_outer (loop))
+                 {
+                   loop->header = NULL;
+                   loop->latch = NULL;
+                 }
+             }
+         }
 
        disp_index++;
       }
@@ -1364,6 +1386,24 @@ sjlj_emit_dispatch_table (rtx dispatch_l
       e = make_edge (bb, bb->next_bb, EDGE_FALLTHRU);
       e->count = bb->count;
       e->probability = REG_BR_PROB_BASE;
+      if (current_loops)
+       {
+         struct loop *loop = bb->next_bb->loop_father;
+         /* If we created a pre-header block, add the new block to the
+            outer loop, otherwise to the loop itself.  */
+         if (bb->next_bb == loop->header)
+           add_bb_to_loop (bb, loop_outer (loop));
+         else
+           add_bb_to_loop (bb, loop);
+       }
+    }
+  else
+    {
+      /* We are not wiring up edges here, but as the dispatcher call
+         is at function begin simply associate the block with the
+        outermost (non-)loop.  */
+      if (current_loops)
+       add_bb_to_loop (bb, current_loops->tree_root);
     }
 }
 
Index: gcc/testsuite/g++.dg/torture/pr52918-1.C
===================================================================
--- gcc/testsuite/g++.dg/torture/pr52918-1.C    (revision 0)
+++ gcc/testsuite/g++.dg/torture/pr52918-1.C    (revision 0)
@@ -0,0 +1,39 @@
+// { dg-do compile }
+
+typedef __SIZE_TYPE__ size_t;
+class bad_alloc   { };
+typedef struct {
+} __gthread_mutex_t;
+int __gthread_mutex_unlock (__gthread_mutex_t *__mutex);
+class __concurrence_unlock_error   {
+};
+inline void   __throw_concurrence_unlock_error()   {
+    throw __concurrence_unlock_error();
+}
+class __mutex   {
+    __gthread_mutex_t _M_mutex;
+public:
+    void unlock()     {
+       if (__gthread_mutex_unlock(&_M_mutex) != 0)
+         __throw_concurrence_unlock_error();      
+    }
+};
+class free_list   {
+    typedef __mutex __mutex_type;
+    __mutex_type&     _M_get_mutex();
+    void _M_get(size_t __sz) throw(bad_alloc);
+};
+void  free_list::_M_get(size_t __sz) throw(bad_alloc)
+{
+  __mutex_type& __bfl_mutex = _M_get_mutex();
+  __bfl_mutex.unlock();
+  int __ctr = 2;
+  while (__ctr)  {
+      size_t* __ret = 0;
+      --__ctr;
+      try {
+         __ret = (size_t*) (::operator new(__sz + sizeof(size_t)));       
+      }
+      catch(const bad_alloc&) { }
+  }
+}
Index: gcc/testsuite/g++.dg/torture/pr52918-2.C
===================================================================
--- gcc/testsuite/g++.dg/torture/pr52918-2.C    (revision 0)
+++ gcc/testsuite/g++.dg/torture/pr52918-2.C    (revision 0)
@@ -0,0 +1,40 @@
+// { dg-do compile }
+
+typedef __SIZE_TYPE__ size_t;
+void*   __cxa_allocate_exception(size_t) throw();
+typedef struct { } __gthread_mutex_t;
+extern int __gthr_win32_mutex_unlock (__gthread_mutex_t *);
+int __gthread_mutex_lock (__gthread_mutex_t *__mutex);
+int __gthread_mutex_unlock (__gthread_mutex_t *__mutex);
+void   __throw_concurrence_lock_error();
+void   __throw_concurrence_unlock_error();
+class __mutex   {
+    __gthread_mutex_t _M_mutex;
+public:
+    void lock()     {
+       if (__gthread_mutex_lock(&_M_mutex) != 0)
+         __throw_concurrence_lock_error();
+    }
+    void unlock()     {
+       if (__gthread_mutex_unlock(&_M_mutex) != 0) 
+         __throw_concurrence_unlock_error();
+    }
+};
+class __scoped_lock   {
+    typedef __mutex __mutex_type;
+    __mutex_type& _M_device;
+public:
+    explicit __scoped_lock(__mutex_type& __name) : _M_device(__name)     {
+       _M_device.lock();
+    }
+    ~__scoped_lock() throw()    {
+       _M_device.unlock();
+    }
+};
+__mutex emergency_mutex;
+void * __cxa_allocate_exception(size_t thrown_size) throw()
+{
+  void *ret;
+  if (! ret)     
+    __scoped_lock sentry(emergency_mutex);
+}

Reply via email to