Re: svn commit: r242314 - head/sys/geom/raid

2013-01-13 Thread Slawa Olhovchenkov
On Mon, Oct 29, 2012 at 02:18:54PM +0000, Alexander Motin wrote:

 Author: mav
 Date: Mon Oct 29 14:18:54 2012
 New Revision: 242314
 URL: http://svn.freebsd.org/changeset/base/242314
 
 Log:
   Make GEOM RAID more aggressive in marking volumes as clean on shutdown
   and move that action from shutdown_pre_sync to shutdown_post_sync stage
   to avoid extra flapping.
   
   ZFS tends to not close devices on shutdown, that doesn't allow GEOM RAID
   to shutdown gracefully.  To handle that, mark volume as clean just when
   shutdown time comes and there are no active writes.
   
   MFC after:  2 weeks

kern/113957 ?


svn commit: r242314 - head/sys/geom/raid

2012-10-29 Thread Alexander Motin
Author: mav
Date: Mon Oct 29 14:18:54 2012
New Revision: 242314
URL: http://svn.freebsd.org/changeset/base/242314

Log:
  Make GEOM RAID more aggressive in marking volumes as clean on shutdown
  and move that action from shutdown_pre_sync to shutdown_post_sync stage
  to avoid extra flapping.
  
  ZFS tends to not close devices on shutdown, that doesn't allow GEOM RAID
  to shutdown gracefully.  To handle that, mark volume as clean just when
  shutdown time comes and there are no active writes.
  
  MFC after:  2 weeks
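
To make the change easier to follow than the raw diff below, here is a
condensed reading of g_raid_clean() as it looks after this commit. This is a
paraphrase for illustration (asserts, debug output and the commented-out
NOFAILSYNC check omitted), not a verbatim copy of the patched source:

static void
g_raid_clean(struct g_raid_volume *vol, int acw)
{
	struct g_raid_softc *sc = vol->v_softc;
	int timeout;

	if (!vol->v_dirty)		/* nothing to do */
		return;
	if (vol->v_writes > 0)		/* writes still in flight */
		return;
	if (acw > 0 || (acw == -1 &&
	    vol->v_provider != NULL && vol->v_provider->acw > 0)) {
		timeout = g_raid_clean_time -
		    (time_uptime - vol->v_last_write);
		/*
		 * Outside of shutdown the usual idle grace period still
		 * applies; once g_raid_shutdown is set, the volume is
		 * marked clean immediately.
		 */
		if (!g_raid_shutdown && timeout > 0)
			return;
	}
	vol->v_dirty = 0;
	g_raid_write_metadata(sc, vol, NULL, NULL);
}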

Modified:
  head/sys/geom/raid/g_raid.c

Modified: head/sys/geom/raid/g_raid.c
==============================================================================
--- head/sys/geom/raid/g_raid.c	Mon Oct 29 13:58:11 2012	(r242313)
+++ head/sys/geom/raid/g_raid.c	Mon Oct 29 14:18:54 2012	(r242314)
@@ -108,8 +108,9 @@ LIST_HEAD(, g_raid_tr_class) g_raid_tr_c
 LIST_HEAD(, g_raid_volume) g_raid_volumes =
 LIST_HEAD_INITIALIZER(g_raid_volumes);
 
-static eventhandler_tag g_raid_pre_sync = NULL;
+static eventhandler_tag g_raid_post_sync = NULL;
 static int g_raid_started = 0;
+static int g_raid_shutdown = 0;
 
 static int g_raid_destroy_geom(struct gctl_req *req, struct g_class *mp,
 struct g_geom *gp);
@@ -881,7 +882,7 @@ g_raid_orphan(struct g_consumer *cp)
G_RAID_EVENT_DISK);
 }
 
-static int
+static void
 g_raid_clean(struct g_raid_volume *vol, int acw)
 {
struct g_raid_softc *sc;
@@ -892,22 +893,21 @@ g_raid_clean(struct g_raid_volume *vol, 
 	sx_assert(&sc->sc_lock, SX_XLOCKED);
 
 //	if ((sc->sc_flags & G_RAID_DEVICE_FLAG_NOFAILSYNC) != 0)
-//		return (0);
+//		return;
 	if (!vol->v_dirty)
-		return (0);
+		return;
 	if (vol->v_writes > 0)
-		return (0);
+		return;
 	if (acw > 0 || (acw == -1 &&
 	    vol->v_provider != NULL && vol->v_provider->acw > 0)) {
 		timeout = g_raid_clean_time - (time_uptime - vol->v_last_write);
-		if (timeout > 0)
-			return (timeout);
+		if (!g_raid_shutdown && timeout > 0)
+			return;
 	}
 	vol->v_dirty = 0;
 	G_RAID_DEBUG1(1, sc, "Volume %s marked as clean.",
 	    vol->v_name);
 	g_raid_write_metadata(sc, vol, NULL, NULL);
-	return (0);
 }
 
 static void
@@ -1520,8 +1520,7 @@ process:
g_raid_disk_done_request(bp);
} else if (rv == EWOULDBLOCK) {
 			TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
-				if (vol->v_writes == 0 && vol->v_dirty)
-					g_raid_clean(vol, -1);
+				g_raid_clean(vol, -1);
 				if (bioq_first(&vol->v_inflight) == NULL &&
 				    vol->v_tr) {
 					t.tv_sec = g_raid_idle_threshold / 1000000;
@@ -1783,7 +1782,7 @@ g_raid_access(struct g_provider *pp, int
error = ENXIO;
goto out;
}
-	if (dcw == 0 && vol->v_dirty)
+	if (dcw == 0)
 		g_raid_clean(vol, dcw);
 	vol->v_provider_open += acr + acw + ace;
/* Handle delayed node destruction. */
@@ -2379,21 +2378,25 @@ g_raid_dumpconf(struct sbuf *sb, const c
 }
 
 static void
-g_raid_shutdown_pre_sync(void *arg, int howto)
+g_raid_shutdown_post_sync(void *arg, int howto)
 {
struct g_class *mp;
struct g_geom *gp, *gp2;
struct g_raid_softc *sc;
+   struct g_raid_volume *vol;
int error;
 
mp = arg;
DROP_GIANT();
g_topology_lock();
+	g_raid_shutdown = 1;
 	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
 		if ((sc = gp->softc) == NULL)
 			continue;
 		g_topology_unlock();
 		sx_xlock(&sc->sc_lock);
+		TAILQ_FOREACH(vol, &sc->sc_volumes, v_next)
+			g_raid_clean(vol, -1);
g_cancel_event(sc);
error = g_raid_destroy(sc, G_RAID_DESTROY_DELAYED);
if (error != 0)
@@ -2408,9 +2411,9 @@ static void
 g_raid_init(struct g_class *mp)
 {
 
-   g_raid_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync,
-   g_raid_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST);
-   if (g_raid_pre_sync == NULL)
+   g_raid_post_sync = EVENTHANDLER_REGISTER(shutdown_post_sync,
+   g_raid_shutdown_post_sync, mp, SHUTDOWN_PRI_FIRST);
+   if (g_raid_post_sync == NULL)
 		G_RAID_DEBUG(0, "Warning! Cannot register shutdown event.");
g_raid_started = 1;
 }
@@ -2419,8 +2422,8 @@ static void
 g_raid_fini(struct g_class *mp)
 {
 
-   if (g_raid_pre_sync != NULL)
-   EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_raid_pre_sync);
+   if (g_raid_post_sync != NULL)
+   EVENTHANDLER_DEREGISTER(shutdown_post_sync, g_raid_post_sync);
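
For readers less familiar with the kernel event API the change switches to,
the registration pattern looks roughly like this when used from a standalone
module. This is a minimal illustrative sketch, not graid code; the module
name, handler body and printed text are placeholders:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/eventhandler.h>

static eventhandler_tag demo_post_sync_tag;

/* shutdown_post_sync handlers run after the filesystems have been synced. */
static void
demo_shutdown_post_sync(void *arg, int howto)
{

	printf("demo: shutdown_post_sync reached, howto=0x%x\n",
	    (unsigned int)howto);
}

static int
demo_modevent(module_t mod, int type, void *data)
{

	switch (type) {
	case MOD_LOAD:
		demo_post_sync_tag = EVENTHANDLER_REGISTER(shutdown_post_sync,
		    demo_shutdown_post_sync, NULL, SHUTDOWN_PRI_FIRST);
		return (demo_post_sync_tag != NULL ? 0 : ENOMEM);
	case MOD_UNLOAD:
		if (demo_post_sync_tag != NULL)
			EVENTHANDLER_DEREGISTER(shutdown_post_sync,
			    demo_post_sync_tag);
		return (0);
	default:
		return (EOPNOTSUPP);
	}
}

static moduledata_t demo_mod = {
	"shutdown_demo",
	demo_modevent,
	NULL
};
DECLARE_MODULE(shutdown_demo, demo_mod, SI_SUB_DRIVERS, SI_ORDER_ANY);

graid itself does the equivalent registration from g_raid_init() and the
deregistration from g_raid_fini(), as the diff above shows.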
 

Re: svn commit: r242314 - head/sys/geom/raid

2012-10-29 Thread Andriy Gapon
on 29/10/2012 16:18 Alexander Motin said the following:
 Author: mav
 Date: Mon Oct 29 14:18:54 2012
 New Revision: 242314
 URL: http://svn.freebsd.org/changeset/base/242314
 
 Log:
   Make GEOM RAID more aggressive in marking volumes as clean on shutdown
   and move that action from shutdown_pre_sync to shutdown_post_sync stage
   to avoid extra flapping.
   
   ZFS tends to not close devices on shutdown, that doesn't allow GEOM RAID
   to shutdown gracefully.  To handle that, mark volume as clean just when
   shutdown time comes and there are no active writes.

Perhaps something like what zfs_modevent(MOD_UNLOAD) does should also be done in
one of the shutdown hooks?
Maybe at the start of shutdown_post_sync (so that it is run before the hooks of
lower level drivers like graid)?
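
A hypothetical sketch of that idea (not actual ZFS code; all names below are
made up for illustration): a ZFS-side shutdown_post_sync hook could release
the vdev GEOM consumers, roughly what zfs_modevent(MOD_UNLOAD) ends up
tearing down. Eventhandler lists are invoked in ascending priority order, so
a priority numerically below the SHUTDOWN_PRI_FIRST that graid registers
with would make such a hook run first:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>

static eventhandler_tag zfs_demo_post_sync_tag;

static void
zfs_demo_shutdown_post_sync(void *arg, int howto)
{

	/*
	 * Hypothetical: close the GEOM consumers held by vdev_geom here,
	 * so the providers are released before graid's own post_sync
	 * handler runs.
	 */
}

static void
zfs_demo_hook_init(void *unused)
{

	/* Run ahead of handlers registered at SHUTDOWN_PRI_FIRST. */
	zfs_demo_post_sync_tag = EVENTHANDLER_REGISTER(shutdown_post_sync,
	    zfs_demo_shutdown_post_sync, NULL, SHUTDOWN_PRI_FIRST - 1);
}
SYSINIT(zfs_demo_hook, SI_SUB_DRIVERS, SI_ORDER_ANY, zfs_demo_hook_init,
    NULL);

Whether tearing ZFS down that early in shutdown is safe is exactly the open
question in this thread; the sketch only shows where such a hook would attach.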

-- 
Andriy Gapon


Re: svn commit: r242314 - head/sys/geom/raid

2012-10-29 Thread Alexander Motin

On 29.10.2012 16:27, Andriy Gapon wrote:

on 29/10/2012 16:18 Alexander Motin said the following:

Author: mav
Date: Mon Oct 29 14:18:54 2012
New Revision: 242314
URL: http://svn.freebsd.org/changeset/base/242314

Log:
   Make GEOM RAID more aggressive in marking volumes as clean on shutdown
   and move that action from shutdown_pre_sync to shutdown_post_sync stage
   to avoid extra flapping.

   ZFS tends to not close devices on shutdown, that doesn't allow GEOM RAID
   to shutdown gracefully.  To handle that, mark volume as clean just when
   shutdown time comes and there are no active writes.


Perhaps something like what zfs_modevent(MOD_UNLOAD) does should also be done in
one of the shutdown hooks?


Maybe, but I don't know ZFS very well.


Maybe at the start of shutdown_post_sync (so that it is run before the hooks of
lower level drivers like graid)?


graid would be happy if the device were closed at any point in time,
either before or after. But before is preferable, to avoid flapping
between dirtying and the cleaning added here.


--
Alexander Motin