Linus,
 The current raid1/raid5 resync code requests resync in units of 1k
 (though the raid personality can round up requests if it likes).
 This interacts badly with filesystems that do IO in 512-byte blocks,
 such as XFS (because raid5 needs to use the same blocksize for IO and
 resync).

 The attached patch changes the resync code to work in units of
 sectors, which makes more sense and plays nicely with XFS.
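
 To make the failure concrete: raid5 reports resync completion with
 md_done_sync(conf->mddev, (sh->size>>10) - sh->sync_redone, ok), and
 with a 512-byte stripe (the blocksize XFS uses) the ">>10" rounds the
 completed count down to zero, so the accounting can never advance.
 A standalone sketch of the arithmetic (illustration only, not part of
 the patch):

        #include <stdio.h>

        int main(void)
        {
                unsigned int stripe_bytes = 512;  /* e.g. an XFS block */

                /* old 1k units: 512 >> 10 == 0, progress rounds away */
                printf("1k blocks done: %u\n", stripe_bytes >> 10);
                /* new sector units: 512 >> 9 == 1, the IO is counted */
                printf("sectors done:   %u\n", stripe_bytes >> 9);
                return 0;
        }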

NeilBrown



--- ./drivers/md/md.c   2001/05/17 05:50:51     1.1
+++ ./drivers/md/md.c   2001/05/17 06:11:50     1.2
@@ -2997,7 +2997,7 @@
        int sz = 0;
        unsigned long max_blocks, resync, res, dt, db, rt;
 
-       resync = mddev->curr_resync - atomic_read(&mddev->recovery_active);
+       resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;
        max_blocks = mddev->sb->size;
 
        /*
@@ -3042,7 +3042,7 @@
         */
        dt = ((jiffies - mddev->resync_mark) / HZ);
        if (!dt) dt++;
-       db = resync - mddev->resync_mark_cnt;
+       db = resync - (mddev->resync_mark_cnt/2);
        rt = (dt * ((max_blocks-resync) / (db/100+1)))/100;
        
        sz += sprintf(page + sz, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);
@@ -3217,7 +3217,7 @@
 
 void md_done_sync(mddev_t *mddev, int blocks, int ok)
 {
-       /* another "blocks" (1K) blocks have been synced */
+       /* another "blocks" (512byte) blocks have been synced */
        atomic_sub(blocks, &mddev->recovery_active);
        wake_up(&mddev->recovery_wait);
        if (!ok) {
@@ -3230,7 +3230,7 @@
 int md_do_sync(mddev_t *mddev, mdp_disk_t *spare)
 {
        mddev_t *mddev2;
-       unsigned int max_blocks, currspeed,
+       unsigned int max_sectors, currspeed,
                j, window, err, serialize;
        kdev_t read_disk = mddev_to_kdev(mddev);
        unsigned long mark[SYNC_MARKS];
@@ -3267,7 +3267,7 @@
 
        mddev->curr_resync = 1;
 
-       max_blocks = mddev->sb->size;
+       max_sectors = mddev->sb->size<<1;
 
        printk(KERN_INFO "md: syncing RAID array md%d\n", mdidx(mddev));
        printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed: %d 
KB/sec/disc.\n",
@@ -3291,23 +3291,23 @@
        /*
         * Tune reconstruction:
         */
-       window = MAX_READAHEAD*(PAGE_SIZE/1024);
-       printk(KERN_INFO "md: using %dk window, over a total of %d 
blocks.\n",window,max_blocks);
+       window = MAX_READAHEAD*(PAGE_SIZE/512);
+       printk(KERN_INFO "md: using %dk window, over a total of %d 
+blocks.\n",window/2,max_sectors/2);
 
        atomic_set(&mddev->recovery_active, 0);
        init_waitqueue_head(&mddev->recovery_wait);
        last_check = 0;
-       for (j = 0; j < max_blocks;) {
-               int blocks;
+       for (j = 0; j < max_sectors;) {
+               int sectors;
 
-               blocks = mddev->pers->sync_request(mddev, j);
+               sectors = mddev->pers->sync_request(mddev, j);
 
-               if (blocks < 0) {
-                       err = blocks;
+               if (sectors < 0) {
+                       err = sectors;
                        goto out;
                }
-               atomic_add(blocks, &mddev->recovery_active);
-               j += blocks;
+               atomic_add(sectors, &mddev->recovery_active);
+               j += sectors;
                mddev->curr_resync = j;
 
                if (last_check + window > j)
@@ -3325,7 +3325,7 @@
                        mark_cnt[next] = j - atomic_read(&mddev->recovery_active);
                        last_mark = next;
                }
-                       
+
 
                if (md_signal_pending(current)) {
                        /*
@@ -3350,7 +3350,7 @@
                if (md_need_resched(current))
                        schedule();
 
-               currspeed = (j-mddev->resync_mark_cnt)/((jiffies-mddev->resync_mark)/HZ +1) +1;
+               currspeed = (j-mddev->resync_mark_cnt)/2/((jiffies-mddev->resync_mark)/HZ +1) +1;
 
                if (currspeed > sysctl_speed_limit_min) {
                        current->nice = 19;
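
 A note on the "/2" conversions above: curr_resync, resync_mark_cnt and
 recovery_active now count sectors, but /proc/mdstat keeps reporting 1k
 blocks. A minimal sketch of that convention (sectors_to_1k_blocks is a
 hypothetical name, not in the patch):

        /* Two 512-byte sectors per 1k block, so the status code
         * halves its sector counts before printing them. */
        static inline unsigned long sectors_to_1k_blocks(unsigned long sectors)
        {
                return sectors / 2;
        }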
--- ./drivers/md/raid5.c        2001/05/17 05:50:51     1.1
+++ ./drivers/md/raid5.c        2001/05/17 06:11:51     1.2
@@ -886,7 +886,7 @@
                        }
                }
                if (syncing) {
-                       md_done_sync(conf->mddev, (sh->size>>10) - sh->sync_redone,0);
+                       md_done_sync(conf->mddev, (sh->size>>9) - sh->sync_redone,0);
                        clear_bit(STRIPE_SYNCING, &sh->state);
                        syncing = 0;
                }                       
@@ -1059,7 +1059,7 @@
                }
        }
        if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
-               md_done_sync(conf->mddev, (sh->size>>10) - sh->sync_redone,1);
+               md_done_sync(conf->mddev, (sh->size>>9) - sh->sync_redone,1);
                clear_bit(STRIPE_SYNCING, &sh->state);
        }
        
@@ -1153,13 +1153,13 @@
        return correct_size;
 }
 
-static int raid5_sync_request (mddev_t *mddev, unsigned long block_nr)
+static int raid5_sync_request (mddev_t *mddev, unsigned long sector_nr)
 {
        raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
        struct stripe_head *sh;
        int sectors_per_chunk = conf->chunk_size >> 9;
-       unsigned long stripe = (block_nr<<1)/sectors_per_chunk;
-       int chunk_offset = (block_nr<<1) % sectors_per_chunk;
+       unsigned long stripe = sector_nr/sectors_per_chunk;
+       int chunk_offset = sector_nr % sectors_per_chunk;
        int dd_idx, pd_idx;
        unsigned long first_sector;
        int raid_disks = conf->raid_disks;
@@ -1167,9 +1167,9 @@
        int redone = 0;
        int bufsize;
 
-       sh = get_active_stripe(conf, block_nr<<1, 0, 0);
+       sh = get_active_stripe(conf, sector_nr, 0, 0);
        bufsize = sh->size;
-       redone = block_nr-(sh->sector>>1);
+       redone = sector_nr - sh->sector;
        first_sector = raid5_compute_sector(stripe*data_disks*sectors_per_chunk
                + chunk_offset, raid_disks, data_disks, &dd_idx, &pd_idx, conf);
        sh->pd_idx = pd_idx;
@@ -1182,7 +1182,7 @@
        handle_stripe(sh);
        release_stripe(sh);
 
-       return (bufsize>>10)-redone;
+       return (bufsize>>9)-redone;
 }
 
 /*
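
 With sync_request now taking a sector number directly, the raid5
 stripe arithmetic above loses its "<<1" conversions. Worked numbers as
 a sanity check (the 64k chunk size is only an example, not from the
 patch):

        #include <stdio.h>

        int main(void)
        {
                unsigned long sector_nr = 300;       /* example position */
                int sectors_per_chunk = 65536 >> 9;  /* 64k chunk = 128  */

                printf("stripe=%lu chunk_offset=%lu\n",
                       sector_nr / sectors_per_chunk,   /* 2  */
                       sector_nr % sectors_per_chunk);  /* 44 */
                return 0;
        }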
--- ./drivers/md/raid1.c        2001/05/17 05:50:51     1.1
+++ ./drivers/md/raid1.c        2001/05/17 06:11:51     1.2
@@ -1198,7 +1198,7 @@
                                raid1_map (mddev, &bh->b_dev, bh->b_size >> 9);
                                if (bh->b_dev == dev) {
                                        printk (IO_ERROR, partition_name(bh->b_dev), bh->b_blocknr);
-                                       md_done_sync(mddev, bh->b_size>>10, 0);
+                                       md_done_sync(mddev, bh->b_size>>9, 0);
                                } else {
                                        printk (REDIRECT_SECTOR,
                                                partition_name(bh->b_dev), bh->b_blocknr);
@@ -1305,7 +1305,7 @@
  * issue suitable write requests
  */
 
-static int raid1_sync_request (mddev_t *mddev, unsigned long block_nr)
+static int raid1_sync_request (mddev_t *mddev, unsigned long sector_nr)
 {
        raid1_conf_t *conf = mddev_to_conf(mddev);
        struct mirror_info *mirror;
@@ -1315,7 +1315,7 @@
        int disk;
 
        spin_lock_irq(&conf->segment_lock);
-       if (!block_nr) {
+       if (!sector_nr) {
                /* initialize ...*/
                int buffs;
                conf->start_active = 0;
@@ -1335,9 +1335,9 @@
                if (conf->cnt_ready || conf->cnt_active)
                        MD_BUG();
        }
-       while ((block_nr<<1) >= conf->start_pending) {
+       while (sector_nr >= conf->start_pending) {
                PRINTK("wait .. sect=%lu start_active=%d ready=%d pending=%d 
future=%d, cnt_done=%d active=%d ready=%d pending=%d future=%d\n",
-                       block_nr<<1, conf->start_active, conf->start_ready, 
conf->start_pending, conf->start_future,
+                       sector_nr, conf->start_active, conf->start_ready, 
+conf->start_pending, conf->start_future,
                        conf->cnt_done, conf->cnt_active, conf->cnt_ready, conf->cnt_pending, conf->cnt_future);
                wait_event_lock_irq(conf->wait_done,
                                        !conf->cnt_active,
@@ -1383,10 +1383,10 @@
        r1_bh->cmd = SPECIAL;
        bh = &r1_bh->bh_req;
 
-       bh->b_blocknr = block_nr;
-       bsize = 1024;
+       bh->b_blocknr = sector_nr;
+       bsize = 512;
        while (!(bh->b_blocknr & 1) && bsize < PAGE_SIZE
-                       && (bh->b_blocknr+2)*(bsize>>10) < mddev->sb->size) {
+                       && (bh->b_blocknr+2)*(bsize>>9) < (mddev->sb->size *2)) {
                bh->b_blocknr >>= 1;
                bsize <<= 1;
        }
@@ -1403,13 +1403,13 @@
                BUG();
        bh->b_end_io = end_sync_read;
        bh->b_private = r1_bh;
-       bh->b_rsector = block_nr<<1;
+       bh->b_rsector = sector_nr;
        init_waitqueue_head(&bh->b_wait);
 
        generic_make_request(READ, bh);
        md_sync_acct(bh->b_dev, bh->b_size/512);
 
-       return (bsize >> 10);
+       return (bsize >> 9);
 
 nomem:
        raid1_shrink_buffers(conf);
@@ -1444,7 +1444,7 @@
                int size = bh->b_size;
                raid1_free_buf(r1_bh);
                sync_request_done(sect, mddev_to_conf(mddev));
-               md_done_sync(mddev,size>>10, uptodate);
+               md_done_sync(mddev,size>>9, uptodate);
        }
 }
 
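
 Finally, raid1 resync now starts each request at a single 512-byte
 sector, and the existing doubling loop still coalesces aligned runs up
 to PAGE_SIZE. A standalone trace of that loop (device-size guard
 omitted, the starting values are examples):

        #include <stdio.h>

        int main(void)
        {
                unsigned long blocknr = 8;   /* example resync sector */
                unsigned int bsize = 512;    /* one sector, as in the patch */
                const unsigned int page_size = 4096;

                /* same shape as the loop in raid1_sync_request */
                while (!(blocknr & 1) && bsize < page_size) {
                        blocknr >>= 1;
                        bsize <<= 1;
                }
                /* prints: blocknr=1 bsize=4096 */
                printf("blocknr=%lu bsize=%u\n", blocknr, bsize);
                return 0;
        }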