Hello,

I have set up two VMware virtual machines to replicate their storage with DRBD 
using protocol C.  So far the write performance isn't as good as I had hoped.

I have tested write performance using fio and right now the best I can get is 
write throughput and IOPS that are about 50-75% as good as with drbd 
replication disabled.

My setup is two identical CentOS 5.5 VMs running DRBD 8.3.8.  The link between 
them is two dedicated 10Gb adapters, no switch in between.  I feel the link is 
not the bottleneck but something in my DRBD config.

I have followed the tuning recommendations on the linbit site and have seen 
some small improvements, but still don't have the performance I was hoping for. 
 I thought I would be able to get a write penalty more like 10%, not the 25-50% 
I am seeing.

I added no-disk-flushes; and no-md-flushes;.
I set max-buffers and max-epoch-size to 8196, and also experimented with 
several unplug-watermark sizes.
I also set my hardware RAID controller to 100% write cache.

I attached my drbd.conf.  Any other ideas would be most appreciated.  Thank you

Sean

global {
    usage-count no;
}

common {
  syncer { rate 200M; }
}

resource disk01 {

  protocol C;

  handlers {
    pri-on-incon-degr "echo O > /proc/sysrq-trigger ; halt -f";
    pri-lost-after-sb "echo O > /proc/sysrq-trigger ; halt -f";
    local-io-error "echo O > /proc/sysrq-trigger ; halt -f";
  }

 startup {
   degr-wfc-timeout 120;  
  }

  disk {
    on-io-error   detach;

  }

  net {
    after-sb-0pri disconnect;
    after-sb-1pri disconnect;
    after-sb-2pri disconnect;
    rr-conflict disconnect;
  }

  syncer {
  al-extents 3389;
  }

  on node01 {
    device     /dev/drbd0;
    disk       /dev/sdb;
    address    192.168.69.11:7788;
    meta-disk internal;
 }

   on node02 {
    device    /dev/drbd0;
    disk      /dev/sdb;
    address   192.168.69.12:7788;
    meta-disk internal;
  }
}

resource disk02 {

  protocol C;
  startup {
    wfc-timeout         0; 
    degr-wfc-timeout  120; 
  }

  disk {
    on-io-error detach;
    no-disk-barrier;
    no-disk-flushes;
    no-md-flushes;

  }

  net {
    # timeout           60;
    # connect-int       10;
    # ping-int          10;
     max-buffers    8196;
     max-epoch-size 8196;
     unplug-watermark 8196;
     sndbuf-size 0;

  }

  syncer {
  after "cluster_metadata";
  al-extents 3389;
  }

 on node01 {
  device     /dev/drbd1;
  disk       /dev/sdc;
  address    192.168.69.11:7789;
  meta-disk internal;
  }

  on node02 {
  device    /dev/drbd1;
  disk      /dev/sdc;
  address   192.168.69.12:7789;
  meta-disk internal;
  }
}
_______________________________________________
drbd-user mailing list
[email protected]
http://lists.linbit.com/mailman/listinfo/drbd-user

Reply via email to