Hello 
    
All of the GlusterFS server export directories are on a single server (172.20.92.249).
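For reference, the matching server-side export for each brick would look roughly like the sketch below. This is an assumption: the original post does not include the server spec, and the backend directory path is made up; only the brick name and port (6996-6999, one per brick) come from the client spec.

volume posix1
  type storage/posix
  option directory /data/export/brick1          # assumed backend path
end-volume

volume brick1
  type features/locks                           # POSIX locks, needed for AFR self-heal
  subvolumes posix1
end-volume

volume server
  type protocol/server
  option transport-type tcp
  option transport.socket.listen-port 6996      # brick1 is served on 6996
  option auth.addr.brick1.allow *               # allow any client; tighten in production
  subvolumes brick1
end-volume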
Client AFR (replicate) configuration:
volume client1
  type protocol/client
  option transport-type tcp     
  option remote-host 172.20.92.249      
  option transport.socket.remote-port 6996              
  option remote-subvolume brick1       
end-volume

volume client2
  type protocol/client
  option transport-type tcp     
  option remote-host 172.20.92.249      
  option transport.socket.remote-port 6997              
  option remote-subvolume brick2        
end-volume

volume client3
  type protocol/client
  option transport-type tcp     
  option remote-host 172.20.92.249      
  option transport.socket.remote-port 6998              
  option remote-subvolume brick3        
end-volume

volume client4
  type protocol/client
  option transport-type tcp     
  option remote-host 172.20.92.249      
  option transport.socket.remote-port 6999              
  option remote-subvolume brick4        
end-volume

volume ns1
  type protocol/client
  option transport-type tcp     
  option remote-host 172.20.92.249      
  option transport.socket.remote-port 6996              
  option remote-subvolume name1        
end-volume

volume ns2
  type protocol/client
  option transport-type tcp     
  option remote-host 172.20.92.249     
  option transport.socket.remote-port 6997              
  option remote-subvolume name2        
end-volume

volume rep1
  type cluster/replicate
  option  data-self-heal on 
  option  entry-self-heal  on
  option  metadata-self-heal  on
  option  data-lock-server-count 2
  option  entry-lock-server-count 2
  subvolumes client1 client2 
end-volume

volume rep2
  type cluster/replicate
  option  data-self-heal on 
  option  entry-self-heal  on
  option  metadata-self-heal  on
  option  data-lock-server-count 2
  option  entry-lock-server-count 2
  subvolumes client3 client4 
end-volume

volume rep-ns
  type cluster/replicate
  option  data-self-heal on
  option  entry-self-heal  on 
  option  metadata-self-heal  on
  option  data-lock-server-count 2
  option  entry-lock-server-count 2
  subvolumes ns1 ns2 
end-volume
 
volume bricks
  type cluster/unify
  option namespace rep-ns # this will not be storage child of unify.
  subvolumes rep1 rep2
  option self-heal background # foreground off # default is foreground
  option scheduler rr
end-volume 
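For context, a client spec like the one above is typically mounted with something along these lines (the volfile path and mount point are assumptions):

# mount the GlusterFS client using the spec file above
glusterfs -f /etc/glusterfs/glusterfs-client.vol /mnt/glusterfs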

  When I test replicate mode, I remove ("rm") a file directly from the backend export on the GFS server and then run "ll -h" on the GFS client.

2009-03-04 15:38:00 D [fuse-bridge.c:368:fuse_entry_cbk] glusterfs-fuse: 41: 
LOOKUP() / => 1 (1)
2009-03-04 15:38:00 D [fuse-bridge.c:1738:fuse_opendir] glusterfs-fuse: 42: 
OPENDIR /
2009-03-04 15:38:00 D [fuse-bridge.c:652:fuse_fd_cbk] glusterfs-fuse: 42: 
OPENDIR() / => 0x8280cc0
2009-03-04 15:38:00 D [fuse-bridge.c:368:fuse_entry_cbk] glusterfs-fuse: 43: 
LOOKUP() / => 1 (1)
2009-03-04 15:38:00 D [fuse-bridge.c:1825:fuse_readdir] glusterfs-fuse: 44: 
READDIR (0x8280cc0, size=4096, offset=0)
2009-03-04 15:38:00 D [fuse-bridge.c:1771:fuse_readdir_cbk] glusterfs-fuse: 44: 
READDIR => 6/4096,0
2009-03-04 15:38:00 D [fuse-bridge.c:1825:fuse_readdir] glusterfs-fuse: 45: 
READDIR (0x8280cc0, size=4096, offset=2147483647)
2009-03-04 15:38:00 D [fuse-bridge.c:1771:fuse_readdir_cbk] glusterfs-fuse: 45: 
READDIR => 0/4096,2147483647
2009-03-04 15:38:00 D [fuse-bridge.c:1843:fuse_releasedir] glusterfs-fuse: 46: 
RELEASEDIR 0x8280cc0
2009-03-04 15:38:00 D [inode.c:293:__inode_activate] fuse/inode: activating 
inode(3538958), lru=3/0 active=2 purge=0
2009-03-04 15:38:00 D [fuse-bridge.c:461:fuse_lookup] glusterfs-fuse: 47: 
LOOKUP /11(3538958)
2009-03-04 15:38:00 D [afr-self-heal-common.c:1041:afr_self_heal] rep1: 
performing self heal on /11 (metadata=1 data=1 entry=1)
2009-03-04 15:38:00 D 
[afr-self-heal-common.c:998:afr_self_heal_missing_entries] rep1: attempting to 
recreate missing entries for path=/11
2009-03-04 15:38:00 D [afr-self-heal-common.c:962:sh_missing_entries_lk_cbk] 
rep1: inode of /11 on child 136837152 locked
2009-03-04 15:38:00 D [afr-self-heal-common.c:962:sh_missing_entries_lk_cbk] 
rep1: inode of /11 on child 136839776 locked
2009-03-04 15:38:00 D [afr-self-heal-common.c:915:sh_missing_entries_lookup] 
rep1: looking up /11 on subvolume client1
2009-03-04 15:38:00 D [afr-self-heal-common.c:915:sh_missing_entries_lookup] 
rep1: looking up /11 on subvolume client2
2009-03-04 15:38:00 W 
[afr-self-heal-common.c:871:sh_missing_entries_lookup_cbk] rep1: path /11 on 
subvolume client1 => -1 (No such file or directory)
2009-03-04 15:38:00 D 
[afr-self-heal-common.c:863:sh_missing_entries_lookup_cbk] rep1: path /11 on 
subvolume client2 is of mode 0100644
2009-03-04 15:38:00 D [afr-self-heal-common.c:608:sh_missing_entries_mknod] 
rep1: mknod /11 mode 0100644 on 1 subvolumes
2009-03-04 15:38:00 D 
[afr-self-heal-common.c:555:sh_missing_entries_newentry_cbk] rep1: chown /11 to 
0 0 on subvolume client1
2009-03-04 15:38:00 D [afr-self-heal-common.c:502:sh_missing_entries_finish] 
rep1: unlocking 1/11 on subvolume client1
2009-03-04 15:38:00 D [afr-self-heal-common.c:502:sh_missing_entries_finish] 
rep1: unlocking 1/11 on subvolume client2
2009-03-04 15:38:00 D [afr-self-heal-common.c:441:afr_sh_missing_entries_done] 
rep1: proceeding to metadata check on /11
2009-03-04 15:38:00 D [afr-self-heal-metadata.c:752:afr_sh_metadata_lock] rep1: 
locking /11 on subvolume client1
2009-03-04 15:38:00 D [afr-self-heal-metadata.c:752:afr_sh_metadata_lock] rep1: 
locking /11 on subvolume client2
2009-03-04 15:38:00 D [afr-self-heal-metadata.c:706:afr_sh_metadata_lk_cbk] 
rep1: inode of /11 on child 0 locked
2009-03-04 15:38:00 D [afr-self-heal-metadata.c:706:afr_sh_metadata_lk_cbk] 
rep1: inode of /11 on child 1 locked
2009-03-04 15:38:00 D [afr-self-heal-metadata.c:658:afr_sh_metadata_lookup] 
rep1: looking up /11 on client1
2009-03-04 15:38:00 D [afr-self-heal-metadata.c:658:afr_sh_metadata_lookup] 
rep1: looking up /11 on client2
2009-03-04 15:38:00 D [afr-self-heal-metadata.c:604:afr_sh_metadata_lookup_cbk] 
rep1: path /11 on subvolume client1 is of mode 0100644
2009-03-04 15:38:00 D [afr-self-heal-metadata.c:604:afr_sh_metadata_lookup_cbk] 
rep1: path /11 on subvolume client2 is of mode 0100644
2009-03-04 15:38:00 D [afr-self-heal-common.c:170:afr_sh_print_pending_matrix] 
rep1: pending_matrix: [ 0 0 ]
2009-03-04 15:38:00 D [afr-self-heal-common.c:170:afr_sh_print_pending_matrix] 
rep1: pending_matrix: [ 0 0 ]
2009-03-04 15:38:00 D 
[afr-self-heal-metadata.c:491:afr_sh_metadata_sync_prepare] rep1: syncing 
metadata of /11 from subvolume client2 to 1 active sinks
2009-03-04 15:38:00 D [afr-self-heal-metadata.c:383:afr_sh_metadata_sync] rep1: 
syncing metadata of /11 from client2 to client1
2009-03-04 15:38:00 D 
[afr-self-heal-metadata.c:249:afr_sh_metadata_erase_pending] rep1: erasing 
pending flags from /11 on client1
2009-03-04 15:38:00 D 
[afr-self-heal-metadata.c:249:afr_sh_metadata_erase_pending] rep1: erasing 
pending flags from /11 on client2
2009-03-04 15:38:00 D [afr-self-heal-metadata.c:156:afr_sh_metadata_finish] 
rep1: unlocking /11 on subvolume client1
2009-03-04 15:38:00 D [afr-self-heal-metadata.c:156:afr_sh_metadata_finish] 
rep1: unlocking /11 on subvolume client2
2009-03-04 15:38:00 D [afr-self-heal-metadata.c:83:afr_sh_metadata_done] rep1: 
proceeding to data check on /11
2009-03-04 15:38:00 D [afr-self-heal-data.c:992:afr_sh_data_lock] rep1: locking 
/11 on subvolume client1
2009-03-04 15:38:00 D [afr-self-heal-data.c:992:afr_sh_data_lock] rep1: locking 
/11 on subvolume client2
2009-03-04 15:38:00 D [afr-self-heal-data.c:944:afr_sh_data_lock_cbk] rep1: 
inode of /11 on child 0 locked
2009-03-04 15:38:00 D [afr-self-heal-data.c:944:afr_sh_data_lock_cbk] rep1: 
inode of /11 on child 1 locked
2009-03-04 15:38:00 D [afr-self-heal-common.c:170:afr_sh_print_pending_matrix] 
rep1: pending_matrix: [ 0 0 ]
2009-03-04 15:38:00 D [afr-self-heal-common.c:170:afr_sh_print_pending_matrix] 
rep1: pending_matrix: [ 0 0 ]
2009-03-04 15:38:00 D [afr-self-heal-data.c:752:afr_sh_data_sync_prepare] rep1: 
syncing data of /11 from subvolume client2 to 1 active sinks
2009-03-04 15:38:00 D [afr-self-heal-data.c:642:afr_sh_data_open_cbk] rep1: fd 
for /11 opened, commencing sync
2009-03-04 15:38:00 W [afr-self-heal-data.c:646:afr_sh_data_open_cbk] rep1: 
sourcing file /11 from client2 to other sinks
2009-03-04 15:38:00 D [afr-self-heal-data.c:501:afr_sh_data_read_cbk] rep1: 
read 0 bytes of data from /11 on child 1, offset 0
2009-03-04 15:38:00 D [afr-self-heal-data.c:379:afr_sh_data_trim_cbk] rep1: 
ftruncate of /11 on subvolume client1 completed
2009-03-04 15:38:00 D [afr-self-heal-data.c:328:afr_sh_data_erase_pending] 
rep1: erasing pending flags from /11 on client1
2009-03-04 15:38:00 D [afr-self-heal-data.c:328:afr_sh_data_erase_pending] 
rep1: erasing pending flags from /11 on client2
2009-03-04 15:38:00 D [afr-self-heal-data.c:252:afr_sh_data_finish] rep1: 
finishing data selfheal of /11
2009-03-04 15:38:00 D [afr-self-heal-data.c:228:afr_sh_data_unlock] rep1: 
unlocking /11 on subvolume client1
2009-03-04 15:38:00 D [afr-self-heal-data.c:228:afr_sh_data_unlock] rep1: 
unlocking /11 on subvolume client2
2009-03-04 15:38:00 D [afr-self-heal-data.c:185:afr_sh_data_unlck_cbk] rep1: 
inode of /11 on child 0 locked
2009-03-04 15:38:00 D [afr-self-heal-data.c:185:afr_sh_data_unlck_cbk] rep1: 
inode of /11 on child 1 locked
2009-03-04 15:38:00 D [afr-self-heal-data.c:134:afr_sh_data_close] rep1: 
closing fd of /11 on client2
2009-03-04 15:38:00 D [afr-self-heal-data.c:149:afr_sh_data_close] rep1: 
closing fd of /11 on client1
2009-03-04 15:38:00 D [afr-self-heal-data.c:70:afr_sh_data_done] rep1: self 
heal of /11 completed
2009-03-04 15:38:00 D [fuse-bridge.c:368:fuse_entry_cbk] glusterfs-fuse: 47: 
LOOKUP() /11 => 3538958 (3538958)
2009-03-04 15:38:00 D [inode.c:112:__dentry_unhash] fuse/inode: dentry unhashed 
11 (3538958)
2009-03-04 15:38:00 D [inode.c:94:__dentry_hash] fuse/inode: dentry hashed 11 
(3538958)
2009-03-04 15:38:00 D [inode.c:312:__inode_passivate] fuse/inode: passivating 
inode(3538958) lru=4/0 active=1 purge=0
2009-03-04 15:38:00 D [inode.c:293:__inode_activate] fuse/inode: activating 
inode(3538958), lru=3/0 active=2 purge=0
2009-03-04 15:38:00 D [fuse-bridge.c:1512:fuse_open] glusterfs-fuse: 48: OPEN 
/11
2009-03-04 15:38:00 D [fuse-bridge.c:652:fuse_fd_cbk] glusterfs-fuse: 48: 
OPEN() /11 => 0x827e918
2009-03-04 15:38:00 D [fuse-bridge.c:1573:fuse_readv] glusterfs-fuse: 49: READ 
(0x827e918, size=4096, offset=0)
2009-03-04 15:38:00 D [fuse-bridge.c:1538:fuse_readv_cbk] glusterfs-fuse: 49: 
READ => 0/4096,0/88
2009-03-04 15:38:00 D [fuse-bridge.c:1657:fuse_flush] glusterfs-fuse: 50: FLUSH 
0x827e918
2009-03-04 15:38:00 D [fuse-bridge.c:896:fuse_err_cbk] glusterfs-fuse: 50: 
FLUSH() ERR => 0
2009-03-04 15:38:00 D [fuse-bridge.c:1677:fuse_release] glusterfs-fuse: 51: 
RELEASE 0x827e918

Why does the log show "D [afr-self-heal-data.c:501:afr_sh_data_read_cbk] rep1: read 0 bytes of
data from /11 on child 1, offset 0"?

Looking forward to your reply. Thanks a lot!
2009-03-04 



eagleeyes 
_______________________________________________
Gluster-users mailing list
[email protected]
http://zresearch.com/cgi-bin/mailman/listinfo/gluster-users
