During my performance testing of various combinations of glfs and nfs using the kernel build process, I came across a problem in some configurations.

Specifically, during make clean after building:

find: WARNING: Hard link count is wrong for ./arch/x86_64/kernel: this may be a bug in your filesystem driver. Automatically turning on find's -noleaf option. Earlier results may have failed to include directories that should have been searched.

In the logs:
[2010-01-08 11:08:08] E [posix.c:3156:do_xattrop] src-store: getxattr failed on /redhat/BUILD/kernel-2.6.18/linux-2.6.18.x86_64/.tmp_versions while doing xattrop: No such file or directory

Is there some kind of write re-ordering optimization going on that breaks things?

The volume spec file I'm using is attached. Server 1 in the AFR set is powered down, and server 3 uses a similar spec file but without the caching applied, since it is a slave-only backup server that no applications connect to.

The client machine is connecting via unfsd.

Gordan
# Client-side connection to the "src1" brick exported by the server at
# 10.2.0.11 on port 7001 (server 1 — currently powered down, per the email).
volume src1
        type protocol/client
        option transport-type socket
        option transport.address-family inet
        option remote-host 10.2.0.11
        option remote-port 7001
        option remote-subvolume src1
end-volume

# Write-behind caching layered on the src1 client connection.
volume src1-writebehind
        type performance/write-behind
        option cache-size 2MB           # default is equal to aggregate-size
        option flush-behind on          # default is 'off'
        option enable-O_SYNC on         # NOTE(review): presumably affects handling of O_SYNC writes — confirm against write-behind docs
        subvolumes src1
end-volume

# Read cache layered on top of src1's write-behind translator.
volume src1-iocache
        type performance/io-cache
        option cache-size 64MB
        option cache-timeout 2          # default is 1 second
        subvolumes src1-writebehind
end-volume

##############################################################################

# Client-side connection to the "src3" brick exported by the server at
# 10.2.0.13 on port 7001 (server 3 — the slave-only backup, per the email).
volume src3
        type protocol/client
        option transport-type socket
        option transport.address-family inet
        option remote-host 10.2.0.13
        option remote-port 7001
        option remote-subvolume src3
end-volume

# Write-behind caching layered on the src3 client connection
# (same settings as src1-writebehind).
volume src3-writebehind
        type performance/write-behind
        option cache-size 2MB           # default is equal to aggregate-size
        option flush-behind on          # default is 'off'
        option enable-O_SYNC on         # NOTE(review): presumably affects handling of O_SYNC writes — confirm against write-behind docs
        subvolumes src3
end-volume

# Read cache layered on top of src3's write-behind translator.
volume src3-iocache
        type performance/io-cache
        option cache-size 64MB
        option cache-timeout 2          # default is 1 second
        subvolumes src3-writebehind
end-volume

##############################################################################

# Local on-disk backend for this node's copy of the data. The
# "getxattr failed ... while doing xattrop" error quoted in the email
# (posix.c) is emitted by this translator.
volume src-store
        type storage/posix
        option directory /gluster/src
end-volume

# POSIX locking support stacked on the local store; this ("src2") is the
# name other replica members request via remote-subvolume.
volume src2
        type features/posix-locks
        subvolumes src-store
end-volume

# Write-behind caching on the LOCAL brick (mirrors the settings used on
# the remote-client stacks src1/src3).
volume src2-writebehind
        type performance/write-behind
        option cache-size 2MB           # default is equal to aggregate-size
        option flush-behind on          # default is 'off'
        option enable-O_SYNC on         # NOTE(review): presumably affects handling of O_SYNC writes — confirm against write-behind docs
        subvolumes src2
end-volume

# Read cache on the local brick's write-behind translator.
volume src2-iocache
        type performance/io-cache
        option cache-size 64MB
        option cache-timeout 2          # default is 1 second
        subvolumes src2-writebehind
end-volume

##############################################################################

# Exports the raw local brick (src2, BELOW the caching layers) on port 7001
# so the other replica members can connect to it, restricted to localhost
# and the 10.* network.
volume server
        type protocol/server
        option transport-type socket
        option transport.address-family inet
        option transport.socket.listen-port 7001
        subvolumes src2
        option auth.addr.src2.allow 127.0.0.1,10.*
end-volume

# AFR/replicate across the local cached brick and the two remote cached
# bricks; reads are steered to the local copy.
# NOTE(review): write-behind + io-cache sit BETWEEN replicate and each
# brick here, so replicate's metadata reads can be served stale from
# io-cache — a plausible cause of the find "hard link count is wrong"
# warning reported above; confirm whether caching should instead be
# stacked above replicate.
# NOTE(review): favorite-child resolves conflicts in src2-iocache's favor —
# verify this is intended, as it can discard the other replicas' versions.
volume src
        type cluster/replicate
        subvolumes src2-iocache src1-iocache src3-iocache
        option read-subvolume src2-iocache
        option favorite-child src2-iocache
end-volume

# Exports the replicated volume on port 7002 — this is the endpoint the
# client (via unfsd, per the email) mounts.
volume server-src
        type protocol/server
        option transport-type socket
        option transport.address-family inet
        option transport.socket.listen-port 7002
        subvolumes src
        option auth.addr.src.allow 127.0.0.1,10.*
end-volume
_______________________________________________
Gluster-devel mailing list
[email protected]
http://lists.nongnu.org/mailman/listinfo/gluster-devel

Reply via email to