[Gluster-users] GlusterFS + ZFS

Regan_James jamreg at emeritus-consultants.com
Thu Jul 8 20:06:53 UTC 2010


We have two servers, each running ZFS in raidz across three hard drives of
the same size, with GlusterFS replicating between the two servers.  ZFS has
two filesystems, tank and tank/home, each created with zfs create, and
GlusterFS exports the tank/home filesystem, which is mounted at /mnt.  At
this point everything works great, but when I then run zfs create
tank/home/virtual, cd to /mnt/virtual, and run ls, I get the errors I
reported printed to the command line.
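
For reference, the ZFS side on each server was set up roughly like this
(the device names below are placeholders, not the actual disks):

    zpool create tank raidz /dev/sdb /dev/sdc /dev/sdd
    zfs create tank/home

    # the nested dataset that triggers the errors when listed through /mnt
    zfs create tank/home/virtual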

Volume files:
glusterfsd.vol
## file auto generated by /bin/glusterfs-volgen
# Cmd line:
# $ /bin/glusterfs-volgen --name home --raid 1 mx.server.edu:/tank/home mx2.server.edu:/tank/home

volume posix1
    type storage/posix
#   option o-direct enable # (default: disable) boolean type only
#   option export-statfs-size no # (default: yes) boolean type only
#   option mandate-attribute off # (default: on) boolean type only
#   option span-devices 8 # (default: 0) integer value
#   option background-unlink yes # (default: no) boolean type
    option directory /tank/home
end-volume

volume locks1
    type features/locks
#   option mandatory on # Default off, used in specific applications
    subvolumes posix1
end-volume

volume brick1
    type performance/io-threads
    option thread-count 8
#   option autoscaling yes # Heuristic for autoscaling threads on demand
#   option min-threads 2 # min count for thread pool
#   option max-threads 64 # max count for thread pool
    subvolumes locks1
end-volume

volume server-tcp
    type protocol/server
    option transport-type tcp
    option auth.addr.brick1.allow *
    option transport.socket.listen-port 6996
    option transport.socket.nodelay on
    subvolumes brick1
end-volume
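
This server volume file is read by glusterfsd on each machine; assuming the
stock install path, starting the server side looks roughly like:

    glusterfsd -f /etc/glusterfs/glusterfsd.vol

(the exact path is whatever the packages put it under.)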

glusterfs.vol
## file auto generated by /bin/glusterfs-volgen
# Cmd line:
# $ /bin/glusterfs-volgen --name home --raid 1 mx.server.edu:/tank/home mx2.server.edu:/tank/home

# RAID 1
# TRANSPORT-TYPE tcp
volume mx.server.edu-1
    type protocol/client
    option transport-type tcp
    option remote-host mx.server.edu
    option transport.socket.nodelay on
    option transport.remote-port 6996
    option remote-subvolume brick1
end-volume

volume mx2.server.edu-1
    type protocol/client
    option transport-type tcp
    option remote-host mx2.server.edu
    option transport.socket.nodelay on
    option transport.remote-port 6996
    option remote-subvolume brick1
end-volume

volume mirror-0
    type cluster/replicate
    subvolumes mx.server.edu-1 mx2.server.edu-1
end-volume

volume writebehind
    type performance/write-behind
    option cache-size 4MB
#   option enable-trickling-writes yes # Flush final write calls when network is free
#   option enable-O_SYNC yes # Enable O_SYNC for write-behind
#   option disable-for-first-nbytes 1 # Disable first nbytes with very small initial writes
    subvolumes mirror-0
end-volume

volume readahead
    type performance/read-ahead
    option page-count 4
#   option force-atime-update yes # force updating atimes, default off
    subvolumes writebehind
end-volume

volume iocache
    type performance/io-cache
    option cache-size `echo $(( $(grep 'MemTotal' /proc/meminfo | sed 's/[^0-9]//g') / 5120 ))`MB
    option cache-timeout 1
#   option priority *.html:1,abc*:2 # Priority list for iocaching files
    subvolumes readahead
end-volume

volume quickread
    type performance/quick-read
    option cache-timeout 1
    option max-file-size 64kB
    subvolumes iocache
end-volume

volume statprefetch
    type performance/stat-prefetch
    subvolumes quickread
end-volume
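
The client side is mounted against this file, roughly as follows (path
assumed, done the same way on both servers):

    glusterfs -f /etc/glusterfs/glusterfs.vol /mnt

In case the backtick expression in the io-cache section looks odd:
/proc/meminfo reports MemTotal in kB, so dividing by 5120 sizes the cache at
about one fifth of RAM, in MB. On a box with 8 GB of RAM, for example:

    grep 'MemTotal' /proc/meminfo | sed 's/[^0-9]//g'   # 8388608 (kB)
    echo $(( 8388608 / 5120 ))                          # 1638 -> cache-size 1638MB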

James J Regan IV
COO Emeritus Consulting
B.S. Computer Science
B.A. Linguistics
