https://www.mediawiki.org/wiki/Special:Code/MediaWiki/113455
Revision: 113455
Author: laner
Date: 2012-03-09 06:05:35 +0000 (Fri, 09 Mar 2012)
Log Message:
-----------
Adding script for managing gluster volumes in labs
Added Paths:
-----------
trunk/tools/subversion/user-management/manage-volumes
Added: trunk/tools/subversion/user-management/manage-volumes
===================================================================
--- trunk/tools/subversion/user-management/manage-volumes	(rev 0)
+++ trunk/tools/subversion/user-management/manage-volumes	2012-03-09 06:05:35 UTC (rev 113455)
@@ -0,0 +1,256 @@
+#!/usr/bin/python
+import sys, traceback, ldapsupportlib, datetime, paramiko, socket
+from optparse import OptionParser
+
+try:
+ import ldap
+ import ldap.modlist
+except ImportError:
+ sys.stderr.write("Unable to import LDAP library.\n")
+
+NONE = 0
+INFO = 10
+DEBUG = 20
+
+class VolumeManager:
+ def __init__(self):
+ # TODO: Pull this info from a configuration file
+ self.base_dir = '/a/'
+ self.user = 'glustermanager'
+ self.gluster_vol_dir = '/etc/glusterd/'
+		# Volumes in projects listed as global; so: { 'dumps': ['xml'] } would be
+ # an xml share in the dumps project being listed as global.
+ self.global_shares = {}
+ self.volume_quotas = {'home': '50GB','default': '300GB'}
+ self.default_options = ['nfs.disable on']
+		self.bricks = ['labstore1.pmtpa.wmnet', 'labstore2.pmtpa.wmnet', 'labstore3.pmtpa.wmnet', 'labstore4.pmtpa.wmnet']
+ self.volume_names = ['home', 'project']
+ self.loglevel = INFO
+ self.logfile = None
+
+ def run(self):
+ parser = OptionParser(conflict_handler="resolve")
+ parser.set_usage('manage-volumes [options]')
+
+ ldapSupportLib = ldapsupportlib.LDAPSupportLib()
+ ldapSupportLib.addParserOptions(parser)
+
+		parser.add_option("--logfile", dest="logfile", help="Write output to the specified log file. (default: stdout)")
+		parser.add_option("--loglevel", dest="loglevel", help="Change level of logging; NONE, INFO, DEBUG (default: INFO)")
+ (options, args) = parser.parse_args()
+ ldapSupportLib.setBindInfoByOptions(options, parser)
+
+ if options.logfile:
+ self.logfile = options.logfile
+		if options.loglevel:
+			self.loglevel = {'NONE': NONE, 'INFO': INFO, 'DEBUG': DEBUG}.get(options.loglevel, INFO)
+
+ base = ldapSupportLib.getBase()
+ ds = ldapSupportLib.connect()
+
+		projectdata = self.search_s(ds,"ou=groups," + base,ldap.SCOPE_SUBTREE,"(&(cn=*)(owner=*))")
+		hostdata = self.search_s(ds,"ou=hosts," + base,ldap.SCOPE_SUBTREE,"(puppetvar=instanceproject=*)", ['puppetvar','aRecord'])
+		volumedata = self.ssh_exec_command('sudo gluster volume info', True, True)
+ project_hosts = self.get_hosts(hostdata)
+ project_volumes = self.get_volumes(volumedata)
+ for project in projectdata:
+ project_name = project[1]["cn"][0]
+ hosts = []
+ if project_name in project_hosts:
+ hosts = project_hosts[project_name]
+ hosts.sort()
+ for volume_name in self.volume_names:
+				project_volume = project_name + '-' + volume_name
+ if project_volume not in project_volumes:
+					# First, make the volume directories. This function runs on all
+					# bricks and returns a list of return values. If the return value
+					# isn't 1 or 0, the command failed on a brick, and we shouldn't
+					# create the volume.
+					ret_vals = set(self.mkvolumedir(project_name, volume_name))
+					make_vol = not ret_vals.difference(set([0, 1]))
+ if make_vol:
+						vol_ret = self.mkvolume(project_name, volume_name)
+						if vol_ret:
+							self.log("Created volume: " + project_name + "-" + volume_name)
+ else:
+							# No point going on if the volume creation failed
+ continue
+ else:
+ continue
+ volume_hosts = []
+				if project_volume in project_volumes and 'auth.allow' in project_volumes[project_volume]:
+					volume_hosts = project_volumes[project_volume]['auth.allow']
+ volume_hosts.sort()
+				if project_name in self.global_shares and volume_name in self.global_shares[project_name]:
+ # This is a global share
+ if volume_hosts != ['*']:
+						self.setallow(project_name,volume_name,['*'])
+ elif hosts:
+					# A host has been added or deleted, modify the auth.allow
+ if volume_hosts != hosts:
+						self.setallow(project_name,volume_name,hosts)
+ else:
+					# All hosts have been deleted, or none have been created, ensure we
+					# aren't sharing to anything
+ if volume_hosts != []:
+						self.setallow(project_name,volume_name,[])
+ # TODO: Unshare and stop deleted projects
+ ds.unbind()
+ return 0
+
+ def mkvolumedir(self, project_name, volume_name):
+		# We ensure a volume directory is unique by setting <base_dir>/project_name/volume_name
+		# as we know every project is unique and volumes within it also will be unique
+		return self.ssh_exec_command('sudo mkdir -p ' + self.base_dir + project_name + '/' + volume_name)
+
+ def mkvolume(self, project_name, volume_name):
+		# We ensure volumes are unique by setting project_name-volume_name, as the combo is
+ # known to be unique
+ volume = project_name + '-' + volume_name
+ bricks = ''
+ for brick in self.bricks:
+			bricks = bricks + brick + ':' + self.base_dir + project_name + '/' + volume_name + ' '
+		ret = self.ssh_exec_command('sudo gluster volume create ' + volume + ' replica 2 transport tcp ' + bricks, True)
+ if ret == 0:
+			# We initially set the auth.allow to NONE, since the default is *, and that's stupid
+			self.ssh_exec_command('sudo gluster volume set ' + volume + ' auth.allow NONE', True)
+ for option in self.default_options:
+				self.ssh_exec_command('sudo gluster volume set ' + volume + ' ' + option, True)
+			self.ssh_exec_command('sudo gluster volume profile ' + volume + ' start', True)
+			self.ssh_exec_command('sudo gluster volume quota ' + volume + ' enable', True)
+ if volume_name in self.volume_quotas:
+ quota = self.volume_quotas[volume_name]
+ else:
+ quota = self.volume_quotas['default']
+			self.ssh_exec_command('sudo gluster volume quota ' + volume + ' limit-usage / ' + quota, True)
+			self.ssh_exec_command('sudo gluster volume start ' + volume, True)
+ return True
+ else:
+ return False
+
+ def setallow(self, project_name, volume_name, hosts):
+ if hosts:
+ hosts = ','.join(hosts)
+ else:
+ hosts = 'NONE'
+ volume = project_name + '-' + volume_name
+		self.ssh_exec_command('sudo gluster volume set ' + volume + ' auth.allow ' + hosts, True)
+
+ def ssh_exec_command(self, command, single=False, return_stdout=False):
+ if single:
+			# Only run this on a single brick, we arbitrarily pick the first one
+			return self._ssh_exec_command(command,self.bricks[0],return_stdout)
+ else:
+ # Run this on all bricks
+ returnvals = []
+ for brick in self.bricks:
+				ret = self._ssh_exec_command(command,brick,return_stdout)
+ returnvals.append(ret)
+ return returnvals
+
+ def _ssh_exec_command(self, command, brick, return_stdout=False):
+ ssh = paramiko.SSHClient()
+		ssh.load_host_keys('/var/lib/' + self.user + '/.ssh/known_hosts')
+ if return_stdout:
+ ret = ''
+ else:
+ ret = -1
+ try:
+			ssh.connect(brick, 22, self.user, key_filename='/var/lib/' + self.user + '/.ssh/id_rsa')
+ chan = ssh.get_transport().open_session()
+ if self.loglevel >= DEBUG:
+ self.log(brick + ' - "' + command + '"')
+ chan.exec_command(command)
+ ret = chan.recv_exit_status()
+ if return_stdout:
+				# Since we are using a channel, we need to keep reading until there isn't
+ # any output left
+ ret = []
+ while chan.recv_ready():
+ ret.append(chan.recv(1024))
+ ret = "".join(ret)
+ ret = ret.split('\n')
+ ssh.close()
+ except (paramiko.SSHException, socket.error):
+			sys.stderr.write("Failed to connect to %s.\n" % brick)
+ traceback.print_exc(file=sys.stderr)
+ return None
+ return ret
+
+ def get_hosts(self,hostdata):
+ project_hosts = {}
+ if hostdata:
+ for host in hostdata:
+ host_ip = host[1]["aRecord"][0]
+ puppet_vars = host[1]["puppetvar"]
+ for puppet_var in puppet_vars:
+ var_arr = puppet_var.split('=')
+					if len(var_arr) == 2 and var_arr[0] == "instanceproject":
+						project = var_arr[1]
+						if project in project_hosts:
+							project_hosts[project].append(host_ip)
+						else:
+							project_hosts[project] = [host_ip]
+					# No need to go any further, we aren't reading other variables
+ break
+ return project_hosts
+
+ def get_volumes(self, volumedata):
+ volumes = {}
+ if volumedata:
+ current_volume = ''
+ for line in volumedata:
+ line = line.strip()
+ line_arr = line.split(': ')
+				if len(line_arr) == 2 and line_arr[0] == "Volume Name":
+					current_volume = line_arr[1]
+				elif len(line_arr) == 2 and line_arr[0] == "auth.allow":
+ if line_arr[1] == "NONE":
+ hosts = []
+ else:
+ hosts = line_arr[1].split(',')
+					volumes[current_volume] = {'auth.allow': hosts}
+					# Let's reset the current_volume, in case there are any
+					# weird formatting errors, we wouldn't want to add another
+					# project's IPs to this volume.
+ current_volume = ''
+ return volumes
+
+ def search_s(self,ds,base,scope,query,attrlist=None):
+ try:
+ data = ds.search_s(base,scope,query,attrlist)
+ if not data:
+ raise ldap.NO_SUCH_OBJECT()
+ return data
+ except ldap.NO_SUCH_OBJECT:
+ sys.stderr.write("The search returned no entries.\n")
+ return None
+ except ldap.PROTOCOL_ERROR:
+			sys.stderr.write("There was an LDAP protocol error; see traceback.\n")
+ traceback.print_exc(file=sys.stderr)
+ return None
+ except Exception:
+ try:
+				sys.stderr.write("There was a general error, this is unexpected; see traceback.\n")
+ traceback.print_exc(file=sys.stderr)
+ return None
+ except Exception:
+ traceback.print_exc(file=sys.stderr)
+ return None
+
+ def log(self, logstring):
+ if self.loglevel >= INFO:
+			log = datetime.datetime.now().strftime("%m/%d/%Y - %H:%M:%S - ") + logstring + "\n"
+ if self.logfile:
+ lf = open(self.logfile, 'a')
+ lf.write(log)
+ lf.close()
+ else:
+ print log
+
+def main():
+ volume_manager = VolumeManager()
+ volume_manager.run()
+
+if __name__ == "__main__":
+ main()
Property changes on: trunk/tools/subversion/user-management/manage-volumes
___________________________________________________________________
Added: svn:executable
+ *
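
For reference, the auth.allow reconciliation in run() keys off the output of 'sudo gluster volume info', which get_volumes() reduces to a dict of per-volume host lists. Below is a minimal standalone sketch of that parsing; the sample_output text is an assumed, abbreviated form of the gluster output (only the "Volume Name:" and "auth.allow:" lines matter to the script), not captured from a real labstore brick.

#!/usr/bin/python
# Illustrative only: sample_output approximates 'gluster volume info' output;
# the volume names and IPs are made up for the example.
sample_output = [
	'Volume Name: testing-home',
	'Type: Distributed-Replicate',
	'Status: Started',
	'Options Reconfigured:',
	'auth.allow: 10.4.0.12,10.4.0.13',
	'',
	'Volume Name: testing-project',
	'auth.allow: NONE',
]

volumes = {}
current_volume = ''
for line in sample_output:
	line_arr = line.strip().split(': ')
	if len(line_arr) == 2 and line_arr[0] == "Volume Name":
		current_volume = line_arr[1]
	elif len(line_arr) == 2 and line_arr[0] == "auth.allow":
		# "NONE" means the volume is shared to nobody
		hosts = [] if line_arr[1] == "NONE" else line_arr[1].split(',')
		volumes[current_volume] = {'auth.allow': hosts}
		current_volume = ''

print volumes
# {'testing-home': {'auth.allow': ['10.4.0.12', '10.4.0.13']},
#  'testing-project': {'auth.allow': []}}

The resulting dict is what run() compares against the per-project instance IPs pulled from LDAP when deciding whether to call setallow().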