CVSROOT:        /cvs/cluster
Module name:    conga
Branch:         RHEL5
Changes by:     [EMAIL PROTECTED]       2007-08-08 21:14:38

Modified files:
        luci/site/luci/Extensions: HelperFunctions.py LuciClusterInfo.py 
                                   LuciZope.py LuciZopeExternal.py 
                                   RicciQueries.py cluster_adapters.py 
                                   conga_constants.py 
                                   homebase_adapters.py 
Added files:
        luci/site/luci/Extensions: LuciZopeAsync.py 

Log message:
        Fix 230451, pass 2
        - luci backend support for managing fence_xvm keys, pass 1
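        Note on the new code path: validate_xvm_key_dist() in cluster_adapters.py
        (patch below) generates the fence_xvm key on the luci server by reading
        4096 bytes from /dev/urandom and base64-encoding them into a single line,
        then fans the key out to every node via send_batch_to_hosts() and
        rq.set_xvm_key(). A minimal standalone sketch of just the generation
        step, using only the standard library (make_xvm_key is an illustrative
        name, not part of the patch):

                import base64

                def make_xvm_key():
                        # Read 4096 bytes of randomness, as validate_xvm_key_dist does.
                        f = open('/dev/urandom', 'r')
                        try:
                                key = f.read(4096)
                        finally:
                                f.close()
                        # encodestring() wraps its output every 76 characters; the
                        # newlines are stripped so the key fits in one XML attribute.
                        return base64.encodestring(key).replace('\n', '')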

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciZopeAsync.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=NONE&r2=1.1.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/HelperFunctions.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.4.2.4&r2=1.4.2.5
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciClusterInfo.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1.4.6&r2=1.1.4.7
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciZope.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1.4.2&r2=1.1.4.3
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciZopeExternal.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1.4.1&r2=1.1.4.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/RicciQueries.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1.4.5&r2=1.1.4.6
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.120.2.35&r2=1.120.2.36
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/conga_constants.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.19.2.13&r2=1.19.2.14
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/homebase_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.34.2.13&r2=1.34.2.14

/cvs/cluster/conga/luci/site/luci/Extensions/LuciZopeAsync.py,v  -->  standard output
revision 1.1.2.1
--- conga/luci/site/luci/Extensions/HelperFunctions.py  2007/06/27 06:19:22     1.4.2.4
+++ conga/luci/site/luci/Extensions/HelperFunctions.py  2007/08/08 21:14:38     1.4.2.5
@@ -40,6 +40,10 @@
                        try:
                                rc = RicciCommunicator(host)
                                r['ricci'] = rc
+                               try:
+                                       r['cluster_name'] = rc.cluster_info()[0]
+                               except:
+                                       pass
 
                                if self.query_func is not None:
                                        if self.query_args:
--- conga/luci/site/luci/Extensions/LuciClusterInfo.py  2007/07/27 22:07:54     1.1.4.6
+++ conga/luci/site/luci/Extensions/LuciClusterInfo.py  2007/08/08 21:14:38     1.1.4.7
@@ -1609,3 +1609,12 @@
                clu_map['os'] = 'rhel5'
                clu_map['isVirtualized'] = False
        return clu_map
+
+def getClusterConfNodes(conf_dom):
+       try:
+               cluster_nodes = conf_dom.getElementsByTagName('clusternode')
+               return map(lambda x: str(x.getAttribute('name')), cluster_nodes)
+       except Exception, e:
+               if LUCI_DEBUG_MODE is True:
+                       luci_log.debug_verbose('GCCN0: %r %s' % (e, str(e)))
+       return None
--- conga/luci/site/luci/Extensions/LuciZope.py 2007/07/26 05:32:38     1.1.4.2
+++ conga/luci/site/luci/Extensions/LuciZope.py 2007/08/08 21:14:38     1.1.4.3
@@ -126,15 +126,21 @@
 
 def GetReqVars(req, varlist):
        ret = {}
+       from types import ListType;
+
        for i in varlist:
                pval = None
                if req and req.has_key(i):
-                       pval = req[i].strip()
+                       pval = req[i]
+                       if type(req[i]) is not ListType:
+                               pval = req[i].strip()
                        if not pval:
                                pval = None
                if req and pval is None:
                        if req.form and req.form.has_key(i):
-                               pval = req.form[i].strip()
+                               pval = req.form[i]
+                               if type(req.form[i]) is not ListType:
+                                       pval = pval.strip()
                                if not pval:
                                        pval = None
                ret[i] = pval
--- conga/luci/site/luci/Extensions/LuciZopeExternal.py 2007/06/18 18:39:32     1.1.4.1
+++ conga/luci/site/luci/Extensions/LuciZopeExternal.py 2007/08/08 21:14:38     1.1.4.2
@@ -49,3 +49,5 @@
        get_mappings_info, get_storage_batch_result, get_storage_report, \
        group_systems_by_cluster, is_storage_report_cached, validate, \
        get_content_data
+
+from LuciZopeAsync import get_cluster_nodes_async, get_sysinfo_async
--- conga/luci/site/luci/Extensions/RicciQueries.py     2007/07/27 21:17:41     1.1.4.5
+++ conga/luci/site/luci/Extensions/RicciQueries.py     2007/08/08 21:14:38     1.1.4.6
@@ -721,3 +721,8 @@
        if LUCI_DEBUG_MODE is True:
                luci_log.debug_verbose('GCC2: no conf node found')
        return None
+
+def set_xvm_key(rc, key_base64):
+       batch_str = '<module name="cluster"><request API_version="1.0"><function_call name="set_xvm_key"><var mutable="false" name="key_base64" type="string" value="%s"/></function_call></request></module>' % key_base64
+       ricci_xml = rc.batch_run(batch_str)
+       return batchAttemptResult(ricci_xml)
--- conga/luci/site/luci/Extensions/cluster_adapters.py 2007/07/27 19:11:32     1.120.2.35
+++ conga/luci/site/luci/Extensions/cluster_adapters.py 2007/08/08 21:14:38     1.120.2.36
@@ -21,12 +21,14 @@
 from ClusterModel.Method import Method
 
 import RicciQueries as rq
-from HelperFunctions import resolveOSType
+from HelperFunctions import resolveOSType, send_batch_to_hosts
 from LuciSyslog import get_logger
 from ResourceHandler import create_resource
 from homebase_adapters import parseHostForm
 from LuciClusterActions import propagateClusterConfAsync
 
+from LuciZopeAsync import get_cluster_nodes_async
+
 from LuciClusterInfo import getClusterInfo, \
        getModelBuilder, LuciExtractCluModel
 
@@ -41,7 +43,7 @@
        PRE_JOIN, REBOOT_TASK, REDIRECT_MSG, RESOURCES, RICCI_CONNECT_FAILURE, \
        RICCI_CONNECT_FAILURE_MSG, SEND_CONF, SERVICE_ADD, SERVICE_CONFIG, \
        SERVICE_LIST, SERVICES, START_NODE, TASKTYPE, VM_ADD, VM_CONFIG, \
-       REDIRECT_SEC, LUCI_CLUSTER_BASE_URL
+       REDIRECT_SEC, LUCI_CLUSTER_BASE_URL, FENCE_XVM_KEY_CREATE
 
 from FenceHandler import validateNewFenceDevice, \
        validateFenceDevice, validate_fenceinstance, FD_VAL_SUCCESS
@@ -2125,6 +2127,56 @@
        request.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
                % (baseurl, SERVICES, clustername))
 
+def validate_xvm_key_dist(self, request):
+       fvars = GetReqVars(request, [ '__NODE_HOSTNAME__', 'URL', 'clustername' ])
+
+       clustername = fvars['clustername']
+       if clustername is None:
+               return (False, { 'errors': [ 'No cluster name was given' ]})
+
+       host_list = fvars['__NODE_HOSTNAME__']
+       if not host_list:
+               return (False, { 'errors': [ 'No cluster node addresses were given' ]})
+
+       baseurl = fvars['URL'] or LUCI_CLUSTER_BASE_URL
+
+       try:
+               import base64
+               f = open('/dev/urandom', 'r')
+               new_key = f.read(4096)
+               f.close()
+               new_key = base64.encodestring(new_key)
+               if not new_key:
+                       raise Exception, 'base64 encode failed'
+               new_key = new_key.replace('\n', '')
+       except Exception, e:
+               if LUCI_DEBUG_MODE is True:
+                       luci_log.debug_verbose('VXKD0: /dev/urandom: %r %s' % (e, str(e)))
+               return (False, { 'errors': [ 'Unable to create a new fence_xvm key' ]})
+
+       errors = list()
+       ret = send_batch_to_hosts(host_list, 10, rq.set_xvm_key, new_key)
+       del new_key
+
+       for i in ret.iterkeys():
+               batch_num = None
+               batch_res = None
+
+               if ret[i].has_key('batch_result'):
+                       batch_num, batch_res = ret[i]['batch_result']
+
+               if batch_num is None or batch_res is None:
+                       errors.append('fence_xvm key creation failed for node "%s"' % i)
+                       if LUCI_DEBUG_MODE is True:
+                               luci_log.info('Setting fence_xvm key for node "%s" failed' % i)
+                       continue
+
+               set_node_flag(self, clustername, i, batch_num,
+                       FENCE_XVM_KEY_CREATE, 'Creating a fence_xvm key file')
+
+       request.RESPONSE.redirect('%s?pagetype=%s&clustername=%s&tab=2&busyfirst=true' \
+               % (baseurl, CLUSTER_CONFIG, clustername))
+
 def process_cluster_conf_editor(self, req):
        if req.has_key('clustername'):
                clustername = req['clustername'].strip() or None
@@ -2203,6 +2255,7 @@
        55: validateDaemonProperties,
        57: deleteFenceDevice,
        58: validateNodeFenceConfig,
+       60: validate_xvm_key_dist,
        80: process_cluster_conf_editor
 }
 
--- conga/luci/site/luci/Extensions/conga_constants.py  2007/07/26 04:20:59     1.19.2.13
+++ conga/luci/site/luci/Extensions/conga_constants.py  2007/08/08 21:14:38     1.19.2.14
@@ -54,6 +54,7 @@
 FENCEDEV_DELETE                        = '57'
 FENCEDEV_NODE_CONFIG   = '58'
 SERVICE_MIGRATE                        = '59'
+FENCE_XVM_KEY_CREATE   = '60'
 CONF_EDITOR                            = '80'
 SYS_SERVICE_MANAGE             = '90'
 SYS_SERVICE_UPDATE             = '91'
--- conga/luci/site/luci/Extensions/homebase_adapters.py        2007/06/18 18:39:33     1.34.2.13
+++ conga/luci/site/luci/Extensions/homebase_adapters.py        2007/08/08 21:14:38     1.34.2.14
@@ -17,6 +17,8 @@
        manageCluster, \
        CLUSTER_NODE_NEED_AUTH
 
+from LuciClusterInfo import getClusterConfNodes
+
 from LuciZopePerm import havePermAddCluster, havePermRemCluster, \
        havePermAddUser, havePermDelUser, havePermEditPerms, \
        havePermRemStorage, havePermAddStorage
@@ -39,7 +41,7 @@
        errors = list()
        messages = list()
 
-       if '__SYSTEM' in request.form:
+       if request.form.has_key('__SYSTEM'):
                system_names = request.form['__SYSTEM']
                for i in system_names:
                        if not i:
@@ -50,7 +52,7 @@
                        else:
                                messages.append('Removed storage system "%s" successfully' % i)
 
-       if '__CLUSTER' in request.form:
+       if request.form.has_key('__CLUSTER'):
                cluster_names = request.form['__CLUSTER']
                for i in cluster_names:
                        if not i:
@@ -70,9 +72,8 @@
        if not request.form.has_key('deluserId'):
                return (False, { 'errors': [ 'No User ID given' ] })
 
-       userId = request.form['deluserId']
-
        try:
+               userId = request.form['deluserId'].strip()
                user = self.portal_membership.getMemberById(userId)
                if not user:
                        raise Exception, 'user %s does not exist' % userId
@@ -117,8 +118,10 @@
 
        if not request.form.has_key('newUserName'):
                return (False, { 'errors': [ 'No user name given' ] })
+
        if not request.form.has_key('newPassword'):
                return (False, { 'errors': [ 'No password given' ] })
+
        if not request.form.has_key('newPasswordConfirm'):
                return (False, { 'errors': [ 'You didn\'t confirm the password' ] })
 
@@ -133,12 +136,21 @@
        if passwd != pwconfirm:
                return (False, { 'errors': [ 'The passwords given do not match' ]})
 
+       user_props = {
+               'username': user,
+               'password': passwd,
+               'confirm': passwd,
+               'roles': [ 'Member' ],
+               'domains': [],
+               'email': '[EMAIL PROTECTED]' % user
+       }
+
        try:
-               self.portal_registration.addMember(user, passwd, properties = { 'username': user, 'password': passwd, 'confirm': passwd, 'roles': [ 'Member' ], 'domains': [], 'email': '[EMAIL PROTECTED]' % user })
+               self.portal_registration.addMember(user, passwd, properties=user_props)
        except Exception, e:
                if LUCI_DEBUG_MODE is True:
                        luci_log.debug_verbose('VAU0: %s: %r %s' % (user, e, str(e)))
-               return (False, { 'errors': [ 'Unable to add new user "%s"' % user ] })
+               return (False, { 'errors': [ 'Unable to add new user "%s": %s ' % (user, str(e)) ] })
 
        if not self.portal_membership.getMemberById(user):
                return (False, { 'errors': [ 'Unable to add new user "%s"' % user ] })
@@ -542,11 +554,13 @@
        except:
                pass_num = 1
 
-       add_cluster = { 'name': cluster_name,
-                                       'pass': pass_num,
-                                       'cluster_os': cluster_os,
-                                       'identical_passwds': same_node_passwds,
-                                       'check_certs': check_certs }
+       add_cluster = {
+               'name': cluster_name,
+               'pass': pass_num,
+               'cluster_os': cluster_os,
+               'identical_passwds': same_node_passwds,
+               'check_certs': check_certs
+       }
 
        system_list, incomplete, new_errors, messages = parseHostForm(request, check_certs)
        errors.extend(new_errors)
@@ -1285,7 +1299,3 @@
                                if LUCI_DEBUG_MODE is True:
                                        luci_log.debug_verbose('getUserPerms2: user %s, obj %s: %r %s' % (userName, s[0], e, str(e)))
        return perms
-
-def getClusterConfNodes(conf_dom):
-       cluster_nodes = conf_dom.getElementsByTagName('clusternode')
-       return map(lambda x: str(x.getAttribute('name')), cluster_nodes)
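For reference, getClusterConfNodes() (moved from homebase_adapters.py into
LuciClusterInfo.py above) simply maps the name attribute over the
clusternode elements of a parsed cluster.conf DOM. A quick illustration
with a hypothetical two-node conf, assuming xml.dom.minidom for parsing:

        from xml.dom import minidom

        # Hypothetical cluster.conf fragment, for illustration only.
        conf_dom = minidom.parseString(
                '<cluster name="demo" config_version="1">'
                '<clusternodes>'
                '<clusternode name="node1.example.com" nodeid="1"/>'
                '<clusternode name="node2.example.com" nodeid="2"/>'
                '</clusternodes>'
                '</cluster>')

        # -> ['node1.example.com', 'node2.example.com']
        print getClusterConfNodes(conf_dom)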
