Send Linux-ha-cvs mailing list submissions to
[email protected]
To subscribe or unsubscribe via the World Wide Web, visit
http://lists.community.tummy.com/mailman/listinfo/linux-ha-cvs
or, via email, send a message with subject or body 'help' to
[EMAIL PROTECTED]
You can reach the person managing the list at
[EMAIL PROTECTED]
When replying, please edit your Subject line so it is more specific
than "Re: Contents of Linux-ha-cvs digest..."
Today's Topics:
1. Linux-HA CVS: cts by andrew from
([email protected])
2. Linux-HA CVS: cts by andrew from
([email protected])
----------------------------------------------------------------------
Message: 1
Date: Wed, 19 Apr 2006 04:28:23 -0600 (MDT)
From: [email protected]
Subject: [Linux-ha-cvs] Linux-HA CVS: cts by andrew from
To: [EMAIL PROTECTED]
Message-ID: <[EMAIL PROTECTED]>
linux-ha CVS committal
Author : andrew
Host :
Project : linux-ha
Module : cts
Dir : linux-ha/cts
Modified Files:
CIB.py.in
Log Message:
if self.CM.Env["CIBResource"] is false, strangely we shouldn't create any
resources. grumble.
set a cluster property to identify CIBs created for BSC
===================================================================
RCS file: /home/cvs/linux-ha/linux-ha/cts/CIB.py.in,v
retrieving revision 1.12
retrieving revision 1.13
diff -u -3 -r1.12 -r1.13
--- CIB.py.in 9 Apr 2006 14:39:54 -0000 1.12
+++ CIB.py.in 19 Apr 2006 10:28:22 -0000 1.13
@@ -212,43 +212,55 @@
resources = ""
constraints = ""
- if self.CM.Env["CIBResource"] == 1:
- self.CM.log("Enabling DC resource")
- resources += self.dc_ipaddr_resource_template %
self.CM.Env["IPBase"]
- constraints += self.dc_ipaddr_location_constraint
- if self.CM.cluster_monitor == 1:
- resources += self.clustermon_resource_template
- constraints += self.clustermon_location_constraint
-
+ if self.CM.Env["DoBSC"] == 1:
+ cib_options = cib_options + '''
+ <cluster_property_set id="bsc-options">
+ <attributes>
+ <nvpair id="ident-string" name="ident-string" value="Linux-HA TEST
configuration file - REMOVEME!!"/>
+ </attributes>
+ </cluster_property_set>'''
+
+ if self.CM.Env["CIBResource"] != 1:
+ # generate cib
+ self.cts_cib = self.cib_template % (cib_options, resources,
constraints)
+ return
+
+ self.CM.log("Enabling DC resource")
+ resources += self.dc_ipaddr_resource_template % self.CM.Env["IPBase"]
+ constraints += self.dc_ipaddr_location_constraint
+ if self.CM.cluster_monitor == 1:
+ resources += self.clustermon_resource_template
+ constraints += self.clustermon_location_constraint
+
ip1=self.NextIP()
ip2=self.NextIP()
ip3=self.NextIP()
- resources += self.resource_group_template % (ip1, ip1, ip1, ip1, ip2,
ip2, ip2, ip2, ip3, ip3, ip3, ip3)
+ resources += self.resource_group_template % (ip1, ip1, ip1, ip1, ip2,
ip2, ip2, ip2, ip3, ip3, ip3, ip3)
# lsb resource
resources += self.lsb_resource
- # per node resource
- fields = string.split(self.CM.Env["IPBase"], '.')
- for node in self.CM.Env["nodes"]:
- ip = self.NextIP()
- per_node_resources = self.per_node_resource_template % \
- ("rsc_"+node, "rsc_"+node, "rsc_"+node, "rsc_"+node,
"rsc_"+node, ip)
-
- per_node_constraint = self.per_node_constraint_template % \
- ("rsc_"+node, "rsc_"+node, "rsc_"+node, "rsc_"+node, node)
-
- resources += per_node_resources
- constraints += per_node_constraint
+ # per node resource
+ fields = string.split(self.CM.Env["IPBase"], '.')
+ for node in self.CM.Env["nodes"]:
+ ip = self.NextIP()
+ per_node_resources = self.per_node_resource_template % \
+ ("rsc_"+node, "rsc_"+node, "rsc_"+node, "rsc_"+node,
"rsc_"+node, ip)
+
+ per_node_constraint = self.per_node_constraint_template % \
+ ("rsc_"+node, "rsc_"+node, "rsc_"+node, "rsc_"+node, node)
+
+ resources += per_node_resources
+ constraints += per_node_constraint
- # fencing resource
- nodelist = ""
- len = 0
- for node in self.CM.Env["nodes"]:
- nodelist += node + " "
- len = len + 1
- stonith_resource = self.stonith_resource_template % (len, nodelist)
- resources += stonith_resource
+ # fencing resource
+ nodelist = ""
+ len = 0
+ for node in self.CM.Env["nodes"]:
+ nodelist += node + " "
+ len = len + 1
+ stonith_resource = self.stonith_resource_template % (len, nodelist)
+ resources += stonith_resource
#master slave resource
resources += self.master_slave_resource % (2*len, 2, len, 1)
------------------------------
Message: 2
Date: Wed, 19 Apr 2006 05:57:03 -0600 (MDT)
From: [email protected]
Subject: [Linux-ha-cvs] Linux-HA CVS: cts by andrew from
To: [EMAIL PROTECTED]
Message-ID: <[EMAIL PROTECTED]>
linux-ha CVS committal
Author : andrew
Host :
Project : linux-ha
Module : cts
Dir : linux-ha/cts
Modified Files:
CTS.py.in
Log Message:
echo_cp() - copy files from one host to another by cat'ing their contents
useful because it can use remote_py() and therefore work locally without
using ssh
Fail startall() and stopall() if one of the sub-actions fails
Use the more portable -k instead of -B in calls to df
Fix up the BSC scenario
===================================================================
RCS file: /home/cvs/linux-ha/linux-ha/cts/CTS.py.in,v
retrieving revision 1.55
retrieving revision 1.56
diff -u -3 -r1.55 -r1.56
--- CTS.py.in 18 Apr 2006 16:09:53 -0000 1.55
+++ CTS.py.in 19 Apr 2006 11:57:02 -0000 1.56
@@ -73,11 +73,29 @@
sysname = args[0]
command = args[1]
- if sysname == self.OurNode or sysname == "localhost":
- ret = self._fixcmd(command)
+ #print "sysname: %s, us: %s" % (sysname, self.OurNode)
+ if sysname == None or string.lower(sysname) == self.OurNode or sysname
== "localhost":
+ ret = command
else:
ret = self.Command + " " + sysname + " '" + self._fixcmd(command) +
"'"
- #print ("About to run %s\n" % ret)
+ #print ("About to run %s\n" % ret)
+ return ret
+
+ def _cmd_noblock(self, *args):
+
+ '''Compute the string that will run the given command on the
+ given remote system'''
+
+ args= args[0]
+ sysname = args[0]
+ command = args[1]
+
+ #print "sysname: %s, us: %s" % (sysname, self.OurNode)
+ if sysname == None or string.lower(sysname) == self.OurNode or sysname
== "localhost":
+ ret = command + " &"
+ else:
+ ret = self.CommandnoBlock + " " + sysname + " '" +
self._fixcmd(command) + "'"
+ #print ("About to run %s\n" % ret)
return ret
def __call__(self, *args):
@@ -139,11 +157,28 @@
print "Retrying command %s" % cpstring
return rc
+ def echo_cp(self, src_host, src_file, dest_host, dest_file):
+ '''Perform a remote copy via echo'''
+ (rc, lines) = self.remote_py(src_host, "os", "system", "cat %s" %
src_file)
+ if rc != 0:
+ print "Copy of %s:%s failed" % (src_host, src_file)
+
+ elif dest_host == None:
+ fd = open(dest_file, "w")
+ fd.writelines(lines)
+ fd.close()
+
+ else:
+ big_line=""
+ for line in lines:
+ big_line = big_line + line
+ (rc, lines) = self.remote_py(dest_host, "os", "system", "echo '%s'
> %s" % (big_line, dest_file))
+
+ return rc
+
def noBlock(self, *args):
'''Perform a remote execution without waiting for it to finish'''
- sshnoBlock = self.CommandnoBlock
- for arg in args:
- sshnoBlock = sshnoBlock + " \'" + arg + "\'"
+ sshnoBlock = self._cmd_noblock(args)
count=0;
rc = 0;
@@ -160,8 +195,10 @@
If the call fail, lastrc == 1 and return the reason (string)
'''
encode_args = binascii.b2a_base64(pickle.dumps(args))
- result = self.readlines(node, \
-
string.join(["@libdir@/heartbeat/cts/CTSproxy.py",module,func,encode_args]))
+ encode_cmd =
string.join(["@libdir@/heartbeat/cts/CTSproxy.py",module,func,encode_args])
+
+ #print "%s: %s.%s %s" % (node, module, func, repr(args))
+ result = self.readlines(node, encode_cmd)
if result != None:
result.pop()
@@ -624,13 +661,15 @@
'''Start the cluster manager on every node in the cluster.
We can do it on a subset of the cluster if nodelist is not None.
'''
-
+ ret = 1
map = {}
if not nodelist:
nodelist=self.Env["nodes"]
for node in nodelist:
if self.ShouldBeStatus[node] == self["down"]:
- self.StartaCM(node)
+ if not self.StartaCM(node):
+ ret = 0
+ return ret
def stopall(self, nodelist=None):
@@ -638,13 +677,15 @@
We can do it on a subset of the cluster if nodelist is not None.
'''
+ ret = 1
map = {}
if not nodelist:
nodelist=self.Env["nodes"]
for node in self.Env["nodes"]:
if self.ShouldBeStatus[node] == self["up"]:
- self.StopaCM(node)
-
+ if not self.StopaCM(node):
+ ret = 0
+ return ret
def rereadall(self, nodelist=None):
@@ -814,8 +855,8 @@
return 1
def CheckDf(self):
- dfcmd="df -B1000000 /var/log | tail -1 | tr -s ' ' | cut -d' ' -f2"
- dfmin=500
+ dfcmd="df -k /var/log | tail -1 | tr -s ' ' | cut -d' ' -f2"
+ dfmin=500000
result=1
for node in self.Env["nodes"]:
dfout=self.rsh.readaline(node, dfcmd)
@@ -1025,11 +1066,7 @@
# Now start the Cluster Manager on all the nodes.
CM.log("Starting Cluster Manager on all nodes.")
- CM.startall()
-
-
- return 1
-
+ return CM.startall()
def TearDown(self, CM):
'''Set up the given ScenarioComponent'''
@@ -1037,7 +1074,7 @@
# Stop the cluster manager everywhere
CM.log("Stopping Cluster Manager on all nodes")
- CM.stopall()
+ return CM.stopall()
class PingFest(ScenarioComponent):
(
@@ -1159,15 +1196,16 @@
return self.Env["DoBSC"]
def SetUp(self, CM):
- for node in CM.Env["nodes"]:
- CM.log("Starting BSC node")
- if not CM.StartaCM(node):
- return 0
- return 1
+
+ CM.prepare()
+
+ # Clear out the cobwebs
+ self.TearDown(CM)
+
+ # Now start the Cluster Manager on all the nodes.
+ CM.log("Starting Cluster Manager on BSC node(s).")
+ return CM.startall()
def TearDown(self, CM):
- for node in CM.Env["nodes"]:
- CM.log("Shutting down BSC node")
- if not CM.StopaCM(node):
- return 0
- return 1
+ CM.log("Stopping Cluster Manager on BSC node(s).")
+ return CM.stopall()
------------------------------
_______________________________________________
Linux-ha-cvs mailing list
[email protected]
http://lists.community.tummy.com/mailman/listinfo/linux-ha-cvs
End of Linux-ha-cvs Digest, Vol 29, Issue 96
********************************************