CVSROOT: /cvs/cluster
Module name: conga
Branch: RHEL5
Changes by: [EMAIL PROTECTED] 2008-01-25 17:19:05
Modified files:
. : conga.spec.in.in
luci/cluster : busy_wait-macro form-macros
luci/homebase : validate_cluster_add.js
luci/site/luci/Extensions: LuciDB.py RicciQueries.py
cluster_adapters.py
luci/site/luci/var: Data.fs
ricci/modules/rpm: PackageHandler.cpp RpmModule.cpp
ricci/modules/service: ServiceManager.cpp
ricci/ricci : RicciWorker.cpp
Log message:
- Fix a bug that prevented the fix for bz230462 from working
- Fix a few other bugs found while testing
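In short: the add-cluster form's "Reboot nodes" checkbox (reboot_nodes) is now passed from cluster_adapters.py through RicciQueries.create_cluster() into createClusterBatch(), and the generated ricci batch includes a reboot module only when that flag is set; previously any package installation also forced a reboot. Below is a minimal, simplified sketch of that flow, not the actual luci code; build_reboot_module() is a hypothetical stand-in for the XML fragments the real code appends:

    # Sketch only: simplified from createClusterBatch()/create_cluster()
    # in RicciQueries.py; build_reboot_module() is hypothetical.
    def build_reboot_module():
        # The real code appends <module name="reboot"> ... </module> fragments.
        return ['<module name="reboot">', '<request API_version="1.0">',
                '</request>', '</module>']

    def create_cluster_batch(install_base, install_services,
                             install_shared_storage, install_LVS,
                             reboot_nodes=False):
        batch = []
        # ... package-install modules are appended here ...
        # Before this change, need_reboot was forced on whenever any install
        # flag was set; now only the user's reboot_nodes checkbox matters.
        need_reboot = reboot_nodes
        if need_reboot:
            batch.extend(build_reboot_module())
        return batch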
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/conga.spec.in.in.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.45.2.64&r2=1.45.2.65
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/busy_wait-macro.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.2.2.1&r2=1.2.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.90.2.32&r2=1.90.2.33
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/homebase/validate_cluster_add.js.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.4.2.6&r2=1.4.2.7
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/LuciDB.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1.4.6&r2=1.1.4.7
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/RicciQueries.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1.4.8&r2=1.1.4.9
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.120.2.41&r2=1.120.2.42
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/var/Data.fs.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.15.2.26&r2=1.15.2.27
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/modules/rpm/PackageHandler.cpp.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.9.2.5&r2=1.9.2.6
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/modules/rpm/RpmModule.cpp.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.3.2.1&r2=1.3.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/modules/service/ServiceManager.cpp.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.5.2.3&r2=1.5.2.4
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/ricci/ricci/RicciWorker.cpp.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.11.2.1&r2=1.11.2.2
--- conga/conga.spec.in.in 2008/01/23 05:20:44 1.45.2.64
+++ conga/conga.spec.in.in 2008/01/25 17:18:37 1.45.2.65
@@ -292,6 +292,9 @@
### changelog ###
%changelog
+* Fri Jan 25 2008 Ryan McCabe <[EMAIL PROTECTED]> 0.12.0-1
+- Fix a bug that prevented the fix for bz230462 from working
+
* Tue Jan 22 2008 Ryan McCabe <[EMAIL PROTECTED]> 0.12.0-0
- Fixed bz230462 (RFE: Only reboot installation target machines when needed)
- Fixed bz238655 (conga does not set the "nodename" attribute for manual
fencing)
--- conga/luci/cluster/busy_wait-macro 2008/01/23 04:44:30 1.2.2.1
+++ conga/luci/cluster/busy_wait-macro 2008/01/25 17:18:37 1.2.2.2
@@ -65,6 +65,6 @@
tal:attributes="onclick
python:'javascript:document.stop_waiting_form%s.submit()' %
nodereport.get('report_index')">Stop waiting for this job to complete</a>
</form>
</div>
+ <hr/>
</div>
- <hr/>
</div>
--- conga/luci/cluster/form-macros 2008/01/23 04:44:30 1.90.2.32
+++ conga/luci/cluster/form-macros 2008/01/25 17:18:37 1.90.2.33
@@ -225,7 +225,7 @@
checked add_cluster/shared_storage | string:checked" />Enable Shared Storage Support
</td></tr>
<tr class="systemsTable"><td colspan="2" class="systemsTable">
- <input type="checkbox" name="reboot_nodes"
+ <input type="checkbox" id="reboot_nodes" name="reboot_nodes"
tal:attributes="checked python:(add_cluster and add_cluster.get('reboot_nodes')) and 'checked' or ''" />Reboot nodes before joining cluster
</td></tr>
<tr class="systemsTable"><td colspan="2" class="systemsTable">
@@ -1135,7 +1135,7 @@
checked add_cluster/shared_storage | string:checked" />Enable Shared Storage Support
</td></tr>
<tr class="systemsTable"><td colspan="2" class="systemsTable">
- <input type="checkbox" name="reboot_nodes"
+ <input type="checkbox" id="reboot_nodes" name="reboot_nodes"
tal:attributes="checked python:(add_cluster and add_cluster.get('reboot_nodes')) and 'checked' or ''" />Reboot nodes before joining cluster
</td></tr>
<tr class="systemsTable"><td colspan="2" class="systemsTable">
--- conga/luci/homebase/validate_cluster_add.js 2008/01/23 04:44:31 1.4.2.6
+++ conga/luci/homebase/validate_cluster_add.js 2008/01/25 17:18:38 1.4.2.7
@@ -48,14 +48,21 @@
return (-1);
}
+ var reboot_nodes = document.getElementById('reboot_nodes');
var view_certs = document.getElementById('view_certs');
if (!view_certs || !view_certs.checked) {
var confirm_str = '';
if (form.addnode) {
- confirm_str = 'Add ' + (added_storage.length > 1 ? 'these nodes' : 'this node') + ' to the \"' + clustername + '\" cluster?\nEach node added will be rebooted during this process.';
+ confirm_str = 'Add ' + (added_storage.length > 1 ? 'these nodes' : 'this node') + ' to the \"' + clustername + '\" cluster?';
+ if (reboot_nodes && reboot_nodes.checked) {
+ confirm_str += '\nEach node added will be rebooted during this process.';
+ }
} else {
if (form.cluster_create) {
- confirm_str = 'All nodes added to this cluster will be rebooted as part of this process.\n\nCreate cluster \"' + clustername + '\"?';
+ confirm_str = 'Create cluster \"' + clustername + '\"?\n\n';
+ if (reboot_nodes && reboot_nodes.checked) {
+ confirm_str += 'All nodes added to this cluster will be rebooted as part of this process.\n\n';
+ }
} else {
confirm_str = 'Add the cluster \"' + clustername + '\" to the Luci management interface?';
}
--- conga/luci/site/luci/Extensions/LuciDB.py 2008/01/23 04:44:32 1.1.4.6
+++ conga/luci/site/luci/Extensions/LuciDB.py 2008/01/25 17:18:38 1.1.4.7
@@ -335,14 +335,17 @@
objname = '%s____flag' % key
clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+
# now designate this new object properly
objpath = str('%s/%s' % (path, objname))
flag = self.restrictedTraverse(objpath)
flag.manage_addProperty(BATCH_ID, batch_id, 'string')
flag.manage_addProperty(TASKTYPE, CLUSTER_ADD, 'string')
- flag.manage_addProperty(FLAG_DESC, 'Creating node "%s" for cluster "%s"' % (key, clustername), 'string')
flag.manage_addProperty(LAST_STATUS, 0, 'int')
+ flag.manage_addProperty(FLAG_DESC,
+ 'Creating node "%s" for cluster "%s"' % (key, clustername),
+ 'string')
except Exception, e:
if LUCI_DEBUG_MODE is True:
luci_log.debug_verbose('buildCCF1: error creating flag for %s: %r %s' % (key, e, str(e)))
--- conga/luci/site/luci/Extensions/RicciQueries.py 2008/01/23 04:44:32 1.1.4.8
+++ conga/luci/site/luci/Extensions/RicciQueries.py 2008/01/25 17:18:38 1.1.4.9
@@ -68,7 +68,7 @@
batch.append('</request>')
batch.append('</module>')
- need_reboot = reboot_nodes or install_base or install_services or install_shared_storage or install_LVS
+ need_reboot = reboot_nodes
if need_reboot:
batch.append('<module name="reboot">')
batch.append('<request API_version="1.0">')
@@ -183,7 +183,7 @@
batch.append('</request>')
batch.append('</module>')
- need_reboot = reboot_nodes or install_base or install_services or install_shared_storage or install_LVS
+ need_reboot = reboot_nodes
if need_reboot:
batch.append('<module name="reboot">')
batch.append('<request API_version="1.0">')
@@ -738,11 +738,12 @@
install_shared_storage,
install_LVS,
upgrade_rpms,
- gulm_lockservers):
+ gulm_lockservers,
+ reboot_nodes=False):
batch_str = createClusterBatch(os_str, cluster_name, cluster_alias,
nodeList, install_base, install_services,
install_shared_storage, install_LVS, upgrade_rpms,
- gulm_lockservers)
+ gulm_lockservers, reboot_nodes)
ricci_xml = rc.batch_run(batch_str)
return batchAttemptResult(ricci_xml)
--- conga/luci/site/luci/Extensions/cluster_adapters.py 2008/01/23 04:44:32 1.120.2.41
+++ conga/luci/site/luci/Extensions/cluster_adapters.py 2008/01/25 17:18:38 1.120.2.42
@@ -280,7 +280,8 @@
ret = send_batch_to_hosts(node_list, 10, rq.create_cluster,
add_cluster['cluster_os'], clustername, clustername,
node_list, True, True, add_cluster['shared_storage'],
False,
- add_cluster['download_pkgs'], lockservers, add_cluster['reboot_nodes'])
+ add_cluster['download_pkgs'], lockservers,
+ add_cluster['reboot_nodes'])
batch_id_map = {}
for i in ret.iterkeys():
@@ -291,14 +292,13 @@
if LUCI_DEBUG_MODE is True:
luci_log.debug_verbose(msg)
continue
- batch_id_map[i] = ret[i]['batch_result']
+ batch_id_map[i] = ret[i]['batch_result'][0]
if len(batch_id_map) == 0:
request.SESSION.set('create_cluster', add_cluster)
return (False, { 'errors': errors, 'messages': messages })
buildClusterCreateFlags(self, batch_id_map, clustername)
-
response = request.RESPONSE
response.redirect('%s?pagetype=%s&clustername=%s&busyfirst=true' \
% (request['URL'], CLUSTER_CONFIG, clustername))
Binary files /cvs/cluster/conga/luci/site/luci/var/Data.fs 2008/01/23 04:44:34 1.15.2.26 and /cvs/cluster/conga/luci/site/luci/var/Data.fs 2008/01/25 17:18:38 1.15.2.27 differ
rcsdiff: /cvs/cluster/conga/luci/site/luci/var/Data.fs: diff failed
--- conga/ricci/modules/rpm/PackageHandler.cpp 2008/01/17 17:38:38 1.9.2.5
+++ conga/ricci/modules/rpm/PackageHandler.cpp 2008/01/25 17:19:05 1.9.2.6
@@ -576,24 +576,24 @@
String name(iter->name);
map<String, Package>::iterator pack_iter = h_pre.packages().find(name);
if (pack_iter == h_pre.packages().end()) {
- throw String("package ") + name
- + " is present neither locally nor in repository";
+ throw String("Package \"") + name
+ + "\" is present neither locally nor in any available repository";
} else {
String curr_ver(pack_iter->second.version);
String repo_ver(pack_iter->second.repo_version);
if (curr_ver.empty()) {
// not installed
if (repo_ver.empty()) {
- throw String("package ") + name
- + " is not present in
repository";
+ throw String("Package \"") + name
+ + "\" is not present in
any available repository";
} else
rpms.push_back(name);
} else {
// already installed
if (upgrade) {
if (repo_ver.empty()) {
- throw String("package ") + name
- + " is not
present in repository";
+ throw String("Package \"") +
name
+ + "\" is not
present in any available repository";
} else if (repo_ver > curr_ver)
rpms.push_back(name);
}
@@ -610,8 +610,8 @@
map<String, PackageSet>::iterator set_iter = h_pre.sets().find(name);
if (set_iter == h_pre.sets().end()) {
- throw String("packages of set ") + name
- + " present neither locally nor in repository";
+ throw String("Packages of set \"") + name
+ + "\" are present neither locally nor in any available repository";
} else {
PackageSet& p_set = set_iter->second;
if (p_set.installed) {
@@ -627,8 +627,7 @@
rpms.push_back(*name_iter);
}
} else {
- throw String("packages
of set ") + name +
- " are not
present in any available repository";
+ /* Packages are already
up-to-date */
}
}
}
@@ -643,8 +642,8 @@
rpms.push_back(*name_iter);
}
} else {
- throw String("packages of set ") + name
+
- " are not present in any
available repository";
+ throw String("Packages of set \"") +
name +
+ "\" are not present in any
available repository";
}
}
}
@@ -679,6 +678,8 @@
FC6 = true;
else if (release.find("Moonshine") != release.npos)
FC6 = true;
+ else if (release.find("Werewolf") != release.npos)
+ FC6 = true;
else if (release.find("Tikanga") != release.npos)
RHEL5 = true;
else
--- conga/ricci/modules/rpm/RpmModule.cpp 2008/01/17 17:38:38 1.3.2.1
+++ conga/ricci/modules/rpm/RpmModule.cpp 2008/01/25 17:19:05 1.3.2.2
@@ -92,24 +92,24 @@
rpms.push_back(pack);
}
}
+ }
- list<PackageSet> sets;
- for (list<XMLObject>::const_iterator
- iter = sets_list.begin() ;
- iter != sets_list.end() ;
- iter++)
- {
- if (iter->tag() == "set") {
- String name(iter->get_attr("name"));
- if (name.size()) {
- PackageSet set(name);
- sets.push_back(set);
- }
+ list<PackageSet> sets;
+ for (list<XMLObject>::const_iterator
+ iter = sets_list.begin() ;
+ iter != sets_list.end() ;
+ iter++)
+ {
+ if (iter->tag() == "set") {
+ String name(iter->get_attr("name"));
+ if (name.size()) {
+ PackageSet set(name);
+ sets.push_back(set);
}
}
- PackageHandler::install(rpms, sets, upgrade);
}
+ PackageHandler::install(rpms, sets, upgrade);
return VarMap();
}
@@ -219,7 +219,7 @@
}
if (installable) {
- if (!set.installed && set.in_repo)
+ if (set.in_repo)
add = true;
}
--- conga/ricci/modules/service/ServiceManager.cpp 2008/01/17 17:38:39 1.5.2.3
+++ conga/ricci/modules/service/ServiceManager.cpp 2008/01/25 17:19:05 1.5.2.4
@@ -311,9 +311,9 @@
name();
for (list<Service>::const_iterator
- iter = servs.begin() ;
- iter != servs.end() ;
- iter++)
+ iter = servs.begin() ;
+ iter != servs.end() ;
+ iter++)
{
if (!iter->enabled())
return false;
@@ -478,13 +478,14 @@
list<String> servs;
String name = "Cluster Base";
- String descr = "Cluster infrastructure: ccs, cman, fence";
-
+ String descr;
if (RHEL4 || FC5) {
+ descr = "Cluster infrastructure (RHEL4): ccs, cman, fence";
servs.push_back("ccsd");
servs.push_back("cman");
servs.push_back("fenced");
} else if (RHEL5 || FC6) {
+ descr = "Cluster infrastructure (RHEL5): ccs, cman, fence";
servs.push_back("cman");
servs.push_back("qdiskd");
}
@@ -830,6 +831,8 @@
FC6 = true;
else if (release.find("Moonshine") != release.npos)
FC6 = true;
+ else if (release.find("Werewolf") != release.npos)
+ FC6 = true;
else if (release.find("Tikanga") != release.npos)
RHEL5 = true;
else {
--- conga/ricci/ricci/RicciWorker.cpp 2008/01/17 17:38:39 1.11.2.1
+++ conga/ricci/ricci/RicciWorker.cpp 2008/01/25 17:19:05 1.11.2.2
@@ -258,6 +258,7 @@
_path(path)
{
QueueLocker lock;
+ struct stat st;
_fd = open(_path.c_str(), O_RDONLY);
if (_fd == -1)
@@ -276,18 +277,24 @@
}
}
+ if (fstat(_fd, &st) != 0)
+ throw String("Unable to stat file: ") +
String(strerror(errno));
+
// read file
String xml_str;
- char buff[4096];
- ssize_t res;
- res = read_restart(_fd, buff, sizeof(buff));
- if (res <= 0) {
- throw String("error reading batch file: ")
+ while ((off_t) xml_str.size() < st.st_size) {
+ char buff[4096];
+ ssize_t res;
+
+ res = read_restart(_fd, buff, sizeof(buff));
+ if (res <= 0) {
+ throw String("error reading batch file: ")
+ String(strerror(-res));
+ }
+ xml_str.append(buff, res);
+ memset(buff, 0, sizeof(buff));
}
- xml_str.append(buff, res);
- memset(buff, 0, sizeof(buff));
// _xml
_xml = parseXML(xml_str);