commit ae26196e477972f47a85d3b61787044d2fc3a8e0
Merge: 661515f adc523a
Author: Michael Hanselmann <[email protected]>
Date: Fri Apr 29 14:14:15 2011 +0200
Merge branch 'devel-2.4'
* devel-2.4: (24 commits)
mlock: fail gracefully if libc.so.6 cannot be loaded
Allow creating the DRBD metadev in a different VG
Make _GenerateDRBD8Branch accept different VG names
Fix WriteFile with unicode data
Replace disks: keep the meta device in the same VG
Fix for multiple VGs - PlainToDrbd and replace-disks
Fix potential data-loss in utils.WriteFile
Improve error messages in cluster verify/OS
Prevent readding of the master node
Fix punctuation in an error message
cli: Fix wrong argument kind for groups
Quote filename in gnt-instance.8
Fix typo in LUGroupAssignNodes
gnt-instance info: automatically request locking
Document the dependency on OOB for gnt-node power
Fix master IP activation in failover with no-voting
disk wiping: fix bug in chunk size computation
Fix bug in watcher
Release locks before wiping disks during instance creation
utils.WriteFile: Close file before renaming
...
Conflicts:
lib/cmdlib.py: Disk parameter constants
man/gnt-instance.rst: Trivial
man/gnt-node.rst: Trivial
diff --cc Makefile.am
index a9f3272,d50d472..20f795e
--- a/Makefile.am
+++ b/Makefile.am
@@@ -979,12 -817,12 +979,17 @@@ check-local: check-dir
$(CHECK_PYTHON_CODE) $(check_python_code)
$(CHECK_VERSION) $(VERSION) $(top_srcdir)/NEWS
$(CHECK_NEWS) < $(top_srcdir)/NEWS
+ expver=$(VERSION_MAJOR).$(VERSION_MINOR); \
+ if test "`head -n 1 $(top_srcdir)/README`" != "Ganeti $$expver"; then \
+ echo "Incorrect version in README, expected $$expver"; \
+ exit 1; \
+ fi
+.PHONY: hs-check
+hs-check: htools/test
+ @rm -f test.tix
+ ./htools/test
+
.PHONY: lint
lint: $(BUILT_SOURCES)
@test -n "$(PYLINT)" || { echo 'pylint' not found during configure; exit 1; }
diff --cc lib/cmdlib.py
index 0ecfc2f,159ff8e..539d5df
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@@ -7046,13 -6592,14 +7054,15 @@@ def _GenerateDiskTemplate(lu, template_
names.append(lv_prefix + "_meta")
for idx, disk in enumerate(disk_info):
disk_index = idx + base_index
- vg = disk.get(constants.IDISK_VG, vgname)
- data_vg = disk.get("vg", vgname)
- meta_vg = disk.get("metavg", data_vg)
++ data_vg = disk.get(constants.IDISK_VG, vgname)
++ meta_vg = disk.get(constants.IDISK_METAVG, data_vg)
disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
- disk[constants.IDISK_SIZE], vg,
- disk["size"], [data_vg, meta_vg],
- names[idx*2:idx*2+2],
++ disk[constants.IDISK_SIZE],
++ [data_vg, meta_vg],
+ names[idx * 2:idx * 2 + 2],
"disk/%d" % disk_index,
- minors[idx*2], minors[idx*2+1])
- disk_dev.mode = disk["mode"]
+ minors[idx * 2], minors[idx * 2 + 1])
+ disk_dev.mode = disk[constants.IDISK_MODE]
disks.append(disk_dev)
elif template_name == constants.DT_FILE:
if len(secondary_nodes) != 0:
@@@ -7964,13 -7478,11 +7979,16 @@@ class LUInstanceCreate(LogicalUnit)
except (TypeError, ValueError):
raise errors.OpPrereqError("Invalid disk size '%s'" % size,
errors.ECODE_INVAL)
- data_vg = disk.get("vg", self.cfg.GetVGName())
- meta_vg = disk.get("metavg", data_vg)
- new_disk = {"size": size, "mode": mode, "vg": data_vg, "metavg":
meta_vg}
- if "adopt" in disk:
- new_disk["adopt"] = disk["adopt"]
++
++ data_vg = disk.get(constants.IDISK_VG, default_vg)
+ new_disk = {
+ constants.IDISK_SIZE: size,
+ constants.IDISK_MODE: mode,
- constants.IDISK_VG: disk.get(constants.IDISK_VG, default_vg),
++ constants.IDISK_VG: data_vg,
++ constants.IDISK_METAVG: disk.get(constants.IDISK_METAVG, data_vg),
+ }
+ if constants.IDISK_ADOPT in disk:
+ new_disk[constants.IDISK_ADOPT] = disk[constants.IDISK_ADOPT]
self.disks.append(new_disk)
if self.op.mode == constants.INSTANCE_IMPORT:
@@@ -8266,9 -7726,22 +8272,22 @@@
self.context.glm.release(locking.LEVEL_NODE)
del self.acquired_locks[locking.LEVEL_NODE]
- if self.op.wait_for_sync:
+ disk_abort = False
+ if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
+ feedback_fn("* wiping instance disks...")
+ try:
+ _WipeDisks(self, iobj)
+ except errors.OpExecError, err:
+ logging.exception("Wiping disks failed")
+ self.LogWarning("Wiping instance disks failed (%s)", err)
+ disk_abort = True
+
+ if disk_abort:
+ # Something is already wrong with the disks, don't do anything else
+ pass
+ elif self.op.wait_for_sync:
disk_abort = not _WaitForSync(self, iobj)
- elif iobj.disk_template in constants.DTS_NET_MIRROR:
+ elif iobj.disk_template in constants.DTS_INT_MIRROR:
# make sure the disks are not degraded (still sync-ing is ok)
time.sleep(15)
feedback_fn("* checking mirrors status")
@@@ -10025,8 -9448,8 +10045,9 @@@ class LUInstanceSetParams(LogicalUnit)
snode = self.op.remote_node
# create a fake disk info for _GenerateDiskTemplate
- disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode}
- disk_info = [{"size": d.size, "mode": d.mode,
- "vg": d.logical_id[0]} for d in instance.disks]
++ disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
++ constants.IDISK_VG: d.logical_id[0]}
+ for d in instance.disks]
new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
instance.name, pnode, [snode],
disk_info, None, None, 0, feedback_fn)
diff --cc man/gnt-instance.rst
index 9f78629,6e8010a..70ca9bb
--- a/man/gnt-instance.rst
+++ b/man/gnt-instance.rst
@@@ -1346,11 -1493,5 +1350,11 @@@ existing on the node, the entire operat
If the ``--from`` option is given, the list of tags to be removed will
be extended with the contents of that file (each line becomes a tag).
In this case, there is no need to pass tags on the command line (if
- you do, tags from both sources will be removed). A file name of - will
- be interpreted as stdin.
+ you do, tags from both sources will be removed). A file name of ``-``
+ will be interpreted as stdin.
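
Illustrative invocations of the ``--from``/``-`` behaviour described above
(the instance name, file path and tag are hypothetical):

    # remove every tag listed in a file, one tag per line
    gnt-instance remove-tags --from /tmp/old-tags instance1.example.com
    # read the tag list from standard input instead
    echo webserver | gnt-instance remove-tags --from - instance1.example.com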
+
+.. vim: set textwidth=72 :
+.. Local Variables:
+.. mode: rst
+.. fill-column: 72
+.. End:
diff --cc man/gnt-node.rst
index 1874cca,1e619b1..a5aeba8
--- a/man/gnt-node.rst
+++ b/man/gnt-node.rst
@@@ -494,45 -607,12 +494,49 @@@ node
POWER
~~~~~
-**power** on|off|cycle|status {*node*}
+**power** [``--force``] [``--ignore-status``] [``--all``]
+[``--power-delay``] on|off|cycle|status [*nodes*]
This command calls out to out-of-band management to change the power
- state of given node. With ``status`` you get the power status as
- reported by the out-of-band management script.
+ state of the given node. With ``status`` you get the power status as reported
+ by the out-of-band management script.
+
+ Note that this command will only work if the out-of-band functionality
+ is configured and enabled on the cluster. If this is not the case,
+ please use the **powercycle** command above.
+
+Using ``--force`` you skip the confirmation for the operation. Currently
+this only has an effect on ``off`` and ``cycle``. On those two you can
+*not* operate on the master. However, the command will provide you with
+the command to invoke to operate on the master nevertheless. This is
+considered harmful and Ganeti does not support the use of it.
+
+Providing ``--ignore-status`` will ignore the offline=N state of a node
+and continue with power off.
+
+``--power-delay`` specifies the time in seconds (fractions allowed) to
+wait between powering on consecutive nodes. The default is 2 seconds,
+but it can be increased if needed with this option.
+
+*nodes* are optional. If not provided, the command is applied to every
+node in the cluster, except for the ``off`` and ``cycle`` commands,
+where you have to use ``--all`` explicitly to select all nodes.
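
Illustrative invocations of the options described above (node names and
the delay value are hypothetical):

    # query the power status of one node via out-of-band management
    gnt-node power status node1.example.com
    # power on two nodes, waiting 5 seconds between them
    gnt-node power --power-delay=5 on node1.example.com node2.example.com
    # power off every node; ``--all`` must be given explicitly here
    gnt-node power --all off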
+
+
+HEALTH
+~~~~~~
+
+**health** [*nodes*]
+
+This command calls out to out-of-band management to ask for the health
+status of all or the given nodes. The output contains the node name
+followed by its items, each reported in ``item=status`` form, where
+``item`` is script-specific and ``status`` can be one of ``OK``,
+``WARNING``, ``CRITICAL`` or ``UNKNOWN``. Items with status ``WARNING``
+or ``CRITICAL`` are logged and annotated in the command line output.
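
Illustrative invocation (the node name is hypothetical):

    # ask out-of-band management for the health items of one node
    gnt-node health node1.example.com
    # or query every node in the cluster
    gnt-node health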
+
+.. vim: set textwidth=72 :
+.. Local Variables:
+.. mode: rst
+.. fill-column: 72
+.. End: