- Add a new monitor definition syntax that allows multiple monitors to be
  defined.  Monitors are now defined like other objects in the config file:

      monitors = MyMonitor SomeOtherMonitor YetAnotherMonitor   # defines 3 monitors
      monitor_type = human                    # default for all monitors
      monitor_type_SomeOtherMonitor = qmp     # applies only to SomeOtherMonitor
      monitor_type_YetAnotherMonitor = qmp    # applies only to YetAnotherMonitor
      main_monitor = MyMonitor                # defines the main monitor to use
                                              # in the test

- Use the new syntax in tests_base.cfg.sample.

- Establish monitor connections using kvm_monitor in VM.create().
  Store all monitors in self.monitors.  Store main monitor in self.monitor.

- Replace calls to send_monitor_cmd() with calls to the appropriate methods of
  self.monitor (the main monitor); see the usage sketch at the end of this list.

- For now, ignore the screendump_verbose parameter, because monitor commands
  are currently always silent (when successful).
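
- Usage sketch (illustration only, not part of the diff below).  It shows how
  test code is expected to drive the new monitor API; the take_screendump()
  and reset_and_report() helpers are hypothetical, while kvm_monitor,
  MonitorError, vm.monitor, vm.monitors and the cmd()/info()/screendump()
  methods come from this patch:

      import logging
      import kvm_monitor

      def take_screendump(vm, filename):
          # Hypothetical helper: request a screendump through the VM's main
          # monitor and log monitor errors instead of raising them.
          try:
              vm.monitor.screendump(filename)
          except kvm_monitor.MonitorError, e:
              logging.warn(e)

      def reset_and_report(vm):
          # Hypothetical helper: plain commands and "info" queries go through
          # the main monitor; all monitors remain accessible via vm.monitors.
          vm.monitor.cmd("system_reset")
          logging.debug(vm.monitor.info("migrate"))
          for m in vm.monitors:
              logging.info("%s monitor socket: %s", m.protocol, m.filename)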

Signed-off-by: Michael Goldish <mgold...@redhat.com>
---
 client/tests/kvm/kvm_preprocessing.py              |   33 ++--
 client/tests/kvm/kvm_test_utils.py                 |   35 +--
 client/tests/kvm/kvm_vm.py                         |  226 +++++++++-----------
 client/tests/kvm/tests/balloon_check.py            |   12 +-
 client/tests/kvm/tests/boot_savevm.py              |   41 ++--
 client/tests/kvm/tests/ksm_overcommit.py           |    8 +-
 client/tests/kvm/tests/pci_hotplug.py              |   13 +-
 client/tests/kvm/tests/physical_resources_check.py |   40 ++--
 client/tests/kvm/tests/shutdown.py                 |    2 +-
 client/tests/kvm/tests/stepmaker.py                |   44 ++--
 client/tests/kvm/tests/steps.py                    |   12 +-
 client/tests/kvm/tests_base.cfg.sample             |    7 +-
 12 files changed, 232 insertions(+), 241 deletions(-)

diff --git a/client/tests/kvm/kvm_preprocessing.py b/client/tests/kvm/kvm_preprocessing.py
index 76c8268..ee3e9b2 100644
--- a/client/tests/kvm/kvm_preprocessing.py
+++ b/client/tests/kvm/kvm_preprocessing.py
@@ -1,7 +1,7 @@
 import sys, os, time, commands, re, logging, signal, glob, threading, shutil
 from autotest_lib.client.bin import test, utils
 from autotest_lib.client.common_lib import error
-import kvm_vm, kvm_utils, kvm_subprocess, ppm_utils
+import kvm_vm, kvm_utils, kvm_subprocess, kvm_monitor, ppm_utils
 try:
     import PIL.Image
 except ImportError:
@@ -83,7 +83,11 @@ def preprocess_vm(test, params, env, name):
         raise error.TestError("Could not start VM")
 
     scrdump_filename = os.path.join(test.debugdir, "pre_%s.ppm" % name)
-    vm.send_monitor_cmd("screendump %s" % scrdump_filename)
+    try:
+        if vm.monitor:
+            vm.monitor.screendump(scrdump_filename)
+    except kvm_monitor.MonitorError, e:
+        logging.warn(e)
 
 
 def postprocess_image(test, params):
@@ -117,7 +121,11 @@ def postprocess_vm(test, params, env, name):
         return
 
     scrdump_filename = os.path.join(test.debugdir, "post_%s.ppm" % name)
-    vm.send_monitor_cmd("screendump %s" % scrdump_filename)
+    try:
+        if vm.monitor:
+            vm.monitor.screendump(scrdump_filename)
+    except kvm_monitor.MonitorError, e:
+        logging.warn(e)
 
     if params.get("kill_vm") == "yes":
         kill_vm_timeout = float(params.get("kill_vm_timeout", 0))
@@ -356,8 +364,9 @@ def postprocess(test, params, env):
         for vm in kvm_utils.env_get_all_vms(env):
             if not vm.is_dead():
                 logging.info("VM '%s' is alive.", vm.name)
-                logging.info("The monitor unix socket of '%s' is: %s",
-                             vm.name, vm.monitor_file_name)
+                for m in vm.monitors:
+                    logging.info("'%s' has a %s monitor unix socket at: %s",
+                                 vm.name, m.protocol, m.filename)
                 logging.info("The command line used to start '%s' was:\n%s",
                              vm.name, vm.make_qemu_command())
         raise error.JobError("Abort requested (%s)" % exc_string)
@@ -403,10 +412,6 @@ def _take_screendumps(test, params, env):
                                  kvm_utils.generate_random_string(6))
     delay = float(params.get("screendump_delay", 5))
     quality = int(params.get("screendump_quality", 30))
-    if params.get("screendump_verbose") == 'yes':
-        screendump_verbose = True
-    else:
-        screendump_verbose = False
 
     cache = {}
 
@@ -414,11 +419,11 @@ def _take_screendumps(test, params, env):
         for vm in kvm_utils.env_get_all_vms(env):
             if vm.is_dead():
                 continue
-            if screendump_verbose:
-                vm.send_monitor_cmd("screendump %s" % temp_filename)
-            else:
-                vm.send_monitor_cmd("screendump %s" % temp_filename,
-                                    verbose=False)
+            try:
+                vm.monitor.screendump(temp_filename)
+            except kvm_monitor.MonitorError, e:
+                logging.warn(e)
+                continue
             if not os.path.exists(temp_filename):
                 logging.warn("VM '%s' failed to produce a screendump", vm.name)
                 continue
diff --git a/client/tests/kvm/kvm_test_utils.py b/client/tests/kvm/kvm_test_utils.py
index 24e2bf5..c3b6b8a 100644
--- a/client/tests/kvm/kvm_test_utils.py
+++ b/client/tests/kvm/kvm_test_utils.py
@@ -85,9 +85,9 @@ def reboot(vm, session, method="shell", sleep_before_reset=10, nic_index=0,
         # Sleep for a while before sending the command
         time.sleep(sleep_before_reset)
         # Send a system_reset monitor command
-        vm.send_monitor_cmd("system_reset")
+        vm.monitor.cmd("system_reset")
         logging.info("Monitor command system_reset sent. Waiting for guest to "
-                     "go down")
+                     "go down...")
     else:
         logging.error("Unknown reboot method: %s", method)
 
@@ -119,21 +119,16 @@ def migrate(vm, env=None):
     """
     # Helper functions
     def mig_finished():
-        s, o = vm.send_monitor_cmd("info migrate")
-        return s == 0 and not "Migration status: active" in o
+        o = vm.monitor.info("migrate")
+        return "status: active" not in o
 
     def mig_succeeded():
-        s, o = vm.send_monitor_cmd("info migrate")
-        return s == 0 and "Migration status: completed" in o
+        o = vm.monitor.info("migrate")
+        return "status: completed" in o
 
     def mig_failed():
-        s, o = vm.send_monitor_cmd("info migrate")
-        return s == 0 and "Migration status: failed" in o
-
-    # See if migration is supported
-    s, o = vm.send_monitor_cmd("help info")
-    if not "info migrate" in o:
-        raise error.TestError("Migration is not supported")
+        o = vm.monitor.info("migrate")
+        return "status: failed" in o
 
     # Clone the source VM and ask the clone to wait for incoming migration
     dest_vm = vm.clone()
@@ -141,21 +136,15 @@ def migrate(vm, env=None):
         raise error.TestError("Could not create dest VM")
 
     try:
-        # Define the migration command
-        cmd = "migrate -d tcp:localhost:%d" % dest_vm.migration_port
-        logging.debug("Migrating with command: %s" % cmd)
-
         # Migrate
-        s, o = vm.send_monitor_cmd(cmd)
-        if s:
-            logging.error("Migration command failed (command: %r, output: %r)"
-                          % (cmd, o))
-            raise error.TestFail("Migration command failed")
+        uri = "tcp:localhost:%d" % dest_vm.migration_port
+        logging.debug("Migrating to: %s" % uri)
+        o = vm.monitor.migrate(uri)
 
         # Wait for migration to finish
         if not kvm_utils.wait_for(mig_finished, 90, 2, 2,
                                   "Waiting for migration to finish..."):
-            raise error.TestFail("Timeout elapsed while waiting for migration "
+            raise error.TestFail("Timeout expired while waiting for migration "
                                  "to finish")
 
         # Report migration status
diff --git a/client/tests/kvm/kvm_vm.py b/client/tests/kvm/kvm_vm.py
index e40abb4..6aae053 100755
--- a/client/tests/kvm/kvm_vm.py
+++ b/client/tests/kvm/kvm_vm.py
@@ -5,8 +5,8 @@ Utility classes and functions to handle Virtual Machine creation using qemu.
 @copyright: 2008-2009 Red Hat Inc.
 """
 
-import time, socket, os, logging, fcntl, re, commands
-import kvm_utils, kvm_subprocess
+import time, socket, os, logging, fcntl, re, commands, glob
+import kvm_utils, kvm_subprocess, kvm_monitor
 from autotest_lib.client.common_lib import error
 from autotest_lib.client.bin import utils
 
@@ -109,25 +109,20 @@ class VM:
         self.redirs = {}
         self.vnc_port = 5900
         self.uuid = None
+        self.monitors = []
+        self.monitor = None
+        self.pci_assignable = None
 
         self.name = name
         self.params = params
         self.root_dir = root_dir
         self.address_cache = address_cache
-        self.pci_assignable = None
 
-        # Find available monitor filename
+        # Find a unique identifier for this VM
         while True:
-            # A unique identifier for this VM
             self.instance = (time.strftime("%Y%m%d-%H%M%S-") +
                              kvm_utils.generate_random_string(4))
-            # Monitor
-            self.monitor_file_name = "/tmp/monitor-" + self.instance
-            # Test log for unit tests
-            self.testlog_file_name = "/tmp/testlog-" + self.instance
-            # Verify uniqueness
-            if True not in map(os.path.exists, [self.monitor_file_name,
-                                                self.testlog_file_name]):
+            if not glob.glob("/tmp/*%s" % self.instance):
                 break
 
 
@@ -203,9 +198,12 @@ class VM:
         def add_name(help, name):
             return " -name '%s'" % name
 
-        def add_unix_socket_monitor(help, filename):
+        def add_human_monitor(help, filename):
             return " -monitor unix:'%s',server,nowait" % filename
 
+        def add_qmp_monitor(help, filename):
+            return " -qmp unix:'%s',server,nowait" % filename
+
         def add_mem(help, mem):
             return " -m %s" % mem
 
@@ -307,8 +305,14 @@ class VM:
         qemu_cmd += qemu_binary
         # Add the VM's name
         qemu_cmd += add_name(help, name)
-        # Add the monitor socket parameter
-        qemu_cmd += add_unix_socket_monitor(help, self.monitor_file_name)
+        # Add monitors
+        for monitor_name in kvm_utils.get_sub_dict_names(params, "monitors"):
+            monitor_params = kvm_utils.get_sub_dict(params, monitor_name)
+            monitor_filename = self.get_monitor_filename(monitor_name)
+            if monitor_params.get("monitor_type") == "qmp":
+                qemu_cmd += add_qmp_monitor(help, monitor_filename)
+            else:
+                qemu_cmd += add_human_monitor(help, monitor_filename)
 
         for image_name in kvm_utils.get_sub_dict_names(params, "images"):
             image_params = kvm_utils.get_sub_dict(params, image_name)
@@ -410,7 +414,7 @@ class VM:
             qemu_cmd += add_uuid(help, params.get("uuid"))
 
         if params.get("testdev") == "yes":
-            qemu_cmd += add_testdev(help, self.testlog_file_name)
+            qemu_cmd += add_testdev(help, self.get_testlog_filename())
 
         # If the PCI assignment step went OK, add each one of the PCI assigned
         # devices to the qemu command line.
@@ -567,6 +571,7 @@ class VM:
             self.process = kvm_subprocess.run_bg(qemu_command, None,
                                                  logging.debug, "(qemu) ")
 
+            # Make sure the process was started successfully
             if not self.process.is_alive():
                 logging.error("VM could not be created; "
                               "qemu command failed:\n%s" % qemu_command)
@@ -576,11 +581,41 @@ class VM:
                 self.destroy()
                 return False
 
-            if not kvm_utils.wait_for(self.is_alive, timeout, 0, 1):
-                logging.error("VM is not alive for some reason; "
-                              "qemu command:\n%s" % qemu_command)
-                self.destroy()
-                return False
+            # Establish monitor connections
+            self.monitors = []
+            self.monitor = None
+            for monitor_name in kvm_utils.get_sub_dict_names(params, "monitors"):
+                monitor_params = kvm_utils.get_sub_dict(params, monitor_name)
+                # Wait for monitor connection to succeed
+                end_time = time.time() + timeout
+                while time.time() < end_time:
+                    try:
+                        if monitor_params.get("monitor_type") == "qmp":
+                            # Add a QMP monitor: not implemented yet
+                            monitor = None
+                        else:
+                            # Add a "human" monitor
+                            monitor = kvm_monitor.HumanMonitor(
+                                self.get_monitor_filename(monitor_name))
+                    except kvm_monitor.MonitorError, e:
+                        logging.warn(e)
+                    else:
+                        if monitor and monitor.is_responsive():
+                            break
+                    time.sleep(1)
+                else:
+                    logging.error("Could not connect to monitor '%s'" %
+                                  monitor_name)
+                    self.destroy()
+                    return False
+                # Add this monitor to the list
+                self.monitors += [monitor]
+                # Define the main monitor
+                if params.get("main_monitor") == monitor_name:
+                    self.monitor = monitor
+            # If the main monitor hasn't been defined yet, use the first one
+            if self.monitors and not self.monitor:
+                self.monitor = self.monitors[0]
 
             # Get the output so far, to see if we have any problems with
             # hugepage setup.
@@ -602,89 +637,6 @@ class VM:
             lockfile.close()
 
 
-    def send_monitor_cmd(self, command, block=True, timeout=20.0, verbose=True):
-        """
-        Send command to the QEMU monitor.
-
-        Connect to the VM's monitor socket and wait for the (qemu) prompt.
-        If block is True, read output from the socket until the (qemu) prompt
-        is found again, or until timeout expires.
-
-        Return a tuple containing an integer indicating success or failure,
-        and the data read so far. The integer is 0 on success and 1 on failure.
-        A failure is any of the following cases: connection to the socket
-        failed, or the first (qemu) prompt could not be found, or block is
-        True and the second prompt could not be found.
-
-        @param command: Command that will be sent to the monitor
-        @param block: Whether the output from the socket will be read until
-                the timeout expires
-        @param timeout: Timeout (seconds) before giving up on reading from
-                socket
-        """
-        def read_up_to_qemu_prompt(s, timeout):
-            """
-            Read data from socket s until the (qemu) prompt is found.
-
-            If the prompt is found before timeout expires, return a tuple
-            containing True and the data read. Otherwise return a tuple
-            containing False and the data read so far.
-
-            @param s: Socket object
-            @param timeout: Time (seconds) before giving up trying to get the
-                    qemu prompt.
-            """
-            o = ""
-            end_time = time.time() + timeout
-            while time.time() < end_time:
-                try:
-                    o += s.recv(1024)
-                    if o.splitlines()[-1].split()[-1] == "(qemu)":
-                        return (True, o)
-                except:
-                    time.sleep(0.01)
-            return (False, o)
-
-        # In certain conditions printing this debug output might be too much
-        # Just print it if verbose is enabled (True by default)
-        if verbose:
-            logging.debug("Sending monitor command: %s" % command)
-        # Connect to monitor
-        try:
-            s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-            s.setblocking(False)
-            s.connect(self.monitor_file_name)
-        except:
-            logging.debug("Could not connect to monitor socket")
-            return (1, "")
-
-        # Send the command and get the resulting output
-        try:
-            status, data = read_up_to_qemu_prompt(s, timeout)
-            if not status:
-                logging.debug("Could not find (qemu) prompt; output so far:" +
-                              kvm_utils.format_str_for_message(data))
-                return (1, "")
-            # Send command
-            s.sendall(command + "\n")
-            # Receive command output
-            data = ""
-            if block:
-                status, data = read_up_to_qemu_prompt(s, timeout)
-                data = "\n".join(data.splitlines()[1:])
-                if not status:
-                    logging.debug("Could not find (qemu) prompt after command; 
"
-                                  "output so far:" +
-                                  kvm_utils.format_str_for_message(data))
-                    return (1, data)
-            return (0, data)
-
-        # Clean up before exiting
-        finally:
-            s.shutdown(socket.SHUT_RDWR)
-            s.close()
-
-
     def destroy(self, gracefully=True):
         """
         Destroy the VM.
@@ -721,15 +673,18 @@ class VM:
                     finally:
                         session.close()
 
-            # Try to destroy with a monitor command
-            logging.debug("Trying to kill VM with monitor command...")
-            status, output = self.send_monitor_cmd("quit", block=False)
-            # Was the command sent successfully?
-            if status == 0:
-                # Wait for the VM to be really dead
-                if kvm_utils.wait_for(self.is_dead, 5, 0.5, 0.5):
-                    logging.debug("VM is down")
-                    return
+            if self.monitor:
+                # Try to destroy with a monitor command
+                logging.debug("Trying to kill VM with monitor command...")
+                try:
+                    self.monitor.quit()
+                except kvm_monitor.MonitorError, e:
+                    logging.warn(e)
+                else:
+                    # Wait for the VM to be really dead
+                    if kvm_utils.wait_for(self.is_dead, 5, 0.5, 0.5):
+                        logging.debug("VM is down")
+                        return
 
             # If the VM isn't dead yet...
             logging.debug("Cannot quit normally; sending a kill to close the "
@@ -747,7 +702,10 @@ class VM:
                 self.pci_assignable.release_devs()
             if self.process:
                 self.process.close()
-            for f in [self.monitor_file_name, self.testlog_file_name]:
+            self.monitors = []
+            self.monitor = None
+            for f in ([self.get_testlog_filename()] +
+                      self.get_monitor_filenames()):
                 try:
                     os.unlink(f)
                 except OSError:
@@ -762,10 +720,7 @@ class VM:
         if self.is_dead():
             return False
         # Try sending a monitor command
-        (status, output) = self.send_monitor_cmd("help")
-        if status:
-            return False
-        return True
+        return bool(self.monitor) and self.monitor.is_responsive()
 
 
     def is_dead(self):
@@ -791,6 +746,29 @@ class VM:
         return self.params
 
 
+    def get_monitor_filename(self, monitor_name):
+        """
+        Return the filename corresponding to a given monitor name.
+        """
+        return "/tmp/monitor-%s-%s" % (monitor_name, self.instance)
+
+
+    def get_monitor_filenames(self):
+        """
+        Return a list of all monitor filenames (as specified in the VM's
+        params).
+        """
+        return [self.get_monitor_filename(m) for m in
+                kvm_utils.get_sub_dict_names(self.params, "monitors")]
+
+
+    def get_testlog_filename(self):
+        """
+        Return the testlog filename.
+        """
+        return "/tmp/testlog-%s" % self.instance
+
+
     def get_address(self, index=0):
         """
         Return the address of a NIC of the guest, in host space.
@@ -988,12 +966,12 @@ class VM:
         # For compatibility with versions of QEMU that do not recognize all
         # key names: replace keyname with the hex value from the dict, which
         # QEMU will definitely accept
-        dict = { "comma": "0x33",
-                 "dot": "0x34",
-                 "slash": "0x35" }
-        for key in dict.keys():
-            keystr = keystr.replace(key, dict[key])
-        self.send_monitor_cmd("sendkey %s 1" % keystr)
+        dict = {"comma": "0x33",
+                "dot":   "0x34",
+                "slash": "0x35"}
+        for key, value in dict.items():
+            keystr = keystr.replace(key, value)
+        self.monitor.sendkey(keystr)
         time.sleep(0.2)
 
 
diff --git a/client/tests/kvm/tests/balloon_check.py b/client/tests/kvm/tests/balloon_check.py
index 2d483c6..bbab95f 100644
--- a/client/tests/kvm/tests/balloon_check.py
+++ b/client/tests/kvm/tests/balloon_check.py
@@ -1,6 +1,6 @@
 import re, string, logging, random, time
 from autotest_lib.client.common_lib import error
-import kvm_test_utils, kvm_utils
+import kvm_test_utils, kvm_utils, kvm_monitor
 
 def run_balloon_check(test, params, env):
     """
@@ -21,9 +21,10 @@ def run_balloon_check(test, params, env):
         @return: Number of failures occurred during operation.
         """
         fail = 0
-        status, output = vm.send_monitor_cmd("info balloon")
-        if status != 0:
-            logging.error("qemu monitor command failed: info balloon")
+        try:
+            output = vm.monitor.info("balloon")
+        except kvm_monitor.MonitorError, e:
+            logging.error(e)
             fail += 1
             return 0
         return int(re.findall("\d+", output)[0]), fail
@@ -39,7 +40,8 @@ def run_balloon_check(test, params, env):
         """
         fail = 0
         logging.info("Changing VM memory to %s", new_mem)
-        vm.send_monitor_cmd("balloon %s" % new_mem)
+        # This should be replaced by proper monitor method call
+        vm.monitor.cmd("balloon %s" % new_mem)
         time.sleep(20)
 
         ballooned_mem, cfail = check_ballooned_memory()
diff --git a/client/tests/kvm/tests/boot_savevm.py b/client/tests/kvm/tests/boot_savevm.py
index e8ea724..3305695 100644
--- a/client/tests/kvm/tests/boot_savevm.py
+++ b/client/tests/kvm/tests/boot_savevm.py
@@ -1,6 +1,6 @@
 import logging, time
 from autotest_lib.client.common_lib import error
-import kvm_subprocess, kvm_test_utils, kvm_utils
+import kvm_test_utils, kvm_utils, kvm_monitor
 
 def run_boot_savevm(test, params, env):
     """
@@ -23,23 +23,30 @@ def run_boot_savevm(test, params, env):
     while time.time() < end_time:
         time.sleep(savevm_delay)
 
-        s, o = vm.send_monitor_cmd("stop")
-        if s:
-            logging.error("stop failed: %r" % o)
-        s, o = vm.send_monitor_cmd("savevm 1")
-        if s:
-            logging.error("savevm failed: %r" % o)
-        s, o = vm.send_monitor_cmd("system_reset")
-        if s:
-            logging.error("system_reset: %r" % o)
-        s, o = vm.send_monitor_cmd("loadvm 1")
-        if s:
-            logging.error("loadvm failed: %r" % o)
-        s, o = vm.send_monitor_cmd("cont")
-        if s:
-            logging.error("cont failed: %r" % o)
+        try:
+            vm.monitor.cmd("stop")
+        except kvm_monitor.MonitorError, e:
+            logging.error(e)
+        try:
+            # This should be replaced by a proper monitor method call
+            vm.monitor.cmd("savevm 1")
+        except kvm_monitor.MonitorError, e:
+            logging.error(e)
+        try:
+            vm.monitor.cmd("system_reset")
+        except kvm_monitor.MonitorError, e:
+            logging.error(e)
+        try:
+            # This should be replaced by a proper monitor method call
+            vm.monitor.cmd("loadvm 1")
+        except kvm_monitor.MonitorError, e:
+            logging.error(e)
+        try:
+            vm.monitor.cmd("cont")
+        except kvm_monitor.MonitorError, e:
+            logging.error(e)
 
-         # Log in
+        # Log in
         if (time.time() > login_expire):
             login_expire = time.time() + savevm_login_delay
             logging.info("Logging in after loadvm...")
diff --git a/client/tests/kvm/tests/ksm_overcommit.py b/client/tests/kvm/tests/ksm_overcommit.py
index 2b791f1..ddf1670 100644
--- a/client/tests/kvm/tests/ksm_overcommit.py
+++ b/client/tests/kvm/tests/ksm_overcommit.py
@@ -173,11 +173,11 @@ def run_ksm_overcommit(test, params, env):
                     # We need to keep some memory for python to run.
                     if (free_mem < 64000) or (ksm_swap and
                                              free_mem < (450000 * perf_ratio)):
-                        vm.send_monitor_cmd('stop')
+                        vm.monitor.cmd("stop")
                         for j in range(0, i):
                             lvms[j].destroy(gracefully = False)
                         time.sleep(20)
-                        vm.send_monitor_cmd('c')
+                        vm.monitor.cmd("c")
                         logging.debug("Only %s free memory, killing %d guests" 
%
                                       (free_mem, (i-1)))
                         last_vm = i
@@ -188,12 +188,12 @@ def run_ksm_overcommit(test, params, env):
                 logging.debug("Only %s host free memory, killing %d guests" %
                               (free_mem, (i - 1)))
                 logging.debug("Stopping %s", vm.name)
-                vm.send_monitor_cmd('stop')
+                vm.monitor.cmd("stop")
                 for j in range(0, i):
                     logging.debug("Destroying %s", lvms[j].name)
                     lvms[j].destroy(gracefully = False)
                 time.sleep(20)
-                vm.send_monitor_cmd('c')
+                vm.monitor.cmd("c")
                 last_vm = i
 
             if last_vm != 0:
diff --git a/client/tests/kvm/tests/pci_hotplug.py b/client/tests/kvm/tests/pci_hotplug.py
index d8f34f8..f50a67e 100644
--- a/client/tests/kvm/tests/pci_hotplug.py
+++ b/client/tests/kvm/tests/pci_hotplug.py
@@ -27,7 +27,7 @@ def run_pci_hotplug(test, params, env):
             raise error.TestError("Modprobe module '%s' failed" % module)
 
     # Get output of command 'info pci' as reference
-    s, info_pci_ref = vm.send_monitor_cmd("info pci")
+    info_pci_ref = vm.monitor.info("pci")
 
     # Get output of command as reference
     reference = session.get_command_output(params.get("reference_cmd"))
@@ -43,21 +43,22 @@ def run_pci_hotplug(test, params, env):
         pci_add_cmd = ("pci_add pci_addr=auto storage file=%s,if=%s" %
                        (image_filename, tested_model))
 
-    # Implement pci_add
-    s, add_output = vm.send_monitor_cmd(pci_add_cmd)
+    # Execute pci_add (should be replaced by a proper monitor method call)
+    add_output = vm.monitor.cmd(pci_add_cmd)
     if not "OK domain" in add_output:
         raise error.TestFail("Add device failed. Hypervisor command is: %s. "
                              "Output: %r" % (pci_add_cmd, add_output))
-    s, after_add = vm.send_monitor_cmd("info pci")
+    after_add = vm.monitor.info("pci")
 
     # Define a helper function to delete the device
     def pci_del(ignore_failure=False):
         slot_id = "0" + add_output.split(",")[2].split()[1]
         cmd = "pci_del pci_addr=%s" % slot_id
-        vm.send_monitor_cmd(cmd)
+        # This should be replaced by a proper monitor method call
+        vm.monitor.cmd(cmd)
 
         def device_removed():
-            s, after_del = vm.send_monitor_cmd("info pci")
+            after_del = vm.monitor.info("pci")
             return after_del != after_add
 
         if (not kvm_utils.wait_for(device_removed, 10, 0, 1)
diff --git a/client/tests/kvm/tests/physical_resources_check.py b/client/tests/kvm/tests/physical_resources_check.py
index af9613e..946760f 100644
--- a/client/tests/kvm/tests/physical_resources_check.py
+++ b/client/tests/kvm/tests/physical_resources_check.py
@@ -1,6 +1,6 @@
 import re, string, logging
 from autotest_lib.client.common_lib import error
-import kvm_test_utils, kvm_utils
+import kvm_test_utils, kvm_utils, kvm_monitor
 
 
 def run_physical_resources_check(test, params, env):
@@ -48,13 +48,15 @@ def run_physical_resources_check(test, params, env):
         logging.error("    Reported by OS: %s" % actual_mem)
 
     # Define a function for checking number of hard drivers & NICs
-    def check_num(devices, cmd, check_str):
+    def check_num(devices, info_cmd, check_str):
         f_fail = 0
         expected_num = kvm_utils.get_sub_dict_names(params, devices).__len__()
-        s, o = vm.send_monitor_cmd(cmd)
-        if s != 0:
+        try:
+            o = vm.monitor.info(info_cmd)
+        except kvm_monitor.MonitorError, e:
             f_fail += 1
-            logging.error("qemu monitor command failed: %s" % cmd)
+            logging.error(e)
+            logging.error("info/query monitor command failed (%s)", info_cmd)
 
         actual_num = string.count(o, check_str)
         if expected_num != actual_num:
@@ -65,25 +67,28 @@ def run_physical_resources_check(test, params, env):
         return expected_num, f_fail
 
     logging.info("Hard drive count check")
-    drives_num, f_fail = check_num("images", "info block", "type=hd")
+    drives_num, f_fail = check_num("images", "block", "type=hd")
     n_fail += f_fail
 
     logging.info("NIC count check")
-    nics_num, f_fail = check_num("nics", "info network", "model=")
+    nics_num, f_fail = check_num("nics", "network", "model=")
     n_fail += f_fail
 
     # Define a function for checking hard drives & NICs' model
-    def chk_fmt_model(device, fmt_model, cmd, str):
+    def chk_fmt_model(device, fmt_model, info_cmd, str):
         f_fail = 0
         devices = kvm_utils.get_sub_dict_names(params, device)
         for chk_device in devices:
             expected = kvm_utils.get_sub_dict(params, chk_device).get(fmt_model)
             if not expected:
                 expected = "rtl8139"
-            s, o = vm.send_monitor_cmd(cmd)
-            if s != 0:
+            try:
+                o = vm.monitor.info(info_cmd)
+            except kvm_monitor.MonitorError, e:
                 f_fail += 1
-                logging.error("qemu monitor command failed: %s" % cmd)
+                logging.error(e)
+                logging.error("info/query monitor command failed (%s)",
+                              info_cmd)
 
             device_found = re.findall(str, o)
             logging.debug("Found devices: %s" % device_found)
@@ -100,19 +105,20 @@ def run_physical_resources_check(test, params, env):
         return f_fail
 
     logging.info("NICs model check")
-    f_fail = chk_fmt_model("nics", "nic_model", "info network", "model=(.*),")
+    f_fail = chk_fmt_model("nics", "nic_model", "network", "model=(.*),")
     n_fail += f_fail
 
     logging.info("Drive format check")
-    f_fail = chk_fmt_model("images", "drive_format", "info block",
-                           "(.*)\: type=hd")
+    f_fail = chk_fmt_model("images", "drive_format", "block", "(.*)\: type=hd")
     n_fail += f_fail
 
     logging.info("Network card MAC check")
-    s, o = vm.send_monitor_cmd("info network")
-    if s != 0:
+    try:
+        o = vm.monitor.info("network")
+    except kvm_monitor.MonitorError, e:
         n_fail += 1
-        logging.error("qemu monitor command failed: info network")
+        logging.error(e)
+        logging.error("info/query monitor command failed (network)")
     found_mac_addresses = re.findall("macaddr=(.*)", o)
     logging.debug("Found MAC adresses: %s" % found_mac_addresses)
 
diff --git a/client/tests/kvm/tests/shutdown.py b/client/tests/kvm/tests/shutdown.py
index 8a252d9..3cbdd79 100644
--- a/client/tests/kvm/tests/shutdown.py
+++ b/client/tests/kvm/tests/shutdown.py
@@ -28,7 +28,7 @@ def run_shutdown(test, params, env):
             # Sleep for a while -- give the guest a chance to finish booting
             time.sleep(float(params.get("sleep_before_powerdown", 10)))
             # Send a system_powerdown monitor command
-            vm.send_monitor_cmd("system_powerdown")
+            vm.monitor.cmd("system_powerdown")
             logging.info("system_powerdown monitor command sent; waiting for "
                          "guest to go down...")
 
diff --git a/client/tests/kvm/tests/stepmaker.py b/client/tests/kvm/tests/stepmaker.py
index 24060db..ee0ed92 100755
--- a/client/tests/kvm/tests/stepmaker.py
+++ b/client/tests/kvm/tests/stepmaker.py
@@ -10,7 +10,7 @@ Step file creator/editor.
 import pygtk, gtk, gobject, time, os, commands
 import common
 from autotest_lib.client.common_lib import error
-import kvm_utils, logging, ppm_utils, stepeditor
+import kvm_utils, logging, ppm_utils, stepeditor, kvm_monitor
 pygtk.require('2.0')
 
 
@@ -84,7 +84,7 @@ class StepMaker(stepeditor.StepMakerWindow):
 
 
     def destroy(self, widget):
-        self.vm.send_monitor_cmd("cont")
+        self.vm.monitor.cmd("cont")
         self.steps_file.close()
         self.vars_file.close()
         stepeditor.StepMakerWindow.destroy(self, widget)
@@ -112,7 +112,7 @@ class StepMaker(stepeditor.StepMakerWindow):
         # Start the screendump timer
         self.redirect_timer(100, self.update)
         # Resume the VM
-        self.vm.send_monitor_cmd("cont")
+        self.vm.monitor.cmd("cont")
 
 
     def switch_to_step_mode(self):
@@ -127,7 +127,7 @@ class StepMaker(stepeditor.StepMakerWindow):
         # Start the screendump timer
         self.redirect_timer()
         # Stop the VM
-        self.vm.send_monitor_cmd("stop")
+        self.vm.monitor.cmd("stop")
 
 
     # Events in step mode
@@ -137,10 +137,10 @@ class StepMaker(stepeditor.StepMakerWindow):
         if os.path.exists(self.screendump_filename):
             os.unlink(self.screendump_filename)
 
-        (status, output) = self.vm.send_monitor_cmd("screendump " +
-                                                    self.screendump_filename)
-        if status: # Failure
-            logging.info("Could not fetch screendump")
+        try:
+            self.vm.monitor.screendump(self.screendump_filename)
+        except kvm_monitor.MonitorError, e:
+            logging.warn(e)
         else:
             self.set_image_from_file(self.screendump_filename)
 
@@ -228,15 +228,14 @@ class StepMaker(stepeditor.StepMakerWindow):
                     continue
                 self.vm.send_string(val)
             elif words[0] == "mousemove":
-                self.vm.send_monitor_cmd("mouse_move %d %d" % (-8000,-8000))
+                self.vm.monitor.mouse_move(-8000, -8000)
                 time.sleep(0.5)
-                self.vm.send_monitor_cmd("mouse_move %s %s" % (words[1],
-                                                               words[2]))
+                self.vm.monitor.mouse_move(words[1], words[2])
                 time.sleep(0.5)
             elif words[0] == "mouseclick":
-                self.vm.send_monitor_cmd("mouse_button %s" % words[1])
+                self.vm.monitor.mouse_button(words[1])
                 time.sleep(0.1)
-                self.vm.send_monitor_cmd("mouse_button 0")
+                self.vm.monitor.mouse_button(0)
 
         # Remember the current time
         self.time_when_actions_completed = time.time()
@@ -267,7 +266,7 @@ class StepMaker(stepeditor.StepMakerWindow):
                 self.event_capture_button_release,
                 self.event_capture_scroll)
         self.redirect_timer(10, self.update_capture)
-        self.vm.send_monitor_cmd("cont")
+        self.vm.monitor.cmd("cont")
 
     # Events in mouse capture mode
 
@@ -280,11 +279,10 @@ class StepMaker(stepeditor.StepMakerWindow):
 
         delay = self.spin_latency.get_value() / 1000
         if (x, y) != (self.prev_x, self.prev_y):
-            self.vm.send_monitor_cmd("mouse_move %d %d" % (-8000, -8000))
+            self.vm.monitor.mouse_move(-8000, -8000)
             time.sleep(delay)
-            self.vm.send_monitor_cmd("mouse_move %d %d" %
-                                     (self.mouse_click_coords[0],
-                                      self.mouse_click_coords[1]))
+            self.vm.monitor.mouse_move(self.mouse_click_coords[0],
+                                       self.mouse_click_coords[1])
             time.sleep(delay)
 
         self.prev_x = x
@@ -293,10 +291,10 @@ class StepMaker(stepeditor.StepMakerWindow):
         if os.path.exists(self.screendump_filename):
             os.unlink(self.screendump_filename)
 
-        (status, output) = self.vm.send_monitor_cmd("screendump " +
-                                                    self.screendump_filename)
-        if status: # Failure
-            logging.info("Could not fetch screendump")
+        try:
+            self.vm.monitor.screendump(self.screendump_filename)
+        except kvm_monitor.MonitorError, e:
+            logging.warn(e)
         else:
             self.set_image_from_file(self.screendump_filename)
 
@@ -317,7 +315,7 @@ class StepMaker(stepeditor.StepMakerWindow):
                 None,
                 self.event_expose)
         self.redirect_timer()
-        self.vm.send_monitor_cmd("stop")
+        self.vm.monitor.cmd("stop")
         self.mouse_click_captured = True
         self.mouse_click_button = event.button
         self.set_image(self.image_width_backup, self.image_height_backup,
diff --git a/client/tests/kvm/tests/steps.py b/client/tests/kvm/tests/steps.py
index 8ebe7c1..6f782f5 100644
--- a/client/tests/kvm/tests/steps.py
+++ b/client/tests/kvm/tests/steps.py
@@ -6,7 +6,7 @@ Utilities to perform automatic guest installation using step files.
 
 import os, time, re, shutil, logging
 from autotest_lib.client.common_lib import utils, error
-import kvm_utils, ppm_utils, kvm_subprocess
+import kvm_utils, ppm_utils, kvm_subprocess, kvm_monitor
 try:
     import PIL.Image
 except ImportError:
@@ -85,10 +85,10 @@ def barrier_2(vm, words, params, debug_dir, data_scrdump_filename,
             break
 
         # Request screendump
-        (status, output) = vm.send_monitor_cmd("screendump %s" %
-                                               scrdump_filename)
-        if status:
-            logging.error("Could not fetch screendump")
+        try:
+            vm.monitor.screendump(scrdump_filename)
+        except kvm_monitor.MonitorError, e:
+            logging.warn(e)
             continue
 
         # Read image file
@@ -199,7 +199,7 @@ def run_steps(test, params, env):
     lines = sf.readlines()
     sf.close()
 
-    vm.send_monitor_cmd("cont")
+    vm.monitor.cmd("cont")
 
     current_step_num = 0
     current_screendump = None
diff --git a/client/tests/kvm/tests_base.cfg.sample b/client/tests/kvm/tests_base.cfg.sample
index b302557..cabeb4a 100644
--- a/client/tests/kvm/tests_base.cfg.sample
+++ b/client/tests/kvm/tests_base.cfg.sample
@@ -4,9 +4,11 @@
 vms = vm1
 images = image1
 nics = nic1
+monitors = humanmonitor1
 
-# Choose the main VM
+# Choose the main VM and monitor
 main_vm = vm1
+main_monitor = humanmonitor1
 
 # Some preprocessor/postprocessor params
 start_vm = yes
@@ -34,6 +36,9 @@ image_size = 10G
 shell_port = 22
 display = vnc
 
+# Monitor params
+monitor_type = human
+
 # Default scheduler params
 used_cpus = 1
 used_mem = 512
-- 
1.5.4.1
