On 03/08/2013 03:16 PM, Don Zickus wrote:
This is the bulk of the harness file for enabling beaker.

The idea is to bootstrap it by downloading the remote xml file, parsing it,
converting it into a control file and passing that to autotest.

The rest of the harness has various hacks to deal with passing info
from autotest to beaker.

The backend classes will be shown in the next patches.

It would have been better to include the backend classes first, for a more logical progression.

This patch will need to be reviewed by autotest and beaker folks.

Signed-off-by: Don Zickus <dzic...@redhat.com>
---
  client/harness_beaker.py |  399 ++++++++++++++++++++++++++++++++++++++++++++++
  1 files changed, 399 insertions(+), 0 deletions(-)
  create mode 100644 client/harness_beaker.py

diff --git a/client/harness_beaker.py b/client/harness_beaker.py
new file mode 100644
index 0000000..c973a77
--- /dev/null
+++ b/client/harness_beaker.py
@@ -0,0 +1,399 @@
+# harness_beaker.py
+#
+# Copyright (C) 2011 Jan Stancek <jstan...@redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+"""
+The harness interface
+The interface between the client and beaker lab controller.
+"""
+__author__ = """Copyright Jan Stancek 2011"""
+
+import os
+import optparse
+import logging as log
+import harness
+import time
+import re
+from time import gmtime, strftime
+from autotest.client.shared import utils,error
+
+from autotest.client.bkr_xml import BeakerXMLParser
+from autotest.client.bkr_proxy import BkrProxy
+from autotest.client.at_utils import logException
+
+'''Use 5 minutes for console heartbeat'''
+BEAKER_CONSOLE_HEARTBEAT = 60 * 5
+#BEAH_CONF_PATH = '/etc/beah_beaker.conf'
+
+class harness_beaker(harness.harness):
+    def __init__(self, job, harness_args):
+        log.debug('harness_beaker __init__')
+        super(harness_beaker, self).__init__(job)
+
+        self.args = {}
+        self.at_client_home = os.path.dirname(os.path.dirname(__file__))
+        self.at_home = os.path.dirname(self.at_client_home)
+        self.at_results = os.path.join(self.at_client_home, 'results/default')
+        self.state_file = os.path.join(os.path.dirname(__file__), 'harness_beaker.state')
+        self.recipe_xml = None
+        self.bkr_proxy = None
+        self.labc_url = os.environ['LAB_CONTROLLER']
+        self.hostname = os.environ['HOSTNAME']
+        self.common_results_uploaded_after = False
+        self.watchdog_pid = None
+        self.job = job
+        self.current_task_id = [-1]
+        self.skip_upload = False  #hack to work around reservesys
+
+
+        log.debug('harness_beaker: autotest_home: <%s>', self.at_home)
+        log.debug('harness_beaker: autotest_client_home: <%s>', self.at_client_home)
+        log.debug('harness_beaker: autotest_results: <%s>', self.at_results)
+        log.debug('harness_beaker: state_file: <%s>', self.state_file)
+
+        log.info('harness_beaker: hostname: <%s>', self.hostname)
+        log.info('harness_beaker: labc_url: <%s>', self.labc_url)
+
+        if not self.hostname:
+            raise error.HarnessError('Need valid hostname')
+
+        self.initBkrProxy()
+        self.initRecipeFromBeaker()
+        #self.setupInitSymlink()
+        #self.setupAutotestConfFile()
+
+    def bootstrap(self, fetchdir):
+        '''How to kickstart autotest when you have no control file?
+           You download the beaker XML, convert it to a control file
+           and pass it back to autotest. Much like bootstrapping.. :-)
+        '''
+
+        self.initRecipeFromBeaker()
+        job_id = self.recipe_xml.job_id
+        rec_id = self.recipe_xml.id
+
+        #create unique name
+        control_file_name = job_id + '_' + rec_id + '.control'
+        control_file_path = fetchdir + '/' + control_file_name
+
+        log.debug('setting up control file - %s' % control_file_path)
+        control_file = open(control_file_path, 'w')
+        try:
+            #convert recipe xml into control file
+            for task in self.recipe_xml.tasks:
+                self.convert_task_to_control(fetchdir, control_file, task)
+            control_file.close()
+        except Exception, ex:
+            os.remove(control_file_path)
+            raise error.HarnessError('beaker_harness: convert failed with -> %s' % ex)
+
+        #autotest should find this under FETCHDIRTEST because it is unique
+        return control_file_path
+
+    def panic(self, exc):
+        logException(exc)
+        log.critical('got exception, what now? #FIXME')
+
+    def initBkrProxy(self):
+        self.bkr_proxy = BkrProxy(self.labc_url)
+
+    def initRecipeFromBeaker(self):
+        log.debug('Contacting beaker to get task details')
+        bxp = BeakerXMLParser()
+        recipeXML = self.getRecipeFromLC()
+        recipes_dict = bxp.parseXML(recipeXML)
+
+        self.recipe_xml = self.findRecipe(recipes_dict)
+
+    def initTaskParams(self, task):
+        log.debug('PrepareTaskParams')
+        if task == None:
+            raise error.HarnessError('No valid task')
+
+        for (name, value) in task.params.items():
+            log.debug('adding to os.environ: <%s=%s>', name, value)
+            os.environ[name] = value
+
+    def getRecipeFromLC(self):
+        log.debug('trying to get recipe from LC:')
+        try:
+            recipe = self.bkr_proxy.get_recipe(self.hostname)
+        except Exception, exc:
+            raise error.HarnessError('Failed to retrieve xml: %s' % exc)
+        return recipe
+
+    def findRecipe(self, recipes_dict):
+        if self.hostname in recipes_dict:
+            return recipes_dict[self.hostname]
+        raise error.HarnessError('No valid recipe for host %s' % hostname)

^ typo, self.hostname.

+    def getTestName(self, task):
+        return re.sub('-','_', task.rpmName)
+
+    def convert_task_to_control(self, fetchdir, control, task):
+        """Tasks are really just:
+           # yum install $TEST
+           # cd /mnt/tests/$TEST
+           # make run
+
+           Convert that into a control file

Should be 'Convert that into a test module with a control file'.

Also, this is rather ingenious :)
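
For reference, with a hypothetical task named foo-bar installed under /mnt/tests/foo-bar, the conversion below would append job.run_test('foo_bar') (plus a timeout= argument when the task has one) to the control file and generate roughly this module as foo_bar/foo_bar.py in the fetch dir:

import os
from autotest.client import test, utils

class foo_bar(test.test):
    version = 1

    def setup(self):
        # install the beaker task rpm
        utils.system('yum install foo-bar')

    def run_once(self):
        # run from the task directory
        os.chdir('/mnt/tests/foo-bar')
        self.results = utils.system_output('make help', retain_output=True)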

+        """
+        timeout = ''
+        if task.timeout:
+            timeout = ", timeout=%s" % task.timeout
+
+        #python doesn't like '-' in its class names
+        rpm_name = self.getTestName(task)
+
+        #append test name to control file
+        log.debug('adding test %s to control file' % rpm_name)
+        control.write("job.run_test('%s%s')\n" % (rpm_name, timeout))
+
+        #TODO check for git commands in task.params
+
+        #create the test itself
+        log.debug('setting up test %s.py' % (fetchdir + '/' + rpm_name))
+        if not os.path.exists(fetchdir + '/' + rpm_name):
+            os.mkdir(fetchdir + '/' + rpm_name)
+        test = open(fetchdir + '/' + rpm_name + '/' + rpm_name + '.py', 'w')
+        test.write("import os\n")
+        test.write("from autotest.client import test, utils\n\n")
+        test.write("class %s(test.test):\n" % rpm_name)
+        test.write("    version=1\n\n")
+        test.write("    def setup(self):\n")
+        test.write("        utils.system('yum install %s')\n" % task.rpmName)
+        #for param in task.params:
+        #   test.write("utils.system('EXPORT %s=%s')" % (param[name],param[value]))

^ Calling export on a subshell doesn't work. You'd have to use os.environ.
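
A sketch of that (untested), reusing task.params and the open test file handle from the code above, would be to write os.environ assignments straight into the generated test:

for name, value in task.params.items():
    # emit an os.environ assignment into the generated test so the
    # parameters are visible to the task when it runs
    test.write("        os.environ[%r] = %r\n" % (name, value))

repr() keeps the quoting safe if a parameter value contains quotes or spaces.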

+        test.write("    def run_once(self):\n")
+        test.write("        os.chdir('%s')\n" % task.rpmPath)
+        #test.write("        raw_output = utils.system_output('make run', retain_output=True)\n")

^ I wonder why 'make run' itself is commented out here in favour of 'make help'.

+        test.write("        raw_output = utils.system_output('make help', 
retain_output=True)\n")
+        test.write("        self.results = raw_output\n")
+        if rpm_name == '/distribution/reservesys':
+            test.write("        self.job.harness.skip_upload=True\n")
+        test.close()
+
+    def setup(self, job):
+        super(harness_beaker, self).setup(job)
+
+    def task_id(self):
+        '''Cheap hack.  Assumes tasks list is in sync with autotest'''
+        return self.recipe_xml.tasks[0].id
+
+    def run_start(self):
+        """A run within this job is starting"""
+        log.debug('run_start')
+        try:
+            self.start_watchdog(BEAKER_CONSOLE_HEARTBEAT)
+        except Exception, exc:
+            logException(exc)
+
+    def run_pause(self):
+        """A run within this job is completing (expect continue)"""
+        log.debug('run_pause')
+
+    def run_reboot(self):
+        """A run within this job is performing a reboot
+           (expect continue following reboot)
+        """
+        log.debug('run_reboot')
+
+    def run_abort(self):
+        """A run within this job is aborting. It all went wrong"""
+        log.debug('run_abort')
+        self.tearDown()
+
+    def run_complete(self):
+        """A run within this job is completing (all done)"""
+        log.debug('run_complete')
+        self.tearDown()
+
+    def run_test_complete(self):
+        """A test run by this job is complete. Note that if multiple
+        tests are run in parallel, this will only be called when all
+        of the parallel runs complete."""
+        log.debug('run_test_complete')
+
+    def test_status(self, status, tag):
+        """A test within this job is completing"""
+        log.debug('test_status ' + status + ' / ' + tag)
+
+    def test_status_detail(self, code, subdir, operation, status, tag,
+                           optional_fields):
+        """A test within this job is completing (detail)"""
+
+        log.debug('test_status_detail %s / %s / %s / %s / %s / %s',
+                  code, subdir, operation, status, tag, str(optional_fields))
+        #log.info('harness_beaker: results directory: <%s>', self.job.resultdir)
+
+        if not subdir:
+            '''special start/stop of recipe, ignore for now'''
+            return
+
+        '''Because autotest does not tell you when a new test starts nor does
+           an embedded test necessarily mean a new task, create a stack to reflect
+           current task id (embedded tests inside task are given a 0) and compare
+           that with the task id beaker thinks is running (self.task_id)'''
+        if code.startswith('START'):
+            '''Nothing to report, but kick off task'''
+            task_id = 0
+            if self.current_task_id[-1] and self.current_task_id[-1] != self.task_id():
+                '''New task, start it'''
+                task_id = self.task_id()
+                log.debug('Starting task %s', self.task_id())
+                self.bkr_proxy.task_start(self.task_id())
+
+            self.current_task_id.append(task_id)
+            log.debug('Start: adding new task_id -> %s' % task_id)
+            return
+
+        bkr_status = getBeakerCode(code)
+        try:
+            remote_subdir = subdir + strftime("_%a_%d_%b_%Y_%H_%M_%S", gmtime())
+            resultid = self.bkr_proxy.task_result(self.task_id(),
+                                                  bkr_status,
+                                                  remote_subdir,
+                                                  1, '')
+            self.upload_test_results(subdir, resultid)
+        except Exception, exc:
+            log.critical('ERROR: Failed to process test results')
+            logException(exc)
+
+        if code.startswith('END '):
+            '''Clean up on real tasks not embedded ones (ie task id 0)'''
+            if self.current_task_id[-1] == self.task_id():
+                log.debug('Stopping task %s', self.task_id())
+                self.bkr_proxy.task_stop(self.task_id(), 'stop', 'task stop')
+                self.upload_task_files()
+                self.recipe_xml.tasks.pop(0)
+            task_id = self.current_task_id.pop()
+            log.debug('End: removing task_id -> %s' % task_id)
+
+    def tearDown(self):
+        '''called from complete and abort.  clean up and shutdown'''
+        self.recipe_upload_files(self.recipe_xml.id)
+        self.kill_watchdog()
+
+    def start_watchdog(self, heartbeat):
+        log.debug('harness: Starting watchdog process, heartbeat: %d' % heartbeat)
+        try:
+            pid = os.fork()

^ I believe it would be better to register a signal handler here.
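
A sketch of that idea (assuming the heartbeat only needs to log a periodic MARK; SIGALRM-driven, so there is no child process to track and kill later):

import logging as log
import signal
import time

def start_watchdog(heartbeat):
    def _mark(signum, frame):
        # emit the heartbeat and re-arm the alarm for the next interval
        log.info('[-- MARK -- %s]' % time.asctime())
        signal.alarm(heartbeat)

    signal.signal(signal.SIGALRM, _mark)
    signal.alarm(heartbeat)

def kill_watchdog():
    # cancel any pending heartbeat
    signal.alarm(0)

The trade-off is that SIGALRM will interrupt blocking calls in the main process (EINTR), which the fork-based version avoids.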

+            if pid == 0:
+                self.watchdog_loop(heartbeat)
+            else:
+                self.watchdog_pid = pid
+                log.debug('harness: Watchdog process started, pid: %d', self.watchdog_pid)
+        except OSError, e:
+            log.error('harness: fork in start_watchdog failed: %d (%s)\n' % (e.errno, e.strerror))
+
+    def kill_watchdog(self):
+        log.debug('harness: Killing watchdog, pid: %d', self.watchdog_pid)
+        print "type is %s" % type(self.watchdog_pid)
+        utils.nuke_pid(self.watchdog_pid)
+        self.watchdog_pid = None
+
+    def watchdog_loop(self, heartbeat):
+        while True:
+            time.sleep(heartbeat)
+            log.info('[-- MARK -- %s]' % time.asctime(time.localtime(time.time())))
+        sys.exit()

Note that sys is not imported at the top of the file, so this call would fail.

+    def get_processed_tests(self):
+        subdirs = []
+        if os.path.isfile(self.state_file):
+            f = open(self.state_file, 'r')
+            lines = f.readlines()
+            f.close()
+            for line in lines:
+                subdirs.append(line.strip())
+        return subdirs
+
+    def write_processed_tests(self, subdirs):
+        f = open(self.state_file, 'w')
+        for subdir in subdirs:
+            f.write(subdir + '\n')
+        f.close()
+
+    # filter out some of the dirs, because this one is called at start/stop of test
+    def upload_common_results(self):
+        try:
+            log.debug('Uploading common test results')
+            filter_dirs = self.get_processed_tests()
+            filter_dirs.extend(['analysis', 'sysinfo'])
+
+            #log.info('sleeping for 3 seconds')
+            #time.sleep(3)
+
+            self.upload_task_files(filter_dirs)
+        except Exception, exc:
+            log.critical('ERROR: Failed to upload task results')
+            logException(exc)
+
+    def upload_task_files(self, except_dirs):
+        log.debug('Uploading results except dirs: ' + str(except_dirs))
+        task = self.recipe_xml.task[0]
+        path = self.job.resultdir + '/' + self.getTestName(task)
+
+        for root, dirnames, files in sorted(os.walk(path)):
+            for name in files:
+                #strip full path
+                basedir = re.sub(path + "/", "", root)
+                result_file = root + '/' + name
+                self.bkr_proxy.task_upload_file(result_file,
+                                                task.id, name, basedir)
+
+            '''do not upload previously uploaded results files'''
+            for edir in task.excluded_dir:
+                if edir in dirnames:
+                    dirnames.remove(edir)
+
+        '''add test name in future excluded directories for recipe'''
+        self.recipe_xml.excluded_dir.append(self.getTestName(task))
+
+    def upload_test_results(self, subdir, resultid):
+        path = self.job.resultdir + '/' + subdir
+        task = self.recipe_xml.task[0]
+        for root, _, files in sorted(os.walk(path)):
+            for name in files:
+                #strip full path
+                basedir = re.sub(path + "/", "", root)
+                result_file = root + '/' + name
+                self.bkr_proxy.result_upload_file(result_file,
+                                                  resultid, name, basedir)
+
+        '''add subdir in future excluded directories for task'''
+        edir = re.sub(self.getTestName(task) + '/', "", subdir)
+        self.recipe_xml.tasks[0].excluded_dir.append(edir)
+
+
+def getBeakerCode(at_code):
+    bkr_status = 'warn'
+    if at_code == 'END GOOD':
+        bkr_status = 'pass_'

Funny, I wonder why the underscore here.

+    if at_code == 'END WARN':
+        bkr_status = 'warn'
+    if at_code == 'END FAIL':
+        bkr_status = 'fail'
+    if at_code == 'END ERROR':
+        bkr_status = 'fail'
+    if at_code == 'END ABORT':
+        bkr_status = 'fail'

The failure cases could be collapsed ('END WARN' already falls through to the default 'warn'):

if at_code in ['END FAIL', 'END ERROR', 'END ABORT']:
    bkr_status = 'fail'

+    return bkr_status
+
+
+if __name__ == '__main__':
+    pass

