Muehlenhoff has uploaded a new change for review. ( 
https://gerrit.wikimedia.org/r/377239 )

Change subject: Remove salt-based debdeploy code
......................................................................

Remove salt-based debdeploy code

Change-Id: Ibe8d94b4ed14e8498d43125dd7fe7e7a8bce4180
---
M debian/control
D debian/debdeploy-master.dirs
D debian/debdeploy-master.examples
D debian/debdeploy-master.install
D debian/debdeploy-minion.dirs
D debian/debdeploy-minion.install
D examples/debdeploy-minion.conf
D examples/elinks.yaml
D examples/openssl.yaml
D examples/python.yaml
D master/debdeploy
D master/debdeploy_conf.py
D master/debdeploy_joblog.py
D master/debdeploy_updatespec.py
D master/generate-debdeploy-spec
D minion/debdeploy-log.py
D minion/debdeploy-minion.py
D minion/debdeploy_restart.py
D tests/test-jobdb
D tests/test-pkgdb
20 files changed, 0 insertions(+), 1,915 deletions(-)


  git pull ssh://gerrit.wikimedia.org:29418/operations/debs/debdeploy 
refs/changes/39/377239/1

diff --git a/debian/control b/debian/control
index 650abf5..1d731a5 100644
--- a/debian/control
+++ b/debian/control
@@ -5,24 +5,6 @@
 Build-Depends: debhelper
 Standards-Version: 3.9.6
 
-Package: debdeploy-master
-Architecture: all
-Depends: salt-master, sqlite3 (>= 3.6.19), python-yaml, debdeploy-common, 
${shlibs:Depends}, ${misc:Depends}
-Description: central package management (master)
- central package management (package master)
-
-Package: debdeploy-minion
-Architecture: all
-Depends: salt-minion, lsof, python-debian, debdeploy-common, ${misc:Depends}, 
${misc:Depends}
-Description: central package management (minion)
- central package management (minions)
-
-Package: debdeploy-common
-Architecture: all
-Depends: ${shlibs:Depends}, ${misc:Depends}
-Description: central package management (master)
- central package management (common files)
-
 Package: debdeploy-client
 Architecture: all
 Depends: lsof, python-debian, ${misc:Depends}
diff --git a/debian/debdeploy-master.dirs b/debian/debdeploy-master.dirs
deleted file mode 100644
index cae4528..0000000
--- a/debian/debdeploy-master.dirs
+++ /dev/null
@@ -1,7 +0,0 @@
-/var/lib/debdeploy/
-/var/log/debdeploy/
-/usr/lib/debdeploy/
-/usr/lib/python2.7/dist-packages/
-/usr/sbin/
-/usr/bin/
-
diff --git a/debian/debdeploy-master.examples b/debian/debdeploy-master.examples
deleted file mode 100644
index d60465f..0000000
--- a/debian/debdeploy-master.examples
+++ /dev/null
@@ -1,4 +0,0 @@
-examples/debdeploy.conf
-examples/elinks.yaml
-examples/python.yaml
-examples/openssl.yaml
diff --git a/debian/debdeploy-master.install b/debian/debdeploy-master.install
deleted file mode 100644
index 1783969..0000000
--- a/debian/debdeploy-master.install
+++ /dev/null
@@ -1,5 +0,0 @@
-master/debdeploy_joblog.py /usr/lib/python2.7/dist-packages/
-master/debdeploy_updatespec.py /usr/lib/python2.7/dist-packages/
-master/debdeploy_conf.py /usr/lib/python2.7/dist-packages/
-master/debdeploy /usr/sbin
-master/generate-debdeploy-spec /usr/bin
diff --git a/debian/debdeploy-minion.dirs b/debian/debdeploy-minion.dirs
deleted file mode 100644
index d0554d7..0000000
--- a/debian/debdeploy-minion.dirs
+++ /dev/null
@@ -1,5 +0,0 @@
-/var/lib/debdeploy/
-/usr/lib/python2.7/dist-packages/salt/returners/
-/usr/lib/python2.7/dist-packages/salt/modules/
-/usr/lib/debdeploy/
-/var/log/debdeploy/
diff --git a/debian/debdeploy-minion.install b/debian/debdeploy-minion.install
deleted file mode 100644
index e77960b..0000000
--- a/debian/debdeploy-minion.install
+++ /dev/null
@@ -1,4 +0,0 @@
-minion/debdeploy-minion.py /usr/lib/python2.7/dist-packages/salt/modules/
-minion/debdeploy_restart.py /usr/lib/python2.7/dist-packages/salt/modules/
-minion/debdeploy-log.py /usr/lib/python2.7/dist-packages/salt/returners/
-
diff --git a/examples/debdeploy-minion.conf b/examples/debdeploy-minion.conf
deleted file mode 100644
index 4600462..0000000
--- a/examples/debdeploy-minion.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-[blacklist-trusty]
-bind9: libdns81, libisc83
\ No newline at end of file
diff --git a/examples/elinks.yaml b/examples/elinks.yaml
deleted file mode 100644
index 5cb0b16..0000000
--- a/examples/elinks.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-source: elinks
-comment: CVE-2015-0123
-update_type: tool
-fixes:
-        jessie: 0.12~pre6-7
-        trusty: 2.0-1
-        precise:
-
-        
diff --git a/examples/openssl.yaml b/examples/openssl.yaml
deleted file mode 100644
index db75dea..0000000
--- a/examples/openssl.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-source: openssl
-comment: need current crypto
-update_type: library
-fixes:
-        jessie: 1.0.2d-1~wmf1
-        trusty: 
-        precise:
-
-        
diff --git a/examples/python.yaml b/examples/python.yaml
deleted file mode 100644
index 8306521..0000000
--- a/examples/python.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-source: python2.7
-comment: various python security updates
-update_type: library
-fixes:
-        jessie: 2.7.10-3
-        trusty: 2.7.6-8ubuntu0.2
-        precise: 2.7.3-0ubuntu3.8
-
-        
diff --git a/master/debdeploy b/master/debdeploy
deleted file mode 100755
index a3476d1..0000000
--- a/master/debdeploy
+++ /dev/null
@@ -1,551 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-
-import sys, logging, os, datetime, argparse
-
-if os.geteuid() != 0:
-    print "debdeploy needs to be run as root"
-    sys.exit(1)
-
-from debdeploy_conf import *
-
-conf = DebDeployConfig("/etc/debdeploy.conf")
-
-if conf.debug:
-    logging.basicConfig(filename='/var/log/debdeploy/debdeploy.log', 
format='%(levelname)s: %(asctime)s : %(funcName)s : %(message)s', 
level=logging.DEBUG)
-else:
-    logging.basicConfig(filename='/var/log/debdeploy/debdeploy.log', 
format='%(levelname)s: %(asctime)s : %(funcName)s : %(message)s', 
level=logging.INFO)
-
-import salt, pydoc
-from salt.scripts import salt_run
-from debdeploy_joblog import *
-from debdeploy_updatespec import *
-#from debdeploy_pkgdb import *
-
-class logpager:
-    threshold = 20 # if pager buffer contains more than <threshold> lines, use 
the pager
-    def __init__(self):
-        self.buf = ""
-    def add(self, *args):
-        for i in args:
-            self.buf += str(i)
-        self.buf += "\n"
-    def add_nb(self, *args):
-        for i in args:
-            self.buf += str(i)
-    def show(self):
-        if self.buf.count("\n") > self.threshold:
-            pydoc.pager(self.buf)
-        else:
-            print self.buf
-
-
-def display_status(rollback_mode=False):
-    '''
-    Deployment and rollback jobs are started asynchrously by Salt. The 
-    This function displays the result of a previous deployment run.
-
-    If rollback_mode is enabled, the status of a rollback job is displayed 
(mostly the
-    same, but the job ID is looked up in a different database and some status 
messages
-    are different).
-    '''
-
-    runner = 
salt.runner.RunnerClient(salt.config.master_config('/etc/salt/minion'))
-
-    amount_of_hosts = 0
-    add_cnt = {}
-    remove_cnt = {}
-    update_cnt = {}
-    install_errors = []
-    restart_cnt = {}
-    new_restart_cnt = {}
-    jid = ""
-
-    if rollback_mode:
-        if not joblogdb.does_job_exist(opt.updatefile, opt.serverlist):
-            print opt.updatefile, "hasn't been deployed yet for this server 
group (", opt.serverlist, "), so we can't roll it back either."
-            sys.exit(0)
-
-        if not joblogdb.has_been_rolled_back(opt.updatefile, opt.serverlist):
-            print opt.updatefile, "hasn't been rolled-back yet for this server 
group (", opt.serverlist, ")"
-            sys.exit(0)
-    else:
-        # Job was specified by job ID (or relative job ID, e.g. -3 for the 
third-last job)
-        if opt.command_option:
-            if opt.command_option.strip().lstrip("-").isdigit():
-                j = int(opt.command_option)
-                if j > 0:
-                    if joblogdb.does_jobid_exist(opt.command_option):
-                        jid = opt.command_option
-                elif j < 0:
-                    try:
-                        jid = joblogdb.get_jobs(abs(j))[abs(j)-1][2]
-                    except IndexError:
-                        print "Invalid job query"
-                        sys.exit(1)
-                else:
-                    print "Invalid jobid"
-                    sys.exit(1)
-
-        # Job was specified by servergroup and update file
-        else:
-            if not joblogdb.does_job_exist(opt.updatefile, opt.serverlist):
-                print opt.updatefile, "hasn't been deployed yet for this 
server group (", opt.serverlist, ")"
-                sys.exit(1)
-
-            if rollback_mode:
-                jid = joblogdb.get_rollbackid(opt.updatefile, opt.serverlist)
-            else:
-                jid = joblogdb.get_jobid(opt.updatefile, opt.serverlist)
-
-    # SaltRunner always emits its status on stdout, 
https://github.com/saltstack/salt/issues/21392
-    # Can be dropped once fixed upstream
-    with open('/dev/null', 'w') as discard_output:
-        oldstdout = sys.stdout
-        sys.stdout = discard_output
-        job = runner.cmd('jobs.print_job', (jid,))
-        sys.stdout = oldstdout
-    if not job:
-        print "Job " + str(jid) + " hasn't finished yet, please check again 
later"
-        sys.exit(0)
-
-    res = job[jid]['Result']
-    if not res:
-        print "Job hasn't finished yet, please check again later"
-        sys.exit(0)
-
-    amount_of_hosts += len(res.keys())
-    p = logpager()
-
-    for host in res:
-
-        # Error message returned by the minion, e.g. if a minion hasn't 
correctly setup the debdeploy module
-        if type(res[host]['return']) is str:
-            install_errors.append(host + ": " + res[host]['return'])
-            continue
-
-        added = False
-        if res[host]['return'].has_key('additions'):
-            added = res[host]['return']['additions']
-        removed = False
-        if res[host]['return'].has_key('removals'):
-            removed = res[host]['return']['removals']
-        updated = False
-        if res[host]['return'].has_key('updated'):
-            updated = res[host]['return']['updated']
-        restart = False
-        if res[host]['return'].has_key('restart'):
-            restart = res[host]['return']['restart']
-        new_restart = False
-        if res[host]['return'].has_key('new_restart'):
-            new_restart = res[host]['return']['new_restart']
-
-        p.add(host, ":")
-        if added:
-            p.add("  Added packages:", host['return']['additions'])
-            for added_pkg in host['return']['additions']:
-                if not add_cnt.get(added_pkg, None):
-                    add_cnt[added_pkg] = 1
-                else:
-                    add_cnt[added_pkg] += 1
-
-        elif removed:
-            p.add("  Removed packages:", host['return']['removals'])
-            for removed_pkg in host['return']['removals']:
-                if not remove_cnt.get(removed_pkg, None):
-                    remove_cnt[removed_pkg] = 1
-                else:
-                    remove_cnt[removed_pkg] += 1
-
-        elif updated and len(updated.keys()) > 0:
-            p.add("  Updated packages:")
-            for k in updated:
-                p.add("    " + k + ": " + updated[k][0] + " -> " + 
updated[k][1])
-
-                updated_idx = k + ": " + updated[k][0] + " -> " + updated[k][1]
-                if not update_cnt.get(updated_idx, None):
-                    update_cnt[updated_idx] = 1
-                else:
-                    update_cnt[updated_idx] += 1
-
-        else:
-            p.add("  No change")
-
-
-        if restart:
-            for process in restart:
-                if len(process) > 0:
-                    if not restart_cnt.get(process, None):
-                        restart_cnt[process] = []
-                        restart_cnt[process].append(host)
-                    else:
-                        restart_cnt[process].append(host)
-
-        if new_restart:
-            for process in new_restart:
-                if len(process) > 0:
-                    if not new_restart_cnt.get(process, None):
-                        new_restart_cnt[process] = []
-                        new_restart_cnt[process].append(host)
-                    else:
-                        new_restart_cnt[process].append(host)
-
-        # Detect installation errors and collect them to display an error list
-        # across all Salt grains
-        # apt doesn't provide more fine-grained error reporting other than
-        # 0/success and 100/error, so we need to parse stderr for some common
-        # error patterns
-        if res[host]['return'].has_key('aptreturn') and 
res[host]['return']['aptreturn'] == 100:
-            if res[host]['return']['apterrlog'].find("Could not get lock 
/var/lib/dpkg/lock") != -1: # older dpkg
-                install_errors.append(host + ": The dpkg status database is 
locked, possibly a conflicting package installation")
-            elif res[host]['return']['apterrlog'].find("dpkg status database 
is locked by another process") != -1: # contemporary dpkg
-                install_errors.append(host + ": The dpkg status database is 
locked, possibly a conflicting package installation")
-            elif res[host]['return']['apterrlog'].find("was not found") != -1:
-                install_errors.append(host + ": The version to be installed 
could not be found. It might have been superceded by a more recent version or 
the apt source is incomplete")
-            else:
-                install_errors.append(host + ": Unknown installation error")
-
-        if opt.verbose:
-            p.add("")
-            if res[host]['return'].has_key('aptlog'):
-                indented = ['  ' + l for l in 
res[host]['return']['aptlog'].splitlines()]
-                p.add("\n".join(indented))
-
-    p.add("") 
-    p.add("") 
-    if rollback_mode:
-        p.add("Rollback summary:")
-        p.add("Number of hosts in this rollback run:", amount_of_hosts)
-    else:
-        p.add("Deployment summary:")
-        p.add("Number of hosts in this deployment run:", amount_of_hosts)
-
-
-    if len(add_cnt.keys()) == 0:
-        p.add("No packages were added")
-    else:
-        p.add("Added packages:")
-        for pkg in add_cnt:
-            p.add(pkg, "on", add_cnt[pkg], "hosts")
-
-    if len(remove_cnt.keys()) == 0:
-        p.add("No packages were removed")
-    else:
-        p.add("Removed packages:")
-        for pkg in remove_cnt:
-            p.add(pkg, "on", remove_cnt[pkg], "hosts")
-
-    if len(update_cnt.keys()) == 0:
-        p.add("No packages were updated")
-    else:
-        p.add("Updated packages:")
-        for pkg in sorted(update_cnt.keys()):
-            p.add(pkg, "on", update_cnt[pkg], "hosts")
-
-    p.add("")
-
-    if len(restart_cnt.keys()) == 0:
-        p.add("No restarts are needed")
-    else:
-        p.add("Restarts needed:")
-        for process in sorted(restart_cnt.keys()):
-            p.add(process, "on", len(restart_cnt[process]), "hosts:")
-            p.add_nb("  ")
-            for i in restart_cnt[process]:
-                p.add_nb(i)
-            p.add("")
-        p.add("")
-        p.add("New restarts needed:")
-        for process in sorted(new_restart_cnt.keys()):
-            p.add(process, "on", len(new_restart_cnt[process]), "hosts:")
-            p.add_nb("  ")
-            for i in new_restart_cnt[process]:
-                p.add_nb(i)
-            p.add("")
-
-    p.add("")
-    p.add("Error summary:")
-
-    if len(install_errors) == 0:
-        p.add("No errors found")
-    else:
-        for i in install_errors:
-            p.add(i)
-    p.show()
-
-def deploy_update(source, update_type, grains, update_file, servergroup):
-    '''
-    Initiate a deployment. The job is processed asynchrously by Salt.
-
-    source      : Name of the source package (string)
-    update_type : Various types of packages have different outcome, see 
doc/readme.txt (string)
-    grains      : Apply the update on this list of Salt grains (list of 
strings)
-    update_file : Filename of update specification (string)
-    servergroup : The name of the server group (string)
-    '''
-
-    update_desc = {}
-    update_desc["tool"] = "Non-daemon update, no service restart needed"
-    update_desc["daemon-direct"] = "Daemon update without user impact"
-    update_desc["daemon-disrupt"] = "Daemon update with service availability 
impact"
-    update_desc["library"] = "Library update, several services might need to 
be restarted"
-
-    print "Rolling out", source, ":",
-    print update_desc[update_type]
-
-    target = ""
-    if update_type in ["daemon-cluster", "reboot", "reboot-cluster"]:
-        print "Not implemented yet"
-        sys.exit(1)
-
-    if joblogdb.has_been_rolled_back(update_file, servergroup):
-        print update_file, "was already deployed and rolled back for this 
server group, if you want to redeploy you need to assign a different name"
-        sys.exit(1)
-
-    if joblogdb.does_job_exist(update_file, servergroup):
-        print update_file, "was already deployed for this server group (", 
servergroup, "), if you want to rollback the change use the <rollback> command."
-        sys.exit(1)
-
-    for i in grains:
-        target += 'G@' + i + ' or '
-
-    target = target[:-4]
-    jid = client.cmd_async(target, 'debdeploy-minion.deploy', [source, 
update_type, update.fixes], expr_form='compound', ret='debdeploy-log')
-    logging.info("Initiated rollout of spec file " + update_file + " (source 
package " + source + ") on grain compound " + target + " (salt job id: " + 
str(jid) + ")")
-    joblogdb.add_job(update_file, servergroup, jid)
-
-
-def list_jobs():
-    for i in joblogdb.get_jobs():
-        print i[2], ":", i[0], "applied on", i[1]
-
-def restart(grains, processes):
-    '''
-    Trigger process restarts for a set of servers
-
-    grains      : Apply the update on this list of Salt grains (list of 
strings)
-    processes   : A list of processes to restart (list of strings)
-    '''
-
-    c = 0
-    c_success = {}
-    c_failed  = {}
-    c_stopped = {}
-
-    print "Restarting services. Use --verbose to also display non-failing 
restarts."
-    logging.info("Initiated restart of process(es) " + str(processes) + " on 
grain compound " + str(grains))
-
-    for i in grains:
-        r = client.cmd(i, 'debdeploy-minion.restart_service', [processes], 
expr_form='grain')
-        for host in r:
-            c += 1
-            if opt.verbose:
-                print host + ":"
-            for process in r[host]:
-                c_success.setdefault(process, 0)
-                c_failed.setdefault(process, 0)
-                c_stopped.setdefault(process, 0)
-
-                if r[host][process] == 0:
-                    c_success[process] += 1
-                    if opt.verbose:
-                        print "  ", process, "successfully restarted"
-                elif r[host][process] == 1:
-                    c_failed[process] += 1
-                    if opt.verbose:
-                        print "  ", process, "failed to restart on", host
-                elif r[host][process] == 2:
-                    c_stopped[process] += 1
-                    if opt.verbose:
-                        print "  ", process, "wasn't running on", host, ", not 
restarted"
-                elif r[host][process] == 3:
-                    c_failed[process] += 1
-                    if opt.verbose:
-                        print " Restart handler", process, "could not be found 
on", host
-
-    if opt.verbose:
-        print
-    print "Restart summary:"
-    for proc in c_success:
-        print proc, "successfully restarted on", c_success[proc], "out of", c, 
"hosts."
-        logging.info(proc + " successfully restarted on " + 
str(c_success[proc]) + " out of " +  str(c) + " hosts.")
-
-    for proc in c_failed:
-        if c_failed[proc] > 0:
-            print proc, "failed to restart on", c_failed[proc], "out of", c, 
"hosts."
-            logging.info(proc + " failed to restart on " + str(c_failed[proc]) 
+ " out of " +  str(c) + " hosts.")
-
-    for proc in c_stopped:
-        if c_stopped[proc] > 0:
-            print proc, "wasn't running on on", c_stopped[proc], "out of", c, 
"hosts."
-            logging.info(proc + " wasn't running on on " + 
str(c_stopped[proc]) + " out of " +  str(c) + " hosts.")
-
-
-def list_server_groups(update=None):
-    '''
-    List all available server groups
-
-    update      : If an update is specified, it is displayed whether an update
-                  has already been deployed for that group.
-    '''
-
-    p = logpager()
-
-    if not update:
-        for servergroup in sorted(conf.server_groups):
-            p.add(servergroup)
-    else:
-        done_list = []
-        missing_list = []
-
-        for servergroup in conf.server_groups:
-            job_ran = False
-            if joblogdb.does_job_exist(update, servergroup):
-                job_ran = True
-            if job_ran:
-                done_list.append(servergroup)
-            else:
-                missing_list.append(servergroup)
-
-        p.add(update, "has been applied to these server groups:")
-        for i in sorted(done_list):
-            p.add(i)
-
-        p.add("")
-        p.add(update, "hasn't been applied to these server groups:")
-        for i in sorted(missing_list):
-            p.add(i)
-    p.show()
-
-def list_server_group_members(server_group):
-    '''
-    Show all servers represented by a server group
-
-    server_group        : The name of a server group (as defined in 
/etc/debdeploy.conf)
-    '''
-
-    p = logpager()
-    p.add("Server group", server_group, ":")
-    members = client.cmd('*', 'match.grain', conf.server_groups[server_group])
-    for i in sorted(members):
-        p.add(i)
-
-def rollback(servergroup, update_file):
-    '''
-    Trigger process restarts for a set of servers
-
-    servergroup      : Rollback an update on this server group (string)
-    update_file : Filename of update specification (string)
-    '''
-
-    if not joblogdb.does_job_exist(update_file, servergroup):
-        print "No software deployment for " + update_file + " exists."
-        sys.exit(1)
-    
-    jid = ""
-    if opt.jobid:
-        jid = opt.jobid
-    else:
-        jid = joblogdb.get_jobid(update_file, servergroup)
-    print "Initiated rollback for JID", jid
-    logging.info("Initiated rollback for JID" + jid)
-
-    target = ""
-    for i in conf.server_groups[servergroup]:
-        target += 'G@' + i + ' or '
-    target = target[:-4]
-
-    rid = client.cmd_async(target, 'debdeploy-minion.rollback', [jid], 
expr_form='compound')
-
-    joblogdb.mark_as_rolled_back(jid, rid)
-
-client = salt.client.LocalClient()
-joblogdb = DebDeployJobLog("/var/lib/debdeploy/jobdb.sqlite")
-#pkgdb = DebDeployPkgDB("/var/lib/debdeploy/pkgdb.sqlite")
-
-p = argparse.ArgumentParser(usage="debdeploy-master [options] command 
<cmd-option>\n \
-   The following commands are supported: \n\n \
-   deploy                     : Install a software update, requires --update 
and --servers \n \
-   status-deploy              : Query the status of a software deployment, 
requires --update and --servers\n \
-   restart                    : Restart a service on all servers of a server 
group \n \
-   rollback                   : Rollback a software deployment\n \
-   status-rollback            : Query the status of a software deployment 
rollback\n \
-   list-server-groups         : Display a list of all defined server groups. 
If an update file is listed \n \
-                                in addition, it shows whether an update has 
been applied for that group. \n \
-   list-server-group-members  : Show all servers represented by a server group 
\n \
-   check-missing              : Display a list of all servers which don't have 
an update installed \n \
-   pkgdb-source               : Re-read the package status of a given host 
into the package database")
-
-p.add_argument("-u", "--update", action="store", type=str, dest="updatefile", 
help="A YAML file containing the update specification (which source package to 
update and the respective fixed versions")
-p.add_argument("-s", "--servers", action="store", type=str, dest="serverlist", 
help="The group of servers on which the update should be applied")
-p.add_argument("-j", "--jobid", action="store", type=str, dest="jobid", 
help="Some commands (e.g. rollout require) require a specific job ID in case 
multiple invocations of an update have been made")
-p.add_argument("--host", action="store", type=str, dest="host", help="Some 
commands (e.g. pkgdb-source) a specific hostname")
-p.add_argument("--verbose", action="store_true", dest="verbose", help="Enable 
verbose output, e.g. show full apt output in status-deploy and status-rollback")
-p.add_argument("-p", "--program", action="append", type=str, dest="program", 
help="The program(s) to restart on a server group")
-
-p.add_argument("command")
-p.add_argument("command_option", nargs="?", default="unset")
-
-opt = p.parse_args()
-
-if opt.command in ("deploy", "rollback", "restart", 
"list-server-group-members"):
-    if not opt.serverlist:
-        p.error("You need to provide a server list (-s)")
-    if opt.serverlist not in conf.server_groups.keys():
-        p.error("Invalid server group. It needs to be defined in 
/etc/debdeploy.conf")
-
-if opt.command in ("status-deploy", "status-rollback"):
-    if not opt.command_option and not opt.serverlist:
-        p.error("You need to provide a server list (-s)")
-        if opt.serverlist not in conf.server_groups.keys():
-            p.error("Invalid server group. It needs to be defined in 
/etc/debdeploy.conf")
-
-if opt.command in ("deploy", "rollback"):
-    if not opt.updatefile:
-        p.error("You need to provide an update file (-u)")
-
-if opt.command in ("status-deploy", "status-rollback"):
-    if not opt.command_option and not opt.updatefile:
-        p.error("You need to provide an update file (-u)")
-
-if opt.command in ("restart"):
-    if not opt.program:
-        p.error("You need to provide a program to restart (-p)")
-
-if opt.command in ("pkgdb-source"):
-    if not opt.host:
-        p.error("You need to provide a hostname (-h)")
-
-if opt.command == "deploy":
-    update = DebDeployUpdateSpec(opt.updatefile, conf.supported_distros)
-    deploy_update(update.source, update.update_type, 
conf.server_groups[opt.serverlist], opt.updatefile, opt.serverlist)
-
-elif opt.command == "restart":
-    restart(conf.server_groups[opt.serverlist], opt.program)
-
-elif opt.command == "status-deploy":
-    display_status()
-
-elif opt.command == "list-jobs":
-    list_jobs()
-
-elif opt.command == "status-rollback":
-    display_status(rollback_mode=True)
-
-elif opt.command == "rollback":
-    rollback(opt.serverlist, opt.updatefile)
-
-elif opt.command == "list-server-group-members":
-    list_server_group_members(opt.serverlist)
-
-elif opt.command == "list-server-groups":
-    if opt.updatefile:
-        list_server_groups(opt.updatefile)
-    else:
-        list_server_groups()
-
-sys.exit(0)
-
-# Local variables:
-# mode: python
-# End:
diff --git a/master/debdeploy_conf.py b/master/debdeploy_conf.py
deleted file mode 100644
index c421dbd..0000000
--- a/master/debdeploy_conf.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import ConfigParser, sys
-
-class DebDeployConfig(object):
-    '''
-    Class to read/provide the system-wide configuration of the debdeploy 
master component.
-    It contains the following variables:
-
-    supported_distros: List of strings of supported distros (per Debian/Ubuntu 
codename)
-    server_groups: Dictionary of Salt grains which define a group of servers. 
A server group
-                   can be defined by multiple Salt grains.
-    '''
-    supported_distros = []
-    server_groups = {}
-    debug = False
-
-    def __init__(self, configfile):
-        config = ConfigParser.ConfigParser()
-        if len(config.read(configfile)) == 0:
-            print "/etc/debdeploy.conf doesn't exist, you need to create it."
-            print "See /usr/share/doc/debdeploy-master/examples/debdeploy.conf"
-            sys.exit(1)
-
-        if not config.has_section("distros") or not 
config.has_option("distros", "supported"):
-            print "Could not read list of supported distributions, make sure", 
configfile, "contains a section [distros] and an option 'supported'"
-            sys.exit(1)
-
-        self.supported_distros = [x.strip() for x in config.get("distros", 
"supported").split(",")]
-
-        if len(self.supported_distros) < 1:
-            print "You need to specify at least one supported distribution in 
/etc/debdeploy.conf"
-            sys.exit(1)
-
-        if config.has_section("logging") and config.has_option("logging", 
"debug"):
-            if config.getboolean("logging", "debug"):
-                self.debug = True
-
-        if not config.has_section("serverlists"):
-            print "Warning: No serverlists are defined, but that means that 
only the implicit group 'all' is available."
-        else:
-            if len(config.options("serverlists")) == 0:
-                print "Warning: No serverlists are defined, but that means 
that only the implicit group 'all' is available."
-            else:
-                for i in config.options("serverlists"):
-                    if len(config.get("serverlists", i)) > 0:
-                        self.server_groups[i] = [x.strip() for x in 
config.get("serverlists", i).split(",")]
-                    else:
-                        print "Malformed server list, at least one grain must 
be specified for the server group", i
-                        sys.exit(1)
-
-# Local variables:
-# mode: python
-# End:
diff --git a/master/debdeploy_joblog.py b/master/debdeploy_joblog.py
deleted file mode 100644
index e5c3dab..0000000
--- a/master/debdeploy_joblog.py
+++ /dev/null
@@ -1,161 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import sqlite3, os
-
-class DebDeployJobLog(object):
-    '''
-    This class manages software deployment jobs. The jobs are executed 
asynchronously
-    by Salt and a Sqlite3 database is used to store which jobs have been 
issued/rolled
-    back.
-    '''
-
-    sqlite_dbfilename = ''
-
-    def __init__(self, dbfilename):
-        self.sqlite_dbfilename = dbfilename
-
-        if not os.path.exists(self.sqlite_dbfilename):
-            conn = sqlite3.connect(self.sqlite_dbfilename)
-
-            with conn:
-                conn.execute('CREATE TABLE updates (updatespec text, 
servergroup text, jobid text, rollbackid text)')
-
-    def add_job(self, yamlfile, servergroup, jid):
-        '''
-        This function records a software deployment in the job database.
-
-        yamlfile = name of transaction file (string)
-        servergroup = group of servers (specified in debdeploy.conf) (string)
-        '''
-        conn = sqlite3.connect(self.sqlite_dbfilename)
-
-        v = (str(yamlfile), str(servergroup), str(jid), "")
-        with conn:
-            conn.execute('INSERT INTO updates VALUES (?, ?, ?, ?)', v)
-
-    def does_job_exist(self, yamlfile, servergroup):
-        '''
-        This boolean function returns whether a software update has been 
deployed yet.
-
-        yamlfile = name of transaction file (string)
-        servergroup = group of servers (specified in debdeploy.conf) (string)
-        '''
-        conn = sqlite3.connect(self.sqlite_dbfilename)
-        with conn:
-            r = conn.execute("SELECT * FROM updates WHERE updatespec=? AND 
servergroup=?", (yamlfile,servergroup,)).fetchall()
-            if len(r) > 0:
-                return True
-            else:
-                return False
-
-    def does_jobid_exist(self, jobid):
-        '''
-        This boolean function returns whether a job ID exists.
-
-        jobid = job ID in debdeploy job database (string)
-        '''
-        conn = sqlite3.connect(self.sqlite_dbfilename)
-        with conn:
-            r = conn.execute("SELECT * FROM updates WHERE jobid=?", 
(jobid,)).fetchall()
-            if len(r) > 0:
-                return True
-            else:
-                return False
-
-    def has_been_rolled_back(self, yamlfile, servergroup):
-        '''
-        This boolean function returns whether a deployed software update has 
been rolled back.
-
-        yamlfile = name of transaction file (string)
-        servergroup = group of servers (specified in debdeploy.conf) (string)
-        '''
-        conn = sqlite3.connect(self.sqlite_dbfilename)
-        with conn:
-            r = conn.execute("SELECT * FROM updates WHERE updatespec=? and 
servergroup=? and rollbackid !=''", (yamlfile,servergroup,)).fetchall()
-            if len(r) > 0:
-                return True
-            else:
-                return False
-
-
-    def get_jobs(self, jobrange=0):
-        '''
-        This function returns a list all jobs.
-
-        jobrange = returns only the last x commands (can be specified as a 
positive or negative number). If 0 is
-                   passed or if left out, all jobs are returned
-        '''
-        conn = sqlite3.connect(self.sqlite_dbfilename)
-
-        if jobrange == 0:
-            with conn:
-                r = conn.execute("SELECT * FROM updates").fetchall()
-
-                if not r:
-                    return None
-
-                return r
-        else:
-            with conn:
-                r = conn.execute("SELECT * FROM updates order by jobid desc 
limit ?", (abs(jobrange),)).fetchall()
-                if not r:
-                    return None
-
-                return r
-
-    def get_jobid(self, yamlfile, servergroup):
-        '''
-        This function returns the ID of a deployed software update. Returns 
None for
-        invalid updatefile/server group.
-
-        yamlfile = name of transaction file (string)
-        servergroup = group of servers (specified in debdeploy.conf) (string)
-        '''
-        conn = sqlite3.connect(self.sqlite_dbfilename)
-
-        with conn:
-            r = conn.execute("SELECT jobid FROM updates WHERE updatespec=? and 
servergroup=?", (yamlfile,servergroup,)).fetchall()
-
-            if not r:
-                return None
-
-            if len(r) > 1:
-                raise ValueError, "Multiple jobs found for update " + yamlfile 
+ " on server group " + servergroup
-            else:
-                return r[0][0]
-
-    def get_rollbackid(self, yamlfile, servergroup):
-        '''
-        This function returns the ID of a rollback transaction. Returns None 
for
-        invalid update/server group and an empty string for updates which 
haven't been
-        rolled back yet.
-
-        yamlfile = name of transaction file (string)
-        servergroup = group of servers (specified in debdeploy.conf) (string)
-        '''
-        conn = sqlite3.connect(self.sqlite_dbfilename)
-
-        with conn:
-            r = conn.execute("SELECT rollbackid FROM updates WHERE 
updatespec=? and servergroup=?", (yamlfile,servergroup,)).fetchall()
-            if not r:
-                return None
-            else:
-                return r[0][0]
-
-    def mark_as_rolled_back(self, jid, rid):
-        '''
-        This function records that a rollback has been issued via Salt.
-
-        jid = The Salt job ID of the originally deployed update
-        rid = The Salt job ID of the rollback
-        '''
-        conn = sqlite3.connect(self.sqlite_dbfilename)
-
-        with conn:
-            if not conn.execute("SELECT * FROM updates WHERE jobid=?", 
(jid,)).fetchall():
-                raise ValueError, "Invalid job ID, doesn't exist in database"
-            conn.execute("UPDATE updates SET rollbackid=? WHERE jobid=?", 
(rid, jid,))
-
-# Local variables:
-# mode: python
-# End:
diff --git a/master/debdeploy_updatespec.py b/master/debdeploy_updatespec.py
deleted file mode 100644
index 68da08e..0000000
--- a/master/debdeploy_updatespec.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import salt.client
-import yaml
-import sys
-
-class DebDeployUpdateSpec(object):
-    '''
-    Each update is described in a YAML file, see docs/readme.txt for the data
-    format.
-    '''
-
-    source = ""
-    comment = ""
-    update_type = ""
-    fixes = {}
-    legit_type = ['tool', 'daemon-direct', 'daemon-disrupt', 'daemon-cluster', 
'reboot', 'reboot-cluster', 'library']
-
-    def __init__(self, updatespec, supported_distros):
-        '''
-        Parse an update spec file.
-
-        updatespec        : Filename of the update spec file (string)
-        supported_distros : These are the distro codenames for which a fixed 
version can be provided (list of strings)
-        '''
-
-        try:
-            with open(updatespec, "r") as stream:
-                updatefile = yaml.load(stream)
-
-        except IOError:
-            print "Error: Could not open", updatespec
-            sys.exit(1)
-
-        except yaml.scanner.ScannerError, e:
-            print "Invalid YAML file:"
-            print e
-            sys.exit(1)
-
-        if not updatefile.has_key("source"):
-            print "Invalid YAML file, you need to specify the source package 
using the 'source' stanza, see the annotated example file for details"
-            sys.exit(1)
-        else:
-            self.source = updatefile["source"]
-
-        if not updatefile.has_key("update_type"):
-            print "Invalid YAML file, you need to specify the type of update 
using the 'update_type' stanza, see the annotated example file for details"
-            sys.exit(1)
-        else:
-            if updatefile["update_type"] not in self.legit_type:
-                print "Invalid YAML file, invalid 'update_type'"
-                sys.exit(1)
-            self.update_type = updatefile["update_type"]
-
-        if updatefile.has_key("comment"):
-            self.comment = updatefile["comment"]
-
-        if not updatefile.has_key("fixes"):
-            print "Invalid YAML file, you need to specify at least one fixed 
version using the 'fixes' stanza, see the annotated example file for details"
-            sys.exit(1)
-        else:
-            for i in updatefile["fixes"]:
-                if supported_distros.count(i) >= 1:
-                    self.fixes[i] = updatefile["fixes"].get(i)
-                else:
-                    print "Invalid YAML file,", i, "is not a supported 
distribution. You need to activate it in /etc/debdeploy.conf"
-                    sys.exit(1)
-
-# Local variables:
-# mode: python
-# End:
diff --git a/master/generate-debdeploy-spec b/master/generate-debdeploy-spec
deleted file mode 100755
index e18dd33..0000000
--- a/master/generate-debdeploy-spec
+++ /dev/null
@@ -1,92 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-
-import sys, optparse, logging, os, datetime
-from debdeploy_conf import *
-
-conf = DebDeployConfig("/etc/debdeploy.conf")
-
-source = ""
-comment = ""
-updatetype = ""
-fixes = {}
-
-while source == "":
-    source = raw_input("Please enter the name of source package (e.g. 
openssl). Leave blank or type 'quit' to abort\n>").strip()
-    if source == "" or source == "quit":
-        print "Aborting"
-        sys.exit(1)
-
-comment = raw_input('You can enter an optional comment, e.g. a reference to a 
security advisory or a CVE ID mapping\n>').strip()
-if comment == "quit":
-    print "Aborting"
-    sys.exit(1)
-
-while updatetype not in ['tool', 'daemon-direct', 'daemon-disrupt', 'reboot', 
'library', 'quit']:
-    print "tool           -> The updated packages is an enduser tool, can be"
-    print "                  rolled-out immediately."
-    print "daemon-direct  -> Daemons which are restarted during update, but 
which"
-    print "                  do no affect existing users."
-    print "daemon-disrupt -> Daemons which are restarted during update, where 
the"
-    print "                  users notice an impact. The update procedure is 
almost"
-    print "                  identical, but displays additional warnings"
-    print "reboot         -> Lowlevel component which requires a system reboot"
-    print "                  kernel/glibc/dbus). After installing the update, 
the"
-    print "                  system reboot(s) can be managed subsequently"
-    print "library        -> After a library is updated, programs may need to 
be"
-    print "                  restarted to fully effect the change. In addition"
-    print "                  to libs, some applications may also fall under 
this rule,"
-    print "                  e.g. when updating QEMU, you might need to 
restart VMs."
-    updatetype = raw_input("Please enter the update type:\n>").strip()
-    if source == "" or source == "quit":
-        print "Aborting"
-        sys.exit(1)
-
-for i in conf.supported_distros:
-    fixes[i] = raw_input("Please enter the version which fixed in in " + i + 
". Leave blank if no fix is available/required for a given distro.\n>").strip()
-
-at_least_one_fixed_version = False
-for i in fixes.values():
-    if i != "":
-        at_least_one_fixed_version = True
-
-if not at_least_one_fixed_version:
-    print "At least one fixed version needs to be configured, aborting"
-    sys.exit(1)
-
-
-valid_name = False
-suggested_name = datetime.datetime.now().strftime("%Y-%m-%d-") + source + 
".yaml"
-while not valid_name:
-    print "Please enter a name under which the YAML file should be created"
-    print "Leave blank to use ", suggested_name
-    yamlfilename = raw_input('>').strip()
-    if not yamlfilename:
-        yamlfilename = suggested_name
-    if os.path.exists(yamlfilename):
-        print "File name already exists, please re-enter."
-    else:
-        valid_name = True
-
-try:
-    with open(yamlfilename, "w") as yamlfile:
-        yamltext = 'source: ' + source + '\n' \
-                   'comment: ' + comment + '\n' \
-                   'update_type: ' + updatetype + '\n' \
-                   'fixes:\n'
-        yamlfile.write(yamltext)
-        for i in fixes:
-            yamlfile.write("        " + i + ": " + fixes[i] + "\n")
-except IOError, e:
-    print "Error:", e
-    sys.exit(1)
-
-
-# Local variables:
-# mode: python
-# End:
-
-
-
-
-
diff --git a/minion/debdeploy-log.py b/minion/debdeploy-log.py
deleted file mode 100644
index 48bd1d7..0000000
--- a/minion/debdeploy-log.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-This returner logs the package status update to /var/log/debdeploy.log
-'''
-
-import datetime
-
-def returner(ret):
-    with open("/var/log/debdeploy.log", "a") as log:
-        if ret['return'].has_key('aptlog'):
-            indented = [datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S ") 
+ l for l in ret['return']['aptlog'].splitlines()]
-            log.write("\n".join(indented))
-
-# Local variables:
-# mode: python
-# End:
diff --git a/minion/debdeploy-minion.py b/minion/debdeploy-minion.py
deleted file mode 100644
index 6a164f7..0000000
--- a/minion/debdeploy-minion.py
+++ /dev/null
@@ -1,384 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-Module for deploying DEB packages on wide scale
-'''
-
-import logging, pickle, subprocess, os, re
-import logging.handlers
-
-import ConfigParser
-import salt.utils
-import salt.config
-import salt.loader
-import salt.modules.aptpkg
-#from salt.modules import aptpkg
-from debian import deb822
-
-from salt.modules.debdeploy_restart import Checkrestart
-from salt.exceptions import (
-    CommandExecutionError, MinionError, SaltInvocationError
-)
-
-log = logging.getLogger(__name__)
-
-__opts__ = salt.config.minion_config('/etc/salt/minion')
-grains = salt.loader.grains(__opts__)
-
-def list_pkgs():
-    '''
-    This Salt function returns a dictionary of installed Debian packages and 
their 
-    respective installed version (keyed by the package name).
-
-    It is mostly used by other debdeploy Salt modules to determine whether 
packages
-    were updated, installed or removed.
-    '''
-
-    cmd = 'dpkg-query --showformat=\'${Status} ${Package} ' \
-          '${Version} ${Architecture}\n\' -W'
-    pkgs = {}
-
-    out = __salt__['cmd.run_stdout'](cmd, output_loglevel='debug')
-    for line in out.splitlines():
-        cols = line.split()
-        try:
-            linetype, status, name, version_num, arch = \
-                [cols[x] for x in (0, 2, 3, 4, 5)]
-        except ValueError:
-            continue
-        if __grains__.get('cpuarch', '') == 'x86_64':
-            osarch = __grains__.get('osarch', '')
-            if arch != 'all' and osarch == 'amd64' and osarch != arch:
-                name += ':{0}'.format(arch)
-        if len(cols):
-            if ('install' in linetype or 'hold' in linetype) and \
-                    'installed' in status:
-                pkgs[name] = version_num
-
-    return pkgs
-
-
-def restart_service(programs):
-    '''
-    This Salt function restarts services based on the process name. It is 
-    checked whether the process is running at all (the command might be
-    applied to a set of hosts of which not all systems actually have the
-    daemon running).
-
-    The restart behaviour is based on heuristics:
-    - Processes started through systemd are determined by control groups
-    - Upstart jobs are required from "initctl list"
-    - If a process was started neither through upstart nor systemd and
-      /etc/init.d/processname exists, the sysvinit script is restarted
-
-    In addition, special restart handlers can be started. Restart handlers
-    can implement advanced restart actions, e.g. suspend-resume-restart
-    KVM instances after a QEMU update. Restart handlers are either shipped
-    by other Debian packages and created locally as executable scripts at
-    /usr/lib/debdeploy/PACKAGE.restart. The restart scripts should return 
-    "0" in case of a successful restart and "1" in case of an error. 
-    Restart handlers are executed as "restarthandler.NAME".
-
-    Returns a dictionary of integers indicating the restart success (keyed by 
processes/handlers):
-    0 = Success
-    1 = Failed to restart
-    2 = Process wasn't running before
-    3 = Could not find restart handler
-    '''
-
-    # Known exceptions which break the rule of the daemon process being 
different from the
-    # base name of the sysvinit script. Only relevant for sysvinit, which 
fortunately is fading out
-    servicemap = {}
-    servicemap['ntpd'] = 'ntp'
-
-    results = {}
-    for program in programs:
-
-        if program.startswith("restarthandler."):
-            handler = os.path.join("/usr/lib/debdeploy/",  
program.split(".")[1] + ".restart")
-            if not os.path.exists(handler):
-                results[program] = 3
-            else:
-                try:
-                    if subprocess.check_call(handler) == 0:
-                        results[program] = 0
-                    else:
-                        results[program] = 1
-                except CalledProcessError:
-                    results[program] = 1
-            break
-
-
-        try:
-            pid = subprocess.check_output(["/bin/pidof", "-x", "-s", 
program])[:-1]
-        except subprocess.CalledProcessError, e:
-            if e.returncode == 1:
-                results[program] = 2
-                break
-
-        service = "undefined"
-
-        if os.path.exists('/bin/systemd'): # systemd
-            cgroup = os.path.join("/proc", pid, "cgroup")
-            if os.path.exists(cgroup):
-                f = open(cgroup, "r")
-                for i in f.readlines():
-                    if i.startswith("1:name"):
-                        service = i.split("/")[-1].strip()
-                f.close()
-
-        elif os.path.exists('/sbin/initctl'): # upstart
-            jobs = subprocess.check_output(["/sbin/initctl", "list"])
-            for x in jobs.splitlines():
-                if x.endswith(str(pid)):
-                    service = x.split()[0]
-
-        # no systemd or upstart job is present, let's check sysvinit
-        if service == "undefined":
-            # try a heuristic for sysvinit, in many cases the name of the 
daemon equals the init script name
-            # apply some mapping for known exceptions
-
-            program_basename = os.path.basename(program)
-            if servicemap.has_key(program_basename):
-                service = servicemap[program_basename]
-
-            if os.path.exists(os.path.join('/etc/init.d/', program_basename)):
-                service = program_basename
-
-        log.info("Restarting " + service + " for " + program)
-        if __salt__['service.restart'](service):
-            results[program] = 0
-        else:
-            results[program] = 1
-
-    return results
-
-
-def install_pkgs(binary_packages, downgrade = False):
-    '''
-    This Salt module installs software updates via apt
-
-    binary_packages: A list of Debian binary package names to update (list of 
dictionaries)
-    downgrade: If enabled, version downgrades are allowed (required to 
rollbacks to earlier versions)
-    '''
-    try:
-        pkg_params, pkg_type = 
__salt__['pkg_resource.parse_targets'](pkgs=binary_packages)
-
-    except MinionError as exc:
-        raise CommandExecutionError(exc)
-
-    if pkg_params is None or len(pkg_params) == 0:
-        return {}
-    if pkg_type == 'repository':
-        targets = []
-        for param, version_num in pkg_params.iteritems():
-            if version_num is None:
-                targets.append(param)
-            else:
-                targets.append('{0}={1}'.format(param, 
version_num.lstrip('=')))
-        cmd = ['apt-get', '-q', '-y']
-        if downgrade:
-            cmd.append('--force-yes')
-        cmd = cmd + ['-o', 'DPkg::Options::=--force-confold']
-        cmd = cmd + ['-o', 'DPkg::Options::=--force-confdef']
-        cmd.append('install')
-        cmd.extend(targets)
-
-    return __salt__['cmd.run_all'](cmd, python_shell=False, 
output_loglevel='debug')
-
-def deploy(source, update_type, versions, **kwargs):
-    '''
-    Updates all installed binary packages of the source package
-    to the specified version.
-
-    source      : Name of the source package
-    update_type : tool | library and others, see doc/readme.txt
-    versions    : A dictionary of distros and the version to be installed,
-                  e.g. jessie : 1.0-1.
-                  If the distro isn't used, no update is performed
-    '''
-
-    pending_restarts_pre = set()
-    pending_restarts_post = set()
-    blacklisted_packages = []
-
-    installed_distro = grains['oscodename']
-    if versions.get(installed_distro, None) == None:
-        log.info("Update doesn't apply to the installed distribution (" + 
installed_distro + ")")
-        return {}
-
-    if os.path.exists("/etc/debdeploy-minion.conf"):
-        config = ConfigParser.ConfigParser()
-        config.read("/etc/debdeploy-minion.conf")
-
-        if config.has_section("blacklist-" + installed_distro):
-            if config.has_option("blacklist-" + installed_distro, source):
-                blacklisted_packages = [x.strip() for x in 
config.get("blacklist-" + installed_distro, source).split(",")]
-    log.info("Packages blacklisted for upgrades: " + str(blacklisted_packages))
-
-    # Detect all locally installed binary packages of a given source package
-    # The only resource we can use for that is parsing the /var/lib/dpkg/status
-    # file. The format is a bit erratic: The Source: line is only present for
-    # binary packages not having the same name as the binary package
-    installed_binary_packages = []
-    for pkg in deb822.Packages.iter_paragraphs(file('/var/lib/dpkg/status')):
-
-        # skip packages in deinstalled status ("rc" in dpkg). These are not 
relevant for
-        # upgrades and cause problems when binary package names have changed 
(since package
-        # installations are forced with a specific version which is not 
available for those
-        # outdated binary package names)
-        installation_status = pkg['Status'].split()[0]
-        if installation_status == "deinstall":
-            continue
-
-        if pkg.has_key('Package') and pkg.get('Package') in 
blacklisted_packages:
-            log.info('Package ' + pkg.get('Package') + ' has been blacklisted 
for installation')
-            continue
-
-        # Source packages which have had a binNMU have a Source: entry with 
the source
-        # package version in brackets, so strip these
-        # If no Source: entry is present in /var/lib/dpkg/status, then the 
source package
-        # name is identical to the binary package name
-        if pkg.has_key('Source') and re.sub(r'\(.*?\)', '', 
pkg['Source']).strip() == source:
-            installed_binary_packages.append({pkg['Package'] : 
versions[installed_distro]})
-        elif pkg.has_key('Package') and pkg['Package'] == source:
-            installed_binary_packages.append({pkg['Package'] : 
versions[installed_distro]})
-    log.debug("Installed binary packages for " + source + ": " + 
str(installed_binary_packages))
-
-    if len(installed_binary_packages) == 0:
-        log.info("No binary packages installed for source package " + source)
-        return {}
-
-    if update_type == "library":
-        pending_restarts_pre = Checkrestart().get_programs_to_restart()
-        log.debug("Packages needing a restart prior to the update:" + 
str(pending_restarts_pre))
-
-    old = list_pkgs()
-
-    log.warn("Refreshing apt package database")
-    log.info("Refreshing apt package database")
-    __salt__['pkg.refresh_db']
-
-    apt_call = install_pkgs(installed_binary_packages)
-
-    new = list_pkgs()
-
-    if update_type == "library":
-        pending_restarts_post = Checkrestart().get_programs_to_restart()
-        log.debug("Packages needing a restart after to the update:" + 
str(pending_restarts_post))
-
-    old_keys = set(old.keys())
-    new_keys = set(new.keys())
-
-    additions = []
-    removals = []
-    updated = []
-    restarts = []
-    new_restarts = []
-
-    if update_type == "library":
-        restarts = list(pending_restarts_post)
-        new_restarts = 
list(pending_restarts_post.difference(pending_restarts_pre))
-
-    for i in new_keys.difference(old_keys):
-        additions.append[i]
-    for i in old_keys.difference(new_keys):
-        removals.append[i]
-    intersect = old_keys.intersection(new_keys)
-    modified = {x : (old[x], new[x]) for x in intersect if old[x] != new[x]}
-
-    log.info("Newly installed packages:" + str(additions))
-    log.info("Removed packages: "  + str(removals))
-    log.info("Modified packages: " + str(modified))
-    log.info("Packages needing a restart: " + str(restarts))
-    log.info("New packages needing a restart: " + str(new_restarts))
-
-    r = {}
-    r["additions"] = additions
-    r["removals"] = removals
-    r["updated"] = modified
-    r["new_restart"] = new_restarts
-    r["restart"] = restarts
-    r["aptlog"] = str(apt_call['stdout'])
-    r["apterrlog"] = str(apt_call['stderr'])
-    r["aptreturn"] = apt_call['retcode']
-
-    jobid = kwargs.get('__pub_jid')
-    with open("/var/lib/debdeploy/" + jobid + ".job", "w") as jobfile:
-        pickle.dump(r, jobfile)
-
-    return r
-
-
-def rollback(jobid):
-    '''
-    Roll back a software update specified by a Salt job ID
-
-    '''
-    with open("/var/lib/debdeploy/" + jobid + ".job", "r") as jobfile:
-        r = pickle.load(jobfile)
-
-    old = list_pkgs()
-    __salt__['pkg.refresh_db']
-
-    aptstderr = ""
-    aptstdout = ""
-    aptreturn = 0
-
-    if len(r['updated'].keys()) > 0:
-        pkgdowngrade = []
-        for i in r['updated']:
-            a = {}
-            a[i] = r['updated'][i][0]
-            pkgdowngrade.append(a)
-        apt_call = install_pkgs(pkgdowngrade, downgrade=True)
-        aptstderr += apt_call['stderr']
-        aptstdout += apt_call['stdout']
-        aptreturn += apt_call['retcode']
-
-    if len(r['removals']) > 0:
-        install_pkgs(r['removals'])
-        aptstderr += apt_call['stderr']
-        aptstdout += apt_call['stdout']
-        aptreturn += apt_call['retcode']
-
-    if len(r['additions']) > 0:
-        __salt__['pkg.remove'] (pkgs=r['additions'])
-        # TODO, capture output
-
-    if aptreturn > 100:
-        aptreturn = 100
-
-    new = list_pkgs()
-    old_keys = set(old.keys())
-    new_keys = set(new.keys())
-
-    additions = []
-    removals = []
-    updated = []
-    restarts = []
-
-    for i in new_keys.difference(old_keys):
-        additions.append[i]
-    for i in old_keys.difference(new_keys):
-        removals.append[i]
-    intersect = old_keys.intersection(new_keys)
-    modified = {x : (old[x], new[x]) for x in intersect if old[x] != new[x]}
-
-    log.info("Newly installed packages:" + str(additions))
-    log.info("Removed packages: "  + str(removals))
-    log.info(modified)
-
-    r = {}
-    r["additions"] = additions
-    r["removals"] = removals
-    r["updated"] = modified
-    r["restart"] = restarts
-    r["aptlog"] = aptstdout
-    r["apterrlog"] = aptstderr
-    r["aptreturn"] = aptreturn
-
-    return r
-
-# Local variables:
-# mode: python
-# End:
diff --git a/minion/debdeploy_restart.py b/minion/debdeploy_restart.py
deleted file mode 100644
index 77b1388..0000000
--- a/minion/debdeploy_restart.py
+++ /dev/null
@@ -1,326 +0,0 @@
-# Partially based on checkrestart from debian-goodies:
-# Copyright (C) 2001 Matt Zimmerman <[email protected]>
-# Copyright (C) 2007,2010-2015 Javier Fernandez-Sanguino <[email protected]>
-# - included patch from Justin Pryzby <justinpryzby_AT_users.sourceforge.net>
-#   to work with the latest Lsof - modify to reduce false positives by not
-#   complaining about deleted inodes/files under /tmp/, /var/log/,
-#   /var/run or named   /SYSV. 
-# - introduced a verbose option
-#
-# Additional changes:
-# Copyright (C) 2015 Moritz Muehlenhoff <[email protected]>
-# Copyright (C) 2015 Wikimedia Foundation
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2, or (at your option)
-# any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, 
-# MA 02110-1301 USA
-#
-# On Debian systems, a copy of the GNU General Public License may be
-# found in /usr/share/common-licenses/GPL.
-
-import os, errno, sys, re, pwd, sys, subprocess, getopt
-from stat import *
-
-# Tells if a file has to be considered a deleted file
-# Returns:
-#  - 0 (NO) for known locations of files which might be deleted
-#  - 1 (YES) for valid deleted files we are interested in
-def isdeletedFile (f, blacklist = None):
-
-    if blacklist:
-        for p in blacklist:
-            if p.search(f):
-                return 0
-    if f.startswith('/var/log/') or f.startswith('/var/local/log/'):
-        return 0
-    if f.startswith('/var/run/') or f.startswith('/var/local/run/'):
-        return 0
-    if f.startswith('/tmp/'):
-        return 0
-    if f.startswith('/dev/shm/'):
-        return 0
-    if f.startswith('/run/'):
-        return 0
-    if f.startswith('/drm'):
-        return 0
-    if f.startswith('/var/tmp/') or f.startswith('/var/local/tmp/'):
-        return 0
-    if f.startswith('/dev/zero'):
-        return 0
-    if f.startswith('/dev/pts/'):
-        return 0
-    if f.startswith('/usr/lib/locale/'):
-        return 0
-    # Skip files from the user's home directories
-    # many processes hold temporary files there 
-    if f.startswith('/home/'):
-        return 0
-    # Skip automatically generated files
-    if f.endswith('icon-theme.cache'):
-        return 0
-    if f.startswith('/var/cache/fontconfig/'):
-        return 0
-    if f.startswith('/var/lib/nagios3/spool/'):
-        return 0
-    if f.startswith('/var/lib/nagios3/spool/checkresults/'):
-       return 0
-    if f.startswith('/var/lib/postgresql/'):
-        return 0
-    # Skip Aio files found in MySQL servers
-    if f.startswith('/[aio]'):
-        return 0
-
-    # TODO: it should only care about library files (i.e. /lib, /usr/lib and 
the like)
-    # build that check with a regexp to exclude others
-    if f.endswith(' (deleted)'):
-        return 1
-    if re.compile("\(path inode=[0-9]+\)$").search(f):
-        return 1
-    # Default: it is a deleted file we are interested in
-    return 1
-
-class Package:
-    def __init__(self, name):
-        self.name = name
-        # use a set, we don't need duplicates
-        self.initscripts = set()
-        self.systemdservice = set()
-        self.processes = []
-
-class Process:
-    def __init__(self, pid):
-        self.pid = pid
-        self.files = []
-        self.descriptors = []
-        self.links = []
-        self.program = ''
-
-        try:
-            self.program = os.readlink('/proc/%d/exe' % self.pid)
-            # if the executable command is an interpreter such as 
perl/python/ruby/tclsh,
-            # we want to find the real program
-            m = re.match("^/usr/bin/(perl|python|ruby|tclsh)", self.program)
-            if m:
-                with open('/proc/%d/cmdline' % self.pid, 'r') as cmdline:
-                    # only match program in /usr (ex.: /usr/sbin/smokeping)
-                    # ignore child, etc.
-                    #m = re.search(r'^(([/]\w*){1,5})\s.*$', cmdline.read())
-                    # Split by null-bytes, see proc(5)
-                    data = cmdline.read().split('\x00')
-                    # Last character should be null-byte, too, see proc(5)
-                    if not data[-1]: data.pop()
-                    # Spamd sets $0 wrongly, see
-                    # https://bugzilla.redhat.com/show_bug.cgi?id=755644
-                    # i.e. the blank after spamd is relevant in case
-                    # this will be fixed in the future.
-                    m = re.match("^/usr/sbin/spamd |^spamd ", data[0])
-                    if m:
-                        self.program = "/usr/sbin/spamd"
-                    else:
-                        # Strip first value, the interpreter
-                        data.pop(0)
-                        # Check if something's left after the interpreter, see #715000
-                        if data:
-                            # Strip all options following the interpreter, e.g. python's -O
-                            m = re.match("^-", data[0])
-                            while (m):
-                                data.pop(0)
-                                if not data: break
-                                m = re.match("^-", data[0])
-                            if data and data[0]:
-                                data = self.which(data[0])
-                                m = re.search(r'^(/usr/\S+)$', data)
-                                if m:
-                                    # store the real full path of script as the program
-                                    self.program = m.group(1)
-        except OSError, e:
-            if e.errno != errno.ENOENT:
-                if self.pid == 1:
-                    sys.stderr.write("Found unreadable pid 1. Assuming we're under vserver and continuing.\n")
-                else:
-                    sys.stderr.write('ERROR: Failed to read %d' % self.pid)
-                    raise
-        self.program = self.cleanFile(self.program)
-
-    def which(self, program):
-        if os.path.isabs(program):
-            return program
-        path = os.environ.get("PATH", os.defpath).split(os.pathsep)
-        seen = set()
-        for dir in path:
-            dir = os.path.normcase(os.path.abspath(dir))
-            if not dir in seen:
-                seen.add(dir)
-                name = os.path.join(dir, program)
-                if os.path.exists(name) and os.access(name, os.F_OK|os.X_OK) and not os.path.isdir(name):
-                    return name
-        return program
-
-    def cleanFile(self, f):
-        # /proc/pid/exe has all kinds of junk in it sometimes
-        null = f.find('\0')
-        if null != -1:
-            f = f[:null]
-        # Support symlinked /usr
-        if f.startswith('/usr'):
-            statinfo = os.lstat('/usr')[ST_MODE]
-            # If /usr is a symlink then find where it points to
-            if S_ISLNK(statinfo): 
-                newusr = os.readlink('/usr')
-                if not newusr.startswith('/'):
-                    # If the symlink is relative, make it absolute
-                    newusr = os.path.join(os.path.dirname('/usr'), newusr)
-                f = re.sub('^/usr',newusr, f)
-                # print "Changing usr to " + newusr + " result:" +f; # Debugging
-        return re.sub('( \(deleted\)|.dpkg-new).*$','',f)
-
-    # Check if a process needs to be restarted, previously we would
-    # just check if it used libraries named '.dpkg-new' since that's
-    # what dpkg would do. Now we need to be more contrieved.
-    # Returns:
-    #  - 0 if there is no need to restart the process
-    #  - 1 if the process needs to be restarted
-    def needsRestart(self, blacklist = None):
-        for f in self.files:
-            if isdeletedFile(f, blacklist):
-                return 1
-        for f in self.links:
-            if f == 0:
-                return 1
-        return 0
-
-class Checkrestart:
-    def __init__(self):
-        if os.getuid() != 0:
-            sys.stderr.write('ERROR: This program must be run as root in order to obtain information\n')
-            sys.stderr.write('about all open file descriptors in the system.\n')
-            sys.exit(1)
-
-        process = None
-        toRestart = {}
-
-        lc_all_c_env = os.environ
-        lc_all_c_env['LC_ALL'] = 'C'
-        blacklistFiles = []
-        blacklist = []
-        ignorelist = [ 'screen', 'systemd' ]
-
-        for f in blacklistFiles:
-            for line in file(f, "r"):
-                if line.startswith("#"):
-                    continue
-                blacklist.append(re.compile(line.strip()))
-
-        toRestart = self.lsoffilescheck(blacklist = blacklist)
-
-        self.programs = {}
-        for process in toRestart:
-            self.programs.setdefault(process.program, [])
-            self.programs[process.program].append(process)
-
-        self.packages = {}
-        diverted = None
-
-        dpkgQuery = ["dpkg-query", "--search"] + self.programs.keys()
-        dpkgProc = subprocess.Popen(dpkgQuery, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env = lc_all_c_env)
-
-        while True:
-            line = dpkgProc.stdout.readline()
-            if not line:
-                break
-            if line.startswith('local diversion'):
-                continue
-            if not ':' in line:
-                continue
-
-            m = re.match('^diversion by (\S+) (from|to): (.*)$', line)
-            if m:
-                if m.group(2) == 'from':
-                    diverted = m.group(3)
-                    continue
-                if not diverted:
-                    raise Exception('Weird error while handling diversion')
-                packagename, program = m.group(1), diverted
-            else:
-                packagename, program = line[:-1].split(': ')
-                if program == diverted:
-                    # dpkg prints a summary line after the diversion, name both
-                    # packages of the diversion, so ignore this line
-                    # mutt-patched, mutt: /usr/bin/mutt
-                    continue
-            self.packages.setdefault(packagename,Package(packagename))
-            try:
-                 self.packages[packagename].processes.extend(self.programs[program])
-            except KeyError:
-                  sys.stderr.write ('checkrestart (program not found): %s: %s\n' % (packagename, program))
-            sys.stdout.flush()
-
-        dpkgProc.stdout.close()
-
-        # Remove the ignored packages from the list of packages
-        if ignorelist:
-            for i in ignorelist:
-                if i in self.packages:
-                    try:
-                        del self.packages[i]
-                    except KeyError:
-                        continue
-
-    def get_packages_to_restart(self):
-        return set(self.packages.keys())
-
-    def get_programs_to_restart(self):
-        return set(self.programs.keys())
-
-    def lsoffilescheck(self, blacklist = None):
-        processes = {}
-
-        for line in os.popen('lsof +XL -F nf').readlines():
-            field, data = line[0], line[1:-1]
-
-            if field == 'p':
-                process = processes.setdefault(data,Process(int(data)))
-            elif field == 'k':
-                process.links.append(data)
-            elif field == 'n':
-                # Remove the previous entry to check if this is something we should use
-                if data.find('SYSV') >= 0:
-                    # If we find SYSV we discard the previous descriptor
-                    last = process.descriptors.pop()
-                elif data.startswith('/') or data.startswith('(deleted)/') or data.startswith(' (deleted)/'):
-                    last = process.descriptors.pop()
-
-                    # If the data starts with (deleted) put it in the end of the
-                    # file name, this is used to workaround different behaviour in
-                    # OpenVZ systems, see
-                    # https://bugzilla.openvz.org/show_bug.cgi?id=2932
-                    if data.startswith('(deleted)'):
-                        data = data[9:] + ' (deleted)'
-                    elif data.startswith(' (deleted)'):
-                        data = data[10:] + ' (deleted)'
-
-                    # Add it to the list of deleted files if the previous descriptor
-                    # was DEL or lsof marks it as deleted
-                    if re.compile("DEL").search(last) or re.compile("\(deleted\)").search(data) or re.compile("\(path inode=[0-9]+\)$").search(data):
-                        process.files.append(data)
-                else:
-                    # We discard the previous descriptors and drop it
-                    last = process.descriptors.pop()
-            elif field == 'f':
-                # Save the descriptor for later comparison
-                process.descriptors.append(data)
-
-        toRestart = filter(lambda process: process.needsRestart(blacklist),
-                       processes.values())
-        return toRestart
diff --git a/tests/test-jobdb b/tests/test-jobdb
deleted file mode 100755
index d2d4b36..0000000
--- a/tests/test-jobdb
+++ /dev/null
@@ -1,76 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-
-import sqlite3, os, unittest
-from debdeploy_joblog import *
-
-class TestDeployJob(unittest.TestCase):
-
-    if not os.path.exists("testrun"):
-        os.mkdir("testrun")
-
-    testdb = os.path.join("testrun/", "tests-jobs.sqlite")
-    if os.path.exists(testdb):
-        os.remove(testdb)
-
-    joblogdb = DebDeployJobLog(testdb)
-
-    yaml1 = '''
-source: elinks
-comment: CVE-2015-0123
-fixes:
-        jessie: 0.12~pre6-7
-        trusty: 2.0-1
-        precise:
-        '''
-
-    yamlfile = os.path.join("job1.yaml")
-    if not os.path.exists(yamlfile):
-        with open(yamlfile, "w") as f:
-            f.write(yaml1)
-
-    def testJobs(self):
-
-        self.assertEqual(self.joblogdb.does_job_exist(self.yamlfile, "testsystem"), False)
-        self.assertEqual(self.joblogdb.does_job_exist("fake.yaml", "testsystem"), False)
-        self.assertEqual(self.joblogdb.does_job_exist(self.yamlfile, "tesstsystem"), False)
-
-        self.joblogdb.add_job(self.yamlfile, "testsystem", "20150519122347247890")
-
-        self.assertEqual(self.joblogdb.does_job_exist(self.yamlfile, "testsystem"), True)
-        self.assertEqual(self.joblogdb.does_job_exist("fake.yaml", "testsystem"), False)
-        self.assertEqual(self.joblogdb.does_job_exist(self.yamlfile, "tesstsystem"), False)
-
-        self.assertEqual(self.joblogdb.has_been_rolled_back(self.yamlfile, "testsystem"), False)
-        self.assertEqual(self.joblogdb.has_been_rolled_back("fake.yaml", "testsystem"), False)
-        self.assertEqual(self.joblogdb.has_been_rolled_back(self.yamlfile, "tesstsystem"), False)
-
-        self.assertEqual(self.joblogdb.get_jobid(self.yamlfile, "testsystem"), "20150519122347247890")
-        self.assertEqual(self.joblogdb.get_jobid("fake.yaml", "testsystem"), None)
-        self.assertEqual(self.joblogdb.get_jobid(self.yamlfile, "tesstsystem"), None)
-
-        self.assertEqual(self.joblogdb.get_rollbackid(self.yamlfile, "testsystem"), "")
-        self.assertEqual(self.joblogdb.get_rollbackid("fake.yaml", "testsystem"), None)
-        self.assertEqual(self.joblogdb.get_rollbackid(self.yamlfile, "tesstsystem"), None)
-
-        self.assertRaises(ValueError, self.joblogdb.mark_as_rolled_back, "10150519122347247890", "20150519122347248000")
-        self.joblogdb.mark_as_rolled_back("20150519122347247890", "20150519122347248000")
-
-        self.assertEqual(self.joblogdb.has_been_rolled_back(self.yamlfile, "testsystem"), True)
-        self.assertEqual(self.joblogdb.get_rollbackid(self.yamlfile, "testsystem"), "20150519122347248000")
-
-        self.joblogdb.add_job(self.yamlfile, "testsystem2", "20150519122347247890")
-        self.joblogdb.add_job(self.yamlfile, "testsystem3", "20150519122347247891")
-        self.joblogdb.add_job(self.yamlfile, "testsystem4", "20150519122347247892")
-        self.joblogdb.add_job(self.yamlfile, "testsystem5", "20150519122347247893")
-
-        self.assertEqual(len(self.joblogdb.get_jobs()), 5)
-        self.assertEqual(len(self.joblogdb.get_jobs(2)), 2)
-        self.assertEqual(self.joblogdb.get_jobs(1)[0][1], "testsystem5")
-
-if __name__ == '__main__':
-    unittest.main()
-
-# Local variables:
-# mode: python
-# End:
diff --git a/tests/test-pkgdb b/tests/test-pkgdb
deleted file mode 100755
index 8bd686b..0000000
--- a/tests/test-pkgdb
+++ /dev/null
@@ -1,112 +0,0 @@
-#! /usr/bin/python
-# -*- coding: utf-8 -*-
-
-import sqlite3, os, unittest
-
-#from debdeploy import *
-from debdeploy_pkgdb import *
-
-class TestDeployPkgdb(unittest.TestCase):
-
-    pkg = DebDeployPkgDB
-
-    def reinit_db(self, name):
-        if os.path.exists(name):
-            os.remove(name)
-        self.pkg = DebDeployPkgDB(name)
-
-    def testHosts(self):
-        self.reinit_db("tests-host.sqlite")
-        self.assertEqual(self.pkg.get_host_id("host1"), 0)
-        self.assertEqual(self.pkg.add_host("host1"), 1)
-        self.assertEqual(self.pkg.get_host_id("host1"), 1)
-        self.assertEqual(self.pkg.add_host("host1"), None)
-        self.assertEqual(self.pkg.does_host_exist("host2"), False)
-        self.assertEqual(self.pkg.add_host("host2"), 2)
-        self.assertEqual(self.pkg.does_host_exist("host2"), True)
-        self.pkg.remove_host("host2")
-        self.assertEqual(self.pkg.does_host_exist("host2"), False)
-
-    def testPackages(self):
-        self.reinit_db("tests-pkgs.sqlite")
-        self.assertEqual(self.pkg.add_packages(["elinks"]), set(["elinks"]))
-        self.assertEqual(self.pkg.get_package_id("elinks"), 1)
-        self.assertEqual(self.pkg.get_packages(), set(["elinks"]))
-        self.assertEqual(self.pkg.get_package_id("notexisting"), 0)
-        self.assertEqual(self.pkg.does_package_exist("elinks-data"), False)
-        self.assertEqual(self.pkg.add_packages(["elinks-data", "elinks"]), set(["elinks-data"]))
-        self.assertEqual(self.pkg.does_package_exist("elinks-data"), True)
-        self.assertEqual(self.pkg.get_packages(), set(["elinks", "elinks-data"]))
-        self.pkg.remove_package("elinks-data")
-        self.assertEqual(self.pkg.does_package_exist("elinks-data"), False)
-
-    def testPackageVersions(self):
-        self.reinit_db("tests-pkgvers.sqlite")
-        self.assertEqual(self.pkg.add_packages(["elinks"]), set(["elinks"]))
-        self.assertEqual(self.pkg.does_packageversion_exist("elinks", "0.11-4"), False)
-        pid = self.pkg.get_package_id("elinks")
-        self.pkg.add_packageversions([(pid, "0.11-4",)])
-        pvid = self.pkg.get_packageversion_id(pid, "0.11-4")
-        self.assertEqual(pvid, 1)
-        self.assertEqual(self.pkg.does_packageversion_exist("elinks", "0.11-4"), True)
-        self.assertEqual(self.pkg.get_packageversion_id(pid, "0.11-4"), 1)
-        self.assertEqual(self.pkg.get_packageversion_id(pid, "0.11-4-invalid"), 0)
-        self.pkg.add_packageversions([(pid, "0.11-4",)])
-        self.pkg.add_packageversions([(pid, "0.11-5",)])
-        self.pkg.remove_packageversion("elinks", "0.11-4")
-        self.assertEqual(self.pkg.does_packageversion_exist("elinks", "0.11-4"), False)
-        self.assertEqual(self.pkg.does_packageversion_exist("elinks", "0.11-5"), True)
-
-    def testPackageHosts(self):
-        self.reinit_db("tests-pkginsts.sqlite")
-        self.assertEqual(self.pkg.does_host_exist("host1"), False)
-#        inst = []
-#        inst.append(("elinks", "0.11-4",))
-#        self.assertRaises(ValueError, self.pkg.add_installed_packages, "host1", inst)
-#        self.assertEqual(self.pkg.does_host_exist("host1"), False)
-
-        inst = {}
-        inst["elinks"] = "0.11-4"
-        inst["elinks-data"] = "0.11-4"
-        inst["wget"] = "1.5-1"
-        self.pkg.add_installed_packages("host1", inst)
-        self.assertEqual(self.pkg.get_installed_version("elinks", "host1"), "0.11-4")
-        self.assertEqual(self.pkg.does_host_exist("host1"), True)
-
-        self.assertEqual(self.pkg.get_outdated_packages("elinks", "0.11-3"), [])
-        self.assertEqual(self.pkg.get_outdated_packages("elinks", "0.11-4"), [])
-        self.assertEqual(self.pkg.get_outdated_packages("elinks", "0.11-5"), ["host1"])
-        self.assertEqual(self.pkg.get_outdated_packages("elinks", "1:0.11-4"), ["host1"])
-
-        self.pkg.add_host("host2")
-        self.pkg.add_host("host3")
-        inst = {}
-        inst["elinks"] = "0.11-4"
-        self.assertEqual(self.pkg.get_servers_with_package("elinks"), ["host1"])
-        self.pkg.add_installed_packages("host2", inst)
-
-        self.assertEqual(self.pkg.get_outdated_packages("elinks", "0.11-4"), [])
-        self.assertEqual(self.pkg.get_outdated_packages("elinks", "0.11-5"), ["host1", "host2"])
-
-        self.assertEqual(self.pkg.get_servers_with_package("elinks"), ["host1", "host2"])
-        self.assertEqual(self.pkg.get_servers_with_package("elinks-non-exist"), [])
-        self.assertNotEqual(self.pkg.get_last_modification("elinks", "host1"), None)
-        self.assertEqual(self.pkg.get_last_modification("elinks", "host3"), None)
-
-        # If a package is added for which an entry already exists, the old version is pruned
-        inst = {}
-        inst["elinks"] = "0.11-6"
-        self.pkg.add_installed_packages("host1", inst)
-        self.assertEqual(self.pkg.get_installed_version("elinks", "host1"), "0.11-6")
-
-        remove = {}
-        remove["elinks"] = "0.11-6"
-        self.pkg.remove_installed_packages("host1", remove)
-        self.assertEqual(self.pkg.get_installed_version("elinks", "host1"), None)
-
-if __name__ == '__main__':
-    unittest.main()
-
-# Local variables:
-# mode: python
-# End:

-- 
To view, visit https://gerrit.wikimedia.org/r/377239
To unsubscribe, visit https://gerrit.wikimedia.org/r/settings

Gerrit-MessageType: newchange
Gerrit-Change-Id: Ibe8d94b4ed14e8498d43125dd7fe7e7a8bce4180
Gerrit-PatchSet: 1
Gerrit-Project: operations/debs/debdeploy
Gerrit-Branch: master
Gerrit-Owner: Muehlenhoff <[email protected]>

_______________________________________________
MediaWiki-commits mailing list
[email protected]
https://lists.wikimedia.org/mailman/listinfo/mediawiki-commits

Reply via email to