Dear all,

I am no Python developer (basically no developer at all), but I struggled with 
the backup for a while, and I finally have a script that is almost OK for the backup.

Due to the fact that the “snapshot -> export” feature is coming as it seems in 
3.6 we still need to clone and then export.

This script is threaded to back up 3 VMs concurrently. However, in our 
setup (a compute server with 3 separate Gluster machines), parallel backups of 3 
VMs create very heavy load on the Gluster storage servers, and I would like to 
do the "clone snapshot" step to a separate intermediate storage instead. Can anybody 
help me with that? (somewhere around line 70)
The cloning can hopefully go away in September with 3.6 but until then we still 
need that.

Any other improvements are more than welcome – please keep in mind that I am not a 
developer and I only tried my very best to write a decent script.

Also: do we have a place (git repo or so) where we can put this script for 
everybody ?

I am pretty sure there is more than one person out there looking for something 
like this (at least I hope so :-) ).

Regards
Soeren

#!/usr/bin/python

import Queue
import threading
import time
from ovirtsdk.api import API
from ovirtsdk.xml import params
import sys
import datetime
import smtplib
from email.mime.text import MIMEText


# NOTE: the original `global SNAPSHOT_NAME` statement was removed -- `global`
# is a no-op at module level; SNAPSHOT_NAME is already a module-level name.

# oVirt engine connection settings -- fill in server/password/cert before use.
VERSION             = params.Version(major='3', minor='0')
ENGINE_SERVER       = ''
ENGINE_USER         = 'admin@internal'
ENGINE_PASSWORD     = ''
ENGINE_CERT         = ''

NOW                 = datetime.datetime.now()
# Every snapshot / clone / export created by one run is tagged with this name.
SNAPSHOT_NAME       = 'BACKUP_' + NOW.strftime("%Y-%m-%d-%H%M")
DAY_OF_WEEK         = NOW.strftime("%w")  # "0" = Sunday ... "6" = Saturday
BACKUP              = "FULL"

# Set to a truthy value to tell the worker threads to stop.
exitFlag = 0

class myThread (threading.Thread):
    def __init__(self, threadID, name, q):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.q = q
        self.api = api
        global message
    def run(self):
        print "Starting " + self.name
        process_data(self.name, self.q)
        print "Exiting " + self.name

def process_data(threadName, q):
    while not exitFlag:
        queueLock.acquire()
        if not workQueue.empty():
            data = q.get()
            queueLock.release()
            print "%s processing %s" % (threadName, data.name)
            vm = api.vms.get(name=data.name)
            vmname = data.name +"_"
            newvmname = vmname + SNAPSHOT_NAME
            cluster = api.clusters.get(id=vm.cluster.id)
            dc = api.datacenters.get(id=cluster.data_center.id)
            export = None
            for sd in dc.storagedomains.list():
                if sd.type_ == "export":
                    export = sd
            if not export:
                print("Export domain required, and none found, exitting...\n")
                sys.exit(1)

            if vm.name != "HostedEngine":
                vm.snapshots.add(params.Snapshot(description=SNAPSHOT_NAME, vm=vm ))
                snap = vm.snapshots.list(description=SNAPSHOT_NAME)[0]
                while vm.snapshots.get(id=snap.id).snapshot_status == "locked":
                    print("%s Waiting for snapshot of %s to finish") % (threadName, vm.name)
                    time.sleep(10)
                print("%s Snapshotting %s is done") % (threadName,vm.name)
                try:
                    snapshots = params.Snapshots(snapshot=[params.Snapshot(id=snap.id)])
                    api.vms.add(params.VM(name=newvmname, snapshots=snapshots, cluster=cluster, template=api.templates.get(name="Blank")))
                    while api.vms.get(name=newvmname).status.state == "image_locked":
                        print("%s Waiting for clone of %s to finish") % (threadName, vm.name)
                        time.sleep(60)
                    print("%s Cloning of %s  done") % (threadName, vm.name)
                    api.vms.get(name=newvmname).export(params.Action(storage_domain=export))
                    while api.vms.get(name=newvmname).status.state == "image_locked":
                        print("%s Waiting for export of %s finish") % (threadName, vm.name)
                        time.sleep(60)
                    print("%s Exporting %s done") % (threadName, vm.name)
                    api.vms.get(name=newvmname).delete()
                except Exception as e:
                    print ("Something went wrong with the coling or exporting\n%s") % str(e)
                snapshotlist = vm.snapshots.list()
                for snapshot in snapshotlist:
                    if snapshot.description != "Active VM":
                        snapshot.delete()
                        time.sleep(3)
                        try:
                            while api.vms.get(name=vm.name).snapshots.get(id=snapshot.id).snapshot_status == "locked":
                                print("%s Waiting for snapshot %s on %s deletion to finish") % (threadName, snapshot.name, vm.name)
                                time.sleep(10)
                        except Exception as e:
                            print ("%s Snapshot status request might have failed, this usually means that the snpashot was deleted properly") % threadName
                        print("%s Deleting snapshot %s on %s done") % (threadName, snapshot.name, vm.name)

        else:
            queueLock.release()
        time.sleep(1)

# Display names for the three concurrent backup workers.
threadList = ["Backup-Thread-%d" % n for n in range(1, 4)]

def Connect():
    """Open the module-level oVirt engine session using the configured
    server, credentials and CA certificate."""
    global api
    api = API(
        url=ENGINE_SERVER,
        username=ENGINE_USER,
        password=ENGINE_PASSWORD,
        ca_file=ENGINE_CERT,
    )

def Disconnect(exitcode):
    """Close the engine session, then terminate the script with *exitcode*."""
    api.disconnect()
    sys.exit(exitcode)

try:
    Connect()
    vms = api.vms.list()

except Exception as e:
    print 'Failed:\n%s' % str(e)

nameList = vms 
queueLock = threading.Lock()
workQueue = Queue.Queue(0)
threads = []
threadID = 1

# Create new threads
for tName in threadList:
    thread = myThread(threadID, tName, workQueue)
    thread.start()
    threads.append(thread)
    threadID += 1

# Fill the queue
queueLock.acquire()
for word in nameList:
    workQueue.put(word)
queueLock.release()

# Wait for queue to empty
while not workQueue.empty():
    pass

# Notify threads it's time to exit
exitFlag = 1

# Wait for all threads to complete
for t in threads:
    t.join()
print "Exiting Main Thread"
api.disconnect()
_______________________________________________
Users mailing list
Users@ovirt.org
http://lists.ovirt.org/mailman/listinfo/users

Reply via email to