http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/aria/storage/drivers.py
----------------------------------------------------------------------
diff --git a/aria/storage/drivers.py b/aria/storage/drivers.py
new file mode 100644
index 0000000..17a99a1
--- /dev/null
+++ b/aria/storage/drivers.py
@@ -0,0 +1,407 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Aria's storage.drivers module
+Path: aria.storage.drivers
+
+The drivers module holds generic, abstract driver implementations.
+
+classes:
+    * Driver - abstract storage driver implementation.
+    * ModelDriver - abstract model base storage driver.
+    * ResourceDriver - abstract resource base storage driver.
+    * FileSystemModelDriver - file system implementation for model storage driver.
+    * FileSystemResourceDriver - file system implementation for resource storage driver.
+"""
+
+import os
+import shutil
+import distutils.dir_util
+from functools import partial
+from multiprocessing import RLock
+
+import jsonpickle
+
+from ..exceptions import StorageError
+from ..logger import LoggerMixin
+
+
+__all__ = (
+    'ModelDriver',
+    'FileSystemModelDriver',
+    'ResourceDriver',
+    'FileSystemResourceDriver',
+)
+
+
+class Driver(LoggerMixin):
+    """
+    Driver: storage driver context manager - abstract driver implementation.
+    At the implementation level, it is good practice to raise StorageError on errors.
+    """
+
+    def __enter__(self):
+        """
+        Context manager entry method, executes connect.
+        :return: context manager instance
+        :rtype: Driver
+        """
+        self.connect()
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        """
+        Context manager exit method, executes disconnect.
+        """
+        self.disconnect()
+        if not exc_type:
+            return
+        # self.logger.debug(
+        #     '{name} had an error'.format(name=self.__class__.__name__),
+        #     exc_info=(exc_type, exc_val, exc_tb))
+        if StorageError in exc_type.mro():
+            return
+        raise StorageError('An exception occurred, {type}: {message}'.format(
+            type=exc_type, message=str(exc_val)))
+
+    def connect(self):
+        """
+        Open storage connection.
+        In some cases, this method can get the connection from a connection pool.
+        """
+        pass
+
+    def disconnect(self):
+        """
+        Close storage connection.
+        In some cases, this method can release the connection to the connection pool.
+        """
+        pass
+
+    def create(self, name, *args, **kwargs):
+        """
+        Create table/document in storage by name.
+        :param str name: name of table/document in storage.
+        """
+        pass
+
+
+class ModelDriver(Driver):
+    """
+    ModelDriver context manager.
+    Base Driver for Model based storage.
+    """
+
+    def get(self, name, entry_id, **kwargs):
+        """
+        Getter from storage.
+        :param str name: name of table/document in storage.
+        :param str entry_id: id of the document to get from storage.
+        :return: value of entity from the storage.
+        """
+        raise NotImplementedError('Subclass must implement abstract get method')
+
+    def delete(self, name, entry_id, **kwargs):
+        """
+        Delete from storage.
+        :param str name: name of table/document in storage.
+        :param str entry_id: id of the entity to delete from storage.
+        :param dict kwargs: extra kwargs if needed.
+        """
+        raise NotImplementedError('Subclass must implement abstract delete method')
+
+    def store(self, name, entry_id, entry, **kwargs):
+        """
+        Setter to storage.
+        :param str name: name of table/document in storage.
+        :param str entry_id: id of the entity to store in the storage.
+        :param dict entry: content to store.
+        """
+        raise NotImplementedError('Subclass must implement abstract store method')
+
+    def iter(self, name, **kwargs):
+        """
+        Generator over the entries of table/document in storage.
+        :param str name: name of table/document/file in storage to iter over.
+        """
+        raise NotImplementedError('Subclass must implement abstract iter method')
+
+    def update(self, name, entry_id, **kwargs):
+        """
+        Updates an entry in storage.
+
+        :param str name: name of table/document in storage.
+        :param str entry_id: id of the document to get from storage.
+        :param kwargs: the fields to update.
+        :return:
+        """
+        raise NotImplementedError('Subclass must implement abstract update method')
+
+
+class ResourceDriver(Driver):
+    """
+    ResourceDriver context manager.
+    Base Driver for Resource based storage.
+
+    Resource storage structure is a file system base.
+    <resource root directory>/<resource_name>/<entry_id>/<entry>
+    entry: can be one single file or multiple files and directories.
+    """
+
+    def data(self, entry_type, entry_id, path=None, **kwargs):
+        """
+        Get the binary data from a file in a resource entry.
+        If the entry is a single file, no path is needed;
+        if the entry contains multiple files, the path guides to the relevant file.
+
+        resource path:
+            <resource root directory>/<name>/<entry_id>/<path>
+
+        :param basestring entry_type: resource name.
+        :param basestring entry_id: id of the resource entry in the storage.
+        :param basestring path: path to the resource relative to the entry_id folder in the storage.
+        :return: the content of the entry file.
+        :rtype: bytes
+        """
+        raise NotImplementedError('Subclass must implement abstract data method')
+
+    def download(self, entry_type, entry_id, destination, path=None, **kwargs):
+        """
+        Download the resource to a destination.
+        Like the data method, but instead of returning the data
+        it creates a new file in the local file system.
+
+        resource path:
+            <resource root directory>/<name>/<entry_id>/<path>
+        copy to:
+            /<destination>
+        destination can be file or directory
+
+        :param basestring entry_type: resource name.
+        :param basestring entry_id: id of the resource entry in the storage.
+        :param basestring destination: path in the local file system to download to.
+        :param basestring path: path to the resource relative to the entry_id folder in the storage.
+        """
+        raise NotImplementedError('Subclass must implement abstract download method')
+
+    def upload(self, entry_type, entry_id, source, path=None, **kwargs):
+        """
+        Upload the resource from a source.
+        The source can be a single file or a directory of files.
+
+        copy from:
+            /<source>
+        to resource path:
+            <resource root directory>/<name>/<entry_id>/<path>
+
+        :param basestring entry_type: resource name.
+        :param basestring entry_id: id of the resource entry in the storage.
+        :param basestring source: a file or a directory of files to upload.
+        :param basestring path: path to the resource relative to the entry_id folder in the storage.
+        """
+        raise NotImplementedError('Subclass must implement abstract upload method')
+
+
+class BaseFileSystemDriver(Driver):
+    def __init__(self, *args, **kwargs):
+        super(BaseFileSystemDriver, self).__init__(*args, **kwargs)
+        self._lock = RLock()
+
+    def connect(self):
+        self._lock.acquire()
+
+    def disconnect(self):
+        self._lock.release()
+
+    def __getstate__(self):
+        obj_dict = super(BaseFileSystemDriver, self).__getstate__()
+        del obj_dict['_lock']
+        return obj_dict
+
+    def __setstate__(self, obj_dict):
+        super(BaseFileSystemDriver, self).__setstate__(obj_dict)
+        vars(self).update(_lock=RLock(), **obj_dict)
+
+
+class FileSystemModelDriver(ModelDriver, BaseFileSystemDriver):
+    """
+    FileSystemModelDriver context manager.
+    """
+
+    def __init__(self, directory, **kwargs):
+        """
+        File system implementation for storage driver.
+        :param str directory: root dir for storage.
+        """
+        super(FileSystemModelDriver, self).__init__(**kwargs)
+        self.directory = directory
+
+        self._join_path = partial(os.path.join, self.directory)
+
+    def __repr__(self):
+        return '{cls.__name__}(directory={self.directory})'.format(
+            cls=self.__class__, self=self)
+
+    def create(self, name):
+        """
+        Create a directory in storage by path.
+        Tries to create the root directory as well.
+        :param str name: path of the directory in storage.
+        """
+        try:
+            os.makedirs(self.directory)
+        except (OSError, IOError):
+            pass
+        os.makedirs(self._join_path(name))
+
+    def get(self, name, entry_id, **kwargs):
+        """
+        Getter from storage.
+        :param str name: name of directory in storage.
+        :param str entry_id: id of the file to get from storage.
+        :return: value of file from storage.
+        :rtype: dict
+        """
+        with open(self._join_path(name, entry_id)) as file_obj:
+            return jsonpickle.loads(file_obj.read())
+
+    def store(self, name, entry_id, entry, **kwargs):
+        """
+        Store in storage.
+        :param str name: name of directory in storage.
+        :param str entry_id: id of the file to store in storage.
+        """
+        with open(self._join_path(name, entry_id), 'w') as file_obj:
+            file_obj.write(jsonpickle.dumps(entry))
+
+    def delete(self, name, entry_id, **kwargs):
+        """
+        Delete from storage.
+        :param str name: name of directory in storage.
+        :param str entry_id: id of the file to delete from storage.
+        """
+        os.remove(self._join_path(name, entry_id))
+
+    def iter(self, name, filters=None, **kwargs):
+        """
+        Generator over the entries of directory in storage.
+        :param str name: name of directory in storage to iter over.
+        :param dict filters: filters for query
+        """
+        filters = filters or {}
+
+        for entry_id in os.listdir(self._join_path(name)):
+            value = self.get(name, entry_id=entry_id)
+            for filter_name, filter_value in filters.items():
+                if value.get(filter_name) != filter_value:
+                    break
+            else:
+                yield value
+
+    def update(self, name, entry_id, **kwargs):
+        """
+        Updates an entry in storage.
+
+        :param str name: name of table/document in storage.
+        :param str entry_id: id of the document to get from storage.
+        :param kwargs: the fields to update.
+        :return:
+        """
+        entry_dict = self.get(name, entry_id)
+        entry_dict.update(**kwargs)
+        self.store(name, entry_id, entry_dict)
+
+class FileSystemResourceDriver(ResourceDriver, BaseFileSystemDriver):
+    """
+    FileSystemResourceDriver context manager.
+    """
+
+    def __init__(self, directory, **kwargs):
+        """
+        File system implementation for storage driver.
+        :param str directory: root dir for storage.
+        """
+        super(FileSystemResourceDriver, self).__init__(**kwargs)
+        self.directory = directory
+        self._join_path = partial(os.path.join, self.directory)
+
+    def __repr__(self):
+        return '{cls.__name__}(directory={self.directory})'.format(
+            cls=self.__class__, self=self)
+
+    def create(self, name):
+        """
+        Create a directory in storage by path.
+        Tries to create the root directory as well.
+        :param basestring name: path of the directory in storage.
+        """
+        try:
+            os.makedirs(self.directory)
+        except (OSError, IOError):
+            pass
+        os.makedirs(self._join_path(name))
+
+    def data(self, entry_type, entry_id, path=None):
+        """
+        Retrieve the content of a file system storage resource.
+
+        :param basestring entry_type: the type of the entry.
+        :param basestring entry_id: the id of the entry.
+        :param basestring path: a path to a specific resource.
+        :return: the content of the file
+        :rtype: bytes
+        """
+        resource = os.path.join(self.directory, entry_type, entry_id, path or '')
+        if not os.path.isfile(resource):
+            resources = os.listdir(resource)
+            if len(resources) != 1:
+                raise StorageError('No resource in path: {0}'.format(resource))
+            resource = os.path.join(resource, resources[0])
+        with open(resource, 'rb') as resource_file:
+            return resource_file.read()
+
+    def download(self, entry_type, entry_id, destination, path=None):
+        """
+        Download a specific file or dir from the file system resource storage.
+
+        :param basestring entry_type: the name of the entry.
+        :param basestring entry_id: the id of the entry
+        :param basestring destination: the destination of the files.
+        :param basestring path: a path on the remote machine relative to the root of the entry.
+        """
+        resource = os.path.join(self.directory, entry_type, entry_id, path or '')
+        if os.path.isfile(resource):
+            shutil.copy2(resource, destination)
+        else:
+            distutils.dir_util.copy_tree(resource, destination)
+
+    def upload(self, entry_type, entry_id, source, path=None):
+        """
+        Uploads a specific file or dir to the file system resource storage.
+
+        :param basestring entry_type: the name of the entry.
+        :param basestring entry_id: the id of the entry
+        :param source: the source of the files to upload.
+        :param path: the destination of the file(s) relative to the entry root dir.
+        """
+        resource_directory = os.path.join(self.directory, entry_type, entry_id)
+        if not os.path.exists(resource_directory):
+            os.makedirs(resource_directory)
+        destination = os.path.join(resource_directory, path or '')
+        if os.path.isfile(source):
+            shutil.copy2(source, destination)
+        else:
+            distutils.dir_util.copy_tree(source, destination)

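A minimal usage sketch for the file-system drivers added above; the paths, table names, and entry ids are illustrative, and it assumes LoggerMixin needs no extra constructor arguments:

    from aria.storage.drivers import FileSystemModelDriver, FileSystemResourceDriver

    # Model storage: each entry is a jsonpickle-serialized file under <root>/<name>/<entry_id>.
    with FileSystemModelDriver('/tmp/aria-models') as models:
        models.create('blueprint')
        models.store('blueprint', 'bp-1', {'id': 'bp-1', 'main_file_name': 'main.yaml'})
        print(models.get('blueprint', 'bp-1'))
        models.update('blueprint', 'bp-1', main_file_name='blueprint.yaml')
        for entry in models.iter('blueprint', filters={'id': 'bp-1'}):
            print(entry)

    # Resource storage: entries are plain files/directories under <root>/<entry_type>/<entry_id>.
    with FileSystemResourceDriver('/tmp/aria-resources') as resources:
        resources.create('blueprint')
        resources.upload('blueprint', 'bp-1', source='/tmp/my-blueprint')  # assumed to exist
        main_yaml = resources.data('blueprint', 'bp-1', path='main.yaml')
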
http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/aria/storage/models.py
----------------------------------------------------------------------
diff --git a/aria/storage/models.py b/aria/storage/models.py
new file mode 100644
index 0000000..d3cb3f7
--- /dev/null
+++ b/aria/storage/models.py
@@ -0,0 +1,344 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Aria's storage.models module
+Path: aria.storage.models
+
+models module holds aria's models.
+
+classes:
+    * Field - represents a single field.
+    * IterField - represents an iterable field.
+    * Model - abstract model implementation.
+    * Snapshot - snapshots implementation model.
+    * Deployment - deployment implementation model.
+    * DeploymentUpdateStep - deployment update step implementation model.
+    * DeploymentUpdate - deployment update implementation model.
+    * DeploymentModification - deployment modification implementation model.
+    * Execution - execution implementation model.
+    * Node - node implementation model.
+    * Relationship - relationship implementation model.
+    * NodeInstance - node instance implementation model.
+    * RelationshipInstance - relationship instance implementation model.
+    * ProviderContext - provider context implementation model.
+    * Plugin - plugin implementation model.
+"""
+
+from datetime import datetime
+from types import NoneType
+
+from .structures import Field, IterPointerField, Model, uuid_generator, PointerField
+
+__all__ = (
+    'Model',
+    'Blueprint',
+    'Snapshot',
+    'Deployment',
+    'DeploymentUpdateStep',
+    'DeploymentUpdate',
+    'DeploymentModification',
+    'Execution',
+    'Node',
+    'Relationship',
+    'NodeInstance',
+    'RelationshipInstance',
+    'ProviderContext',
+    'Plugin',
+)
+
+# todo: sort this, maybe move from mgr or move from aria???
+ACTION_TYPES = ()
+ENTITY_TYPES = ()
+
+
+class Blueprint(Model):
+    plan = Field(type=dict)
+    id = Field(type=basestring, default=uuid_generator)
+    description = Field(type=(basestring, NoneType))
+    created_at = Field(type=datetime)
+    updated_at = Field(type=datetime)
+    main_file_name = Field(type=basestring)
+
+
+class Snapshot(Model):
+    CREATED = 'created'
+    FAILED = 'failed'
+    CREATING = 'creating'
+    UPLOADED = 'uploaded'
+    END_STATES = [CREATED, FAILED, UPLOADED]
+
+    id = Field(type=basestring, default=uuid_generator)
+    created_at = Field(type=datetime)
+    status = Field(type=basestring)
+    error = Field(type=basestring, default=None)
+
+
+class Deployment(Model):
+    id = Field(type=basestring, default=uuid_generator)
+    description = Field(type=(basestring, NoneType))
+    created_at = Field(type=datetime)
+    updated_at = Field(type=datetime)
+    blueprint_id = Field(type=basestring)
+    workflows = Field(type=dict)
+    permalink = Field(default=None)  # TODO: check if needed... (old todo: implement)
+    inputs = Field(type=dict, default=lambda: {})
+    policy_types = Field(type=dict, default=lambda: {})
+    policy_triggers = Field(type=dict, default=lambda: {})
+    groups = Field(type=dict, default=lambda: {})
+    outputs = Field(type=dict, default=lambda: {})
+    scaling_groups = Field(type=dict, default=lambda: {})
+
+
+class DeploymentUpdateStep(Model):
+    id = Field(type=basestring, default=uuid_generator)
+    action = Field(type=basestring, choices=ACTION_TYPES)
+    entity_type = Field(type=basestring, choices=ENTITY_TYPES)
+    entity_id = Field(type=basestring)
+    supported = Field(type=bool, default=True)
+
+    def __hash__(self):
+        return hash((self.id, self.entity_id))
+
+    def __lt__(self, other):
+        """
+        the order is 'remove' < 'modify' < 'add'
+        :param other:
+        :return:
+        """
+        if not isinstance(other, self.__class__):
+            return not self >= other
+
+        if self.action != other.action:
+            if self.action == 'remove':
+                return True
+            elif self.action == 'add':
+                return False
+            else:
+                return other.action == 'add'
+
+        if self.action == 'add':
+            return self.entity_type == 'node' and other.entity_type == 'relationship'
+        if self.action == 'remove':
+            return self.entity_type == 'relationship' and other.entity_type == 'node'
+        return False
+
+
+class DeploymentUpdate(Model):
+    INITIALIZING = 'initializing'
+    SUCCESSFUL = 'successful'
+    UPDATING = 'updating'
+    FINALIZING = 'finalizing'
+    EXECUTING_WORKFLOW = 'executing_workflow'
+    FAILED = 'failed'
+
+    STATES = [
+        INITIALIZING,
+        SUCCESSFUL,
+        UPDATING,
+        FINALIZING,
+        EXECUTING_WORKFLOW,
+        FAILED,
+    ]
+
+    # '{0}-{1}'.format(kwargs['deployment_id'], uuid4())
+    id = Field(type=basestring, default=uuid_generator)
+    deployment_id = Field(type=basestring)
+    state = Field(type=basestring, choices=STATES, default=INITIALIZING)
+    deployment_plan = Field()
+    deployment_update_nodes = Field(default=None)
+    deployment_update_node_instances = Field(default=None)
+    deployment_update_deployment = Field(default=None)
+    modified_entity_ids = Field(default=None)
+    execution_id = Field(type=basestring)
+    steps = IterPointerField(type=DeploymentUpdateStep, default=())
+
+
+class Execution(Model):
+    TERMINATED = 'terminated'
+    FAILED = 'failed'
+    CANCELLED = 'cancelled'
+    PENDING = 'pending'
+    STARTED = 'started'
+    CANCELLING = 'cancelling'
+    FORCE_CANCELLING = 'force_cancelling'
+    STATES = (
+        TERMINATED,
+        FAILED,
+        CANCELLED,
+        PENDING,
+        STARTED,
+        CANCELLING,
+        FORCE_CANCELLING,
+    )
+    END_STATES = [TERMINATED, FAILED, CANCELLED]
+    ACTIVE_STATES = [state for state in STATES if state not in END_STATES]
+
+    id = Field(type=basestring, default=uuid_generator)
+    status = Field(type=basestring, choices=STATES)
+    deployment_id = Field(type=basestring)
+    workflow_id = Field(type=basestring)
+    blueprint_id = Field(type=basestring)
+    created_at = Field(type=datetime)
+    error = Field()
+    parameters = Field()
+    is_system_workflow = Field(type=bool)
+
+
+class Operation(Model):
+    PENDING = 'pending'
+    STARTED = 'started'
+    SUCCESS = 'success'
+    FAILED = 'failed'
+    STATES = (
+        PENDING,
+        STARTED,
+        SUCCESS,
+        FAILED,
+    )
+    END_STATES = [SUCCESS, FAILED]
+
+    id = Field(type=basestring, default=uuid_generator)
+    status = Field(type=basestring, choices=STATES, default=STARTED)
+    execution_id = Field(type=basestring)
+    eta = Field(type=datetime, default=0)
+    started_at = Field(type=datetime, default=None)
+    ended_at = Field(type=datetime, default=None)
+    max_retries = Field(type=int, default=0)
+    retry_count = Field(type=int, default=0)
+
+
+class Relationship(Model):
+    id = Field(type=basestring, default=uuid_generator)
+    target_id = Field(type=basestring)
+    source_interfaces = Field(type=dict)
+    source_operations = Field(type=dict)
+    target_interfaces = Field(type=dict)
+    target_operations = Field(type=dict)
+    type = Field(type=basestring)
+    type_hierarchy = Field(type=list)
+    properties = Field(type=dict)
+
+
+class Node(Model):
+    id = Field(type=basestring, default=uuid_generator)
+    blueprint_id = Field(type=basestring)
+    type = Field(type=basestring)
+    type_hierarchy = Field()
+    number_of_instances = Field(type=int)
+    planned_number_of_instances = Field(type=int)
+    deploy_number_of_instances = Field(type=int)
+    host_id = Field(type=basestring, default=None)
+    properties = Field(type=dict)
+    operations = Field(type=dict)
+    plugins = Field(type=list, default=())
+    relationships = IterPointerField(type=Relationship)
+    plugins_to_install = Field(type=list, default=())
+    min_number_of_instances = Field(type=int)
+    max_number_of_instances = Field(type=int)
+
+    def relationships_by_target(self, target_id):
+        for relationship in self.relationships:
+            if relationship.target_id == target_id:
+                yield relationship
+        # todo: maybe raise an exception here if nothing was yielded
+
+
+class RelationshipInstance(Model):
+    id = Field(type=basestring, default=uuid_generator)
+    target_id = Field(type=basestring)
+    target_name = Field(type=basestring)
+    type = Field(type=basestring)
+    relationship = PointerField(type=Relationship)
+
+
+class NodeInstance(Model):
+    # todo: add statuses
+    UNINITIALIZED = 'uninitialized'
+    INITIALIZING = 'initializing'
+    CREATING = 'creating'
+    CONFIGURING = 'configuring'
+    STARTING = 'starting'
+    DELETED = 'deleted'
+    STOPPING = 'stopping'
+    DELETING = 'deleting'
+    STATES = (
+        UNINITIALIZED,
+        INITIALIZING,
+        CREATING,
+        CONFIGURING,
+        STARTING,
+        DELETED,
+        STOPPING,
+        DELETING
+    )
+
+    id = Field(type=basestring, default=uuid_generator)
+    deployment_id = Field(type=basestring)
+    runtime_properties = Field(type=dict)
+    state = Field(type=basestring, choices=STATES, default=UNINITIALIZED)
+    version = Field(type=(basestring, NoneType))
+    relationship_instances = IterPointerField(type=RelationshipInstance)
+    node = PointerField(type=Node)
+    host_id = Field(type=basestring, default=None)
+    scaling_groups = Field(default=())
+
+    def relationships_by_target(self, target_id):
+        for relationship_instance in self.relationship_instances:
+            if relationship_instance.target_id == target_id:
+                yield relationship_instance
+        # todo: maybe raise an exception here if nothing was yielded
+
+
+class DeploymentModification(Model):
+    STARTED = 'started'
+    FINISHED = 'finished'
+    ROLLEDBACK = 'rolledback'
+    END_STATES = [FINISHED, ROLLEDBACK]
+
+    id = Field(type=basestring, default=uuid_generator)
+    deployment_id = Field(type=basestring)
+    modified_nodes = Field(type=(dict, NoneType))
+    added_and_related = IterPointerField(type=NodeInstance)
+    removed_and_related = IterPointerField(type=NodeInstance)
+    extended_and_related = IterPointerField(type=NodeInstance)
+    reduced_and_related = IterPointerField(type=NodeInstance)
+    # before_modification = IterPointerField(type=NodeInstance)
+    status = Field(type=basestring, choices=(STARTED, FINISHED, ROLLEDBACK))
+    created_at = Field(type=datetime)
+    ended_at = Field(type=(datetime, NoneType))
+    context = Field()
+
+
+class ProviderContext(Model):
+    id = Field(type=basestring, default=uuid_generator)
+    context = Field(type=dict)
+    name = Field(type=basestring)
+
+
+class Plugin(Model):
+    id = Field(type=basestring, default=uuid_generator)
+    package_name = Field(type=basestring)
+    archive_name = Field(type=basestring)
+    package_source = Field(type=dict)
+    package_version = Field(type=basestring)
+    supported_platform = Field(type=basestring)
+    distribution = Field(type=basestring)
+    distribution_version = Field(type=basestring)
+    distribution_release = Field(type=basestring)
+    wheels = Field()
+    excluded_wheels = Field()
+    supported_py_versions = Field(type=list)
+    uploaded_at = Field(type=datetime)

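To make the Field/Model usage concrete, a short sketch of instantiating and serializing the models above; the values are illustrative, and the id field is filled in by uuid_generator when omitted:

    from datetime import datetime
    from aria.storage.models import Blueprint, Execution, ProviderContext

    now = datetime.utcnow()
    blueprint = Blueprint(
        plan={'nodes': []},
        description=None,
        created_at=now,
        updated_at=now,
        main_file_name='main.yaml')

    execution = Execution(
        status=Execution.PENDING,
        deployment_id='d-1',
        workflow_id='install',
        blueprint_id=blueprint.id,
        created_at=now,
        error='',
        parameters={},
        is_system_workflow=False)

    print(blueprint.fields_dict)                            # field name -> value dict
    print(ProviderContext(name='local', context={}).json)   # JSON-serializable fields only
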
http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/aria/storage/structures.py
----------------------------------------------------------------------
diff --git a/aria/storage/structures.py b/aria/storage/structures.py
new file mode 100644
index 0000000..5ebd8b8
--- /dev/null
+++ b/aria/storage/structures.py
@@ -0,0 +1,286 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Aria's storage.structures module
+Path: aria.storage.structures
+
+The structures module holds the building blocks for aria's models.
+
+classes:
+    * Field - represents a single field.
+    * IterField - represents an iterable field.
+    * PointerField - represents a single pointer field.
+    * IterPointerField - represents an iterable pointers field.
+    * Model - abstract model implementation.
+"""
+
+import json
+from uuid import uuid4
+from itertools import count
+
+from ..logger import LoggerMixin
+from ..exceptions import StorageError
+from ..tools.validation import ValidatorMixin
+
+
+__all__ = (
+    'uuid_generator',
+    'Field',
+    'IterField',
+    'PointerField',
+    'IterPointerField',
+    'Model',
+    'Storage',
+)
+
+
+def uuid_generator():
+    return str(uuid4())
+
+
+class Field(ValidatorMixin):
+    """
+    A single field implementation
+    """
+    NO_DEFAULT = 'NO_DEFAULT'
+
+    try:
+        # python 3 syntax
+        _next_id = count().__next__
+    except AttributeError:
+        # python 2 syntax
+        _next_id = count().next
+    _ATTRIBUTE_NAME = '_cache_{0}'.format
+
+    def __init__(
+            self,
+            type=None,
+            choices=(),
+            default=NO_DEFAULT,
+            **kwargs):
+        """
+        Simple field manager.
+
+        :param type: possible type of the field.
+        :param choices: a set of possible field values.
+        :param default: default field value.
+        :param kwargs: kwargs to be passed to next in line classes.
+        """
+        self.type = type
+        self.choices = choices
+        self.default = default
+        super(Field, self).__init__(**kwargs)
+
+    def __get__(self, instance, owner):
+        if instance is None:
+            return self
+        field_name = self._field_name(instance)
+        try:
+            return getattr(instance, self._ATTRIBUTE_NAME(field_name))
+        except AttributeError as exc:
+            if self.default == self.NO_DEFAULT:
+                raise AttributeError(
+                    str(exc).replace(self._ATTRIBUTE_NAME(field_name), field_name))
+
+        default_value = self.default() if callable(self.default) else self.default
+        setattr(instance, self._ATTRIBUTE_NAME(field_name), default_value)
+        return default_value
+
+    def __set__(self, instance, value):
+        field_name = self._field_name(instance)
+        self.validate_value(field_name, value)
+        setattr(instance, self._ATTRIBUTE_NAME(field_name), value)
+
+    def validate_value(self, name, value):
+        """
+        Validates the value of the field.
+
+        :param name: the name of the field.
+        :param value: the value of the field.
+        """
+        if self.default != self.NO_DEFAULT and value == self.default:
+            return
+        if self.type:
+            self.validate_instance(name, value, self.type)
+        if self.choices:
+            self.validate_in_choice(name, value, self.choices)
+
+    def _field_name(self, instance):
+        """
+        retrieves the field name from the instance.
+
+        :param Field instance: the instance which holds the field.
+        :return: name of the field
+        :rtype: basestring
+        """
+        for name, member in vars(instance.__class__).iteritems():
+            if member is self:
+                return name
+
+
+class IterField(Field):
+    def __init__(self, **kwargs):
+        """
+        Simple iterable field manager.
+        This field type doesn't have a choices option.
+
+        :param kwargs: kwargs to be passed to next in line classes.
+        """
+        super(IterField, self).__init__(choices=(), **kwargs)
+
+    def validate_value(self, name, values):
+        """
+        Validates the value of each iterable value.
+
+        :param name: the name of the field.
+        :param values: the values of the field.
+        """
+        for value in values:
+            self.validate_instance(name, value, self.type)
+
+
+class PointerField(Field):
+    """
+    A single pointer field implementation.
+
+    Any PointerField points via id to another document.
+    """
+
+    def __init__(self, type, **kwargs):
+        assert issubclass(type, Model)
+        super(PointerField, self).__init__(type=type, **kwargs)
+
+
+class IterPointerField(IterField, PointerField):
+    """
+    An iterable pointers field.
+
+    Any IterPointerField points via id to other documents.
+    """
+    pass
+
+
+class Model(object):
+    def __init__(self, **fields):
+        """
+        Abstract class for any model in the storage.
+        The initializer creates attributes according to the keyword arguments given.
+        Each value is validated according to its Field.
+        Each model has to have an id Field.
+
+        :param fields: each item is validated and transformed into instance attributes.
+        """
+        self._assert_model_have_id_field(**fields)
+        missing_fields, unexpected_fields = self._setup_fields(fields)
+
+        if missing_fields:
+            raise StorageError(
+                'Model {name} got missing keyword arguments: {fields}'.format(
+                    name=self.__class__.__name__, fields=missing_fields))
+
+        if unexpected_fields:
+            raise StorageError(
+                'Model {name} got unexpected keyword arguments: {fields}'.format(
+                    name=self.__class__.__name__, fields=unexpected_fields))
+
+    def __repr__(self):
+        return '{name}(fields={0})'.format(sorted(self.fields), name=self.__class__.__name__)
+
+    def __eq__(self, other):
+        return (
+            isinstance(other, self.__class__) and
+            self.fields_dict == other.fields_dict)
+
+    @property
+    def fields(self):
+        for name, field in vars(self.__class__).items():
+            if isinstance(field, Field):
+                yield name
+
+    @property
+    def fields_dict(self):
+        """
+        Transforms the instance attributes into a dict.
+
+        :return: all fields in dict format.
+        :rtype: dict
+        """
+        return dict((name, getattr(self, name)) for name in self.fields)
+
+    @property
+    def json(self):
+        """
+        Transform the dict of attributes into json
+        :return:
+        """
+        return json.dumps(self.fields_dict)
+
+    @classmethod
+    def _assert_model_have_id_field(cls, **fields_initializer_values):
+        if not getattr(cls, 'id', None):
+            raise StorageError('Model {cls.__name__} must have id field'.format(cls=cls))
+
+        if cls.id.default == cls.id.NO_DEFAULT and 'id' not in fields_initializer_values:
+            raise StorageError(
+                'Model {cls.__name__} is missing required '
+                'keyword-only argument: "id"'.format(cls=cls))
+
+    def _setup_fields(self, input_fields):
+        missing = []
+        for field_name in self.fields:
+            try:
+                field_obj = input_fields.pop(field_name)
+                setattr(self, field_name, field_obj)
+            except KeyError:
+                field = getattr(self.__class__, field_name)
+                if field.default == field.NO_DEFAULT:
+                    missing.append(field_name)
+
+        unexpected_fields = input_fields.keys()
+        return missing, unexpected_fields
+
+
+class Storage(LoggerMixin):
+    def __init__(self, driver, items=(), **kwargs):
+        super(Storage, self).__init__(**kwargs)
+        self.driver = driver
+        self.registered = {}
+        for item in items:
+            self.register(item)
+        self.logger.debug('{name} object is ready: {0!r}'.format(
+            self, name=self.__class__.__name__))
+
+    def __repr__(self):
+        return '{name}(driver={self.driver})'.format(
+            name=self.__class__.__name__, self=self)
+
+    def __getattr__(self, item):
+        try:
+            return self.registered[item]
+        except KeyError:
+            return super(Storage, self).__getattribute__(item)
+
+    def setup(self):
+        """
+        Setup and create all storage items
+        """
+        for name, api in self.registered.iteritems():
+            try:
+                api.create()
+                self.logger.debug(
+                    'setup {name} in storage {self!r}'.format(name=name, self=self))
+            except StorageError:
+                pass

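The descriptor-based fields are easiest to see with a small, hypothetical model (not part of the commit) built from the structures above:

    from aria.storage.structures import Model, Field, PointerField, uuid_generator

    class Owner(Model):
        id = Field(type=basestring, default=uuid_generator)
        name = Field(type=basestring)

    class Pet(Model):
        id = Field(type=basestring, default=uuid_generator)
        kind = Field(type=basestring, choices=('cat', 'dog'))
        age = Field(type=int, default=0)
        owner = PointerField(type=Owner)

    owner = Owner(name='alice')
    pet = Pet(kind='dog', owner=owner)   # 'id' and 'age' fall back to their defaults
    pet.age = 3                          # assignment is validated against type=int
    # Pet(kind='fish', owner=owner)      # would fail ValidatorMixin's choices validation
    # Pet(kind='dog')                    # would raise StorageError: missing 'owner'
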
http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/aria/tools/__init__.py
----------------------------------------------------------------------
diff --git a/aria/tools/__init__.py b/aria/tools/__init__.py
new file mode 100644
index 0000000..ae1e83e
--- /dev/null
+++ b/aria/tools/__init__.py
@@ -0,0 +1,14 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/aria/tools/application.py
----------------------------------------------------------------------
diff --git a/aria/tools/application.py b/aria/tools/application.py
new file mode 100644
index 0000000..0f36ca2
--- /dev/null
+++ b/aria/tools/application.py
@@ -0,0 +1,279 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import json
+import shutil
+import tarfile
+import tempfile
+from datetime import datetime
+
+from aria.logger import LoggerMixin
+from aria.exceptions import StorageError
+
+
+class StorageManager(LoggerMixin):
+    def __init__(
+            self,
+            model_storage,
+            resource_storage,
+            blueprint_path,
+            blueprint_id,
+            blueprint_plan,
+            deployment_id,
+            deployment_plan,
+            **kwargs):
+        super(StorageManager, self).__init__(**kwargs)
+        self.model_storage = model_storage
+        self.resource_storage = resource_storage
+        self.blueprint_path = blueprint_path
+        self.blueprint_id = blueprint_id
+        self.blueprint_plan = blueprint_plan
+        self.deployment_id = deployment_id
+        self.deployment_plan = deployment_plan
+
+    @classmethod
+    def from_deployment(
+            cls,
+            model_storage,
+            resource_storage,
+            deployment_id,
+            deployment_plan):
+        return cls(
+            model_storage,
+            resource_storage,
+            deployment_id=deployment_id,
+            deployment_plan=deployment_plan,
+            blueprint_path=None,
+            blueprint_plan=None,
+            blueprint_id=None
+        )
+
+    @classmethod
+    def from_blueprint(
+            cls,
+            model_storage,
+            resource_storage,
+            blueprint_path,
+            blueprint_id,
+            blueprint_plan):
+        return cls(
+            model_storage,
+            resource_storage,
+            blueprint_path,
+            blueprint_id,
+            blueprint_plan,
+            deployment_id=None,
+            deployment_plan=None)
+
+    def create_blueprint_storage(self, source, main_file_name=None):
+        """
+        create blueprint model & resource
+        """
+        assert self.blueprint_path and self.blueprint_id
+        assert hasattr(self.resource_storage, 'blueprint')
+        assert hasattr(self.model_storage, 'blueprint')
+
+        self.logger.debug('creating blueprint resource storage entry')
+        self.resource_storage.blueprint.upload(
+            entry_id=self.blueprint_id,
+            source=os.path.dirname(source))
+        self.logger.debug('created blueprint resource storage entry')
+
+        self.logger.debug('creating blueprint model storage entry')
+        now = datetime.now()
+        blueprint = self.model_storage.blueprint.model_cls(
+            plan=self.blueprint_plan,
+            id=self.blueprint_id,
+            description=self.blueprint_plan.get('description'),
+            created_at=now,
+            updated_at=now,
+            main_file_name=main_file_name,
+        )
+        self.model_storage.blueprint.store(blueprint)
+        self.logger.debug('created blueprint model storage entry')
+
+    def create_nodes_storage(self):
+        """
+        create nodes model
+        """
+        assert self.blueprint_path and self.blueprint_id
+        assert hasattr(self.model_storage, 'node')
+        assert hasattr(self.model_storage, 'relationship')
+
+        for node in self.blueprint_plan['nodes']:
+            node_copy = node.copy()
+            for field in ('name',
+                          'deployment_plugins_to_install',
+                          'interfaces',
+                          'instances'):
+                node_copy.pop(field)
+            scalable = node_copy.pop('capabilities')['scalable']['properties']
+            for index, relationship in enumerate(node_copy['relationships']):
+                relationship = self.model_storage.relationship.model_cls(**relationship)
+                self.model_storage.relationship.store(relationship)
+                node_copy['relationships'][index] = relationship
+
+            node_copy = self.model_storage.node.model_cls(
+                blueprint_id=self.blueprint_id,
+                planned_number_of_instances=scalable['current_instances'],
+                deploy_number_of_instances=scalable['default_instances'],
+                min_number_of_instances=scalable['min_instances'],
+                max_number_of_instances=scalable['max_instances'],
+                number_of_instances=scalable['current_instances'],
+                **node_copy)
+            self.model_storage.node.store(node_copy)
+
+    def create_deployment_storage(self):
+        """
+        create deployment model & resource
+        """
+        assert self.deployment_id and self.deployment_plan
+
+        assert hasattr(self.resource_storage, 'blueprint')
+        assert hasattr(self.resource_storage, 'deployment')
+        assert hasattr(self.model_storage, 'deployment')
+
+        self.logger.debug('creating deployment resource storage entry')
+        temp_dir = tempfile.mkdtemp()
+        try:
+            self.resource_storage.blueprint.download(
+                entry_id=self.blueprint_id,
+                destination=temp_dir)
+            self.resource_storage.deployment.upload(
+                entry_id=self.deployment_id,
+                source=temp_dir)
+        finally:
+            shutil.rmtree(temp_dir, ignore_errors=True)
+        self.logger.debug('created deployment resource storage entry')
+
+        self.logger.debug('creating deployment model storage entry')
+        now = datetime.now()
+        deployment = self.model_storage.deployment.model_cls(
+            id=self.deployment_id,
+            blueprint_id=self.blueprint_id,
+            description=self.deployment_plan['description'],
+            workflows=self.deployment_plan['workflows'],
+            inputs=self.deployment_plan['inputs'],
+            policy_types=self.deployment_plan['policy_types'],
+            policy_triggers=self.deployment_plan['policy_triggers'],
+            groups=self.deployment_plan['groups'],
+            scaling_groups=self.deployment_plan['scaling_groups'],
+            outputs=self.deployment_plan['outputs'],
+            created_at=now,
+            updated_at=now
+        )
+        self.model_storage.deployment.store(deployment)
+        self.logger.debug('created deployment model storage entry')
+
+    def create_node_instances_storage(self):
+        """
+        create node_instances model
+        """
+        assert self.deployment_id and self.deployment_plan
+        assert hasattr(self.model_storage, 'node_instance')
+        assert hasattr(self.model_storage, 'relationship_instance')
+
+        self.logger.debug('creating node-instances model storage entries')
+        for node_instance in self.deployment_plan['node_instances']:
+            node_model = self.model_storage.node.get(node_instance['node_id'])
+            relationship_instances = []
+
+            for index, relationship_instance in enumerate(node_instance['relationships']):
+                relationship_instance_model = self.model_storage.relationship_instance.model_cls(
+                    relationship=node_model.relationships[index],
+                    target_name=relationship_instance['target_name'],
+                    type=relationship_instance['type'],
+                    target_id=relationship_instance['target_id'])
+                relationship_instances.append(relationship_instance_model)
+                self.model_storage.relationship_instance.store(relationship_instance_model)
+
+            node_instance_model = self.model_storage.node_instance.model_cls(
+                node=node_model,
+                id=node_instance['id'],
+                runtime_properties={},
+                state=self.model_storage.node_instance.model_cls.UNINITIALIZED,
+                deployment_id=self.deployment_id,
+                version='1.0',
+                relationship_instances=relationship_instances)
+
+            self.model_storage.node_instance.store(node_instance_model)
+        self.logger.debug('created node-instances model storage entries')
+
+    def create_plugin_storage(self, plugin_id, source):
+        """
+        create plugin model & resource
+        """
+        assert hasattr(self.model_storage, 'plugin')
+        assert hasattr(self.resource_storage, 'plugin')
+
+        self.logger.debug('creating plugin resource storage entry')
+        self.resource_storage.plugin.upload(entry_id=plugin_id, source=source)
+        self.logger.debug('created plugin resource storage entry')
+
+        self.logger.debug('creating plugin model storage entry')
+        plugin = _load_plugin_from_archive(source)
+        build_props = plugin.get('build_server_os_properties')
+        now = datetime.now()
+
+        plugin = self.model_storage.plugin.model_cls(
+            id=plugin_id,
+            package_name=plugin.get('package_name'),
+            package_version=plugin.get('package_version'),
+            archive_name=plugin.get('archive_name'),
+            package_source=plugin.get('package_source'),
+            supported_platform=plugin.get('supported_platform'),
+            distribution=build_props.get('distribution'),
+            distribution_version=build_props.get('distribution_version'),
+            distribution_release=build_props.get('distribution_release'),
+            wheels=plugin.get('wheels'),
+            excluded_wheels=plugin.get('excluded_wheels'),
+            supported_py_versions=plugin.get('supported_python_versions'),
+            uploaded_at=now
+        )
+        self.model_storage.plugin.store(plugin)
+        self.logger.debug('created plugin model storage entry')
+
+
+def _load_plugin_from_archive(tar_source):
+        if not tarfile.is_tarfile(tar_source):
+            # TODO: go over the exceptions
+            raise StorageError(
+                'the provided tar archive can not be read.')
+
+        with tarfile.open(tar_source) as tar:
+            tar_members = tar.getmembers()
+            # a wheel plugin will contain exactly one sub directory
+            if not tar_members:
+                raise StorageError(
+                    'archive file structure malformed. expecting exactly one '
+                    'sub directory; got none.')
+            package_json_path = os.path.join(tar_members[0].name,
+                                             'package.json')
+            try:
+                package_member = tar.getmember(package_json_path)
+            except KeyError:
+                raise StorageError("'package.json' was not found under {0}"
+                                   .format(package_json_path))
+            try:
+                package_json = tar.extractfile(package_member)
+            except tarfile.ExtractError as e:
+                raise StorageError(str(e))
+            try:
+                return json.load(package_json)
+            except ValueError as e:
+                raise StorageError("'package.json' is not a valid json: "
+                                   "{json_str}. error is {error}"
+                                   .format(json_str=package_json.read(), error=str(e)))

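A rough sketch of how the StorageManager is meant to be driven; model_storage, resource_storage, and parsed_plan are assumed to come from aria.storage APIs outside this diff:

    from aria.tools.application import StorageManager

    manager = StorageManager.from_blueprint(
        model_storage=model_storage,         # storage exposing 'blueprint', 'node', ... APIs
        resource_storage=resource_storage,   # storage exposing 'blueprint', 'deployment', ... APIs
        blueprint_path='/tmp/my-blueprint/main.yaml',
        blueprint_id='bp-1',
        blueprint_plan=parsed_plan)          # a parsed blueprint plan dict

    manager.create_blueprint_storage('/tmp/my-blueprint/main.yaml', main_file_name='main.yaml')
    manager.create_nodes_storage()
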
http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/aria/tools/lru_cache.py
----------------------------------------------------------------------
diff --git a/aria/tools/lru_cache.py b/aria/tools/lru_cache.py
new file mode 100755
index 0000000..2cd2864
--- /dev/null
+++ b/aria/tools/lru_cache.py
@@ -0,0 +1,127 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Function lru_cache implementation for python 2.7
+(In Python 3 this decorator is in functools)
+"""
+
+from time import time
+from functools import partial, wraps
+from itertools import imap
+from collections import OrderedDict
+
+
+class _LRUCache(object):
+    def __init__(self, input_func, max_size, timeout):
+        self._input_func = input_func
+        self._max_size = max_size
+        self._timeout = timeout
+
+        # This will store the cache for this function, format:
+        # {caller1 : [OrderedDict1, last_refresh_time1],
+        #  caller2 : [OrderedDict2, last_refresh_time2]}.
+        # In case of an instance method -
+        # the caller is the instance,
+        # in case called from a regular function - the caller is None.
+        self._caches_dict = {}
+
+    def _prepare_key(self, *args, **kwargs):
+        kwargs_key = "".join(
+            imap(lambda x: str(x) + str(type(kwargs[x])) + str(kwargs[x]),
+                 sorted(kwargs)))
+        return "".join(imap(lambda x: str(type(x)) + str(x), args)) + 
kwargs_key
+
+    def cache_clear(self, caller=None):
+        # Remove the cache for the caller, only if exists:
+        if caller in self._caches_dict:
+            del self._caches_dict[caller]
+            self._caches_dict[caller] = (OrderedDict(), time())
+
+    def __get__(self, obj, _):
+        """ Called for instance methods """
+        return_func = partial(self._cache_wrapper, obj)
+        return_func.cache_clear = partial(self.cache_clear, obj)
+        # Return the wrapped function and wraps it to maintain the docstring
+        # and the name of the original function:
+        return wraps(self._input_func)(return_func)
+
+    def __call__(self, *args, **kwargs):
+        """ Called for regular functions """
+        return self._cache_wrapper(None, *args, **kwargs)
+    # Set the cache_clear function in the __call__ operator:
+    __call__.cache_clear = cache_clear
+
+    def _cache_wrapper(self, caller, *args, **kwargs):
+        # Create a unique key including the types
+        # (in order to differentiate between 1 and '1'):
+        key = self._prepare_key(*args, **kwargs)
+
+        # Check if caller exists, if not create one:
+        if caller not in self._caches_dict:
+            self._caches_dict[caller] = (OrderedDict(), time())
+        else:
+            # Validate in case the refresh time has passed:
+            if self._timeout is not None and time() - self._caches_dict[caller][1] > self._timeout:
+                self.cache_clear(caller)
+
+        # Check if the key exists, if so - return it:
+        cur_caller_cache_dict = self._caches_dict[caller][0]
+        if key in cur_caller_cache_dict:
+            return cur_caller_cache_dict[key]
+
+        # Validate we didn't exceed the max_size:
+        if len(cur_caller_cache_dict) >= self._max_size:
+            # Delete the first item in the dict:
+            cur_caller_cache_dict.popitem(False)
+
+        # Call the function and store the data in the cache
+        # (call it with the caller in case it's an instance function - ternary condition):
+        cur_caller_cache_dict[key] = self._input_func(caller, *args, **kwargs) \
+            if caller is not None else self._input_func(*args, **kwargs)
+        return cur_caller_cache_dict[key]
+
+
+def lru_cache(maxsize=255, timeout=None):
+    """
+    lru_cache(maxsize = 255, timeout = None)
+    Returns a decorator which returns an instance (a descriptor).
+
+    Purpose:
+        This decorator factory will wrap a function / instance method,
+        and will supply a caching mechanism to the function.
+        For every given set of input parameters it will store the result in a queue of maxsize size,
+        and will return a cached ret_val if the same parameters are passed.
+
+    Notes:
+        * If an instance method is wrapped,
+          each instance will have its own cache and its own timeout.
+        * The wrapped function will have a cache_clear variable inserted into it,
+          which may be called to clear its specific cache.
+        * The wrapped function will maintain the original function's docstring and name (wraps)
+        * The type of the wrapped function will no longer be that of a function,
+          but either an instance of _LRUCache or a functools.partial type.
+
+    :param maxsize: The cache size limit.
+                    Anything added above that will delete the first values entered (FIFO).
+                    This size is per instance, thus 1000 instances with maxsize of 255
+                    will contain at most 255K elements.
+    :type maxsize: int
+    :param timeout: Every n seconds the cache is deleted, regardless of usage.
+                    If None - cache will never be refreshed.
+    :type timeout: int, float, None
+
+    """
+    return lambda input_func: wraps(input_func)(_LRUCache(input_func, maxsize, timeout))

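A hypothetical use of the decorator defined above (note that eviction is FIFO rather than true LRU, since lookups do not reorder entries):

    from aria.tools.lru_cache import lru_cache

    @lru_cache(maxsize=2, timeout=60)
    def fetch(key):
        print('computing {0}'.format(key))
        return key.upper()

    fetch('a')            # computed and cached
    fetch('a')            # served from the cache
    fetch('b')
    fetch('c')            # cache is full, so the first entry ('a') is evicted
    fetch.cache_clear()   # drops the cache for this caller
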
http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/aria/tools/plugin.py
----------------------------------------------------------------------
diff --git a/aria/tools/plugin.py b/aria/tools/plugin.py
new file mode 100644
index 0000000..976ac93
--- /dev/null
+++ b/aria/tools/plugin.py
@@ -0,0 +1,38 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from importlib import import_module
+
+
+def plugin_installer(path, plugin_suffix, package=None, callback=None):
+    """
+
+    :param path:
+    :param plugin_suffix:
+    :param package:
+    :param callback:
+    :return:
+    """
+    assert callback is None or callable(callback)
+    plugin_suffix = '{0}.py'.format(plugin_suffix)
+
+    for file_name in os.listdir(path):
+        if not file_name.endswith(plugin_suffix):
+            continue
+        module_name = '{0}.{1}'.format(package, file_name[:-3]) if package else file_name[:-3]
+        module = import_module(module_name)
+        if callback:
+            callback(module)
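
A hedged sketch of how plugin_installer is driven; the package name and plugin module below are made up for illustration and must be importable for import_module to succeed:

    import os

    def on_plugin_loaded(module):
        print('loaded plugin module: {0}'.format(module.__name__))

    # Assuming a package 'my_plugins' that contains e.g. 'local_executor.py':
    # every module ending with '_executor.py' is imported as 'my_plugins.<name>'
    # and handed to the callback.
    plugin_installer(
        path=os.path.join(os.path.dirname(__file__), 'my_plugins'),
        plugin_suffix='_executor',
        package='my_plugins',
        callback=on_plugin_loaded)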

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/aria/tools/process.py
----------------------------------------------------------------------
diff --git a/aria/tools/process.py b/aria/tools/process.py
new file mode 100644
index 0000000..7df0026
--- /dev/null
+++ b/aria/tools/process.py
@@ -0,0 +1,153 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import subprocess
+from signal import SIGKILL
+from time import sleep
+
+from aria.logger import LoggerMixin
+from aria.exceptions import ExecutorException, ProcessException
+
+
+class Process(LoggerMixin):
+    def __init__(
+            self,
+            args,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            cwd=None,
+            env=None,
+            **kwargs):
+        """
+        Process class - subprocess wrapper
+        :param args:
+        :param stdout:
+        :param stderr:
+        :param cwd:
+        :param env:
+        """
+        super(Process, self).__init__(**kwargs)
+        self.args = args
+        self.cwd = cwd
+        self.env = env
+        self.process = None
+        self._stdout = stdout
+        self._stderr = stderr
+
+    def __repr__(self):
+        return '{cls.__name__}(args={self.args}, cwd={self.cwd})'.format(
+            cls=self.__class__, self=self)
+
+    def __getattr__(self, item):
+        return getattr(self.process, item)
+
+    @property
+    def name(self):
+        return self.args[0]
+
+    @property
+    def pid(self):
+        if self.is_running():
+            return self.process.pid
+
+    @property
+    def stdout(self):
+        assert self.process, 'Need to run before calling this method'
+        return self.process.stdout
+
+    @property
+    def stderr(self):
+        assert self.process, 'Need to run before calling this method'
+        return self.process.stderr
+
+    @property
+    def return_code(self):
+        if self.process is None:
+            return None
+        if self.is_running():
+            raise ExecutorException(
+                'Can not get return code while process is still running')
+        if self.process.returncode is None:
+            self.wait()
+        return self.process.returncode
+
+    def terminate(self):
+        if self.process is not None and self.process.poll() is None:
+            self.logger.debug('terminating process {0:d} ({1})'.format(self.process.pid, self.name))
+            self.process.terminate()
+            sleep(1)
+        kill_attempts = 0
+        while self.process is not None and self.process.poll() is None and kill_attempts < 10:
+            self.logger.debug('trying to kill process {0:d}'.format(self.process.pid))
+            self.process.kill()
+            sleep(1)
+            kill_attempts += 1
+
+    def killpg(self):
+        if self.is_running():
+            os.killpg(os.getpgid(self.pid), SIGKILL)
+
+    def is_running(self):
+        return self.process.poll() is None if self.process else False
+
+    def wait(self):
+        """
+        Block till child process finishes
+        """
+        assert self.process, 'Need to run before calling this method'
+        self.process.wait()
+
+    def run(self, nice=None, universal_newlines=True):
+        """
+        Run the child process. This call does not block.
+        :param int nice: nice value to set on the child process before it runs
+        :param bool universal_newlines: passed through to subprocess.Popen
+        """
+        self.logger.debug('Running child process: {0}'.format(' '.join(self.args)))
+        self.process = subprocess.Popen(
+            self.args,
+            cwd=self.cwd,
+            env=self.env,
+            stdout=self._stdout,
+            stderr=self._stderr,
+            close_fds=os.name != 'nt',
+            preexec_fn=lambda: os.nice(nice) if nice else None,
+            universal_newlines=universal_newlines)
+
+    def run_in_shell(self, nice=None, universal_newlines=True):
+        command = ' '.join(self.args)
+        self.logger.debug('Running child process in shell: {0}'.format(command))
+        self.process = subprocess.Popen(
+            command,
+            shell=True,
+            cwd=self.cwd,
+            env=self.env,
+            stdout=self._stdout,
+            stderr=self._stderr,
+            close_fds=os.name != 'nt',
+            preexec_fn=lambda: os.nice(nice) if nice else None,
+            universal_newlines=universal_newlines)
+
+    def raise_failure(self):
+        if self.is_running():
+            self.wait()
+        if self.return_code == 0:
+            return
+        raise ProcessException(
+            command=self.args,
+            stderr=self.stderr.read(),
+            stdout=self.stdout.read(),
+            return_code=self.return_code)
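
A short usage sketch of the Process wrapper; the commands are arbitrary POSIX examples, the error class comes from aria.exceptions as imported in the module above, and it assumes LoggerMixin can be constructed without extra keyword arguments:

    proc = Process(['echo', 'hello'])
    proc.run()                   # non-blocking; spawns the child via subprocess.Popen
    proc.wait()                  # block until the child exits
    print(proc.return_code)      # 0 on success
    print(proc.stdout.read())    # 'hello\n', since stdout defaults to subprocess.PIPE

    failing = Process(['false'])
    failing.run()
    try:
        failing.raise_failure()  # waits, then raises ProcessException on a non-zero exit
    except ProcessException:
        pass                     # the exception carries the command, stdout, stderr and return code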

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/aria/tools/validation.py
----------------------------------------------------------------------
diff --git a/aria/tools/validation.py b/aria/tools/validation.py
new file mode 100644
index 0000000..1de995b
--- /dev/null
+++ b/aria/tools/validation.py
@@ -0,0 +1,78 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class ValidatorMixin(object):
+    _ARGUMENT_TYPE_MESSAGE = '{name} argument must be {type} based, got {arg!r}'
+    _ACTION_MESSAGE = 'action arg options: {actions}, got {action}'
+    _ARGUMENT_CHOICE_MESSAGE = '{name} argument must be in {choices}, got {arg!r}'
+
+    @classmethod
+    def validate_actions(cls, action):
+        # todo: remove this and use validate choice
+        if action not in cls.ACTIONS:
+            raise TypeError(cls._ACTION_MESSAGE.format(
+                actions=cls.ACTIONS, action=action))
+
+    @classmethod
+    def validate_in_choice(cls, name, argument, choices):
+        if argument not in choices:
+            raise TypeError(cls._ARGUMENT_CHOICE_MESSAGE.format(
+                name=name, choices=choices, arg=argument))
+
+    @classmethod
+    def validate_type(cls, argument_name, argument, expected_type):
+        if not issubclass(argument, expected_type):
+            raise TypeError(cls._ARGUMENT_TYPE_MESSAGE.format(
+                name=argument_name, type=expected_type, arg=argument))
+
+    @classmethod
+    def validate_instance(cls, argument_name, argument, expected_type):
+        if not isinstance(argument, expected_type):
+            raise TypeError(cls._ARGUMENT_TYPE_MESSAGE.format(
+                name=argument_name, type=expected_type, arg=argument))
+
+    @classmethod
+    def validate_callable(cls, argument_name, argument):
+        if not callable(argument):
+            raise TypeError(cls._ARGUMENT_TYPE_MESSAGE.format(
+                name=argument_name, type='callable', arg=argument))
+
+
+def validate_function_arguments(func, func_kwargs):
+    _KWARGS_FLAG = 8
+
+    has_kwargs = func.func_code.co_flags & _KWARGS_FLAG != 0
+    args_count = func.func_code.co_argcount
+
+    # all args without the ones with default values
+    args = func.func_code.co_varnames[:args_count]
+    non_default_args = args[:-len(func.func_defaults)] if func.func_defaults else args
+
+    # Check if any args without default values is missing in the func_kwargs
+    for arg in non_default_args:
+        if arg not in func_kwargs:
+            raise ValueError(
+                "The argument '{arg}' doest not have a default value, and it "
+                "isn't passed to {func.__name__}".format(arg=arg, func=func))
+
+    # check if there are any extra kwargs
+    extra_kwargs = [arg for arg in func_kwargs.keys() if arg not in args]
+
+    # assert that the function has kwargs
+    if extra_kwargs and not has_kwargs:
+        raise ValueError("The following extra kwargs were supplied: 
{extra_kwargs}".format(
+            extra_kwargs=extra_kwargs
+        ))
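
A small sketch of how these helpers behave; the deploy function is hypothetical, and note that validate_function_arguments relies on the Python 2 func_code/func_defaults attributes:

    def deploy(blueprint, timeout=900):
        pass

    # 'blueprint' has no default value, so it must appear in the kwargs:
    validate_function_arguments(deploy, {'blueprint': 'node_cellar', 'timeout': 60})

    try:
        validate_function_arguments(deploy, {'timeout': 60})
    except ValueError as error:
        print(error)    # 'blueprint' does not have a default value and was not passed

    class Validated(ValidatorMixin):
        pass

    Validated.validate_instance('timeout', 900, int)   # passes silently
    Validated.validate_callable('callback', deploy)    # passes silently
    try:
        Validated.validate_in_choice('mode', 'fast', choices=('safe', 'unsafe'))
    except TypeError as error:
        print(error)    # mode argument must be in ('safe', 'unsafe'), got 'fast'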

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/aria/workflows/__init__.py
----------------------------------------------------------------------
diff --git a/aria/workflows/__init__.py b/aria/workflows/__init__.py
new file mode 100644
index 0000000..c0e9e5a
--- /dev/null
+++ b/aria/workflows/__init__.py
@@ -0,0 +1,54 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Aria's Executors Package
+Path: aria.executors
+
+API:
+    - executors - set of every executor that has been registered
+    - executor - the registered executor with the highest priority
+    - ExecutorInformation - tool to register an executor
+    - Process - subprocess wrapper tool
+
+Plugins:
+...
+"""
+
+import os
+
+from ..tools.plugin import plugin_installer
+
+__all__ = (
+    'executors',
+    'Process',
+)
+
+
+executors = {}
+
+
+def executor_register_callback(module):
+    global executors
+    register_executor_func = getattr(module, 'register_executor', None)
+    if register_executor_func is None:
+        return
+    for executor in register_executor_func():
+        executors[executor.__name__] = executor
+
+plugin_installer(
+    path=os.path.dirname(os.path.realpath(__file__)),
+    plugin_suffix='_executor',
+    callback=executor_register_callback,
+    package=__package__)
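
For context, the registration contract assumed by the callback above: every module in this package whose file name ends with _executor.py is expected to expose a register_executor function. A hypothetical plugin could look like this (the module name and executor class are made up for illustration):

    # hypothetical file: aria/workflows/thread_executor.py

    class ThreadExecutor(object):
        """Made-up executor class, shown only to illustrate the contract."""

    def register_executor():
        # must return an iterable of executor classes; each one ends up in the
        # module-level `executors` dict keyed by its class name
        return (ThreadExecutor,)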

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/aria/workflows/api/__init__.py
----------------------------------------------------------------------
diff --git a/aria/workflows/api/__init__.py b/aria/workflows/api/__init__.py
new file mode 100644
index 0000000..ae1e83e
--- /dev/null
+++ b/aria/workflows/api/__init__.py
@@ -0,0 +1,14 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/aria/workflows/api/tasks_graph.py
----------------------------------------------------------------------
diff --git a/aria/workflows/api/tasks_graph.py 
b/aria/workflows/api/tasks_graph.py
new file mode 100644
index 0000000..5160345
--- /dev/null
+++ b/aria/workflows/api/tasks_graph.py
@@ -0,0 +1,203 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from uuid import uuid4
+
+from networkx import DiGraph, topological_sort
+
+from aria.tools.validation import ValidatorMixin
+
+
+class TaskNotFoundError(Exception):
+    pass
+
+
+class TaskNotInGraphError(Exception):
+    pass
+
+
+class TaskGraph(ValidatorMixin):
+    """
+    A task graph builder.
+    Builds an operations flow graph.
+    """
+
+    def __init__(self, name):
+        self.name = name
+        self.id = str(uuid4())
+        self.graph = DiGraph()
+
+    def __getattr__(self, attr):
+        try:
+            return getattr(self.graph, attr)
+        except AttributeError:
+            return super(TaskGraph, self).__getattribute__(attr)
+
+    def __repr__(self):
+        return '{name}(id={self.id}, name={self.name}, graph={self.graph!r})'.format(
+            name=self.__class__.__name__, self=self)
+
+    @property
+    def tasks(self):
+        """
+        An iterator on tasks added to the graph
+        """
+        for _, data in self.graph.nodes_iter(data=True):
+            yield data['task']
+
+    @property
+    def leaf_tasks(self):
+        for task in self.tasks_in_order():
+            if not self.graph.predecessors(task.id):
+                yield task
+
+    def task_tree(self, reverse=False):
+        """
+        Iterates over the tasks to be executed in topological order and their dependencies.
+        :param reverse: reverse the order
+        """
+        for task in self.tasks_in_order(reverse=reverse):
+            yield task, self.task_dependencies(task)
+
+    def tasks_in_order(self, reverse=False):
+        """
+        Iterates over the tasks to be executed in topological order
+        :param reverse: reverse the order
+        """
+        for task_id in topological_sort(self.graph, reverse=reverse):
+            yield self.graph.node[task_id]['task']
+
+    def has_dependencies(self, task):
+        return any(self.task_dependencies(task))
+
+    def task_dependencies(self, task):
+        """
+        Iterates over the task dependencies
+        """
+        for task_ids in self.graph.edges_iter(task.id):
+            for task_id in task_ids:
+                if task.id != task_id:
+                    yield self.get_task(task_id)
+
+    def add_task(self, task):
+        """
+        Add a task to this graph
+        :param WorkflowTask|TaskGraph task: The task
+        """
+        self.graph.add_node(task.id, task=task)
+
+    def get_task(self, task_id):
+        """
+        Get a task instance that was inserted to this graph by its id
+
+        :param basestring task_id: the task id
+        :return: requested task
+        :rtype: WorkflowTask|TaskGraph
+        :raise: TaskNotFoundError if no task found with given id
+        """
+        try:
+            data = self.graph.node[task_id]
+            return data['task']
+        except KeyError:
+            raise TaskNotFoundError('Task id: {0}'.format(task_id))
+
+    def remove_task(self, task):
+        """
+        Remove the provided task from the graph
+        :param WorkflowTask|TaskGraph task: The task
+        """
+        self.graph.remove_node(task.id)
+
+    def dependency(self, source_task, after):
+        """
+        Add a dependency between tasks.
+        The source task will only be executed after the target task terminates.
+        A source task may depend on several tasks,
+        in which case it will only be executed after all its target tasks terminate.
+
+        tasks flow order:
+        after -> source_task
+
+        :param WorkflowTask|TaskGraph source_task: The source task
+        :type source_task: WorkflowTask
+        :param list after: The target tasks
+        :raise TaskNotInGraphError
+        """
+        if not self.graph.has_node(source_task.id):
+            raise TaskNotInGraphError(
+                'source task {0!r} is not in graph (task id: {0.id})'.format(source_task))
+        for target_task in after:
+            if not self.graph.has_node(target_task.id):
+                raise TaskNotInGraphError(
+                    'target task {0!r} is not in graph (task id: {0.id})'.format(target_task))
+            self.graph.add_edge(source_task.id, target_task.id)
+
+    # workflow creation helper methods
+    def chain(self, tasks, after=()):
+        """
+        create a chain of tasks.
+        tasks will be added to the graph with a dependency between
+        each pair of consecutive tasks, following the given order.
+
+        tasks flow order:
+        if tasks = (task0, task1, ..., taskn)
+        after -> task0 -> task1 -> ... -> taskn
+
+        :param tasks: list of WorkflowTask instances.
+        :param after: target to the sequence
+        """
+        for source_task in tasks:
+            self.add_task(source_task)
+            self.dependency(source_task, after=after)
+            after = (source_task,)
+
+    def fan_out(self, tasks, after=()):
+        """
+        create a fan-out.
+        tasks will be added to the graph with a dependency to
+        the target task.
+
+        tasks flow order:
+        if tasks = (task0, task1, ..., taskn)
+        after      -> task0
+                   |-> task1
+                   |...
+                   \-> taskn
+
+        :param tasks: list of WorkflowTask instances.
+        :param after: target to the tasks
+        """
+        for source_task in tasks:
+            self.add_task(source_task)
+            self.dependency(source_task, after=after)
+
+    def fan_in(self, source_task, after=None):
+        """
+        create a fan-in.
+        source task will be added to the graph with a dependency to
+        the tasks.
+
+        tasks flow order:
+        if after = (task0, task1, ..., taskn)
+        task0\
+        task1|-> source_task
+        ...  |
+        taskn/
+
+        :param source_task: source to the tasks
+        :param after: list of WorkflowTask instances.
+        """
+        self.add_task(source_task)
+        self.dependency(source_task, after=after)
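
A brief sketch of building a graph with the helpers above; the task objects are hypothetical stand-ins, since the builder here only relies on each task having a unique id attribute:

    from collections import namedtuple

    # Stand-in for WorkflowTask: TaskGraph only needs an `id` here.
    StubTask = namedtuple('StubTask', 'id name')

    create = StubTask(id='1', name='create')
    configure = StubTask(id='2', name='configure')
    start = StubTask(id='3', name='start')

    graph = TaskGraph('install_stub')
    graph.chain([create, configure, start])    # create -> configure -> start

    print([t.name for t in graph.task_dependencies(configure)])   # ['create']
    print([t.name for t in graph.task_dependencies(create)])      # []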

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/aria/workflows/builtin/__init__.py
----------------------------------------------------------------------
diff --git a/aria/workflows/builtin/__init__.py 
b/aria/workflows/builtin/__init__.py
new file mode 100644
index 0000000..c868fe7
--- /dev/null
+++ b/aria/workflows/builtin/__init__.py
@@ -0,0 +1,32 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .install import install
+from .uninstall import uninstall
+from .execute_operation import execute_operation
+from .heal import heal
+from .scale import scale_entity, scale
+from .update import update
+
+
+__all__ = [
+    'install',
+    'uninstall',
+    'execute_operation',
+    'heal',
+    'scale',
+    'scale_entity',
+    'update'
+]

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/aria/workflows/builtin/deployment_modification.py
----------------------------------------------------------------------
diff --git a/aria/workflows/builtin/deployment_modification.py 
b/aria/workflows/builtin/deployment_modification.py
new file mode 100644
index 0000000..b4300b1
--- /dev/null
+++ b/aria/workflows/builtin/deployment_modification.py
@@ -0,0 +1,221 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import itertools
+from datetime import datetime
+
+from aria.storage import models
+from aria.deployment import modify_deployment as extract_deployment_modification
+
+
+def modify_deployment(context, modified_nodes, modification_context=None):
+    deployment = context.deployment
+    active_modifications = filter(
+        lambda dm: dm.status == models.DeploymentModification.STARTED,
+        context.storage.deployment_modification)
+    if active_modifications:
+        # TODO: raise proper exception
+        raise Exception(
+            'Cannot start deployment modification while there are '
+            'existing started deployment modifications. Currently '
+            'started deployment modifications: {0}'
+            .format(active_modifications))
+
+    nodes = set(context.nodes)
+    node_instances = set(context.node_instances)
+    node_instances_modification = extract_deployment_modification(
+        nodes=nodes,
+        previous_nodes=nodes,
+        previous_node_instances=node_instances,
+        scaling_groups=deployment.scaling_groups,
+        modified_nodes=modified_nodes)
+
+    modification = models.DeploymentModification(
+        created_at=datetime.now(),
+        ended_at=None,
+        status=models.DeploymentModification.STARTED,
+        deployment_id=context.deployment_id,
+        modified_nodes=modified_nodes,
+        added_and_related=node_instances_modification['added_and_related'],
+        removed_and_related=node_instances_modification['removed_and_related'],
+        extended_and_related=node_instances_modification['extended_and_related'],
+        reduced_and_related=node_instances_modification['reduced_and_related'],
+        # before_modification=node_instances,
+        # node_instances=node_instances_modification,
+        context=modification_context)
+
+    context.storage.deployment_modification.store(modification)
+
+    for node_id, modified_node in modified_nodes.items():
+        if node_id in deployment.scaling_groups:
+            deployment.scaling_groups[node_id]['properties'].update({
+                'planned_instances': modified_node['instances']
+            })
+        else:
+            node = context.storage.node.get(node_id)
+            node.planned_number_of_instances = modified_node['instances']
+            context.storage.node.store(node)
+    context.storage.deployment.store(deployment)
+
+    added_and_related = node_instances_modification['added_and_related']
+    added_node_instances = []
+    for node_instance in added_and_related:
+        if hasattr(node_instance, 'modification') and node_instance.modification == 'added':
+            added_node_instances.append(node_instance)
+        else:
+            current_instance = context.storage.node_instance.get(node_instance.id)
+            target_names = [r.target_id for r in current_instance.node.relationships]
+            current_relationship_groups = {
+                target_name: list(group)
+                for target_name, group in itertools.groupby(
+                    current_instance.relationship_instances,
+                    key=lambda r: r.target_name)
+                }
+            new_relationship_groups = {
+                target_name: list(group)
+                for target_name, group in itertools.groupby(
+                    node_instance.relationship_instances,
+                    key=lambda r: r.target_name)
+                }
+            new_relationships = []
+            for target_name in target_names:
+                new_relationships += current_relationship_groups.get(
+                    target_name, [])
+                new_relationships += new_relationship_groups.get(
+                    target_name, [])
+
+            updated_node_instance = models.NodeInstance(
+                id=node_instance.id,
+                deployment_id=context.deployment_id,
+                relationship_instances=new_relationships,
+                version=current_instance.version,
+                node=context.storage.node.get(node_instance.node.id),
+                host_id=None,
+                runtime_properties={})
+
+            context.storage.node_instance.store(updated_node_instance)
+
+    _create_deployment_node_instances(context, added_node_instances)
+    return modification
+
+
+def _create_deployment_node_instances(context,
+                                      dsl_node_instances):
+    node_instances = []
+    for node_instance in dsl_node_instances:
+        instance = models.NodeInstance(
+            id=node_instance.id,
+            node=node_instance.node,
+            host_id=node_instance.host_id,
+            relationship_instances=node_instance.relationship_instances,
+            deployment_id=context.deployment_id,
+            runtime_properties={},
+            version=None,
+            scaling_groups=node_instance.scaling_groups)
+        node_instances.append(instance)
+
+    for node_instance in node_instances:
+        context.storage.node_instance.store(node_instance)
+
+
+def finish_deployment_modification(context, modification_id):
+    modification = context.storage.deployment_modification.get(modification_id)
+
+    if modification.status in models.DeploymentModification.END_STATES:
+        raise Exception(
+            'Cannot finish deployment modification: {0}. It is already in'
+            ' {1} status.'.format(modification_id,
+                                  modification.status))
+
+    for node_id, modified_node in modification.modified_nodes.items():
+        if node_id in context.deployment.scaling_groups:
+            context.deployment.scaling_groups[node_id]['properties'].update({
+                'current_instances': modified_node['instances']
+            })
+        else:
+            node_dict = context.storage.node.get(node_id).fields_dict
+            node_dict['number_of_instances'] = modified_node['instances']
+            context.storage.node.store(models.Node(**node_dict))
+
+    context.storage.deployment.store(context.deployment)
+
+    for node_instance in modification.removed_and_related:
+        if node_instance.get('modification') == 'removed':
+            context.storage.node_instance.delete(node_instance.id)
+        else:
+            removed_relationship_target_ids = set(
+                [rel.target_id for rel in node_instance.relationships])
+            current = context.storage.node_instance.get(node_instance.id)
+            new_relationships = [rel for rel in current.relationships
+                                 if rel.target_id not in removed_relationship_target_ids]
+            context.storage.node_instance.store(models.NodeInstance(
+                id=node_instance.id,
+                relationships=new_relationships,
+                version=current.version,
+                node_id=None,
+                host_id=None,
+                deployment_id=None,
+                state=None,
+                runtime_properties=None))
+
+    now = datetime.now()
+
+    context.storage.deployment_modification.store(
+        models.DeploymentModification(
+            id=modification_id,
+            status=models.DeploymentModification.FINISHED,
+            ended_at=now,
+            created_at=modification.created_at,
+            deployment_id=modification.deployment_id,
+            modified_nodes=modification.modified_nodes,
+            added_and_related=modification.added_and_related,
+            removed_and_related=modification.removed_and_related,
+            extended_and_related=modification.extended_and_related,
+            reduced_and_related=modification.reduced_and_related,
+            context=None))
+
+
+def rollback_deployment_modification(context, modification_id):
+    pass
+    # modification = context.storage.deployment_modification.get(modification_id)
+    #
+    # if modification.status in models.DeploymentModification.END_STATES:
+    #     raise Exception(
+    #         'Cannot rollback deployment modification: {0}. It is already '
+    #         'in {1} status.'.format(modification_id,
+    #                                 modification.status))
+    #
+    # # before_rollback_node_instances = [instance.fields_dict for instance in context.node_instances]
+    # for instance in context.node_instances:
+    #     context.storage.node_instance.delete(instance.id)
+    #
+    # for instance in modification.before_modification:
+    #     context.storage.node_instance.store(models.NodeInstance(**instance))
+    # nodes_num_instances = dict((node.id, node) for node in context.nodes)
+    #
+    # modified_nodes = modification.modified_nodes
+    # for node_id, modified_node in modified_nodes.items():
+    #     if node_id in context.deployment.scaling_groups:
+    #         props = context.deployment.scaling_groups[node_id]['properties']
+    #         props['planned_instances'] = props['current_instances']
+    #     else:
+    #         rolled_back_node_dict = context.storage.node.get(node_id).fields_dict
+    #         rolled_back_node_dict['planned_number_of_instances'] = nodes_num_instances[node_id].number_of_instances
+    #         context.storage.node.store(models.Node(**rolled_back_node_dict))
+    #
+    # context.storage.deployment.store(context.deployment)
+    #
+    # return context.storage.deployment_modification.get(modification_id)
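
A hedged sketch of the intended call sequence for the two implemented functions above; ctx stands for a workflow context wired to storage, the node id and instance count are made up, and the id attribute on the returned modification is assumed from the model:

    # scale the hypothetical node 'webserver_node' to 3 instances
    modification = modify_deployment(
        context=ctx,
        modified_nodes={'webserver_node': {'instances': 3}})

    # ... run the tasks that actually create / wire the added node instances ...

    finish_deployment_modification(ctx, modification.id)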

