http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/aria/workflows/builtin/execute_operation.py
----------------------------------------------------------------------
diff --git a/aria/workflows/builtin/execute_operation.py 
b/aria/workflows/builtin/execute_operation.py
new file mode 100644
index 0000000..6a5cbd4
--- /dev/null
+++ b/aria/workflows/builtin/execute_operation.py
@@ -0,0 +1,77 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from aria import workflow
+
+from .workflows import execute_operation_on_instance
+
+
+@workflow
+def execute_operation(
+        context,
+        graph,
+        operation,
+        operation_kwargs,
+        allow_kwargs_override,
+        run_by_dependency_order,
+        type_names,
+        node_ids,
+        node_instance_ids,
+        **kwargs):
+    subgraphs = {}
+    # filtering node instances
+    filtered_node_instances = list(_filter_node_instances(
+        context=context,
+        node_ids=node_ids,
+        node_instance_ids=node_instance_ids,
+        type_names=type_names))
+
+    if run_by_dependency_order:
+        filtered_node_instances_ids = set(node_instance.id
+                                          for node_instance in filtered_node_instances)
+        for node_instance in context.node_instances:
+            if node_instance.id not in filtered_node_instances_ids:
+                subgraphs[node_instance.id] = context.task_graph(
+                    name='execute_operation_stub_{0}'.format(node_instance.id))
+
+    # registering actual tasks to sequences
+    for node_instance in filtered_node_instances:
+        node_instance_sub_workflow = execute_operation_on_instance(
+            workflow_id=node_instance.id,
+            context=context,
+            graph=graph,
+            node_instance=node_instance,
+            operation=operation,
+            operation_kwargs=operation_kwargs,
+            allow_kwargs_override=allow_kwargs_override)
+        subgraphs[node_instance.id] = node_instance_sub_workflow
+
+    for node_instance_sub_workflow in subgraphs.values():
+        graph.add_task(node_instance_sub_workflow)
+
+    # adding tasks dependencies if required
+    if run_by_dependency_order:
+        for node_instance in context.node_instances:
+            for relationship_instance in node_instance.relationship_instances:
+                graph.dependency(source_task=subgraphs[node_instance.id],
+                                 after=[subgraphs[relationship_instance.target_id]])
+
+
+def _filter_node_instances(context, node_ids=(), node_instance_ids=(), type_names=()):
+    for node_instance in context.node_instances:
+        if ((not node_instance_ids or node_instance.id in node_instance_ids) and
+                (not node_ids or node_instance.node.id in node_ids) and
+                (not type_names or node_instance.node.type_hierarchy in type_names)):
+            yield node_instance

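A hedged sketch of how this workflow might be invoked, assuming a populated
workflow context (the `ctx` name and the parameter values are illustrative,
not part of the commit):

    # Hypothetical invocation: run 'start' on every instance of node
    # 'webserver', ordered by the dependency graph.
    execute_operation(
        context=ctx,
        graph=ctx.task_graph(name='execute_operation'),
        operation='cloudify.interfaces.lifecycle.start',
        operation_kwargs={},
        allow_kwargs_override=None,
        run_by_dependency_order=True,
        type_names=(),
        node_ids=('webserver',),
        node_instance_ids=())
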
http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/aria/workflows/builtin/heal.py
----------------------------------------------------------------------
diff --git a/aria/workflows/builtin/heal.py b/aria/workflows/builtin/heal.py
new file mode 100644
index 0000000..9bb729b
--- /dev/null
+++ b/aria/workflows/builtin/heal.py
@@ -0,0 +1,147 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from aria import workflow
+
+from .uninstall import uninstall
+from .install import install
+from .workflows import relationship_tasks
+
+
+@workflow
+def heal(context, graph, node_instance_id):
+    failing_node = context.storage.node_instance.get(node_instance_id)
+    host_node = context.storage.node_instance.get(failing_node.host_id)
+    failed_node_instance_subgraph = _get_contained_subgraph(context, host_node)
+    failed_node_instance_ids = list(n.id for n in failed_node_instance_subgraph)
+
+    targeted_node_instances = [
+        context.storage.node_instance.get(relationship_instance.target_id)
+        for node_instance in failed_node_instance_subgraph
+        for relationship_instance in node_instance.relationship_instances
+        if relationship_instance.target_id not in failed_node_instance_ids
+    ]
+
+    graph.chain([
+        heal_uninstall(
+            context=context,
+            failing_node_instances=failed_node_instance_subgraph,
+            targeted_node_instances=targeted_node_instances),
+        heal_install(
+            context=context,
+            failing_node_instances=failed_node_instance_subgraph,
+            targeted_node_instances=targeted_node_instances)
+    ])
+
+
+@workflow(suffix_template='{failing_node_instances}')
+def heal_uninstall(context, graph, failing_node_instances, targeted_node_instances):
+    node_instance_sub_workflows = {}
+
+    # Create uninstall stub workflow for each unaffected node instance
+    for node_instance in targeted_node_instances:
+        node_instance_sub_workflow = context.task_graph(
+            name='uninstall_stub_{0}'.format(node_instance.id))
+        node_instance_sub_workflows[node_instance.id] = node_instance_sub_workflow
+        graph.add_task(node_instance_sub_workflow)
+
+    # Create uninstall sub workflow for each failing node
+    uninstall(
+        context=context,
+        graph=graph,
+        node_instances=failing_node_instances,
+        node_instance_sub_workflows=node_instance_sub_workflows)
+
+    # Add operations for intact nodes depending on a node instance
+    # belonging to node_instances
+    for node_instance in targeted_node_instances:
+        node_instance_sub_workflow = node_instance_sub_workflows[node_instance.id]
+
+        for relationship_instance in reversed(node_instance.relationship_instances):
+            target_node_instance = context.storage.node_instance.get(
+                relationship_instance.target_id)
+            # initialize so the check below is safe when neither branch applies
+            after_tasks = []
+            if target_node_instance in failing_node_instances:
+                after_tasks = [node_instance_sub_workflows[relationship.target_id]
+                               for relationship in node_instance.relationship_instances]
+
+            elif target_node_instance in targeted_node_instances:
+                after_tasks = [relationship_tasks(
+                    node_instance=node_instance,
+                    relationship_instance=relationship_instance,
+                    context=context,
+                    operation_name='cloudify.interfaces.relationship_lifecycle.unlink')]
+
+            if after_tasks:
+                graph.dependency(source_task=node_instance_sub_workflow, after=after_tasks)
+
+
+@workflow(suffix_template='{failing_node_instances}')
+def heal_install(context, graph, failing_node_instances, targeted_node_instances):
+    node_instance_sub_workflows = {}
+
+    # Create install stub workflow for each unaffected node instance
+    for node_instance in targeted_node_instances:
+        node_instance_sub_workflow = context.task_graph(
+            name='install_stub_{0}'.format(node_instance.id))
+        node_instance_sub_workflows[node_instance.id] = node_instance_sub_workflow
+        graph.add_task(node_instance_sub_workflow)
+
+    # create install sub workflow for each failing node instance
+    install(
+        context=context,
+        graph=graph,
+        node_instances=failing_node_instances,
+        node_instance_sub_workflows=node_instance_sub_workflows)
+
+    # Add operations for intact nodes depending on a node instance
+    # belonging to node_instances
+    for node_instance in targeted_node_instances:
+        node_instance_sub_workflow = node_instance_sub_workflows[node_instance.id]
+
+        for relationship_instance in node_instance.relationship_instances:
+            target_node_instance = context.storage.node_instance.get(
+                relationship_instance.target_id)
+            # initialize so the check below is safe when neither branch applies
+            after_tasks = []
+            if target_node_instance in failing_node_instances:
+                after_tasks = [node_instance_sub_workflows[relationship.target_id]
+                               for relationship in node_instance.relationship_instances]
+
+            elif target_node_instance in targeted_node_instances:
+                after_tasks = [relationship_tasks(
+                    node_instance=node_instance,
+                    relationship_instance=relationship_instance,
+                    context=context,
+                    operation_name='cloudify.interfaces.relationship_lifecycle.establish')]
+
+            if after_tasks:
+                graph.dependency(source_task=node_instance_sub_workflow, after=after_tasks)
+
+
+def _get_contained_subgraph(context, host_node_instance):
+    contained_instances = set(node_instance
+                              for node_instance in context.node_instances
+                              if node_instance.host_id == host_node_instance.id and
+                              node_instance.id != node_instance.host_id)
+    result = {host_node_instance}
+
+    if not contained_instances:
+        return result
+
+    result.update(contained_instances)
+    for node_instance in contained_instances:
+        result.update(_get_contained_subgraph(context, node_instance))
+
+    return result
+
+

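A hedged sketch of invoking heal, assuming a context whose storage already
holds the failed instance (`ctx` and the instance id are illustrative):

    # Hypothetical invocation: tear down and reinstall everything contained
    # in the failed instance's host, in two chained sub workflows.
    heal(
        context=ctx,
        graph=ctx.task_graph(name='heal'),
        node_instance_id='webserver_1')
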
http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/aria/workflows/builtin/install.py
----------------------------------------------------------------------
diff --git a/aria/workflows/builtin/install.py 
b/aria/workflows/builtin/install.py
new file mode 100644
index 0000000..15e6516
--- /dev/null
+++ b/aria/workflows/builtin/install.py
@@ -0,0 +1,42 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from aria import workflow
+
+from .workflows import install_node_instance
+
+
+@workflow
+def install(context, graph, node_instances=(), node_instance_sub_workflows=None):
+
+    node_instance_sub_workflows = node_instance_sub_workflows or {}
+    node_instances = node_instances or list(context.node_instances)
+
+    # create install sub workflow for every node instance
+    for node_instance in node_instances:
+        node_instance_sub_workflow = install_node_instance(
+            context=context,
+            node_instance=node_instance)
+        node_instance_sub_workflows[node_instance.id] = node_instance_sub_workflow
+        graph.add_task(node_instance_sub_workflow)
+
+    # create dependencies between the node instance sub workflows
+    for node_instance in node_instances:
+        node_instance_sub_workflow = node_instance_sub_workflows[node_instance.id]
+        graph.dependency(
+            source_task=node_instance_sub_workflow,
+            after=[
+                node_instance_sub_workflows[relationship.target_id]
+                for relationship in node_instance.relationship_instances])

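A minimal sketch of a whole-deployment install, assuming `ctx` exposes node
instances as in the diffs above (names are illustrative):

    # Hypothetical invocation: one sub workflow per node instance, each
    # ordered after the sub workflows of its relationship targets.
    install(context=ctx, graph=ctx.task_graph(name='install'))
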
http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/aria/workflows/builtin/scale.py
----------------------------------------------------------------------
diff --git a/aria/workflows/builtin/scale.py b/aria/workflows/builtin/scale.py
new file mode 100644
index 0000000..e8788c9
--- /dev/null
+++ b/aria/workflows/builtin/scale.py
@@ -0,0 +1,416 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from aria import workflow
+from aria.workflows.engine.engine import Engine
+from .install import install
+from .uninstall import uninstall
+
+from .deployment_modification import modify_deployment, finish_deployment_modification, \
+    rollback_deployment_modification
+
+
+def scale(context, node_id, delta, scale_compute):
+    return scale_entity(
+        context=context,
+        scalable_entity_name=node_id,
+        delta=delta,
+        scale_compute=scale_compute)
+
+
+# TODO: 1. the screening of which nodes were added doesn't work
+
+@workflow(simple_workflow=False)
+def scale_entity(context, graph, scalable_entity_name, delta, scale_compute):
+    engine = Engine(context.concurrency_count)
+    if isinstance(delta, basestring):
+        try:
+            delta = int(delta)
+        except ValueError:
+            raise ValueError('The delta parameter must be a number. Got: {0}'.format(delta))
+
+    if delta == 0:
+        context.logger.info('delta parameter is 0, so no scaling will take place.')
+        return
+
+    scaling_group = context.deployment.scaling_groups.get(scalable_entity_name)
+    if scaling_group:
+        curr_num_instances = scaling_group['properties']['current_instances']
+        planned_num_instances = curr_num_instances + delta
+        scale_id = scalable_entity_name
+    else:
+        node = context.storage.node.get(scalable_entity_name)
+        if not node:
+            raise ValueError("No scalable entity named {0} was found".format(
+                scalable_entity_name))
+        host_node = context.storage.node.get(node.host_id)
+        scaled_node = host_node if (scale_compute and host_node) else node
+        curr_num_instances = scaled_node.number_of_instances
+        planned_num_instances = curr_num_instances + delta
+        scale_id = scaled_node.id
+
+    if planned_num_instances < 0:
+        raise ValueError('Provided delta: {0} is illegal. Current number of '
+                         'instances of entity {1} is {2}'
+                         .format(delta,
+                                 scalable_entity_name,
+                                 curr_num_instances))
+
+    modification = modify_deployment(
+        context,
+        {
+            scale_id: {
+                'instances': planned_num_instances
+
+                # The following parameters are not exposed at the moment,
+                # but should be used to control which node instances get
+                # scaled in (when scaling in). They are mentioned here
+                # because, currently, the modification API is not well
+                # documented.
+                # Special care should be taken because if `scale_compute == True`
+                # (which is the default), then these ids should be the compute
+                # node instance ids, which are not necessarily instances of
+                # the node specified by `scalable_entity_name`.
+
+                # Node instances denoted by these instance ids should be
+                # *kept* if possible.
+                # 'removed_ids_exclude_hint': [],
+
+                # Node instances denoted by these instance ids should be
+                # *removed* if possible.
+                # 'removed_ids_include_hint': []
+            }
+        }
+    )
+    try:
+        context.logger.info('Deployment modification started. '
+                            '[modification_id={0}]'.format(modification.id))
+        if delta > 0:
+            added, related = [], []
+            for node_instance in modification.added_and_related:
+                if hasattr(node_instance, 'modification') and node_instance.modification == 'added':
+                    added.append(node_instance)
+                else:
+                    related.append(node_instance)
+            try:
+                graph.add_task(_scale_install(
+                    graph=graph,
+                    context=context,
+                    scaling_up_node_instances=added,
+                    unaffected_node_instances=related))
+            except:
+                context.logger.error('Scale out failed, scaling back in.')
+                for task in graph.tasks:
+                    graph.remove_task(task)
+                graph.add_task(_scale_uninstall(
+                    graph=graph,
+                    context=context,
+                    scaling_down_node_instances=added,
+                    unaffected_node_instances=related))
+                raise
+        else:
+            removed, related = [], []
+            for node_instance in modification.removed_and_related:
+                if hasattr(node_instance, 'modification') and node_instance.modification == 'removed':
+                    removed.append(node_instance)
+                else:
+                    related.append(node_instance)
+            graph.add_task(_scale_uninstall(
+                graph=graph,
+                context=context,
+                scaling_down_node_instances=removed,
+                unaffected_node_instances=related))
+    except:
+        context.logger.warn('Rolling back deployment modification. '
+                            '[modification_id={0}]'.format(modification.id))
+        try:
+            rollback_deployment_modification(context, modification.id)
+        except:
+            context.logger.warn('Deployment modification rollback failed. The '
+                                'deployment model is most likely in some corrupted'
+                                ' state. '
+                                '[modification_id={0}]'.format(modification.id))
+            raise
+        raise
+    else:
+        engine.create_workflow(context, graph)
+        engine.execute()
+        try:
+            finish_deployment_modification(context, modification.id)
+        except:
+            context.logger.warn('Deployment modification finish failed. The '
+                                'deployment model is most likely in some corrupted'
+                                ' state. '
+                                '[modification_id={0}]'.format(modification.id))
+            raise
+
+
+@workflow(simple_workflow=False)
+def _scale_uninstall(context, graph, scaling_down_node_instances, unaffected_node_instances):
+    node_instance_sub_workflows = {}
+
+    # Create uninstall stub sub workflow for each unaffected node instance
+    for node_instance in unaffected_node_instances:
+        node_instance_sub_workflow = uninstall_stub_subworkflow(
+            sub_workflow_name='uninstall_stub_{0}'.format(node_instance.id),
+            graph=graph,
+            context=context)
+        node_instance_sub_workflows[node_instance.id] = node_instance_sub_workflow
+        graph.add_task(node_instance_sub_workflow)
+
+    # Create uninstall sub workflow for each scaled-down node instance
+    uninstall(
+        context=context,
+        graph=graph,
+        node_instances=scaling_down_node_instances,
+        node_instance_sub_workflows=node_instance_sub_workflows)
+
+    # Add operations for intact nodes depending on a node instance
+    # belonging to node_instances
+    intact_node_relationship_operation = 'cloudify.interfaces.relationship_lifecycle.unlink'
+    for node_instance in unaffected_node_instances:
+        node_instance_sub_workflow = node_instance_sub_workflows[node_instance.id]
+
+        for relationship_instance in reversed(node_instance.relationship_instances):
+            target_node_instance = context.storage.node_instance.get(
+                relationship_instance.target_id)
+            after_tasks = []
+            if target_node_instance in scaling_down_node_instances:
+                after_tasks.extend(
+                    [node_instance_sub_workflows[relationship.target_id]
+                     for relationship in node_instance.relationship_instances])
+
+            elif target_node_instance in unaffected_node_instances:
+                intact_tasks = relationship_tasks_subworkflow(
+                    graph=graph,
+                    context=context,
+                    sub_workflow_name='{0}.{1}'.format(
+                        intact_node_relationship_operation,
+                        node_instance.id),
+                    operation_name=intact_node_relationship_operation,
+                    node_instance=node_instance,
+                    relationship_instance=relationship_instance)
+                after_tasks.extend(intact_tasks)
+
+            graph.dependency(source_task=node_instance_sub_workflow, after=after_tasks)
+
+
+@workflow(simple_workflow=False)
+def _scale_install(context, graph, scaling_up_node_instances, unaffected_node_instances):
+    node_instance_sub_workflows = {}
+
+    # Create install stub sub workflow for each unaffected node instance
+    for node_instance in unaffected_node_instances:
+        node_instance_sub_workflow = install_stub_subworkflow(
+            sub_workflow_name='install_stub_{0}'.format(node_instance.id),
+            context=context,
+            graph=graph)
+        node_instance_sub_workflows[node_instance.id] = node_instance_sub_workflow
+        graph.add_task(node_instance_sub_workflow)
+
+    # create install sub workflow for each scaled-up node instance
+    install(
+        context=context,
+        graph=graph,
+        node_instances=scaling_up_node_instances,
+        node_instance_sub_workflows=node_instance_sub_workflows)
+
+    # Add operations for intact nodes depending on a node instance
+    # belonging to node_instances
+    intact_node_relationship_operation = 'cloudify.interfaces.relationship_lifecycle.establish'
+    for node_instance in unaffected_node_instances:
+        node_instance_sub_workflow = node_instance_sub_workflows[node_instance.id]
+
+        for relationship_instance in node_instance.relationship_instances:
+            # use a distinct comprehension variable so the outer
+            # relationship_instance is not clobbered
+            after_tasks = [
+                node_instance_sub_workflows[relationship.target_id]
+                for relationship in node_instance.relationship_instances]
+
+            target_node_instance = context.storage.node_instance.get(
+                relationship_instance.target_id)
+            if target_node_instance in unaffected_node_instances:
+                intact_tasks = relationship_tasks_subworkflow(
+                    graph=graph,
+                    context=context,
+                    sub_workflow_name='{0}.{1}'.format(
+                        intact_node_relationship_operation,
+                        node_instance.id),
+                    operation_name=intact_node_relationship_operation,
+                    node_instance=node_instance,
+                    relationship_instance=relationship_instance)
+                after_tasks.extend(intact_tasks)
+
+            graph.dependency(source_task=node_instance_sub_workflow, after=after_tasks)
+
+
+@workflow(simple_workflow=False)
+def install_stub_subworkflow(**kwargs):
+    pass
+
+
+@workflow(simple_workflow=False)
+def uninstall_stub_subworkflow(**kwargs):
+    pass
+
+
+@workflow(simple_workflow=False)
+def install_node_instance_sub_workflow(graph, context, node_instance):
+    graph.chain(tasks=[
+        context.operation(
+            name='cloudify.interfaces.lifecycle.create.{0}'.format(node_instance.id),
+            operation_details=node_instance.node.operations[
+                'cloudify.interfaces.lifecycle.create'],
+            node_instance=node_instance),
+        preconfigure_relationship_sub_workflow(
+            sub_workflow_name='preconfigure_{0}'.format(node_instance.id),
+            context=context,
+            graph=graph,
+            node_instance=node_instance),
+        context.operation(
+            name='cloudify.interfaces.lifecycle.configure.{0}'.format(node_instance.id),
+            operation_details=node_instance.node.operations[
+                'cloudify.interfaces.lifecycle.configure'],
+            node_instance=node_instance),
+        postconfigure_relationship_sub_workflow(
+            sub_workflow_name='postconfigure_{0}'.format(node_instance.id),
+            context=context,
+            graph=graph,
+            node_instance=node_instance),
+        context.operation(
+            name='cloudify.interfaces.lifecycle.start.{0}'.format(node_instance.id),
+            operation_details=node_instance.node.operations[
+                'cloudify.interfaces.lifecycle.start'],
+            node_instance=node_instance),
+        establish_relationship_sub_workflow(
+            sub_workflow_name='establish_{0}'.format(node_instance.id),
+            context=context,
+            graph=graph,
+            node_instance=node_instance),
+    ])
+
+
+@workflow(simple_workflow=False)
+def uninstall_node_instance_sub_workflow(graph, context, node_instance):
+    graph.chain(tasks=[
+        # instance.set_state('stopping'),
+        # instance.send_event('Stopping node'),
+        context.operation(
+            name='cloudify.interfaces.monitoring.stop.{0}'.format(node_instance.id),
+            operation_details=node_instance.node.operations['cloudify.interfaces.monitoring.stop'],
+            node_instance=node_instance
+        ),
+        context.operation(
+            name='cloudify.interfaces.lifecycle.stop.{0}'.format(node_instance.id),
+            operation_details=node_instance.node.operations['cloudify.interfaces.lifecycle.stop'],
+            node_instance=node_instance
+        ),
+        # instance.set_state('stopped'),
+        unlink_relationship_sub_workflow(
+            sub_workflow_name='unlink_{0}'.format(node_instance.id),
+            context=context,
+            graph=graph,
+            node_instance=node_instance),
+        # instance.set_state('deleting'),
+        # instance.send_event('Deleting node'),
+        context.operation(
+            name='cloudify.interfaces.lifecycle.delete.{0}'.format(node_instance.id),
+            operation_details=node_instance.node.operations['cloudify.interfaces.lifecycle.delete'],
+            node_instance=node_instance
+        )
+        # instance.set_state('deleted')
+
+    ])
+
+
+@workflow(simple_workflow=False)
+def preconfigure_relationship_sub_workflow(context, graph, node_instance):
+    operation_name = 'cloudify.interfaces.relationship_lifecycle.preconfigure'
+    relationship_tasks = []
+    for relationship_instance in node_instance.relationship_instances:
+        relationship_tasks.append(
+            relationship_tasks_subworkflow(
+                graph=graph,
+                context=context,
+                sub_workflow_name='{0}.{1}'.format(operation_name, node_instance.id),
+                operation_name=operation_name,
+                node_instance=node_instance,
+                relationship_instance=relationship_instance)
+        )
+    graph.chain(tasks=relationship_tasks)
+
+
+@workflow(simple_workflow=False)
+def postconfigure_relationship_sub_workflow(context, graph, node_instance):
+    operation_name = 'cloudify.interfaces.relationship_lifecycle.postconfigure'
+    relationship_tasks = []
+    for relationship_instance in node_instance.relationship_instances:
+        relationship_tasks.append(
+            relationship_tasks_subworkflow(
+                graph=graph,
+                context=context,
+                sub_workflow_name='{0}.{1}'.format(operation_name, node_instance.id),
+                operation_name=operation_name,
+                node_instance=node_instance,
+                relationship_instance=relationship_instance)
+        )
+    graph.chain(tasks=relationship_tasks)
+
+
+@workflow(simple_workflow=False)
+def establish_relationship_sub_workflow(context, graph, node_instance):
+    operation_name = 'cloudify.interfaces.relationship_lifecycle.establish'
+    relationship_tasks = []
+    for relationship_instance in node_instance.relationship_instances:
+        relationship_tasks.append(
+            relationship_tasks_subworkflow(
+                graph=graph,
+                context=context,
+                sub_workflow_name='{0}.{1}'.format(operation_name, node_instance.id),
+                operation_name=operation_name,
+                node_instance=node_instance,
+                relationship_instance=relationship_instance)
+        )
+    graph.chain(tasks=relationship_tasks)
+
+
+@workflow(simple_workflow=False)
+def unlink_relationship_sub_workflow(context, graph, node_instance):
+    operation_name = 'cloudify.interfaces.relationship_lifecycle.unlink'
+    relationship_tasks = []
+    for relationship_instance in node_instance.relationship_instances:
+        relationship_tasks.append(
+            relationship_tasks_subworkflow(
+                graph=graph,
+                context=context,
+                sub_workflow_name='{0}.{1}'.format(operation_name, node_instance.id),
+                operation_name=operation_name,
+                node_instance=node_instance,
+                relationship_instance=relationship_instance)
+        )
+    graph.chain(tasks=relationship_tasks)
+
+
+@workflow(simple_workflow=False)
+def relationship_tasks_subworkflow(graph, context, operation_name, node_instance, relationship_instance):
+    source_operation = relationship_instance.relationship.source_operations[operation_name]
+    target_instance = context.storage.node_instance.get(relationship_instance.target_id)
+    target_operation = relationship_instance.relationship.target_operations[operation_name]
+    graph.fan_out([
+        context.operation(
+            name='source_{0}'.format(operation_name),
+            node_instance=node_instance,
+            operation_details=source_operation),
+        context.operation(
+            name='target_{0}'.format(operation_name),
+            node_instance=target_instance,
+            operation_details=target_operation),
+    ])

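A hedged sketch of a scale-out call, assuming a deployment with a node
template named 'webserver' (`ctx` and the values are illustrative):

    # Hypothetical invocation: add two instances of 'webserver'; with
    # scale_compute=True the containing compute node is what actually scales.
    scale(context=ctx, node_id='webserver', delta=2, scale_compute=True)
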
http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/aria/workflows/builtin/uninstall.py
----------------------------------------------------------------------
diff --git a/aria/workflows/builtin/uninstall.py 
b/aria/workflows/builtin/uninstall.py
new file mode 100644
index 0000000..85f9bfb
--- /dev/null
+++ b/aria/workflows/builtin/uninstall.py
@@ -0,0 +1,41 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from aria import workflow
+
+from .workflows import uninstall_node_instance
+
+
+@workflow
+def uninstall(context, graph, node_instances=(), node_instance_sub_workflows=None):
+
+    node_instance_sub_workflows = node_instance_sub_workflows or {}
+    node_instances = node_instances or list(context.node_instances)
+
+    # create uninstall sub workflow for every node instance
+    for node_instance in node_instances:
+        node_instance_sub_workflow = uninstall_node_instance(
+            context=context,
+            node_instance=node_instance)
+        node_instance_sub_workflows[node_instance.id] = node_instance_sub_workflow
+        graph.add_task(node_instance_sub_workflow)
+
+    # create dependencies between the node instance sub workflows
+    for node_instance in node_instances:
+        node_instance_sub_workflow = node_instance_sub_workflows[node_instance.id]
+        for relationship_instance in reversed(node_instance.relationship_instances):
+            graph.dependency(
+                source_task=node_instance_sub_workflows[relationship_instance.target_id],
+                after=[node_instance_sub_workflow])

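A minimal sketch mirroring the install example, assuming the same kind of
context (illustrative names):

    # Hypothetical invocation: dependencies point the other way around, so a
    # relationship target is only uninstalled after its dependents.
    uninstall(context=ctx, graph=ctx.task_graph(name='uninstall'))
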
http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/aria/workflows/builtin/update.py
----------------------------------------------------------------------
diff --git a/aria/workflows/builtin/update.py b/aria/workflows/builtin/update.py
new file mode 100644
index 0000000..eb7a097
--- /dev/null
+++ b/aria/workflows/builtin/update.py
@@ -0,0 +1,21 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from aria import workflow
+
+
+@workflow
+def update(context, graph):
+    pass

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/aria/workflows/builtin/workflows.py
----------------------------------------------------------------------
diff --git a/aria/workflows/builtin/workflows.py 
b/aria/workflows/builtin/workflows.py
new file mode 100644
index 0000000..84f5242
--- /dev/null
+++ b/aria/workflows/builtin/workflows.py
@@ -0,0 +1,194 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from itertools import groupby
+
+from aria import workflow
+
+
+__all__ = [
+    'install_node_instance',
+    'preconfigure_relationship',
+    'postconfigure_relationship',
+    'establish_relationship',
+    'uninstall_node_instance',
+    'unlink_relationship',
+    'execute_operation_on_instance',
+    'relationships_tasks',
+    'relationship_tasks'
+]
+
+
+# Install node instance workflow and sub workflows
+
+@workflow(suffix_template='{node_instance.id}')
+def install_node_instance(context, graph, node_instance):
+    create_node_instance = context.operation(
+        name='cloudify.interfaces.lifecycle.create.{0}'.format(node_instance.id),
+        operation_details=node_instance.node.operations[
+            'cloudify.interfaces.lifecycle.create'],
+        node_instance=node_instance
+    )
+    configure_node_instance = context.operation(
+        name='cloudify.interfaces.lifecycle.configure.{0}'.format(node_instance.id),
+        operation_details=node_instance.node.operations[
+            'cloudify.interfaces.lifecycle.configure'],
+        node_instance=node_instance
+    )
+    start_node_instance = context.operation(
+        name='cloudify.interfaces.lifecycle.start.{0}'.format(node_instance.id),
+        operation_details=node_instance.node.operations[
+            'cloudify.interfaces.lifecycle.start'],
+        node_instance=node_instance
+    )
+    graph.chain(tasks=[
+        create_node_instance,
+        preconfigure_relationship(context=context, node_instance=node_instance),
+        configure_node_instance,
+        postconfigure_relationship(context=context, node_instance=node_instance),
+        start_node_instance,
+        establish_relationship(context=context, node_instance=node_instance),
+    ])
+
+
+@workflow(suffix_template='{node_instance.id}')
+def preconfigure_relationship(context, graph, node_instance):
+    graph.chain(tasks=relationships_tasks(
+        operation_name='cloudify.interfaces.relationship_lifecycle.preconfigure',
+        context=context,
+        node_instance=node_instance))
+
+
+@workflow(suffix_template='{node_instance.id}')
+def postconfigure_relationship(context, graph, node_instance):
+    graph.chain(tasks=relationships_tasks(
+        operation_name='cloudify.interfaces.relationship_lifecycle.postconfigure',
+        context=context,
+        node_instance=node_instance))
+
+
+@workflow(suffix_template='{node_instance.id}')
+def establish_relationship(context, graph, node_instance):
+    graph.chain(tasks=relationships_tasks(
+        operation_name='cloudify.interfaces.relationship_lifecycle.establish',
+        context=context,
+        node_instance=node_instance))
+
+
+# Uninstall node instance workflow and subworkflows
+
+@workflow(suffix_template='{node_instance.id}')
+def uninstall_node_instance(graph, context, node_instance):
+    stop_node_instance = context.operation(
+        name='cloudify.interfaces.lifecycle.stop.{0}'.format(node_instance.id),
+        operation_details=node_instance.node.operations[
+            'cloudify.interfaces.lifecycle.stop'],
+        node_instance=node_instance
+    )
+    delete_node_instance = context.operation(
+        name='cloudify.interfaces.lifecycle.delete.{0}'.format(node_instance.id),
+        operation_details=node_instance.node.operations[
+            'cloudify.interfaces.lifecycle.delete'],
+        node_instance=node_instance
+    )
+
+    graph.chain(tasks=[
+        stop_node_instance,
+        unlink_relationship(context=context, node_instance=node_instance),
+        delete_node_instance,
+    ])
+
+
+@workflow(suffix_template='{node_instance.id}')
+def unlink_relationship(context, graph, node_instance):
+    tasks = relationships_tasks(
+        operation_name='cloudify.interfaces.relationship_lifecycle.unlink',
+        context=context,
+        node_instance=node_instance
+    )
+    graph.chain(tasks=tasks)
+    return tasks
+
+
+@workflow(suffix_template='{node_instance.id}.{operation}')
+def execute_operation_on_instance(
+        context,
+        graph,
+        node_instance,
+        operation,
+        operation_kwargs,
+        allow_kwargs_override):
+
+    if allow_kwargs_override is not None:
+        operation_kwargs['allow_kwargs_override'] = allow_kwargs_override
+
+    task_name = '{node_instance.id}.{operation_name}'.format(
+        node_instance=node_instance,
+        operation_name=operation)
+
+    graph.add_task(context.operation(
+        name=task_name,
+        operation_details=node_instance.node.operations[operation],
+        node_instance=node_instance,
+        parameters=operation_kwargs)
+    )
+
+
+def relationships_tasks(operation_name, context, node_instance):
+    relationships_groups = groupby(
+        node_instance.relationship_instances,
+        key=lambda relationship_instance: relationship_instance.relationship.target_id)
+
+    sub_tasks = []
+    for index, (_, relationship_group) in enumerate(relationships_groups):
+        for relationship_instance in relationship_group:
+            relationship_subgraph = relationship_tasks(
+                node_instance=node_instance,
+                relationship_instance=relationship_instance,
+                context=context,
+                operation_name=operation_name,
+                index=index)
+            sub_tasks.append(relationship_subgraph)
+    return sub_tasks
+
+
+def relationship_tasks(node_instance, relationship_instance, context, operation_name, index=None):
+    # `index or ...` would discard a legitimate index of 0, so test for None
+    if index is None:
+        index = node_instance.relationship_instances.index(relationship_instance)
+    sub_workflow_name = '{name}.{index}.{node_instance.id}'.format(
+        name=operation_name,
+        index=index,
+        node_instance=node_instance,
+    )
+    operation_name_template = '{name}.{index}.{{0}}.<{source_id}, {target_id}>'.format(
+        name=operation_name,
+        index=index,
+        source_id=node_instance.id,
+        target_id=relationship_instance.target_id,
+    )
+    source_operation = context.operation(
+        name=operation_name_template.format('source'),
+        node_instance=node_instance,
+        operation_details=relationship_instance.relationship.source_operations[
+            operation_name])
+    target_operation = context.operation(
+        name=operation_name_template.format('target'),
+        node_instance=context.storage.node_instance.get(
+            relationship_instance.target_id),
+        operation_details=relationship_instance.relationship.target_operations[
+            operation_name])
+    sub_graph = context.task_graph(name=sub_workflow_name)
+    sub_graph.add_task(source_operation)
+    sub_graph.add_task(target_operation)
+    return sub_graph

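One subtlety worth noting in relationships_tasks above: itertools.groupby
only groups *consecutive* items, so the relationship instances must already
be sorted by target_id for the grouping to be meaningful. A small,
self-contained illustration:

    from itertools import groupby

    targets = ['db', 'web', 'db']
    print([k for k, _ in groupby(targets)])          # ['db', 'web', 'db']
    print([k for k, _ in groupby(sorted(targets))])  # ['db', 'web']
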
http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/aria/workflows/engine/__init__.py
----------------------------------------------------------------------
diff --git a/aria/workflows/engine/__init__.py 
b/aria/workflows/engine/__init__.py
new file mode 100644
index 0000000..ae1e83e
--- /dev/null
+++ b/aria/workflows/engine/__init__.py
@@ -0,0 +1,14 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/aria/workflows/engine/engine.py
----------------------------------------------------------------------
diff --git a/aria/workflows/engine/engine.py b/aria/workflows/engine/engine.py
new file mode 100644
index 0000000..7b86fb5
--- /dev/null
+++ b/aria/workflows/engine/engine.py
@@ -0,0 +1,185 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import time
+from datetime import datetime
+
+from contextlib import contextmanager
+from networkx import DiGraph
+
+from aria.events import (
+    start_workflow_signal,
+    on_success_workflow_signal,
+    on_failure_workflow_signal,
+    start_task_signal,
+    on_success_task_signal,
+    on_failure_task_signal,
+)
+from aria.logger import LoggerMixin
+
+
+class Engine(LoggerMixin):
+
+    def __init__(self, executor, workflow_context, tasks_graph, **kwargs):
+        super(Engine, self).__init__(**kwargs)
+        self._workflow_context = workflow_context
+        self._tasks_graph = tasks_graph
+        self._execution_graph = DiGraph()
+        self._executor = executor
+        self._build_execution_graph(self._workflow_context, self._tasks_graph)
+
+    def _build_execution_graph(self, workflow_context, graph):
+        pass
+
+    def execute(self):
+        execution_id = self._workflow_context.execution_id
+        with self._connect_signals():
+            try:
+                start_workflow_signal.send(self, execution_id=execution_id)
+                while True:
+                    for task in self._ended_tasks():
+                        self._handle_ended_tasks(task)
+                    for task in self._executable_tasks():
+                        self._handle_executable_task(task)
+                    if self._all_tasks_consumed():
+                        break
+                    else:
+                        time.sleep(0.1)
+                on_success_workflow_signal.send(self, execution_id=execution_id)
+            except BaseException as e:
+                on_failure_workflow_signal.send(self, execution_id=execution_id, exception=e)
+                raise
+
+    def _executable_tasks(self):
+        now = time.time()
+        return (task for task in self._tasks_iter()
+                if task.status == task.PENDING and
+                task.eta <= now and
+                not self._task_has_dependencies(task))
+
+    def _ended_tasks(self):
+        return (task for task in self._tasks_iter() if task.status in task.END_STATES)
+
+    def _task_has_dependencies(self, task):
+        return len(self._execution_graph.succ.get(task.id, {})) > 0
+
+    def _all_tasks_consumed(self):
+        return len(self._execution_graph.node) == 0
+
+    def _tasks_iter(self):
+        return (data['task'] for _, data in self._execution_graph.nodes_iter(data=True))
+
+    def _get_task(self, task_id):
+        return self._execution_graph.node[task_id]['task']
+
+    def _handle_executable_task(self, task):
+        self._executor.execute(task)
+
+    def _handle_ended_tasks(self, task):
+        if task.status == task.FAILED:
+            raise RuntimeError('Workflow failed')
+        else:
+            self._execution_graph.remove_node(task.id)
+
+    def _task_started_receiver(self, task_id, *args, **kwargs):
+        task = self._get_task(task_id)
+        operation_context = task.operation_context
+        operation = operation_context.operation
+        operation.started_at = datetime.utcnow()
+        operation.status = operation.STARTED
+        operation_context.operation = operation
+
+    def _task_failed_receiver(self, task_id, *args, **kwargs):
+        task = self._get_task(task_id)
+        operation_context = task.operation_context
+        operation = operation_context.operation
+        operation.ended_at = datetime.utcnow()
+        operation.status = operation.FAILED
+        operation_context.operation = operation
+
+    def _task_succeeded_receiver(self, task_id, *args, **kwargs):
+        task = self._get_task(task_id)
+        operation_context = task.operation_context
+        operation = operation_context.operation
+        operation.ended_at = datetime.utcnow()
+        operation.status = operation.SUCCESS
+        operation_context.operation = operation
+
+    def _start_workflow_receiver(self, *args, **kwargs):
+        Execution = self._workflow_context.storage.execution.model_cls
+        execution = Execution(
+            id=self._workflow_context.execution_id,
+            deployment_id=self._workflow_context.deployment_id,
+            workflow_id=self._workflow_context.workflow_id,
+            blueprint_id=self._workflow_context.blueprint_id,
+            status=Execution.PENDING,
+            created_at=datetime.utcnow(),
+            error='',
+            parameters=self._workflow_context.parameters,
+            is_system_workflow=False
+        )
+        self._workflow_context.execution = execution
+
+    def _workflow_failed_receiver(self, exception, *args, **kwargs):
+        execution = self._workflow_context.execution
+        execution.error = str(exception)
+        execution.status = execution.FAILED
+        self._workflow_context.execution = execution
+
+    def _workflow_succeeded_receiver(self, *args, **kwargs):
+        execution = self._workflow_context.execution
+        execution.status = execution.TERMINATED
+        self._workflow_context.execution = execution
+
+    @contextmanager
+    def _connect_signals(self):
+        start_workflow_signal.connect(self._start_workflow_receiver)
+        on_success_workflow_signal.connect(self._workflow_succeeded_receiver)
+        on_failure_workflow_signal.connect(self._workflow_failed_receiver)
+        start_task_signal.connect(self._task_started_receiver)
+        on_success_task_signal.connect(self._task_succeeded_receiver)
+        on_failure_task_signal.connect(self._task_failed_receiver)
+        try:
+            yield
+        finally:
+            start_workflow_signal.disconnect(self._start_workflow_receiver)
+            on_success_workflow_signal.disconnect(self._workflow_succeeded_receiver)
+            on_failure_workflow_signal.disconnect(self._workflow_failed_receiver)
+            start_task_signal.disconnect(self._task_started_receiver)
+            on_success_task_signal.disconnect(self._task_succeeded_receiver)
+            on_failure_task_signal.disconnect(self._task_failed_receiver)
+
+
+class Task(object):
+
+    def __init__(self, operation_context):
+        self.operation_context = operation_context
+        self._create_operation_in_storage()
+
+    def _create_operation_in_storage(self):
+        Operation = self.operation_context.storage.operation.model_cls
+        operation = Operation(
+            id=self.operation_context.id,
+            execution_id=self.operation_context.execution_id,
+            max_retries=self.operation_context.parameters.get('max_retries', 1),
+            status=Operation.PENDING,
+        )
+        self.operation_context.operation = operation
+
+    def __getattr__(self, attr):
+        try:
+            return getattr(self.operation_context, attr)
+        except AttributeError:
+            return super(Task, self).__getattribute__(attr)

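A hedged sketch of wiring the engine together, assuming a tasks graph built
by one of the @workflow functions above (`ctx` and `tasks_graph` are
illustrative):

    # Hypothetical driver: the engine polls for tasks whose dependencies are
    # consumed and hands each one to the executor.
    engine = Engine(
        executor=LocalExecutor(),
        workflow_context=ctx,
        tasks_graph=tasks_graph)
    engine.execute()
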
http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/aria/workflows/engine/executor.py
----------------------------------------------------------------------
diff --git a/aria/workflows/engine/executor.py 
b/aria/workflows/engine/executor.py
new file mode 100644
index 0000000..1b9f276
--- /dev/null
+++ b/aria/workflows/engine/executor.py
@@ -0,0 +1,41 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from aria.events import (
+    start_task_signal,
+    on_success_task_signal,
+    on_failure_task_signal,
+)
+
+
+class Executor(object):
+
+    def execute(self, task):
+        raise NotImplementedError
+
+    def task_started(self, task_id):
+        start_task_signal.send(self, task_id=task_id)
+
+    def task_failed(self, task_id, exception):
+        on_failure_task_signal.send(self, task_id=task_id, exception=exception)
+
+    def task_succeeded(self, task_id):
+        on_success_task_signal.send(self, task_id=task_id)
+
+
+class LocalExecutor(Executor):
+
+    def execute(self, task):
+        pass

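A minimal sketch of a concrete executor built on this base class; the actual
operation invocation is left hypothetical, since task dispatch is not part of
this commit:

    import threading

    class ThreadExecutor(Executor):

        def execute(self, task):
            def run():
                self.task_started(task.id)
                try:
                    # ... invoke the task's operation here (hypothetical) ...
                    self.task_succeeded(task.id)
                except BaseException as e:
                    self.task_failed(task.id, exception=e)
            threading.Thread(target=run).start()
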
http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/aria/workflows/exceptions.py
----------------------------------------------------------------------
diff --git a/aria/workflows/exceptions.py b/aria/workflows/exceptions.py
new file mode 100644
index 0000000..a815976
--- /dev/null
+++ b/aria/workflows/exceptions.py
@@ -0,0 +1,47 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class ExecutorException(Exception):
+    pass
+
+
+class ProcessException(ExecutorException):
+    def __init__(self, command, stderr=None, stdout=None, return_code=None):
+        """
+        Process class Exception
+        :param list command: child process command
+        :param str message: custom message
+        :param str stderr: child process stderr
+        :param str stdout: child process stdout
+        :param int return_code: child process exit code
+        """
+        super(ProcessException, self).__init__("child process failed")
+        self.command = command
+        self.stderr = stderr
+        self.stdout = stdout
+        self.return_code = return_code
+
+    @property
+    def explanation(self):
+        return (
+            'Command "{error.command}" executed with an error.\n'
+            'code: {error.return_code}\n'
+            'error: {error.stderr}\n'
+            'output: {error.stdout}'.format(error=self))
+
+
+class AriaEngineError(Exception):
+    pass

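A brief, hedged illustration of how the explanation property might be used
(run_child_process is hypothetical):

    try:
        run_child_process(['pip', 'install', 'some-plugin'])
    except ProcessException as e:
        # explanation folds command, exit code, stderr and stdout into one
        # printable message.
        print(e.explanation)
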
http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/ctx_api
----------------------------------------------------------------------
diff --git a/ctx_api b/ctx_api
new file mode 100644
index 0000000..07db9e9
--- /dev/null
+++ b/ctx_api
@@ -0,0 +1,113 @@
+
+CTX API:
+_WorkflowContextBase:
+       1. graph_mode - return task_graph class and change mode to "graph_mode" (in Aria we will remove graph-mode...)
+       2. bootstrap_context - return bootstrap_context (in Aria we will remove...)
+       3. internal - return internal (in Aria we will remove...)
+       4. execution_id - context execution id
+       5. workflow_id - context wofklow id
+       6. local - context is local execution (Maybe remove - it shouldn't 
change if local or not)
+       7. logger - contextual logger - Aria implements with LoggerMixin
+       8. send_event - sends an event to rabbitMQ. - in Aria we will use 
events (in relevant tasks...)
+       9. update_execution_status - remove in Aria mayby use events?, maybe 
move to task level
+       10. execute_task - remove from ctx - in Aria it will be a part of the 
executor...
+       11. local_task -  remove from ctx - in Aria it will be a part of the 
executor...
+       12. remote_task -  remove from ctx - in Aria it will be a part of the 
executor...
+----------------------------------------------------------------------------------------------------------------------------
+WorkflowNodesAndInstancesContainer:
+       13. nodes - iterator that every item = CloudifyWorkflowNode (In Aria 
CloudifyWorkflowNode == aria.storage.models.Node)
+       14. nodes_instances - iterates over a list of 
CloudifyWorkflowNodeInstance (In Aria CloudifyWorkflowNodeInstance == 
aria.storage.models.NodeInstance)
+       15. get_node - returns the node by Id from the list. Will directly 
derive from the Nodes in Aria storage.models module
+       16. get_node_instance - returns the node by Id from the list. Will 
directly derive from the NodesInstance in Aria storage.models module
+       (*) WorkflowNodesAndInstancesContainer => Node, NodeInstance
+               WorkflowNodesAndInstancesContainer.get_node == Node.get 
(storage.models.Node)
+               WorkflowNodesAndInstancesContainer.get_node_instanse == 
NodeInstance.get (storage.models.NodeInstance)
+       17*. redesign node/nodeInstance trees... (do we need relationship class 
/ )  
+
+(*) Maybe for ctx we can use the storage model/field classes but without the 
"db..." (just clean api)
+     Or maby create readOnly model (no db)
+       18. CloudifyWorkflowRelationshipInstance and 
CloudifyWorkflowRelationship - remove in Aria because in here we use 
relationship and nod and node_instance api but add execution api that we dohn't 
want here...
+               
+when ctx initializated we query all nodes and node_instances,
+in the new Aria code we will:
+       1. lazy query
+       2. maybe cache with TTL???
+
+
+       19. blueprint - Holds the blurpint_id and the blueprint itself. will be 
replaced in aria using the models.Blueprint class
+       20. deployment - Holds the deployment_id and workflow_context, isn't 
really required.
+               a. start_modification - needs to be removed, and the workflow 
engine used instead.
+               b. scaling_groups - could be removed, and the value could be 
extracted models.
+----------------------------------------------------------------------------------------------------------------------------
+CloudifySystemWideWorkflowContext(_WorkflowContextBase):
+    1. deployments_context - local deployment context (with lazy loading) - should probably be replaced with _WorkflowContextBase and inner lazy loading.
+    2. _ManagedCloudifyWorkflowContext - tasks should not be implemented here either way, so this is not necessary.
+----------------------------------------------------------------------------------------------------------------------------
+CloudifyWorkflowContextInternal:
+    1. get_task_configuration - retrieves the task configuration - isn't really needed here, but rather for the task executions. The workflow engine could read it directly from storage.
+    2. get_subgraph_task_configuration - same as get_task_configuration.
+    3. task_graph - not needed here; part of the workflow engine.
+    4. graph_mode - not needed here; part of the workflow engine. (graph_mode setter as well)
+    5. start_event_monitor - remove (re-implement in the events module).
+    6. stop_event_monitor - remove (re-implement in the events module).
+    7. send_task_event - remove (re-implement in the events module).
+    8. send_workflow_event - remove (re-implement in the events module).
+    9. start_local_tasks_processing - remove (re-implement in the workflow engine as part of internal executions).
+    10. stop_local_tasks_processing - remove (re-implement in the workflow engine as part of internal executions).
+    11. add_local_task - remove (re-implement in the workflow engine as part of internal executions).
+    12. workflow_context - the context for the executions.
+    13. handler - a driver for the workflow type (local/remote).
+    14. local_task_processor - remove (re-implement in the workflow engine as part of internal executions).
+----------------------------------------------------------------------------------------------------------------------------
+LocalTasksProcessing: this entire class should be implemented at the workflow engine level, not in the ctx.
+                      However, note that this is for internal executions (and not local/remote workflows).
+----------------------------------------------------------------------------------------------------------------------------
+CloudifyWorkflowContextHandler: Abstract class for handling workflows
+    1. get_context_logging_handler
+    2. get_node_logging_handler
+    3. bootstrap_context
+    4. get_send_task_event_func
+    5. get_update_execution_status_task
+    6. get_send_node_event_task
+    7. get_send_workflow_event_task
+    8. get_task
+    9. operation_cloudify_context
+    10. get_set_state_task
+    11. get_get_state_task
+    12. send_workflow_event
+    13. download_deployment_resource
+    14. start_deployment_modification
+    15. finish_deployment_modification
+    16. rollback_deployment_modification
+    17. scaling_groups
+==================================
+RemoteContextHandler:
+    1. bootstrap_context - returns the context from storage (could access storage directly).
+    2. get_send_task_event_func - remove and replace with the events module; currently just redirects to events.
+    3. get_update_execution_status_task - returns a task which updates the execution state in storage; replace with the storage module.
+    4. get_send_workflow_event_task - returns a task which sends an event; replace with the events module.
+    5. get_task - creates a celery subtask for the current execution. Should be implemented in the workflow engine.
+    6. operation_cloudify_context - something to do with maintenance mode, for executions to be executed. This should probably remain here - maybe add this field to the storage.
+    7. get_set_state_task - returns a func which sets the state of the task in storage. Should be re-implemented in the workflow engine module.
+    8. get_get_state_task - returns a task which returns the state of a node instance; replace directly with the models.
+    9. download_deployment_resource - is there really a need for download_deployment_resource under the context?
+    (*) we should support creation of a generic task; only one method is needed to create a task, and it should be implemented in the workflow engine module, not the context module.
+==================================
+RemoteCloudifyWorkflowContextHandler:
+    1. get_node_logging_handler - replace with LoggerMixin
+    2. get_context_logging_handler - replace with LoggerMixin
+    3. download_deployment_resource - no need for resource download to be in the ctx.
+    4. start_deployment_modification - starts a deployment modification (adding/removing nodes). No need for that here.
+    5. finish_deployment_modification - see above.
+    6. rollback_deployment_modification - see above.
+    7. send_workflow_event - sends a custom workflow event. No need - use the events module.
+    8. get_send_node_event_task - returns a func which sends an event. Use the events module.
+    9. scaling_groups - returns the scaling groups - replace with direct model access.
+==================================
+SystemWideWfRemoteContextHandler:
+    1. get_node_logging_handler - replace with LoggerMixin
+    7. send_workflow_event - sends a custom workflow event. No need - use the events module.
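
The "lazy query, maybe cache with a TTL" note above could look roughly like the sketch below; the storage.node_instance.iter() call and the 60-second TTL are assumptions for illustration, not the planned API.

    import time

    class LazyNodeInstances(object):
        """Sketch: fetch node instances on first use, refresh after a TTL."""

        def __init__(self, storage, ttl_seconds=60):
            self._storage = storage  # assumed to expose node_instance.iter()
            self._ttl = ttl_seconds
            self._cache = None
            self._fetched_at = 0

        def __iter__(self):
            expired = time.time() - self._fetched_at > self._ttl
            if self._cache is None or expired:
                self._cache = list(self._storage.node_instance.iter())
                self._fetched_at = time.time()
            return iter(self._cache)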

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/requirements.txt
----------------------------------------------------------------------
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..c2b7663
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,6 @@
+PyYAML==3.10
+networkx==1.11
+requests==2.7.0
+retrying==1.3.3
+blinker==1.4
+jsonpickle

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/setup.py
----------------------------------------------------------------------
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..b7978fe
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+from setuptools import setup, find_packages
+
+_PACKAGE_NAME = 'aria'
+_PYTHON_SUPPORTED_VERSIONS = [(2, 6), (2, 7)]
+
+if (sys.version_info[0], sys.version_info[1]) not in _PYTHON_SUPPORTED_VERSIONS:
+    raise NotImplementedError(
+        'The {0} package supports Python 2.6 and 2.7 only'.format(
+            _PACKAGE_NAME))
+
+version = '0.0.1'
+execfile(os.path.join('.', _PACKAGE_NAME, 'VERSION.py'))
+
+
+try:
+    with open('./requirements.txt') as requirements:
+        install_requires = [requirement.strip() for requirement in requirements.readlines()]
+except IOError:
+    install_requires = []
+
+try:
+    import importlib
+except ImportError:
+    install_requires.append('importlib')
+
+setup(
+    name=_PACKAGE_NAME,
+    version=version,
+    author='aria-core',
+    author_email='cosmo-ad...@gigaspaces.com',
+    packages=find_packages(exclude=('*tests*',)),
+    license='LICENSE',
+    description='Aria Project',
+    zip_safe=False,
+    install_requires=install_requires,
+    entry_points={
+        'console_scripts': [
+            'aria = aria.cli.cli:main'
+        ]
+    }
+)
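
For context: the execfile() call above expects aria/VERSION.py to rebind the version name, with '0.0.1' as the fallback if the file leaves it untouched. A minimal VERSION.py consistent with this setup.py would be a single assignment (the value shown is a placeholder, not taken from this commit):

    # aria/VERSION.py -- hypothetical contents; the real value is not in this diff
    version = '0.1.0'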

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/tests/__init__.py
----------------------------------------------------------------------
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..ae1e83e
--- /dev/null
+++ b/tests/__init__.py
@@ -0,0 +1,14 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/tests/events/__init__.py
----------------------------------------------------------------------
diff --git a/tests/events/__init__.py b/tests/events/__init__.py
new file mode 100644
index 0000000..ae1e83e
--- /dev/null
+++ b/tests/events/__init__.py
@@ -0,0 +1,14 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/tests/events/test_builtin_event_handlers.py
----------------------------------------------------------------------
diff --git a/tests/events/test_builtin_event_handlers.py b/tests/events/test_builtin_event_handlers.py
new file mode 100644
index 0000000..2a1943e
--- /dev/null
+++ b/tests/events/test_builtin_event_handlers.py
@@ -0,0 +1,58 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import mock
+import pytest
+
+from aria.events.builtin_event_handlers import (
+    _OperationToNodeInstanceState,
+    _update_node_instance_state,
+    _operation_to_node_instance_state,
+)
+
+
+OPERATIONS = [
+    'cloudify.interfaces.lifecycle.create',
+    'cloudify.interfaces.lifecycle.configure',
+    'cloudify.interfaces.lifecycle.start',
+    'cloudify.interfaces.lifecycle.stop',
+    'cloudify.interfaces.lifecycle.delete',
+]
+
+
+class _Sender(object):
+    def __init__(self, task_name):
+        self.context = mock.MagicMock()
+        self.task_name = task_name
+
+
+def test_update_node_instance_state():
+    for op in OPERATIONS:
+        sender = _Sender(op)
+        _update_node_instance_state(sender=sender)
+
+        assert sender.context.storage.node_instance.store.called
+        assert sender.context.storage.node_instance.store.call_args[0][0].state == \
+               _operation_to_node_instance_state[op]
+
+    sender = _Sender('non_existing_op')
+    assert _update_node_instance_state(sender=sender) is None
+
+
+def test_operation_to_node_instance_state():
+    custom_op_to_node_state = _OperationToNodeInstanceState(dict(custom_op='CUSTOM_OP'))
+
+    assert custom_op_to_node_state['custom_op'] == 'CUSTOM_OP'
+    assert custom_op_to_node_state['custom_op_cached'] == 'CUSTOM_OP'
+    with pytest.raises(KeyError):
+        assert custom_op_to_node_state['non_existing_key']
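
The custom_op_cached assertion implies that _OperationToNodeInstanceState resolves unknown keys by prefix against its known operations. One plausible implementation, inferred from this test rather than taken from the actual source, is:

    class _OperationToNodeInstanceState(dict):
        # Inferred sketch: fall back to any known key that prefixes the lookup
        def __missing__(self, key):
            for known_key, state in self.items():
                if key.startswith(known_key):
                    return state
            raise KeyError(key)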

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/tests/events/test_workflow_enginge_event_handlers.py
----------------------------------------------------------------------
diff --git a/tests/events/test_workflow_enginge_event_handlers.py b/tests/events/test_workflow_enginge_event_handlers.py
new file mode 100644
index 0000000..e69de29

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/tests/requirements.txt
----------------------------------------------------------------------
diff --git a/tests/requirements.txt b/tests/requirements.txt
new file mode 100644
index 0000000..07d82a6
--- /dev/null
+++ b/tests/requirements.txt
@@ -0,0 +1,6 @@
+mock>=1.0.1
+testtools
+tox==1.6.1
+pylint==1.5.5
+pytest==3.0.2
+pytest-cov==2.3.1

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/tests/storage/__init__.py
----------------------------------------------------------------------
diff --git a/tests/storage/__init__.py b/tests/storage/__init__.py
new file mode 100644
index 0000000..715f130
--- /dev/null
+++ b/tests/storage/__init__.py
@@ -0,0 +1,53 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tempfile import mkdtemp
+from shutil import rmtree
+
+from aria.storage import ModelDriver
+
+
+class InMemoryModelDriver(ModelDriver):
+    def __init__(self, **kwargs):
+        super(InMemoryModelDriver, self).__init__(**kwargs)
+        self.storage = {}
+
+    def create(self, name, *args, **kwargs):
+        self.storage[name] = {}
+
+    def get(self, name, entry_id, **kwargs):
+        return self.storage[name][entry_id].copy()
+
+    def store(self, name, entry_id, entry, **kwargs):
+        self.storage[name][entry_id] = entry
+
+    def delete(self, name, entry_id, **kwargs):
+        self.storage[name].pop(entry_id)
+
+    def iter(self, name, **kwargs):
+        for item in self.storage[name].itervalues():
+            yield item.copy()
+
+    def update(self, name, entry_id, **kwargs):
+        self.storage[name][entry_id].update(**kwargs)
+
+
+class TestFileSystem(object):
+    def setup_method(self, method):
+        self.path = mkdtemp('{0}'.format(self.__class__.__name__))
+
+    def teardown_method(self, method):
+        rmtree(self.path)
+        self.path = None
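
One design point worth noting in InMemoryModelDriver: get() and iter() return item.copy(), so callers that mutate a returned dict cannot corrupt the stored entry; only store() and update() write back. For example:

    driver = InMemoryModelDriver()
    driver.create('node')
    driver.store('node', 'id1', {'state': 'started'})

    entry = driver.get('node', 'id1')
    entry['state'] = 'stopped'  # mutates the returned copy only
    assert driver.get('node', 'id1')['state'] == 'started'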

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/tests/storage/test_drivers.py
----------------------------------------------------------------------
diff --git a/tests/storage/test_drivers.py b/tests/storage/test_drivers.py
new file mode 100644
index 0000000..9223586
--- /dev/null
+++ b/tests/storage/test_drivers.py
@@ -0,0 +1,136 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import pytest
+
+from aria.storage.drivers import FileSystemModelDriver, Driver, ModelDriver, ResourceDriver
+from aria.exceptions import StorageError
+
+from . import InMemoryModelDriver, TestFileSystem
+
+
+def test_base_storage_driver():
+    driver = Driver()
+    driver.connect()
+    driver.disconnect()
+    driver.create('name')
+    with driver as connection:
+        assert driver is connection
+    with pytest.raises(StorageError):
+        with driver:
+            raise StorageError()
+
+
+def test_model_base_driver():
+    driver = ModelDriver()
+    with pytest.raises(NotImplementedError):
+        driver.get('name', 'id')
+    with pytest.raises(NotImplementedError):
+        driver.store('name', entry={}, entry_id=None)
+    with pytest.raises(NotImplementedError):
+        driver.update('name', 'id', update_field=1)
+    with pytest.raises(NotImplementedError):
+        driver.delete('name', 'id')
+    with pytest.raises(NotImplementedError):
+        driver.iter('name')
+
+
+def test_resource_base_driver():
+    driver = ResourceDriver()
+    with pytest.raises(NotImplementedError):
+        driver.download('name', 'id', destination='dest')
+    with pytest.raises(NotImplementedError):
+        driver.upload('name', 'id', source='')
+    with pytest.raises(NotImplementedError):
+        driver.data('name', 'id')
+
+
+def test_custom_driver():
+    entry_dict = {
+        'id': 'entry_id',
+        'entry_value': 'entry_value'
+    }
+
+    with InMemoryModelDriver() as driver:
+        driver.create('entry')
+        assert driver.storage['entry'] == {}
+
+        driver.store(name='entry', entry=entry_dict, entry_id=entry_dict['id'])
+        assert driver.get(name='entry', entry_id='entry_id') == entry_dict
+
+        assert list(node for node in driver.iter('entry')) == [entry_dict]
+
+        driver.update(name='entry', entry_id=entry_dict['id'], entry_value='new_value')
+        # update() mutated the stored dict in place; entry_dict is that same
+        # object, so the freshly fetched copy still compares equal to it
+        assert driver.get(name='entry', entry_id='entry_id') == entry_dict
+
+        driver.delete(name='entry', entry_id='entry_id')
+
+        with pytest.raises(KeyError):
+            driver.get(name='entry', entry_id='entry_id')
+
+
+class TestFileSystemDriver(TestFileSystem):
+    path = None
+
+    def setup_method(self, method):
+        super(TestFileSystemDriver, self).setup_method(method)
+        self.driver = FileSystemModelDriver(directory=self.path)
+
+    def test_name(self):
+        assert repr(self.driver) == (
+            'FileSystemModelDriver(directory={self.path})'.format(self=self))
+
+    def test_create(self):
+        self.driver.create(name='node')
+        assert os.path.exists(os.path.join(self.path, 'node'))
+
+    def test_store(self):
+        self.test_create()
+        self.driver.store(name='node', entry_id='test_id', entry={'test': 'test'})
+        assert os.path.exists(os.path.join(self.path, 'node', 'test_id'))
+
+    def test_update(self):
+        self.test_store()
+        self.driver.update(name='node', entry_id='test_id', test='updated_test')
+        entry = self.driver.get(name='node', entry_id='test_id')
+        assert entry == {'test': 'updated_test'}
+
+    def test_get(self):
+        self.test_store()
+        entry = self.driver.get(name='node', entry_id='test_id')
+        assert entry == {'test': 'test'}
+
+    def test_delete(self):
+        self.test_store()
+        self.driver.delete(name='node', entry_id='test_id')
+        assert not os.path.exists(os.path.join(self.path, 'node', 'test_id'))
+
+    def test_iter(self):
+        self.test_create()
+        entries = [
+            {'test': 'test0'},
+            {'test': 'test1'},
+            {'test': 'test2'},
+            {'test': 'test3'},
+            {'test': 'test4'},
+        ]
+        for entry_id, entry in enumerate(entries):
+            self.driver.store('node', str(entry_id), entry)
+
+        for entry in self.driver.iter('node'):
+            entries.pop(entries.index(entry))
+
+        assert not entries
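
These tests also pin down the on-disk layout of FileSystemModelDriver: create() makes one directory per model name and store() writes one file per entry_id beneath it (the serialization format itself is not covered here). Under those assumptions:

    driver = FileSystemModelDriver(directory='/tmp/aria-models')
    driver.create(name='node')                     # -> /tmp/aria-models/node/
    driver.store(name='node', entry_id='n1', entry={'state': 'started'})
    # -> /tmp/aria-models/node/n1 now exists, as test_store asserts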

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/c52f949e/tests/storage/test_field.py
----------------------------------------------------------------------
diff --git a/tests/storage/test_field.py b/tests/storage/test_field.py
new file mode 100644
index 0000000..4bd2b96
--- /dev/null
+++ b/tests/storage/test_field.py
@@ -0,0 +1,108 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+from aria.storage.structures import (
+    Field,
+    IterField,
+    PointerField,
+    IterPointerField,
+    Model,
+)
+
+
+def model_factory():
+    class TestModel(Model):
+        id = Field(default='test_id')
+    return TestModel()
+
+
+def test_base_field():
+    field = Field()
+    assert vars(field) == vars(Field(type=None, choices=(), default=Field.NO_DEFAULT))
+
+
+def test_type_check():
+    field = Field(type=int)
+    assert vars(field) == vars(Field(type=int, choices=(), default=Field.NO_DEFAULT))
+    with pytest.raises(TypeError):
+        field.validate_instance('field', 'any_value', int)
+    field.validate_instance('field', 1, int)
+
+
+def test_field_choices():
+    field = Field(choices=[1, 2])
+    assert vars(field) == vars(Field(type=None, choices=[1, 2], default=Field.NO_DEFAULT))
+    field.validate_in_choice('field', 1, field.choices)
+
+    with pytest.raises(TypeError):
+        field.validate_in_choice('field', 'value', field.choices)
+
+
+def test_field_without_default():
+    class Test(object):
+        field = Field()
+    test = Test()
+    with pytest.raises(AttributeError, message="'Test' object has no attribute 'field'"):
+        test.field
+
+
+def test_field_default_func():
+    def true_func():
+        return True
+
+    field = Field(default=true_func)
+    assert vars(field) == vars(Field(type=None, choices=(), default=true_func))
+    assert field.default
+
+
+def test_field_default():
+    field = Field(default='value')
+    assert vars(field) == vars(Field(type=None, choices=(), default='value'))
+
+
+def test_iterable_field():
+    iter_field = IterField(type=int)
+    assert vars(iter_field) == vars(Field(type=int, default=Field.NO_DEFAULT))
+    iter_field.validate_value('iter_field', [1, 2])
+    with pytest.raises(TypeError):
+        iter_field.validate_value('iter_field', ['a', 1])
+
+
+def test_pointer_field():
+    test_model = model_factory()
+
+    pointer_field = PointerField(type=Model)
+    assert vars(pointer_field) == \
+        vars(PointerField(type=Model, choices=(), default=Field.NO_DEFAULT))
+    with pytest.raises(AssertionError):
+        PointerField(type=list)
+    pointer_field.validate_value('pointer_field', test_model)
+    with pytest.raises(TypeError):
+        pointer_field.validate_value('pointer_field', int)
+
+
+def test_iterable_pointer_field():
+    test_model = model_factory()
+    iter_pointer_field = IterPointerField(type=Model)
+    assert vars(iter_pointer_field) == \
+        vars(IterPointerField(type=Model, default=Field.NO_DEFAULT))
+    with pytest.raises(AssertionError):
+        IterPointerField(type=list)
+
+    iter_pointer_field.validate_value('iter_pointer_field', [test_model, test_model])
+    with pytest.raises(TypeError):
+        iter_pointer_field.validate_value('iter_pointer_field', [int, test_model])
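
Taken together, these tests sketch Field as a data descriptor: NO_DEFAULT makes attribute access raise AttributeError, callable defaults are evaluated lazily, and type/choices drive validation. A minimal descriptor consistent with just the default-handling tests might look like this (an illustrative sketch, not the aria.storage.structures implementation):

    class Field(object):
        NO_DEFAULT = object()

        def __init__(self, type=None, choices=(), default=NO_DEFAULT):
            self.type = type
            self.choices = choices
            self._default = default

        def __get__(self, instance, owner):
            if instance is None:
                return self
            if self._default is Field.NO_DEFAULT:
                # Mirrors test_field_without_default: no default, no attribute
                raise AttributeError(
                    '{0!r} object has no attribute'.format(owner.__name__))
            return self.default

        @property
        def default(self):
            # Callable defaults are evaluated on access (test_field_default_func)
            return self._default() if callable(self._default) else self._default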
