Posted to dev@ariatosca.apache.org by mx...@apache.org on 2017/06/20 16:14:50 UTC

[4/4] incubator-ariatosca git commit: ARIA-278 remove core tasks

ARIA-278 remove core tasks


Project: http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/commit/1fee85c4
Tree: http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/tree/1fee85c4
Diff: http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/diff/1fee85c4

Branch: refs/heads/ARIA-278-Remove-core-tasks
Commit: 1fee85c4193c635d8598affbf769d306917760d8
Parents: 9907520
Author: max-orlov <ma...@gigaspaces.com>
Authored: Sun Jun 11 19:05:35 2017 +0300
Committer: max-orlov <ma...@gigaspaces.com>
Committed: Tue Jun 20 19:14:41 2017 +0300

----------------------------------------------------------------------
 aria/modeling/orchestration.py                  | 143 +++++++---
 aria/orchestrator/context/operation.py          |   7 +
 aria/orchestrator/context/workflow.py           |  21 ++
 aria/orchestrator/workflow_runner.py            |  22 +-
 aria/orchestrator/workflows/api/task.py         |   7 +
 aria/orchestrator/workflows/core/__init__.py    |   2 +-
 aria/orchestrator/workflows/core/compile.py     | 116 ++++++++
 aria/orchestrator/workflows/core/engine.py      | 121 +++++----
 .../workflows/core/events_handler.py            | 137 +++++-----
 aria/orchestrator/workflows/core/task.py        | 271 -------------------
 aria/orchestrator/workflows/core/translation.py | 109 --------
 aria/orchestrator/workflows/events_logging.py   |  25 +-
 aria/orchestrator/workflows/executor/base.py    |  31 ++-
 aria/orchestrator/workflows/executor/dry.py     |  57 ++--
 aria/orchestrator/workflows/executor/process.py |  18 +-
 aria/orchestrator/workflows/executor/thread.py  |  18 +-
 tests/__init__.py                               |   2 +
 tests/orchestrator/context/__init__.py          |   9 +-
 tests/orchestrator/context/test_operation.py    |  26 +-
 tests/orchestrator/context/test_serialize.py    |  10 +-
 .../orchestrator/execution_plugin/test_local.py |  10 +-
 tests/orchestrator/execution_plugin/test_ssh.py |  11 +-
 tests/orchestrator/test_workflow_runner.py      |  43 ++-
 .../orchestrator/workflows/core/test_engine.py  |  23 +-
 .../orchestrator/workflows/core/test_events.py  |  20 +-
 tests/orchestrator/workflows/core/test_task.py  |  31 +--
 .../test_task_graph_into_execution_graph.py     |  55 ++--
 .../orchestrator/workflows/executor/__init__.py |  89 +++---
 .../workflows/executor/test_executor.py         |  26 +-
 .../workflows/executor/test_process_executor.py |  26 +-
 .../executor/test_process_executor_extension.py |   7 +-
 .../test_process_executor_tracked_changes.py    |   7 +-
 32 files changed, 677 insertions(+), 823 deletions(-)
----------------------------------------------------------------------
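
In short: this change drops the in-memory core task wrappers (core/task.py and
core/translation.py) in favor of persisting tasks directly as Task models,
compiled up front by the new core/compile.py. The engine no longer owns a
single executor and task graph; it takes a mapping of executor classes to
instances and an explicit workflow context. A minimal sketch of the new call
pattern, mirroring the workflow_runner.py changes below (the workflow context,
tasks graph and plugin manager are assumed to already exist):

    from aria.orchestrator.workflows.core import engine, compile
    from aria.orchestrator.workflows.executor.process import ProcessExecutor

    executor = ProcessExecutor(plugin_manager=plugin_manager)
    # Persist the execution tasks before running; tasks_graph comes from the
    # @workflow function and workflow_context is a WorkflowContext instance.
    compile.create_execution_tasks(workflow_context, tasks_graph, executor.__class__)
    eng = engine.Engine(executors={executor.__class__: executor})
    eng.execute(ctx=workflow_context)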


http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1fee85c4/aria/modeling/orchestration.py
----------------------------------------------------------------------
diff --git a/aria/modeling/orchestration.py b/aria/modeling/orchestration.py
index 995c8c2..17d2476 100644
--- a/aria/modeling/orchestration.py
+++ b/aria/modeling/orchestration.py
@@ -21,7 +21,6 @@ classes:
 """
 
 # pylint: disable=no-self-argument, no-member, abstract-method
-
 from datetime import datetime
 
 from sqlalchemy import (
@@ -34,19 +33,19 @@ from sqlalchemy import (
     String,
     Float,
     orm,
-)
+    PickleType)
 from sqlalchemy.ext.associationproxy import association_proxy
 from sqlalchemy.ext.declarative import declared_attr
 
 from ..orchestrator.exceptions import (TaskAbortException, TaskRetryException)
-from .mixins import ModelMixin, ParameterMixin
+from . import mixins
 from . import (
     relationship,
     types as modeling_types
 )
 
 
-class ExecutionBase(ModelMixin):
+class ExecutionBase(mixins.ModelMixin):
     """
     Execution model representation.
     """
@@ -152,7 +151,7 @@ class ExecutionBase(ModelMixin):
         )
 
 
-class PluginBase(ModelMixin):
+class PluginBase(mixins.ModelMixin):
     """
     An installed plugin.
 
@@ -213,7 +212,7 @@ class PluginBase(ModelMixin):
     uploaded_at = Column(DateTime, nullable=False, index=True)
 
 
-class TaskBase(ModelMixin):
+class TaskBase(mixins.ModelMixin):
     """
     Represents the smallest unit of stateful execution in ARIA. The task state includes inputs,
     outputs, as well as an atomic status, ensuring that the task can only be running once at any
@@ -257,10 +256,24 @@ class TaskBase(ModelMixin):
 
     __tablename__ = 'task'
 
-    __private_fields__ = ['node_fk',
-                          'relationship_fk',
-                          'plugin_fk',
-                          'execution_fk']
+    __private_fields__ = ['dependency_fk', 'node_fk',
+                          'relationship_fk', 'plugin_fk', 'execution_fk']
+
+    START_WORKFLOW = 'start_workflow'
+    END_WORKFLOW = 'end_workflow'
+    START_SUBWORKFLOW = 'start_subworkflow'
+    END_SUBWORKFLOW = 'end_subworkflow'
+    STUB = 'stub'
+    CONDITIONAL = 'conditional'
+
+    STUB_TYPES = (
+        START_WORKFLOW,
+        START_SUBWORKFLOW,
+        END_WORKFLOW,
+        END_SUBWORKFLOW,
+        STUB,
+        CONDITIONAL,
+    )
 
     PENDING = 'pending'
     RETRYING = 'retrying'
@@ -276,10 +289,28 @@ class TaskBase(ModelMixin):
         SUCCESS,
         FAILED,
     )
-
     INFINITE_RETRIES = -1
 
     @declared_attr
+    def execution(cls):
+        return relationship.many_to_one(cls, 'execution')
+
+    @declared_attr
+    def execution_fk(cls):
+        return relationship.foreign_key('execution', nullable=True)
+
+    status = Column(Enum(*STATES, name='status'), default=PENDING)
+    due_at = Column(DateTime, nullable=False, index=True, default=datetime.utcnow)
+    started_at = Column(DateTime, default=None)
+    ended_at = Column(DateTime, default=None)
+    attempts_count = Column(Integer, default=1)
+
+    _api_id = Column(String)
+    _executor = Column(PickleType)
+    _context_cls = Column(PickleType)
+    _stub_type = Column(Enum(*STUB_TYPES))
+
+    @declared_attr
     def logs(cls):
         return relationship.one_to_many(cls, 'log')
 
@@ -296,10 +327,6 @@ class TaskBase(ModelMixin):
         return relationship.many_to_one(cls, 'plugin')
 
     @declared_attr
-    def execution(cls):
-        return relationship.many_to_one(cls, 'execution')
-
-    @declared_attr
     def arguments(cls):
         return relationship.one_to_many(cls, 'argument', dict_key='name')
 
@@ -307,19 +334,8 @@ class TaskBase(ModelMixin):
     max_attempts = Column(Integer, default=1)
     retry_interval = Column(Float, default=0)
     ignore_failure = Column(Boolean, default=False)
-
-    # State
-    status = Column(Enum(*STATES, name='status'), default=PENDING)
-    due_at = Column(DateTime, nullable=False, index=True, default=datetime.utcnow())
-    started_at = Column(DateTime, default=None)
-    ended_at = Column(DateTime, default=None)
-    attempts_count = Column(Integer, default=1)
-
-    def has_ended(self):
-        return self.status in (self.SUCCESS, self.FAILED)
-
-    def is_waiting(self):
-        return self.status in (self.PENDING, self.RETRYING)
+    interface_name = Column(String)
+    operation_name = Column(String)
 
     @property
     def actor(self):
@@ -351,10 +367,6 @@ class TaskBase(ModelMixin):
     def plugin_fk(cls):
         return relationship.foreign_key('plugin', nullable=True)
 
-    @declared_attr
-    def execution_fk(cls):
-        return relationship.foreign_key('execution', nullable=True)
-
     # endregion
 
     # region association proxies
@@ -376,14 +388,6 @@ class TaskBase(ModelMixin):
 
     # endregion
 
-    @classmethod
-    def for_node(cls, actor, **kwargs):
-        return cls(node=actor, **kwargs)
-
-    @classmethod
-    def for_relationship(cls, actor, **kwargs):
-        return cls(relationship=actor, **kwargs)
-
     @staticmethod
     def abort(message=None):
         raise TaskAbortException(message)
@@ -392,8 +396,63 @@ class TaskBase(ModelMixin):
     def retry(message=None, retry_interval=None):
         raise TaskRetryException(message, retry_interval=retry_interval)
 
+    @declared_attr
+    def dependency_fk(self):
+        return relationship.foreign_key('task', nullable=True)
+
+    @declared_attr
+    def dependencies(cls):
+        # symmetric relationship causes funky graphs
+        return relationship.one_to_many_self(cls, 'dependency_fk')
+
+    def has_ended(self):
+        return self.status in (self.SUCCESS, self.FAILED)
+
+    def is_waiting(self):
+        if self._stub_type:
+            return not self.has_ended()
+        else:
+            return self.status in (self.PENDING, self.RETRYING)
+
+    @classmethod
+    def from_api_task(cls, api_task, executor, **kwargs):
+        instantiation_kwargs = {}
+
+        if hasattr(api_task.actor, 'outbound_relationships'):
+            instantiation_kwargs['node'] = api_task.actor
+        elif hasattr(api_task.actor, 'source_node'):
+            instantiation_kwargs['relationship'] = api_task.actor
+        else:
+            raise RuntimeError('No operation context could be created for {actor.model_cls}'
+                               .format(actor=api_task.actor))
+
+        instantiation_kwargs.update(
+            {
+                'name': api_task.name,
+                'status': cls.PENDING,
+                'max_attempts': api_task.max_attempts,
+                'retry_interval': api_task.retry_interval,
+                'ignore_failure': api_task.ignore_failure,
+                'execution': api_task._workflow_context.execution,
+                'interface_name': api_task.interface_name,
+                'operation_name': api_task.operation_name,
+
+                # Only non-stub tasks have these fields
+                'plugin': api_task.plugin,
+                'function': api_task.function,
+                'arguments': api_task.arguments,
+                '_api_id': api_task.id,
+                '_context_cls': api_task._context_cls,
+                '_executor': executor,
+            }
+        )
+
+        instantiation_kwargs.update(**kwargs)
+
+        return cls(**instantiation_kwargs)
+
 
-class LogBase(ModelMixin):
+class LogBase(mixins.ModelMixin):
 
     __tablename__ = 'log'
 
@@ -435,7 +494,7 @@ class LogBase(ModelMixin):
         return '{name}: {self.msg}'.format(name=name, self=self)
 
 
-class ArgumentBase(ParameterMixin):
+class ArgumentBase(mixins.ParameterMixin):
 
     __tablename__ = 'argument'
 

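The new Task.from_api_task classmethod replaces the removed for_node /
for_relationship helpers: the compiler (core/compile.py below) hands it the
API-level task plus the executor class recorded for later dispatch. A sketch
of that call, assuming api_task is an api.task.OperationTask and
operation_dependencies is a list of already-persisted Task models:

    task_model = models.Task.from_api_task(api_task=api_task,
                                           executor=executor_cls,  # a class, not an instance
                                           dependencies=operation_dependencies)
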
http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1fee85c4/aria/orchestrator/context/operation.py
----------------------------------------------------------------------
diff --git a/aria/orchestrator/context/operation.py b/aria/orchestrator/context/operation.py
index efdc04d..d43b847 100644
--- a/aria/orchestrator/context/operation.py
+++ b/aria/orchestrator/context/operation.py
@@ -18,6 +18,7 @@ Workflow and operation contexts
 """
 
 import threading
+from contextlib import contextmanager
 
 import aria
 from aria.utils import file
@@ -106,6 +107,12 @@ class BaseOperationContext(common.BaseContext):
             self.model.log._session.remove()
             self.model.log._engine.dispose()
 
+    @property
+    @contextmanager
+    def persist_changes(self):
+        yield
+        self.model.task.update(self.task)
+
 
 class NodeOperationContext(BaseOperationContext):
     """

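persist_changes replaces the removed task._update() protocol: code mutates the
model inside the block and the storage update happens once on exit. A minimal
sketch, assuming ctx is a BaseOperationContext:

    with ctx.persist_changes:
        ctx.task.status = ctx.task.SENT
    # on exit: ctx.model.task.update(ctx.task) has been called
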
http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1fee85c4/aria/orchestrator/context/workflow.py
----------------------------------------------------------------------
diff --git a/aria/orchestrator/context/workflow.py b/aria/orchestrator/context/workflow.py
index 920b237..aa5a786 100644
--- a/aria/orchestrator/context/workflow.py
+++ b/aria/orchestrator/context/workflow.py
@@ -20,6 +20,8 @@ Workflow and operation contexts
 import threading
 from contextlib import contextmanager
 
+from networkx import DiGraph
+
 from .exceptions import ContextException
 from .common import BaseContext
 
@@ -41,6 +43,7 @@ class WorkflowContext(BaseContext):
         self._task_max_attempts = task_max_attempts
         self._task_retry_interval = task_retry_interval
         self._task_ignore_failure = task_ignore_failure
+        self._execution_graph = None
         self._register_logger()
 
     def __repr__(self):
@@ -92,6 +95,24 @@ class WorkflowContext(BaseContext):
             }
         )
 
+    @property
+    def _graph(self):
+        if self._execution_graph is None:
+            graph = DiGraph()
+            for task in self.execution.tasks:
+                for dependency in task.dependencies:
+                    graph.add_edge(dependency, task)
+
+            self._execution_graph = graph
+
+        return self._execution_graph
+
+    @property
+    @contextmanager
+    def persist_changes(self):
+        yield
+        self._model.execution.update(self.execution)
+
 
 class _CurrentContext(threading.local):
     """

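The workflow context now rebuilds the execution graph lazily from the
persisted Task rows; an edge (dependency, task) means task waits on
dependency, so a node with no predecessors is runnable. A sketch of that
check, matching the engine code further down:

    graph = ctx._graph                                   # networkx.DiGraph, built once
    runnable = [t for t in graph if len(graph.pred.get(t, {})) == 0]
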
http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1fee85c4/aria/orchestrator/workflow_runner.py
----------------------------------------------------------------------
diff --git a/aria/orchestrator/workflow_runner.py b/aria/orchestrator/workflow_runner.py
index 848c59b..9e6b3ad 100644
--- a/aria/orchestrator/workflow_runner.py
+++ b/aria/orchestrator/workflow_runner.py
@@ -24,7 +24,7 @@ from datetime import datetime
 from . import exceptions
 from .context.workflow import WorkflowContext
 from .workflows import builtin
-from .workflows.core.engine import Engine
+from .workflows.core import engine, compile
 from .workflows.executor.process import ProcessExecutor
 from ..modeling import models
 from ..modeling import utils as modeling_utils
@@ -70,7 +70,7 @@ class WorkflowRunner(object):
         execution = self._create_execution_model(inputs)
         self._execution_id = execution.id
 
-        workflow_context = WorkflowContext(
+        self._workflow_context = WorkflowContext(
             name=self.__class__.__name__,
             model_storage=self._model_storage,
             resource_storage=resource_storage,
@@ -80,15 +80,17 @@ class WorkflowRunner(object):
             task_max_attempts=task_max_attempts,
             task_retry_interval=task_retry_interval)
 
+        # Set default executor
+        executor = executor or ProcessExecutor(plugin_manager=plugin_manager)
+
         # transforming the execution inputs to dict, to pass them to the workflow function
         execution_inputs_dict = dict(inp.unwrapped for inp in self.execution.inputs.values())
-        self._tasks_graph = workflow_fn(ctx=workflow_context, **execution_inputs_dict)
 
-        executor = executor or ProcessExecutor(plugin_manager=plugin_manager)
-        self._engine = Engine(
-            executor=executor,
-            workflow_context=workflow_context,
-            tasks_graph=self._tasks_graph)
+        self._tasks_graph = workflow_fn(ctx=self._workflow_context, **execution_inputs_dict)
+        compile.create_execution_tasks(
+            self._workflow_context, self._tasks_graph, executor.__class__)
+
+        self._engine = engine.Engine(executors={executor.__class__: executor})
 
     @property
     def execution_id(self):
@@ -103,10 +105,10 @@ class WorkflowRunner(object):
         return self._model_storage.service.get(self._service_id)
 
     def execute(self):
-        self._engine.execute()
+        self._engine.execute(ctx=self._workflow_context)
 
     def cancel(self):
-        self._engine.cancel_execution()
+        self._engine.cancel_execution(ctx=self._workflow_context)
 
     def _create_execution_model(self, inputs):
         execution = models.Execution(

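With the context held on the runner, execute and cancel both route it into the
engine explicitly. A sketch, with the WorkflowRunner constructor arguments
elided:

    runner = WorkflowRunner(...)   # hypothetical construction, args elided
    runner.execute()               # -> engine.execute(ctx=workflow_context)
    # from another thread, while execute() is still running:
    runner.cancel()                # -> engine.cancel_execution(ctx=workflow_context)
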
http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1fee85c4/aria/orchestrator/workflows/api/task.py
----------------------------------------------------------------------
diff --git a/aria/orchestrator/workflows/api/task.py b/aria/orchestrator/workflows/api/task.py
index ca125a8..f7d2c66 100644
--- a/aria/orchestrator/workflows/api/task.py
+++ b/aria/orchestrator/workflows/api/task.py
@@ -140,6 +140,13 @@ class OperationTask(BaseTask):
         self.arguments = modeling_utils.merge_parameter_values(arguments,
                                                                operation.arguments,
                                                                model_cls=models.Argument)
+        if getattr(self.actor, 'outbound_relationships', None) is not None:
+            self._context_cls = context.operation.NodeOperationContext
+        elif getattr(self.actor, 'source_node', None) is not None:
+            self._context_cls = context.operation.RelationshipOperationContext
+        else:
+            raise exceptions.TaskCreationException('Could not locate valid context for '
+                                                   '{actor.__class__}'.format(actor=self.actor))
 
     def __repr__(self):
         return self.name

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1fee85c4/aria/orchestrator/workflows/core/__init__.py
----------------------------------------------------------------------
diff --git a/aria/orchestrator/workflows/core/__init__.py b/aria/orchestrator/workflows/core/__init__.py
index e377153..81db43f 100644
--- a/aria/orchestrator/workflows/core/__init__.py
+++ b/aria/orchestrator/workflows/core/__init__.py
@@ -17,4 +17,4 @@
 Core for the workflow execution mechanism
 """
 
-from . import task, translation, engine
+from . import engine

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1fee85c4/aria/orchestrator/workflows/core/compile.py
----------------------------------------------------------------------
diff --git a/aria/orchestrator/workflows/core/compile.py b/aria/orchestrator/workflows/core/compile.py
new file mode 100644
index 0000000..932268a
--- /dev/null
+++ b/aria/orchestrator/workflows/core/compile.py
@@ -0,0 +1,116 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from ....modeling import models
+from .. import executor, api
+
+
+def create_execution_tasks(ctx, task_graph, default_executor):
+    execution = ctx.execution
+    _construct_execution_tasks(execution, task_graph, default_executor)
+    ctx.model.execution.update(execution)
+    return execution.tasks
+
+
+def _construct_execution_tasks(execution,
+                               task_graph,
+                               default_executor,
+                               stub_executor=executor.base.StubTaskExecutor,
+                               start_stub_type=models.Task.START_WORKFLOW,
+                               end_stub_type=models.Task.END_WORKFLOW,
+                               depends_on=()):
+    """
+    Translates the user graph to the execution graph
+    :param task_graph: The user's graph
+    :param start_stub_type: internal use
+    :param end_stub_type: internal use
+    :param depends_on: internal use
+    """
+    depends_on = list(depends_on)
+
+    # Insert start marker
+    start_task = models.Task(execution=execution,
+                             dependencies=depends_on,
+                             _api_id=_start_graph_suffix(task_graph.id),
+                             _stub_type=start_stub_type,
+                             _executor=stub_executor)
+
+    for task in task_graph.topological_order(reverse=True):
+        operation_dependencies = _get_tasks_from_dependencies(
+            execution, task_graph.get_dependencies(task), [start_task])
+
+        if isinstance(task, api.task.OperationTask):
+            models.Task.from_api_task(api_task=task,
+                                      executor=default_executor,
+                                      dependencies=operation_dependencies)
+
+        elif isinstance(task, api.task.WorkflowTask):
+            # Build the graph recursively while adding start and end markers
+            _construct_execution_tasks(
+                execution=execution,
+                task_graph=task,
+                default_executor=default_executor,
+                stub_executor=stub_executor,
+                start_stub_type=models.Task.START_SUBWORKFLOW,
+                end_stub_type=models.Task.END_SUBWORKFLOW,
+                depends_on=operation_dependencies
+            )
+        elif isinstance(task, api.task.StubTask):
+            models.Task(execution=execution,
+                        dependencies=operation_dependencies,
+                        _api_id=task.id,
+                        _executor=stub_executor,
+                        _stub_type=models.Task.STUB,
+                       )
+        else:
+            raise RuntimeError('Undefined state')
+
+    # Insert end marker
+    models.Task(dependencies=_get_non_dependent_tasks(execution) or [start_task],
+                execution=execution,
+                _api_id=_end_graph_suffix(task_graph.id),
+                _executor=stub_executor,
+                _stub_type=end_stub_type)
+
+
+def _start_graph_suffix(api_id):
+    return '{0}-Start'.format(api_id)
+
+
+def _end_graph_suffix(api_id):
+    return '{0}-End'.format(api_id)
+
+
+def _get_non_dependent_tasks(execution):
+    tasks_with_dependencies = set()
+    for task in execution.tasks:
+        tasks_with_dependencies.update(task.dependencies)
+    return list(set(execution.tasks) - set(tasks_with_dependencies))
+
+
+def _get_tasks_from_dependencies(execution, dependencies, default=()):
+    """
+    Returns task list from dependencies.
+    """
+    tasks = []
+    for dependency in dependencies:
+        if getattr(dependency, 'actor', False):
+            # This is an operation task, since only operation tasks have an actor
+            dependency_name = dependency.id
+        else:
+            dependency_name = _end_graph_suffix(dependency.id)
+        tasks.extend(task for task in execution.tasks if task._api_id == dependency_name)
+    return tasks or default

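For a task graph whose id is, say, 'wf' (a hypothetical id), the helpers above
bracket the persisted tasks with stub markers named by suffix:

    _start_graph_suffix('wf')   # -> 'wf-Start', a START_WORKFLOW stub task
    _end_graph_suffix('wf')     # -> 'wf-End', an END_WORKFLOW stub task
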
http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1fee85c4/aria/orchestrator/workflows/core/engine.py
----------------------------------------------------------------------
diff --git a/aria/orchestrator/workflows/core/engine.py b/aria/orchestrator/workflows/core/engine.py
index 3a96804..9f0ddd7 100644
--- a/aria/orchestrator/workflows/core/engine.py
+++ b/aria/orchestrator/workflows/core/engine.py
@@ -20,15 +20,13 @@ The workflow engine. Executes workflows
 import time
 from datetime import datetime
 
-import networkx
-
 from aria import logger
 from aria.modeling import models
 from aria.orchestrator import events
+from aria.orchestrator.context import operation
 
 from .. import exceptions
-from . import task as engine_task
-from . import translation
+from ..executor.base import StubTaskExecutor
 # Import required so all signals are registered
 from . import events_handler  # pylint: disable=unused-import
 
@@ -38,84 +36,105 @@ class Engine(logger.LoggerMixin):
     The workflow engine. Executes workflows
     """
 
-    def __init__(self, executor, workflow_context, tasks_graph, **kwargs):
+    def __init__(self, executors, **kwargs):
         super(Engine, self).__init__(**kwargs)
-        self._workflow_context = workflow_context
-        self._execution_graph = networkx.DiGraph()
-        translation.build_execution_graph(task_graph=tasks_graph,
-                                          execution_graph=self._execution_graph,
-                                          default_executor=executor)
+        self._executors = executors.copy()
+        self._executors.setdefault(StubTaskExecutor, StubTaskExecutor())
 
-    def execute(self):
+    def execute(self, ctx):
         """
         execute the workflow
         """
+        executing_tasks = []
         try:
-            events.start_workflow_signal.send(self._workflow_context)
+            events.start_workflow_signal.send(ctx)
             while True:
-                cancel = self._is_cancel()
+                cancel = self._is_cancel(ctx)
                 if cancel:
                     break
-                for task in self._ended_tasks():
-                    self._handle_ended_tasks(task)
-                for task in self._executable_tasks():
-                    self._handle_executable_task(task)
-                if self._all_tasks_consumed():
+                for task in self._ended_tasks(ctx, executing_tasks):
+                    self._handle_ended_tasks(ctx, task, executing_tasks)
+                for task in self._executable_tasks(ctx):
+                    self._handle_executable_task(ctx, task, executing_tasks)
+                if self._all_tasks_consumed(ctx):
                     break
                 else:
                     time.sleep(0.1)
             if cancel:
-                events.on_cancelled_workflow_signal.send(self._workflow_context)
+                events.on_cancelled_workflow_signal.send(ctx)
             else:
-                events.on_success_workflow_signal.send(self._workflow_context)
+                events.on_success_workflow_signal.send(ctx)
         except BaseException as e:
-            events.on_failure_workflow_signal.send(self._workflow_context, exception=e)
+            events.on_failure_workflow_signal.send(ctx, exception=e)
             raise
 
-    def cancel_execution(self):
+    @staticmethod
+    def cancel_execution(ctx):
         """
         Send a cancel request to the engine. If execution already started, execution status
         will be modified to 'cancelling' status. If execution is in pending mode, execution status
         will be modified to 'cancelled' directly.
         """
-        events.on_cancelling_workflow_signal.send(self._workflow_context)
+        events.on_cancelling_workflow_signal.send(ctx)
 
-    def _is_cancel(self):
-        return self._workflow_context.execution.status in (models.Execution.CANCELLING,
-                                                           models.Execution.CANCELLED)
+    @staticmethod
+    def _is_cancel(ctx):
+        execution = ctx.model.execution.refresh(ctx.execution)
+        return execution.status in (models.Execution.CANCELLING, models.Execution.CANCELLED)
 
-    def _executable_tasks(self):
+    def _executable_tasks(self, ctx):
         now = datetime.utcnow()
-        return (task for task in self._tasks_iter()
-                if task.is_waiting() and
-                task.due_at <= now and
-                not self._task_has_dependencies(task))
-
-    def _ended_tasks(self):
-        return (task for task in self._tasks_iter() if task.has_ended())
+        return (
+            task for task in self._tasks_iter(ctx)
+            if task.is_waiting() and task.due_at <= now and
+            not self._task_has_dependencies(ctx, task)
+        )
 
-    def _task_has_dependencies(self, task):
-        return len(self._execution_graph.pred.get(task.id, {})) > 0
+    @staticmethod
+    def _ended_tasks(ctx, executing_tasks):
+        for task in executing_tasks:
+            if task.has_ended() and task in ctx._graph:
+                yield task
 
-    def _all_tasks_consumed(self):
-        return len(self._execution_graph.node) == 0
+    @staticmethod
+    def _task_has_dependencies(ctx, task):
+        return len(ctx._graph.pred.get(task, [])) > 0
 
-    def _tasks_iter(self):
-        for _, data in self._execution_graph.nodes_iter(data=True):
-            task = data['task']
-            if isinstance(task, engine_task.OperationTask):
-                if not task.model_task.has_ended():
-                    self._workflow_context.model.task.refresh(task.model_task)
-            yield task
+    @staticmethod
+    def _all_tasks_consumed(ctx):
+        return len(ctx._graph.node) == 0
 
     @staticmethod
-    def _handle_executable_task(task):
-        if isinstance(task, engine_task.OperationTask):
-            events.sent_task_signal.send(task)
-        task.execute()
+    def _tasks_iter(ctx):
+        for task in ctx.execution.tasks:
+            yield ctx.model.task.refresh(task)
+
+    def _handle_executable_task(self, ctx, task, executing_tasks):
+        task_executor = self._executors[task._executor]
+
+        # If the task is a stub, a default context is provided, else it should hold the context cls
+        context_cls = operation.BaseOperationContext if task._stub_type else task._context_cls
+        op_ctx = context_cls(
+            model_storage=ctx.model,
+            resource_storage=ctx.resource,
+            workdir=ctx._workdir,
+            task_id=task.id,
+            actor_id=task.actor.id if task.actor else None,
+            service_id=task.execution.service.id,
+            execution_id=task.execution.id,
+            name=task.name
+        )
+
+        executing_tasks.append(task)
+
+        if not task._stub_type:
+            events.sent_task_signal.send(op_ctx)
+        task_executor.execute(op_ctx)
 
-    def _handle_ended_tasks(self, task):
+    @staticmethod
+    def _handle_ended_tasks(ctx, task, executing_tasks):
+        executing_tasks.remove(task)
         if task.status == models.Task.FAILED and not task.ignore_failure:
             raise exceptions.ExecutorException('Workflow failed')
         else:
-            self._execution_graph.remove_node(task.id)
+            ctx._graph.remove_node(task)

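Since the engine no longer holds per-execution state, one Engine instance can
drive any number of workflow contexts; it only needs an instance for each
executor class recorded on the tasks, plus the stub fallback it registers
itself. A sketch, assuming both executions were compiled with ProcessExecutor
as the default executor:

    eng = Engine(executors={ProcessExecutor: ProcessExecutor(plugin_manager=None)})
    eng.execute(ctx=first_workflow_context)
    eng.execute(ctx=second_workflow_context)   # same engine, different execution
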
http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1fee85c4/aria/orchestrator/workflows/core/events_handler.py
----------------------------------------------------------------------
diff --git a/aria/orchestrator/workflows/core/events_handler.py b/aria/orchestrator/workflows/core/events_handler.py
index 669fb43..2d71d2a 100644
--- a/aria/orchestrator/workflows/core/events_handler.py
+++ b/aria/orchestrator/workflows/core/events_handler.py
@@ -30,120 +30,121 @@ from ... import exceptions
 
 
 @events.sent_task_signal.connect
-def _task_sent(task, *args, **kwargs):
-    with task._update():
-        task.status = task.SENT
+def _task_sent(ctx, *args, **kwargs):
+    with ctx.persist_changes:
+        ctx.task.status = ctx.task.SENT
 
 
 @events.start_task_signal.connect
-def _task_started(task, *args, **kwargs):
-    with task._update():
-        task.started_at = datetime.utcnow()
-        task.status = task.STARTED
-    _update_node_state_if_necessary(task, is_transitional=True)
+def _task_started(ctx, *args, **kwargs):
+    with ctx.persist_changes:
+        ctx.task.started_at = datetime.utcnow()
+        ctx.task.status = ctx.task.STARTED
+        _update_node_state_if_necessary(ctx, is_transitional=True)
 
 
 @events.on_failure_task_signal.connect
-def _task_failed(task, exception, *args, **kwargs):
-    with task._update():
+def _task_failed(ctx, exception, *args, **kwargs):
+    with ctx.persist_changes:
         should_retry = all([
             not isinstance(exception, exceptions.TaskAbortException),
-            task.attempts_count < task.max_attempts or task.max_attempts == task.INFINITE_RETRIES,
-            # ignore_failure check here means the task will not be retries and it will be marked
+            ctx.task.attempts_count < ctx.task.max_attempts or
+            ctx.task.max_attempts == ctx.task.INFINITE_RETRIES,
+            # ignore_failure check here means the task will not be retried and it will be marked
             # as failed. The engine will also look at ignore_failure so it won't fail the
             # workflow.
-            not task.ignore_failure
+            not ctx.task.ignore_failure
         ])
         if should_retry:
             retry_interval = None
             if isinstance(exception, exceptions.TaskRetryException):
                 retry_interval = exception.retry_interval
             if retry_interval is None:
-                retry_interval = task.retry_interval
-            task.status = task.RETRYING
-            task.attempts_count += 1
-            task.due_at = datetime.utcnow() + timedelta(seconds=retry_interval)
+                retry_interval = ctx.task.retry_interval
+            ctx.task.status = ctx.task.RETRYING
+            ctx.task.attempts_count += 1
+            ctx.task.due_at = datetime.utcnow() + timedelta(seconds=retry_interval)
         else:
-            task.ended_at = datetime.utcnow()
-            task.status = task.FAILED
+            ctx.task.ended_at = datetime.utcnow()
+            ctx.task.status = ctx.task.FAILED
 
 
 @events.on_success_task_signal.connect
-def _task_succeeded(task, *args, **kwargs):
-    with task._update():
-        task.ended_at = datetime.utcnow()
-        task.status = task.SUCCESS
+def _task_succeeded(ctx, *args, **kwargs):
+    with ctx.persist_changes:
+        ctx.task.ended_at = datetime.utcnow()
+        ctx.task.status = ctx.task.SUCCESS
 
-    _update_node_state_if_necessary(task)
+        _update_node_state_if_necessary(ctx)
 
 
 @events.start_workflow_signal.connect
 def _workflow_started(workflow_context, *args, **kwargs):
-    execution = workflow_context.execution
-    # the execution may already be in the process of cancelling
-    if execution.status in (execution.CANCELLING, execution.CANCELLED):
-        return
-    execution.status = execution.STARTED
-    execution.started_at = datetime.utcnow()
-    workflow_context.execution = execution
+    with workflow_context.persist_changes:
+        execution = workflow_context.execution
+        # the execution may already be in the process of cancelling
+        if execution.status in (execution.CANCELLING, execution.CANCELLED):
+            return
+        execution.status = execution.STARTED
+        execution.started_at = datetime.utcnow()
 
 
 @events.on_failure_workflow_signal.connect
 def _workflow_failed(workflow_context, exception, *args, **kwargs):
-    execution = workflow_context.execution
-    execution.error = str(exception)
-    execution.status = execution.FAILED
-    execution.ended_at = datetime.utcnow()
-    workflow_context.execution = execution
+    with workflow_context.persist_changes:
+        execution = workflow_context.execution
+        execution.error = str(exception)
+        execution.status = execution.FAILED
+        execution.ended_at = datetime.utcnow()
 
 
 @events.on_success_workflow_signal.connect
 def _workflow_succeeded(workflow_context, *args, **kwargs):
-    execution = workflow_context.execution
-    execution.status = execution.SUCCEEDED
-    execution.ended_at = datetime.utcnow()
-    workflow_context.execution = execution
+    with workflow_context.persist_changes:
+        execution = workflow_context.execution
+        execution.status = execution.SUCCEEDED
+        execution.ended_at = datetime.utcnow()
 
 
 @events.on_cancelled_workflow_signal.connect
 def _workflow_cancelled(workflow_context, *args, **kwargs):
-    execution = workflow_context.execution
-    # _workflow_cancelling function may have called this function already
-    if execution.status == execution.CANCELLED:
-        return
-    # the execution may have already been finished
-    elif execution.status in (execution.SUCCEEDED, execution.FAILED):
-        _log_tried_to_cancel_execution_but_it_already_ended(workflow_context, execution.status)
-    else:
-        execution.status = execution.CANCELLED
-        execution.ended_at = datetime.utcnow()
-        workflow_context.execution = execution
+    with workflow_context.persist_changes:
+        execution = workflow_context.execution
+        # _workflow_cancelling function may have called this function already
+        if execution.status == execution.CANCELLED:
+            return
+        # the execution may have already been finished
+        elif execution.status in (execution.SUCCEEDED, execution.FAILED):
+            _log_tried_to_cancel_execution_but_it_already_ended(workflow_context, execution.status)
+        else:
+            execution.status = execution.CANCELLED
+            execution.ended_at = datetime.utcnow()
 
 
 @events.on_cancelling_workflow_signal.connect
 def _workflow_cancelling(workflow_context, *args, **kwargs):
-    execution = workflow_context.execution
-    if execution.status == execution.PENDING:
-        return _workflow_cancelled(workflow_context=workflow_context)
-    # the execution may have already been finished
-    elif execution.status in (execution.SUCCEEDED, execution.FAILED):
-        _log_tried_to_cancel_execution_but_it_already_ended(workflow_context, execution.status)
-    else:
-        execution.status = execution.CANCELLING
-        workflow_context.execution = execution
-
-
-def _update_node_state_if_necessary(task, is_transitional=False):
+    with workflow_context.persist_changes:
+        execution = workflow_context.execution
+        if execution.status == execution.PENDING:
+            return _workflow_cancelled(workflow_context=workflow_context)
+        # the execution may have already been finished
+        elif execution.status in (execution.SUCCEEDED, execution.FAILED):
+            _log_tried_to_cancel_execution_but_it_already_ended(workflow_context, execution.status)
+        else:
+            execution.status = execution.CANCELLING
+
+
+def _update_node_state_if_necessary(ctx, is_transitional=False):
     # TODO: this is not the right way to check! the interface name is arbitrary
     # and also will *never* be the type name
-    model_task = task.model_task
-    node = model_task.node if model_task is not None else None
+    node = ctx.task.node if ctx.task is not None else None
     if (node is not None) and \
-        (task.interface_name in ('Standard', 'tosca.interfaces.node.lifecycle.Standard')):
-        state = node.determine_state(op_name=task.operation_name, is_transitional=is_transitional)
+        (ctx.task.interface_name in ('Standard', 'tosca.interfaces.node.lifecycle.Standard')):
+        state = node.determine_state(op_name=ctx.task.operation_name,
+                                     is_transitional=is_transitional)
         if state:
             node.state = state
-            task.context.model.node.update(node)
+            ctx.model.node.update(node)
 
 
 def _log_tried_to_cancel_execution_but_it_already_ended(workflow_context, status):

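Note that every task-level signal now carries the operation context rather
than a core task wrapper, so the handlers above read ctx.task and persist via
ctx.persist_changes. Executors send signals accordingly, as in BaseExecutor
below; a sketch, assuming op_ctx is a BaseOperationContext:

    events.start_task_signal.send(op_ctx)         # handler sets op_ctx.task.STARTED
    events.on_success_task_signal.send(op_ctx)    # handler sets op_ctx.task.SUCCESS
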
http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1fee85c4/aria/orchestrator/workflows/core/task.py
----------------------------------------------------------------------
diff --git a/aria/orchestrator/workflows/core/task.py b/aria/orchestrator/workflows/core/task.py
deleted file mode 100644
index d732f09..0000000
--- a/aria/orchestrator/workflows/core/task.py
+++ /dev/null
@@ -1,271 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Workflow tasks
-"""
-
-from contextlib import contextmanager
-from datetime import datetime
-from functools import (
-    partial,
-    wraps,
-)
-
-
-from ....modeling import models
-from ...context import operation as operation_context
-from .. import exceptions
-
-
-def _locked(func=None):
-    if func is None:
-        return partial(_locked, func=_locked)
-
-    @wraps(func)
-    def _wrapper(self, value, **kwargs):
-        if self._update_fields is None:
-            raise exceptions.TaskException('Task is not in update mode')
-        return func(self, value, **kwargs)
-    return _wrapper
-
-
-class BaseTask(object):
-    """
-    Base class for Task objects
-    """
-
-    def __init__(self, id, executor, *args, **kwargs):
-        super(BaseTask, self).__init__(*args, **kwargs)
-        self._id = id
-        self._executor = executor
-
-    def execute(self):
-        return self._executor.execute(self)
-
-    @property
-    def id(self):
-        """
-        :return: the task's id
-        """
-        return self._id
-
-
-class StubTask(BaseTask):
-    """
-    Base stub task for marker user tasks that only mark the start/end of a workflow
-    or sub-workflow
-    """
-    STARTED = models.Task.STARTED
-    SUCCESS = models.Task.SUCCESS
-
-    def __init__(self, *args, **kwargs):
-        super(StubTask, self).__init__(*args, **kwargs)
-        self.status = models.Task.PENDING
-        self.due_at = datetime.utcnow()
-
-    def has_ended(self):
-        return self.status == self.SUCCESS
-
-    def is_waiting(self):
-        return not self.has_ended()
-
-
-class StartWorkflowTask(StubTask):
-    """
-    Task marking a workflow start
-    """
-    pass
-
-
-class EndWorkflowTask(StubTask):
-    """
-    Task marking a workflow end
-    """
-    pass
-
-
-class StartSubWorkflowTask(StubTask):
-    """
-    Task marking a subworkflow start
-    """
-    pass
-
-
-class EndSubWorkflowTask(StubTask):
-    """
-    Task marking a subworkflow end
-    """
-    pass
-
-
-class OperationTask(BaseTask):
-    """
-    Operation task
-    """
-    def __init__(self, api_task, *args, **kwargs):
-        # If no executor is provided, we infer that this is an empty task which does not need to be
-        # executed.
-        super(OperationTask, self).__init__(id=api_task.id, *args, **kwargs)
-        self._workflow_context = api_task._workflow_context
-        self.interface_name = api_task.interface_name
-        self.operation_name = api_task.operation_name
-        model_storage = api_task._workflow_context.model
-
-        actor = getattr(api_task.actor, '_wrapped', api_task.actor)
-
-        base_task_model = model_storage.task.model_cls
-        if isinstance(actor, models.Node):
-            context_cls = operation_context.NodeOperationContext
-            create_task_model = base_task_model.for_node
-        elif isinstance(actor, models.Relationship):
-            context_cls = operation_context.RelationshipOperationContext
-            create_task_model = base_task_model.for_relationship
-        else:
-            raise RuntimeError('No operation context could be created for {actor.model_cls}'
-                               .format(actor=actor))
-
-        task_model = create_task_model(
-            name=api_task.name,
-            actor=actor,
-            status=base_task_model.PENDING,
-            max_attempts=api_task.max_attempts,
-            retry_interval=api_task.retry_interval,
-            ignore_failure=api_task.ignore_failure,
-            execution=self._workflow_context.execution,
-
-            # Only non-stub tasks have these fields
-            plugin=api_task.plugin,
-            function=api_task.function,
-            arguments=api_task.arguments
-        )
-        self._workflow_context.model.task.put(task_model)
-
-        self._ctx = context_cls(name=api_task.name,
-                                model_storage=self._workflow_context.model,
-                                resource_storage=self._workflow_context.resource,
-                                service_id=self._workflow_context._service_id,
-                                task_id=task_model.id,
-                                actor_id=actor.id,
-                                execution_id=self._workflow_context._execution_id,
-                                workdir=self._workflow_context._workdir)
-        self._task_id = task_model.id
-        self._update_fields = None
-
-    @contextmanager
-    def _update(self):
-        """
-        A context manager which puts the task into update mode, enabling fields update.
-        :yields: None
-        """
-        self._update_fields = {}
-        try:
-            yield
-            for key, value in self._update_fields.items():
-                setattr(self.model_task, key, value)
-            self.model_task = self.model_task
-        finally:
-            self._update_fields = None
-
-    @property
-    def model_task(self):
-        """
-        Returns the task model in storage
-        :return: task in storage
-        """
-        return self._workflow_context.model.task.get(self._task_id)
-
-    @model_task.setter
-    def model_task(self, value):
-        self._workflow_context.model.task.put(value)
-
-    @property
-    def context(self):
-        """
-        Contexts for the operation
-        :return:
-        """
-        return self._ctx
-
-    @property
-    def status(self):
-        """
-        Returns the task status
-        :return: task status
-        """
-        return self.model_task.status
-
-    @status.setter
-    @_locked
-    def status(self, value):
-        self._update_fields['status'] = value
-
-    @property
-    def started_at(self):
-        """
-        Returns when the task started
-        :return: when task started
-        """
-        return self.model_task.started_at
-
-    @started_at.setter
-    @_locked
-    def started_at(self, value):
-        self._update_fields['started_at'] = value
-
-    @property
-    def ended_at(self):
-        """
-        Returns when the task ended
-        :return: when task ended
-        """
-        return self.model_task.ended_at
-
-    @ended_at.setter
-    @_locked
-    def ended_at(self, value):
-        self._update_fields['ended_at'] = value
-
-    @property
-    def attempts_count(self):
-        """
-        Returns the attempts count for the task
-        :return: attempts count
-        """
-        return self.model_task.attempts_count
-
-    @attempts_count.setter
-    @_locked
-    def attempts_count(self, value):
-        self._update_fields['attempts_count'] = value
-
-    @property
-    def due_at(self):
-        """
-        Returns the minimum datetime in which the task can be executed
-        :return: eta
-        """
-        return self.model_task.due_at
-
-    @due_at.setter
-    @_locked
-    def due_at(self, value):
-        self._update_fields['due_at'] = value
-
-    def __getattr__(self, attr):
-        try:
-            return getattr(self.model_task, attr)
-        except AttributeError:
-            return super(OperationTask, self).__getattribute__(attr)

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1fee85c4/aria/orchestrator/workflows/core/translation.py
----------------------------------------------------------------------
diff --git a/aria/orchestrator/workflows/core/translation.py b/aria/orchestrator/workflows/core/translation.py
deleted file mode 100644
index fec108b..0000000
--- a/aria/orchestrator/workflows/core/translation.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Translation of user graph's API to the execution graph
-"""
-
-from .. import api
-from ..executor import base
-from . import task as core_task
-
-
-def build_execution_graph(
-        task_graph,
-        execution_graph,
-        default_executor,
-        start_cls=core_task.StartWorkflowTask,
-        end_cls=core_task.EndWorkflowTask,
-        depends_on=()):
-    """
-    Translates the user graph to the execution graph
-    :param task_graph: The user's graph
-    :param workflow_context: The workflow
-    :param execution_graph: The execution graph that is being built
-    :param start_cls: internal use
-    :param end_cls: internal use
-    :param depends_on: internal use
-    """
-    # Insert start marker
-    start_task = start_cls(id=_start_graph_suffix(task_graph.id), executor=base.StubTaskExecutor())
-    _add_task_and_dependencies(execution_graph, start_task, depends_on)
-
-    for api_task in task_graph.topological_order(reverse=True):
-        dependencies = task_graph.get_dependencies(api_task)
-        operation_dependencies = _get_tasks_from_dependencies(
-            execution_graph, dependencies, default=[start_task])
-
-        if isinstance(api_task, api.task.OperationTask):
-            operation_task = core_task.OperationTask(api_task, executor=default_executor)
-            _add_task_and_dependencies(execution_graph, operation_task, operation_dependencies)
-        elif isinstance(api_task, api.task.WorkflowTask):
-            # Build the graph recursively while adding start and end markers
-            build_execution_graph(
-                task_graph=api_task,
-                execution_graph=execution_graph,
-                default_executor=default_executor,
-                start_cls=core_task.StartSubWorkflowTask,
-                end_cls=core_task.EndSubWorkflowTask,
-                depends_on=operation_dependencies
-            )
-        elif isinstance(api_task, api.task.StubTask):
-            stub_task = core_task.StubTask(id=api_task.id, executor=base.StubTaskExecutor())
-            _add_task_and_dependencies(execution_graph, stub_task, operation_dependencies)
-        else:
-            raise RuntimeError('Undefined state')
-
-    # Insert end marker
-    workflow_dependencies = _get_tasks_from_dependencies(
-        execution_graph,
-        _get_non_dependency_tasks(task_graph),
-        default=[start_task])
-    end_task = end_cls(id=_end_graph_suffix(task_graph.id), executor=base.StubTaskExecutor())
-    _add_task_and_dependencies(execution_graph, end_task, workflow_dependencies)
-
-
-def _add_task_and_dependencies(execution_graph, operation_task, operation_dependencies=()):
-    execution_graph.add_node(operation_task.id, task=operation_task)
-    for dependency in operation_dependencies:
-        execution_graph.add_edge(dependency.id, operation_task.id)
-
-
-def _get_tasks_from_dependencies(execution_graph, dependencies, default=()):
-    """
-    Returns task list from dependencies.
-    """
-    tasks = []
-    for dependency in dependencies:
-        if isinstance(dependency, (api.task.OperationTask, api.task.StubTask)):
-            dependency_id = dependency.id
-        else:
-            dependency_id = _end_graph_suffix(dependency.id)
-        tasks.append(execution_graph.node[dependency_id]['task'])
-    return tasks or default
-
-
-def _start_graph_suffix(id):
-    return '{0}-Start'.format(id)
-
-
-def _end_graph_suffix(id):
-    return '{0}-End'.format(id)
-
-
-def _get_non_dependency_tasks(graph):
-    for task in graph.tasks:
-        if len(list(graph.get_dependents(task))) == 0:
-            yield task

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1fee85c4/aria/orchestrator/workflows/events_logging.py
----------------------------------------------------------------------
diff --git a/aria/orchestrator/workflows/events_logging.py b/aria/orchestrator/workflows/events_logging.py
index 036c1f7..4cee867 100644
--- a/aria/orchestrator/workflows/events_logging.py
+++ b/aria/orchestrator/workflows/events_logging.py
@@ -34,31 +34,32 @@ def _get_task_name(task):
 
 
 @events.start_task_signal.connect
-def _start_task_handler(task, **kwargs):
+def _start_task_handler(ctx, **kwargs):
     # If the task has no function this is an empty task.
-    if task.function:
+    if ctx.task.function:
         suffix = 'started...'
-        logger = task.context.logger.info
+        logger = ctx.logger.info
     else:
         suffix = 'has no implementation'
-        logger = task.context.logger.debug
+        logger = ctx.logger.debug
 
     logger('{name} {task.interface_name}.{task.operation_name} {suffix}'.format(
-        name=_get_task_name(task), task=task, suffix=suffix))
+        name=_get_task_name(ctx.task), task=ctx.task, suffix=suffix))
+
 
 @events.on_success_task_signal.connect
-def _success_task_handler(task, **kwargs):
-    if not task.function:
+def _success_task_handler(ctx, **kwargs):
+    if not ctx.task.function:
         return
-    task.context.logger.info('{name} {task.interface_name}.{task.operation_name} successful'
-                             .format(name=_get_task_name(task), task=task))
+    ctx.logger.info('{name} {task.interface_name}.{task.operation_name} successful'
+                    .format(name=_get_task_name(ctx.task), task=ctx.task))
 
 
 @events.on_failure_task_signal.connect
-def _failure_operation_handler(task, traceback, **kwargs):
-    task.context.logger.error(
+def _failure_operation_handler(ctx, traceback, **kwargs):
+    ctx.logger.error(
         '{name} {task.interface_name}.{task.operation_name} failed'
-        .format(name=_get_task_name(task), task=task), extra=dict(traceback=traceback)
+        .format(name=_get_task_name(ctx.task), task=ctx.task), extra=dict(traceback=traceback)
     )
 
 

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1fee85c4/aria/orchestrator/workflows/executor/base.py
----------------------------------------------------------------------
diff --git a/aria/orchestrator/workflows/executor/base.py b/aria/orchestrator/workflows/executor/base.py
index 7fece6f..6a3c9d2 100644
--- a/aria/orchestrator/workflows/executor/base.py
+++ b/aria/orchestrator/workflows/executor/base.py
@@ -28,19 +28,19 @@ class BaseExecutor(logger.LoggerMixin):
     def _execute(self, task):
         raise NotImplementedError
 
-    def execute(self, task):
+    def execute(self, ctx):
         """
         Execute a task
-        :param task: task to execute
+        :param ctx: the context of the task to execute
         """
-        if task.function:
-            self._execute(task)
+        if ctx.task.function:
+            self._execute(ctx)
         else:
             # In this case the task is missing a function. This task still gets to an
-            # executor, but since there is nothing to run, we by default simply skip the execution
-            # itself.
-            self._task_started(task)
-            self._task_succeeded(task)
+            # executor, but since there is nothing to run, we by default simply skip the
+            # execution itself.
+            self._task_started(ctx)
+            self._task_succeeded(ctx)
 
     def close(self):
         """
@@ -49,18 +49,19 @@ class BaseExecutor(logger.LoggerMixin):
         pass
 
     @staticmethod
-    def _task_started(task):
-        events.start_task_signal.send(task)
+    def _task_started(ctx):
+        events.start_task_signal.send(ctx)
 
     @staticmethod
-    def _task_failed(task, exception, traceback=None):
-        events.on_failure_task_signal.send(task, exception=exception, traceback=traceback)
+    def _task_failed(ctx, exception, traceback=None):
+        events.on_failure_task_signal.send(ctx, exception=exception, traceback=traceback)
 
     @staticmethod
-    def _task_succeeded(task):
-        events.on_success_task_signal.send(task)
+    def _task_succeeded(ctx):
+        events.on_success_task_signal.send(ctx)
 
 
 class StubTaskExecutor(BaseExecutor):                                                               # pylint: disable=abstract-method
-    def execute(self, task):
-        task.status = task.SUCCESS
+    def execute(self, ctx, *args, **kwargs):
+        with ctx.persist_changes:
+            ctx.task.status = ctx.task.SUCCESS
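
With execute() and the signal helpers keyed on ctx, a concrete executor only
has to override _execute(ctx); the base class still skips function-less tasks
and fires the started/succeeded events. A minimal subclass sketch under those
assumptions, mirroring the thread executor's loop body further down (the
class itself is illustrative, not part of the commit):

    import sys

    from aria.utils import imports, exceptions
    from aria.orchestrator.workflows.executor import base

    class InlineExecutor(base.BaseExecutor):
        """Illustrative executor: runs the task function in the caller's thread."""
        def _execute(self, ctx):
            self._task_started(ctx)
            try:
                task_func = imports.load_attribute(ctx.task.function)
                arguments = dict(arg.unwrapped for arg in ctx.task.arguments.values())
                task_func(ctx=ctx, **arguments)
                self._task_succeeded(ctx)
            except BaseException as e:
                self._task_failed(
                    ctx, exception=e,
                    traceback=exceptions.get_exception_as_string(*sys.exc_info()))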

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1fee85c4/aria/orchestrator/workflows/executor/dry.py
----------------------------------------------------------------------
diff --git a/aria/orchestrator/workflows/executor/dry.py b/aria/orchestrator/workflows/executor/dry.py
index 72080b4..9d86125 100644
--- a/aria/orchestrator/workflows/executor/dry.py
+++ b/aria/orchestrator/workflows/executor/dry.py
@@ -18,37 +18,36 @@ Dry executor
 """
 from datetime import datetime
 
-from .base import BaseExecutor
+from . import base
 
 
-class DryExecutor(BaseExecutor):                                                                    # pylint: disable=abstract-method
+class DryExecutor(base.BaseExecutor):                                                                    # pylint: disable=abstract-method
     """
     Executor which dry runs tasks - prints task information without causing any side effects
     """
-    def execute(self, task):
-        # updating the task manually instead of calling self._task_started(task),
-        # to avoid any side effects raising that event might cause
-        with task._update():
-            task.started_at = datetime.utcnow()
-            task.status = task.STARTED
-
-        dry_msg = '<dry> {name} {task.interface_name}.{task.operation_name} {suffix}'
-        logger = task.context.logger.info if task.function else task.context.logger.debug
-
-        if hasattr(task.actor, 'source_node'):
-            name = '{source_node.name}->{target_node.name}'.format(
-                source_node=task.actor.source_node, target_node=task.actor.target_node)
-        else:
-            name = task.actor.name
-
-        if task.function:
-            logger(dry_msg.format(name=name, task=task, suffix='started...'))
-            logger(dry_msg.format(name=name, task=task, suffix='successful'))
-        else:
-            logger(dry_msg.format(name=name, task=task, suffix='has no implementation'))
-
-        # updating the task manually instead of calling self._task_succeeded(task),
-        # to avoid any side effects raising that event might cause
-        with task._update():
-            task.ended_at = datetime.utcnow()
-            task.status = task.SUCCESS
+    def execute(self, ctx):
+        with ctx.persist_changes:
+            # updating the task manually instead of calling self._task_started(ctx),
+            # to avoid any side effects raising that event might cause
+            ctx.task.started_at = datetime.utcnow()
+            ctx.task.status = ctx.task.STARTED
+
+            dry_msg = '<dry> {name} {task.interface_name}.{task.operation_name} {suffix}'
+            logger = ctx.logger.info if ctx.task.function else ctx.logger.debug
+
+            if hasattr(ctx.task.actor, 'source_node'):
+                name = '{source_node.name}->{target_node.name}'.format(
+                    source_node=ctx.task.actor.source_node, target_node=ctx.task.actor.target_node)
+            else:
+                name = ctx.task.actor.name
+
+            if ctx.task.function:
+                logger(dry_msg.format(name=name, task=ctx.task, suffix='started...'))
+                logger(dry_msg.format(name=name, task=ctx.task, suffix='successful'))
+            else:
+                logger(dry_msg.format(name=name, task=ctx.task, suffix='has no implementation'))
+
+            # updating the task manually instead of calling self._task_succeeded(ctx),
+            # to avoid any side effects raising that event might cause
+            ctx.task.ended_at = datetime.utcnow()
+            ctx.task.status = ctx.task.SUCCESS
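
The persist_changes guard replaces the old task._update() context manager: on
this reading of the hunk, model mutations made inside the block are written
back to storage when the block exits, without raising the task events.
Sketched usage under that assumption:

    from datetime import datetime

    with ctx.persist_changes:
        # mutations here are flushed to the model store on exit
        ctx.task.started_at = datetime.utcnow()
        ctx.task.status = ctx.task.STARTED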

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1fee85c4/aria/orchestrator/workflows/executor/process.py
----------------------------------------------------------------------
diff --git a/aria/orchestrator/workflows/executor/process.py b/aria/orchestrator/workflows/executor/process.py
index 634f1f2..8518b33 100644
--- a/aria/orchestrator/workflows/executor/process.py
+++ b/aria/orchestrator/workflows/executor/process.py
@@ -113,17 +113,17 @@ class ProcessExecutor(base.BaseExecutor):
         self._server_socket.close()
         self._listener_thread.join(timeout=60)
 
-    def _execute(self, task):
+    def _execute(self, ctx):
         self._check_closed()
-        self._tasks[task.id] = task
+        self._tasks[ctx.task.id] = ctx
 
         # Temporary file used to pass arguments to the started subprocess
         file_descriptor, arguments_json_path = tempfile.mkstemp(prefix='executor-', suffix='.json')
         os.close(file_descriptor)
         with open(arguments_json_path, 'wb') as f:
-            f.write(pickle.dumps(self._create_arguments_dict(task)))
+            f.write(pickle.dumps(self._create_arguments_dict(ctx)))
 
-        env = self._construct_subprocess_env(task=task)
+        env = self._construct_subprocess_env(task=ctx.task)
         # Asynchronously start the operation in a subprocess
         subprocess.Popen(
             '{0} {1} {2}'.format(sys.executable, __file__, arguments_json_path),
@@ -137,13 +137,13 @@ class ProcessExecutor(base.BaseExecutor):
         if self._stopped:
             raise RuntimeError('Executor closed')
 
-    def _create_arguments_dict(self, task):
+    def _create_arguments_dict(self, ctx):
         return {
-            'task_id': task.id,
-            'function': task.function,
-            'operation_arguments': dict(arg.unwrapped for arg in task.arguments.values()),
+            'task_id': ctx.task.id,
+            'function': ctx.task.function,
+            'operation_arguments': dict(arg.unwrapped for arg in ctx.task.arguments.values()),
             'port': self._server_port,
-            'context': task.context.serialization_dict,
+            'context': ctx.serialization_dict,
         }
 
     def _construct_subprocess_env(self, task):

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1fee85c4/aria/orchestrator/workflows/executor/thread.py
----------------------------------------------------------------------
diff --git a/aria/orchestrator/workflows/executor/thread.py b/aria/orchestrator/workflows/executor/thread.py
index 56a56a5..8c447b6 100644
--- a/aria/orchestrator/workflows/executor/thread.py
+++ b/aria/orchestrator/workflows/executor/thread.py
@@ -46,8 +46,8 @@ class ThreadExecutor(BaseExecutor):
             thread.start()
             self._pool.append(thread)
 
-    def _execute(self, task):
-        self._queue.put(task)
+    def _execute(self, ctx):
+        self._queue.put(ctx)
 
     def close(self):
         self._stopped = True
@@ -57,15 +57,15 @@ class ThreadExecutor(BaseExecutor):
     def _processor(self):
         while not self._stopped:
             try:
-                task = self._queue.get(timeout=1)
-                self._task_started(task)
+                ctx = self._queue.get(timeout=1)
+                self._task_started(ctx)
                 try:
-                    task_func = imports.load_attribute(task.function)
-                    arguments = dict(arg.unwrapped for arg in task.arguments.values())
-                    task_func(ctx=task.context, **arguments)
-                    self._task_succeeded(task)
+                    task_func = imports.load_attribute(ctx.task.function)
+                    arguments = dict(arg.unwrapped for arg in ctx.task.arguments.values())
+                    task_func(ctx=ctx, **arguments)
+                    self._task_succeeded(ctx)
                 except BaseException as e:
-                    self._task_failed(task,
+                    self._task_failed(ctx,
                                       exception=e,
                                       traceback=exceptions.get_exception_as_string(*sys.exc_info()))
             # Daemon threads

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1fee85c4/tests/__init__.py
----------------------------------------------------------------------
diff --git a/tests/__init__.py b/tests/__init__.py
index d2858d2..ace30c8 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -15,4 +15,6 @@
 
 import os
 
+from . import storage, mock
+
 ROOT_DIR = os.path.dirname(os.path.dirname(__file__))

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1fee85c4/tests/orchestrator/context/__init__.py
----------------------------------------------------------------------
diff --git a/tests/orchestrator/context/__init__.py b/tests/orchestrator/context/__init__.py
index 4fde0a7..086a066 100644
--- a/tests/orchestrator/context/__init__.py
+++ b/tests/orchestrator/context/__init__.py
@@ -15,7 +15,7 @@
 
 import sys
 
-from aria.orchestrator.workflows.core import engine
+from aria.orchestrator.workflows.core import engine, compile
 
 
 def op_path(func, module_path=None):
@@ -25,5 +25,8 @@ def op_path(func, module_path=None):
 
 def execute(workflow_func, workflow_context, executor):
     graph = workflow_func(ctx=workflow_context)
-    eng = engine.Engine(executor=executor, workflow_context=workflow_context, tasks_graph=graph)
-    eng.execute()
+
+    compile.create_execution_tasks(workflow_context, graph, executor.__class__)
+    eng = engine.Engine(executors={executor.__class__: executor})
+
+    eng.execute(workflow_context)
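
This helper is the smallest statement of the new two-step flow: first compile
the API task graph into stored execution tasks bound to an executor class,
then hand the engine a class-to-instance mapping and pass the context only at
execution time. Spelled out with a concrete executor (a sketch; ctx and graph
are assumed to be a prepared workflow context and API task graph):

    from aria.orchestrator.workflows.core import engine, compile
    from aria.orchestrator.workflows.executor import thread

    executor = thread.ThreadExecutor()
    try:
        # Step 1: persist the API graph as execution tasks keyed on the executor class
        compile.create_execution_tasks(ctx, graph, thread.ThreadExecutor)
        # Step 2: the engine maps executor classes to live instances...
        eng = engine.Engine(executors={thread.ThreadExecutor: executor})
        # ...and receives the workflow context only when executing
        eng.execute(ctx)
    finally:
        executor.close()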

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1fee85c4/tests/orchestrator/context/test_operation.py
----------------------------------------------------------------------
diff --git a/tests/orchestrator/context/test_operation.py b/tests/orchestrator/context/test_operation.py
index 3dcfaa2..9550d12 100644
--- a/tests/orchestrator/context/test_operation.py
+++ b/tests/orchestrator/context/test_operation.py
@@ -50,21 +50,12 @@ def ctx(tmpdir):
 
 
 @pytest.fixture
-def process_executor():
-    ex = process.ProcessExecutor(**dict(python_path=tests.ROOT_DIR))
-    try:
-        yield ex
-    finally:
-        ex.close()
-
-
-@pytest.fixture
 def thread_executor():
-    ex = thread.ThreadExecutor()
+    result = thread.ThreadExecutor()
     try:
-        yield ex
+        yield result
     finally:
-        ex.close()
+        result.close()
 
 
 @pytest.fixture
@@ -266,12 +257,12 @@ def test_plugin_workdir(ctx, thread_executor, tmpdir):
     (process.ProcessExecutor, {'python_path': [tests.ROOT_DIR]}),
 ])
 def executor(request):
-    executor_cls, executor_kwargs = request.param
-    result = executor_cls(**executor_kwargs)
+    ex_cls, kwargs = request.param
+    ex = ex_cls(**kwargs)
     try:
-        yield result
+        yield ex
     finally:
-        result.close()
+        ex.close()
 
 
 def test_node_operation_logging(ctx, executor):
@@ -304,7 +295,6 @@ def test_node_operation_logging(ctx, executor):
                 arguments=arguments
             )
         )
-
     execute(workflow_func=basic_workflow, workflow_context=ctx, executor=executor)
     _assert_loggins(ctx, arguments)
 
@@ -418,7 +408,7 @@ def _assert_loggins(ctx, arguments):
     assert len(executions) == 1
     execution = executions[0]
 
-    tasks = ctx.model.task.list()
+    tasks = ctx.model.task.list(filters={'_stub_type': None})
     assert len(tasks) == 1
     task = tasks[0]
     assert len(task.logs) == 4
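
The new filter on _stub_type reflects that compiled execution graphs now
persist stub tasks (the start/end markers formerly modelled by the removed
core tasks) in the same task table as operation tasks; only rows with no stub
type are real operations. As a one-line illustration of the query above:

    operation_tasks = ctx.model.task.list(filters={'_stub_type': None})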

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1fee85c4/tests/orchestrator/context/test_serialize.py
----------------------------------------------------------------------
diff --git a/tests/orchestrator/context/test_serialize.py b/tests/orchestrator/context/test_serialize.py
index 0919e81..5db5b63 100644
--- a/tests/orchestrator/context/test_serialize.py
+++ b/tests/orchestrator/context/test_serialize.py
@@ -16,7 +16,7 @@
 import pytest
 
 from aria.orchestrator.workflows import api
-from aria.orchestrator.workflows.core import engine
+from aria.orchestrator.workflows.core import engine, compile
 from aria.orchestrator.workflows.executor import process
 from aria.orchestrator import workflow, operation
 import tests
@@ -48,15 +48,15 @@ def test_serialize_operation_context(context, executor, tmpdir):
     context.model.node.update(node)
 
     graph = _mock_workflow(ctx=context)  # pylint: disable=no-value-for-parameter
-    eng = engine.Engine(executor=executor, workflow_context=context, tasks_graph=graph)
-    eng.execute()
+    compile.create_execution_tasks(context, graph, executor.__class__)
+    eng = engine.Engine({executor.__class__: executor})
+    eng.execute(context)
 
 
 @workflow
 def _mock_workflow(ctx, graph):
     node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
-    task = api.task.OperationTask(node, interface_name='test', operation_name='op')
-    graph.add_tasks(task)
+    graph.add_tasks(api.task.OperationTask(node, interface_name='test', operation_name='op'))
     return graph
 
 

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1fee85c4/tests/orchestrator/execution_plugin/test_local.py
----------------------------------------------------------------------
diff --git a/tests/orchestrator/execution_plugin/test_local.py b/tests/orchestrator/execution_plugin/test_local.py
index f667460..1695320 100644
--- a/tests/orchestrator/execution_plugin/test_local.py
+++ b/tests/orchestrator/execution_plugin/test_local.py
@@ -28,7 +28,7 @@ from aria.orchestrator.execution_plugin.exceptions import ProcessException
 from aria.orchestrator.execution_plugin import local
 from aria.orchestrator.execution_plugin import constants
 from aria.orchestrator.workflows.executor import process
-from aria.orchestrator.workflows.core import engine
+from aria.orchestrator.workflows.core import engine, compile
 
 from tests import mock
 from tests import storage
@@ -500,11 +500,9 @@ if __name__ == '__main__':
                 arguments=arguments))
             return graph
         tasks_graph = mock_workflow(ctx=workflow_context)  # pylint: disable=no-value-for-parameter
-        eng = engine.Engine(
-            executor=executor,
-            workflow_context=workflow_context,
-            tasks_graph=tasks_graph)
-        eng.execute()
+        compile.create_execution_tasks(workflow_context, tasks_graph, executor.__class__)
+        eng = engine.Engine({executor.__class__: executor})
+        eng.execute(workflow_context)
         return workflow_context.model.node.get_by_name(
             mock.models.DEPENDENCY_NODE_NAME).attributes
 

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1fee85c4/tests/orchestrator/execution_plugin/test_ssh.py
----------------------------------------------------------------------
diff --git a/tests/orchestrator/execution_plugin/test_ssh.py b/tests/orchestrator/execution_plugin/test_ssh.py
index 8c4dd2d..fb1dc09 100644
--- a/tests/orchestrator/execution_plugin/test_ssh.py
+++ b/tests/orchestrator/execution_plugin/test_ssh.py
@@ -29,7 +29,7 @@ from aria.orchestrator import events
 from aria.orchestrator import workflow
 from aria.orchestrator.workflows import api
 from aria.orchestrator.workflows.executor import process
-from aria.orchestrator.workflows.core import engine
+from aria.orchestrator.workflows.core import engine, compile
 from aria.orchestrator.workflows.exceptions import ExecutorException
 from aria.orchestrator.exceptions import TaskAbortException, TaskRetryException
 from aria.orchestrator.execution_plugin import operations
@@ -254,11 +254,10 @@ class TestWithActualSSHServer(object):
             graph.sequence(*ops)
             return graph
         tasks_graph = mock_workflow(ctx=self._workflow_context)  # pylint: disable=no-value-for-parameter
-        eng = engine.Engine(
-            executor=self._executor,
-            workflow_context=self._workflow_context,
-            tasks_graph=tasks_graph)
-        eng.execute()
+        compile.create_execution_tasks(
+            self._workflow_context, tasks_graph, self._executor.__class__)
+        eng = engine.Engine({self._executor.__class__: self._executor})
+        eng.execute(self._workflow_context)
         return self._workflow_context.model.node.get_by_name(
             mock.models.DEPENDENCY_NODE_NAME).attributes
 

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1fee85c4/tests/orchestrator/test_workflow_runner.py
----------------------------------------------------------------------
diff --git a/tests/orchestrator/test_workflow_runner.py b/tests/orchestrator/test_workflow_runner.py
index c5a62ae..40f9035 100644
--- a/tests/orchestrator/test_workflow_runner.py
+++ b/tests/orchestrator/test_workflow_runner.py
@@ -96,20 +96,20 @@ def test_default_executor(request):
     # validates the ProcessExecutor is used by the workflow runner by default
     mock_workflow = _setup_mock_workflow_in_service(request)
 
-    with mock.patch('aria.orchestrator.workflow_runner.Engine') as mock_engine_cls:
+    with mock.patch('aria.orchestrator.workflow_runner.engine.Engine') as mock_engine_cls:
         _create_workflow_runner(request, mock_workflow)
         _, engine_kwargs = mock_engine_cls.call_args
-        assert isinstance(engine_kwargs.get('executor'), ProcessExecutor)
+        assert isinstance(engine_kwargs.get('executors').values()[0], ProcessExecutor)
 
 
 def test_custom_executor(request):
     mock_workflow = _setup_mock_workflow_in_service(request)
 
     custom_executor = mock.MagicMock()
-    with mock.patch('aria.orchestrator.workflow_runner.Engine') as mock_engine_cls:
+    with mock.patch('aria.orchestrator.workflow_runner.engine.Engine') as mock_engine_cls:
         _create_workflow_runner(request, mock_workflow, executor=custom_executor)
         _, engine_kwargs = mock_engine_cls.call_args
-        assert engine_kwargs.get('executor') == custom_executor
+        assert engine_kwargs.get('executors').values()[0] == custom_executor
 
 
 def test_task_configuration_parameters(request):
@@ -117,48 +117,47 @@ def test_task_configuration_parameters(request):
 
     task_max_attempts = 5
     task_retry_interval = 7
-    with mock.patch('aria.orchestrator.workflow_runner.Engine') as mock_engine_cls:
+    with mock.patch('aria.orchestrator.workflow_runner.engine.Engine.execute') as \
+            mock_engine_execute:
         _create_workflow_runner(request, mock_workflow, task_max_attempts=task_max_attempts,
-                                task_retry_interval=task_retry_interval)
-        _, engine_kwargs = mock_engine_cls.call_args
-        assert engine_kwargs['workflow_context']._task_max_attempts == task_max_attempts
-        assert engine_kwargs['workflow_context']._task_retry_interval == task_retry_interval
+                                task_retry_interval=task_retry_interval).execute()
+        _, engine_kwargs = mock_engine_execute.call_args
+        assert engine_kwargs['ctx']._task_max_attempts == task_max_attempts
+        assert engine_kwargs['ctx']._task_retry_interval == task_retry_interval
 
 
 def test_execute(request, service):
     mock_workflow = _setup_mock_workflow_in_service(request)
 
     mock_engine = mock.MagicMock()
-    with mock.patch('aria.orchestrator.workflow_runner.Engine', return_value=mock_engine) \
-            as mock_engine_cls:
+    with mock.patch('aria.orchestrator.workflow_runner.engine.Engine.execute',
+                    return_value=mock_engine) as mock_engine_execute:
         workflow_runner = _create_workflow_runner(request, mock_workflow)
+        workflow_runner.execute()
 
-        _, engine_kwargs = mock_engine_cls.call_args
-        assert engine_kwargs['workflow_context'].service.id == service.id
-        assert engine_kwargs['workflow_context'].execution.workflow_name == 'test_workflow'
+        _, engine_kwargs = mock_engine_execute.call_args
+        assert engine_kwargs['ctx'].service.id == service.id
+        assert engine_kwargs['ctx'].execution.workflow_name == 'test_workflow'
 
-        workflow_runner.execute()
-        mock_engine.execute.assert_called_once_with()
+        mock_engine_execute.assert_called_once_with(ctx=workflow_runner._workflow_context)
 
 
 def test_cancel_execution(request):
     mock_workflow = _setup_mock_workflow_in_service(request)
 
     mock_engine = mock.MagicMock()
-    with mock.patch('aria.orchestrator.workflow_runner.Engine', return_value=mock_engine):
+    with mock.patch('aria.orchestrator.workflow_runner.engine.Engine', return_value=mock_engine):
         workflow_runner = _create_workflow_runner(request, mock_workflow)
         workflow_runner.cancel()
-        mock_engine.cancel_execution.assert_called_once_with()
+        mock_engine.cancel_execution.assert_called_once_with(ctx=workflow_runner._workflow_context)
 
 
 def test_execution_model_creation(request, service, model):
     mock_workflow = _setup_mock_workflow_in_service(request)
 
-    with mock.patch('aria.orchestrator.workflow_runner.Engine') as mock_engine_cls:
+    with mock.patch('aria.orchestrator.workflow_runner.engine.Engine'):
         workflow_runner = _create_workflow_runner(request, mock_workflow)
 
-        _, engine_kwargs = mock_engine_cls.call_args
-        assert engine_kwargs['workflow_context'].execution == workflow_runner.execution
         assert model.execution.get(workflow_runner.execution.id) == workflow_runner.execution
         assert workflow_runner.execution.service.id == service.id
         assert workflow_runner.execution.workflow_name == mock_workflow
@@ -173,7 +172,7 @@ def test_execution_inputs_override_workflow_inputs(request):
         inputs=dict((name, models.Input.wrap(name, val)) for name, val
                     in wf_inputs.iteritems()))
 
-    with mock.patch('aria.orchestrator.workflow_runner.Engine'):
+    with mock.patch('aria.orchestrator.workflow_runner.engine.Engine'):
         workflow_runner = _create_workflow_runner(
             request, mock_workflow, inputs={'input2': 'overriding-value2', 'input3': 7})
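
Taken together, these tests pin down the runner-facing surface: the engine is
reached as workflow_runner.engine.Engine, is constructed with the executors
mapping, and both execute and cancel_execution now take the context
explicitly. In sketch form (workflow_context is assumed to be a prepared,
compiled context; the values()[0] indexing above is the suite's Python 2
idiom):

    from aria.orchestrator.workflows.core import engine
    from aria.orchestrator.workflows.executor.process import ProcessExecutor
    import tests

    executor = ProcessExecutor(python_path=[tests.ROOT_DIR])
    eng = engine.Engine(executors={ProcessExecutor: executor})
    eng.execute(ctx=workflow_context)            # blocking run of the compiled graph
    eng.cancel_execution(ctx=workflow_context)   # the runner passes ctx= here as well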
 

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1fee85c4/tests/orchestrator/workflows/core/test_engine.py
----------------------------------------------------------------------
diff --git a/tests/orchestrator/workflows/core/test_engine.py b/tests/orchestrator/workflows/core/test_engine.py
index 0438544..b77d284 100644
--- a/tests/orchestrator/workflows/core/test_engine.py
+++ b/tests/orchestrator/workflows/core/test_engine.py
@@ -28,7 +28,7 @@ from aria.orchestrator.workflows import (
     api,
     exceptions,
 )
-from aria.orchestrator.workflows.core import engine
+from aria.orchestrator.workflows.core import engine, compile
 from aria.orchestrator.workflows.executor import thread
 
 from tests import mock, storage
@@ -44,15 +44,15 @@ class BaseTest(object):
         eng = cls._engine(workflow_func=workflow_func,
                           workflow_context=workflow_context,
                           executor=executor)
-        eng.execute()
+        eng.execute(ctx=workflow_context)
         return eng
 
     @staticmethod
     def _engine(workflow_func, workflow_context, executor):
         graph = workflow_func(ctx=workflow_context)
-        return engine.Engine(executor=executor,
-                             workflow_context=workflow_context,
-                             tasks_graph=graph)
+        compile.create_execution_tasks(workflow_context, graph, executor.__class__)
+
+        return engine.Engine(executors={executor.__class__: executor})
 
     @staticmethod
     def _create_interface(ctx, func, arguments=None):
@@ -98,9 +98,10 @@ class BaseTest(object):
 
     @pytest.fixture(autouse=True)
     def signals_registration(self, ):
-        def sent_task_handler(*args, **kwargs):
-            calls = global_test_holder.setdefault('sent_task_signal_calls', 0)
-            global_test_holder['sent_task_signal_calls'] = calls + 1
+        def sent_task_handler(ctx, *args, **kwargs):
+            if ctx.task._stub_type is None:
+                calls = global_test_holder.setdefault('sent_task_signal_calls', 0)
+                global_test_holder['sent_task_signal_calls'] = calls + 1
 
         def start_workflow_handler(workflow_context, *args, **kwargs):
             workflow_context.states.append('start')
@@ -255,10 +256,10 @@ class TestCancel(BaseTest):
         eng = self._engine(workflow_func=mock_workflow,
                            workflow_context=workflow_context,
                            executor=executor)
-        t = threading.Thread(target=eng.execute)
+        t = threading.Thread(target=eng.execute, kwargs=dict(ctx=workflow_context))
         t.start()
         time.sleep(10)
-        eng.cancel_execution()
+        eng.cancel_execution(workflow_context)
         t.join(timeout=60) # we need to give this a *lot* of time because Travis can be *very* slow
         assert not t.is_alive() # if join is timed out it will not raise an exception
         assert workflow_context.states == ['start', 'cancel']
@@ -277,7 +278,7 @@ class TestCancel(BaseTest):
         eng = self._engine(workflow_func=mock_workflow,
                            workflow_context=workflow_context,
                            executor=executor)
-        eng.cancel_execution()
+        eng.cancel_execution(workflow_context)
         execution = workflow_context.execution
         assert execution.status == models.Execution.CANCELLED
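
Since execute(ctx=...) runs the graph to completion in the calling thread, the
cancellation test drives it from a worker thread and cancels from the test
thread; the same shape works outside tests. A sketch, assuming eng and
workflow_context as built by the _engine helper above:

    import threading

    t = threading.Thread(target=eng.execute, kwargs=dict(ctx=workflow_context))
    t.start()
    eng.cancel_execution(workflow_context)   # flips the execution to CANCELLED
    t.join()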
 

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1fee85c4/tests/orchestrator/workflows/core/test_events.py
----------------------------------------------------------------------
diff --git a/tests/orchestrator/workflows/core/test_events.py b/tests/orchestrator/workflows/core/test_events.py
index 6d542e9..2b82443 100644
--- a/tests/orchestrator/workflows/core/test_events.py
+++ b/tests/orchestrator/workflows/core/test_events.py
@@ -15,12 +15,13 @@
 
 import pytest
 
-from tests import mock, storage
-from aria.modeling.service_instance import NodeBase
 from aria.orchestrator.decorators import operation, workflow
-from aria.orchestrator.workflows.core import engine
+from aria.orchestrator.workflows.core import engine, compile
 from aria.orchestrator.workflows.executor.thread import ThreadExecutor
 from aria.orchestrator.workflows import api
+from aria.modeling.service_instance import NodeBase
+
+from tests import mock, storage
 
 global_test_dict = {}  # used to capture transitional node state changes
 
@@ -112,14 +113,13 @@ def run_operation_on_node(ctx, op_name, interface_name):
         operation_name=op_name,
         operation_kwargs=dict(function='{name}.{func.__name__}'.format(name=__name__, func=func)))
     node.interfaces[interface.name] = interface
+    compile.create_execution_tasks(
+        ctx,
+        single_operation_workflow(ctx, node=node, interface_name=interface_name, op_name=op_name),
+        ThreadExecutor)
 
-    eng = engine.Engine(executor=ThreadExecutor(),
-                        workflow_context=ctx,
-                        tasks_graph=single_operation_workflow(ctx=ctx,  # pylint: disable=no-value-for-parameter
-                                                              node=node,
-                                                              interface_name=interface_name,
-                                                              op_name=op_name))
-    eng.execute()
+    eng = engine.Engine(executors={ThreadExecutor: ThreadExecutor()})
+    eng.execute(ctx)
     return node
 
 

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/1fee85c4/tests/orchestrator/workflows/core/test_task.py
----------------------------------------------------------------------
diff --git a/tests/orchestrator/workflows/core/test_task.py b/tests/orchestrator/workflows/core/test_task.py
index c0d3616..2b3f7d7 100644
--- a/tests/orchestrator/workflows/core/test_task.py
+++ b/tests/orchestrator/workflows/core/test_task.py
@@ -22,9 +22,9 @@ import pytest
 from aria.orchestrator.context import workflow as workflow_context
 from aria.orchestrator.workflows import (
     api,
-    core,
     exceptions,
 )
+from aria.modeling import models
 
 from tests import mock, storage
 
@@ -70,8 +70,8 @@ class TestOperationTask(object):
                 node,
                 interface_name=NODE_INTERFACE_NAME,
                 operation_name=NODE_OPERATION_NAME)
-            core_task = core.task.OperationTask(api_task=api_task, executor=None)
-        return api_task, core_task
+            model_task = models.Task.from_api_task(api_task, None)
+        return api_task, model_task
 
     def _create_relationship_operation_task(self, ctx, relationship):
         with workflow_context.current.push(ctx):
@@ -79,7 +79,7 @@ class TestOperationTask(object):
                 relationship,
                 interface_name=RELATIONSHIP_INTERFACE_NAME,
                 operation_name=RELATIONSHIP_OPERATION_NAME)
-            core_task = core.task.OperationTask(api_task=api_task, executor=None)
+            core_task = models.Task.from_api_task(api_task, None)
         return api_task, core_task
 
     def test_node_operation_task_creation(self, ctx):
@@ -96,25 +96,21 @@ class TestOperationTask(object):
         )
         node.interfaces[interface.name] = interface
         ctx.model.node.update(node)
-        api_task, core_task = self._create_node_operation_task(ctx, node)
-        storage_task = ctx.model.task.get_by_name(core_task.name)
-        assert storage_task.plugin is storage_plugin
-        assert storage_task.execution_name == ctx.execution.name
-        assert storage_task.actor == core_task.context.node
-        assert core_task.model_task == storage_task
-        assert core_task.name == api_task.name
-        assert core_task.function == api_task.function
-        assert core_task.actor == api_task.actor == node
-        assert core_task.arguments == api_task.arguments == storage_task.arguments
-        assert core_task.plugin == storage_plugin
+        api_task, model_task = self._create_node_operation_task(ctx, node)
+        assert model_task.name == api_task.name
+        assert model_task.function == api_task.function
+        assert model_task.actor == api_task.actor == node
+        assert model_task.arguments == api_task.arguments
+        assert model_task.plugin == storage_plugin
 
     def test_relationship_operation_task_creation(self, ctx):
         relationship = ctx.model.relationship.list()[0]
         ctx.model.relationship.update(relationship)
-        _, core_task = self._create_relationship_operation_task(
+        _, model_task = self._create_relationship_operation_task(
             ctx, relationship)
-        assert core_task.model_task.actor == relationship
+        assert model_task.actor == relationship
 
+    @pytest.mark.skip("Currently not supported for model tasks")
     def test_operation_task_edit_locked_attribute(self, ctx):
         node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
 
@@ -131,6 +127,7 @@ class TestOperationTask(object):
         with pytest.raises(exceptions.TaskException):
             core_task.due_at = now
 
+    @pytest.mark.skip("Currently not supported for model tasks")
     def test_operation_task_edit_attributes(self, ctx):
         node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)