Posted to dev@ariatosca.apache.org by em...@apache.org on 2017/03/21 17:51:09 UTC

[03/12] incubator-ariatosca git commit: ARIA-105 Integrate parser and orchestrator models

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/9841ca4a/tests/modeling/test_models.py
----------------------------------------------------------------------
diff --git a/tests/modeling/test_models.py b/tests/modeling/test_models.py
new file mode 100644
index 0000000..5266d79
--- /dev/null
+++ b/tests/modeling/test_models.py
@@ -0,0 +1,837 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from datetime import datetime
+from contextlib import contextmanager
+
+import pytest
+
+from aria import application_model_storage
+from aria.storage import (
+    sql_mapi,
+)
+from aria.storage.exceptions import StorageError
+from aria.modeling.exceptions import ValueFormatException
+from aria.modeling.models import (
+    ServiceTemplate,
+    Service,
+    ServiceUpdate,
+    ServiceUpdateStep,
+    ServiceModification,
+    Execution,
+    Task,
+    Plugin,
+    Relationship,
+    NodeTemplate,
+    Node,
+    Parameter
+)
+
+from tests import mock
+from ..storage import release_sqlite_storage, init_inmemory_model_storage
+
+
+@contextmanager
+def sql_storage(storage_func):
+    storage = None
+    try:
+        storage = storage_func()
+        yield storage
+    finally:
+        if storage:
+            release_sqlite_storage(storage)
+
+
+def _empty_storage():
+    return application_model_storage(sql_mapi.SQLAlchemyModelAPI,
+                                     initiator=init_inmemory_model_storage)
+
+
+def _service_template_storage():
+    storage = _empty_storage()
+    service_template = mock.models.create_service_template()
+    storage.service_template.put(service_template)
+    return storage
+
+
+def _service_storage():
+    storage = _service_template_storage()
+    service = mock.models.create_service(
+        storage.service_template.get_by_name(mock.models.SERVICE_TEMPLATE_NAME))
+    storage.service.put(service)
+    return storage
+
+
+def _service_update_storage():
+    storage = _service_storage()
+    service_update = ServiceUpdate(
+        service=storage.service.list()[0],
+        created_at=now,
+        service_plan={},
+    )
+    storage.service_update.put(service_update)
+    return storage
+
+
+def _node_template_storage():
+    storage = _service_storage()
+    service_template = storage.service_template.list()[0]
+    dependency_node_template = mock.models.create_dependency_node_template(service_template)
+    mock.models.create_dependent_node_template(service_template, dependency_node_template)
+    storage.service_template.update(service_template)
+    return storage
+
+
+def _node_storage():
+    storage = _node_template_storage()
+    service = storage.service.get_by_name(mock.models.SERVICE_NAME)
+    dependency_node_template = storage.node_template.get_by_name(
+        mock.models.DEPENDENCY_NODE_TEMPLATE_NAME)
+    dependent_node_template = storage.node_template.get_by_name(
+        mock.models.DEPENDENT_NODE_TEMPLATE_NAME)
+    mock.models.create_dependency_node(dependency_node_template, service)
+    mock.models.create_dependent_node(dependent_node_template, service)
+    storage.service.update(service)
+    return storage
+
+
+def _execution_storage():
+    storage = _service_storage()
+    execution = mock.models.create_execution(storage.service.list()[0])
+    plugin = mock.models.create_plugin()
+    storage.execution.put(execution)
+    storage.plugin.put(plugin)
+    return storage
+
+
+@pytest.fixture
+def empty_storage():
+    with sql_storage(_empty_storage) as storage:
+        yield storage
+
+
+@pytest.fixture
+def service_template_storage():
+    with sql_storage(_service_template_storage) as storage:
+        yield storage
+
+
+@pytest.fixture
+def service_storage():
+    with sql_storage(_service_storage) as storage:
+        yield storage
+
+
+@pytest.fixture
+def service_update_storage():
+    with sql_storage(_service_update_storage) as storage:
+        yield storage
+
+
+@pytest.fixture
+def node_template_storage():
+    with sql_storage(_node_template_storage) as storage:
+        yield storage
+
+
+@pytest.fixture
+def node_storage():
+    with sql_storage(_node_storage) as storage:
+        yield storage
+
+
+@pytest.fixture
+def execution_storage():
+    with sql_storage(_execution_storage) as storage:
+        yield storage
+
+
+# a class object, used as a deliberately invalid value for every column type under test
+m_cls = type('MockClass', (object,), {})
+now = datetime.utcnow()
+
+
+def _test_model(is_valid, storage, model_cls, model_kwargs):
+    if is_valid:
+        model = model_cls(**model_kwargs)
+        getattr(storage, model_cls.__modelname__).put(model)
+        return model
+    else:
+        with pytest.raises((ValueFormatException, StorageError, TypeError)):
+            getattr(storage, model_cls.__modelname__).put(model_cls(**model_kwargs))
+
+
+class TestServiceTemplate(object):
+
+    @pytest.mark.parametrize(
+        'is_valid, description, created_at, updated_at, main_file_name',
+        [
+            (False, {}, now, now, '/path'),
+            (False, 'description', 'error', now, '/path'),
+            (False, 'description', now, 'error', '/path'),
+            (False, 'description', now, now, {}),
+
+            (True, 'description', now, now, '/path'),
+        ]
+    )
+
+    def test_service_template_model_creation(self, empty_storage, is_valid, description, created_at,
+                                             updated_at, main_file_name):
+        _test_model(is_valid=is_valid,
+                    storage=empty_storage,
+                    model_cls=ServiceTemplate,
+                    model_kwargs=dict(
+                        description=description,
+                        created_at=created_at,
+                        updated_at=updated_at,
+                        main_file_name=main_file_name)
+                   )
+
+
+class TestService(object):
+
+    @pytest.mark.parametrize(
+        'is_valid, name, created_at, description, inputs, permalink, '
+        'outputs, scaling_groups, updated_at',
+        [
+            (False, m_cls, now, 'desc', {}, 'perlnk', {}, {}, now),
+            (False, 'name', m_cls, 'desc', {}, 'perlnk', {}, {}, now),
+            (False, 'name', now, m_cls, {}, 'perlnk', {}, {}, now),
+            (False, 'name', now, 'desc', {}, m_cls, {}, {}, now),
+            (False, 'name', now, 'desc', {}, 'perlnk', m_cls, {}, now),
+            (False, 'name', now, 'desc', {}, 'perlnk', {}, m_cls, now),
+            (False, 'name', now, 'desc', {}, 'perlnk', {}, {}, m_cls),
+
+            (True, 'name', now, 'desc', {}, 'perlnk', {}, {}, now),
+            (True, None, now, 'desc', {}, 'perlnk', {}, {}, now),
+            (True, 'name', now, 'desc', {}, 'perlnk', {}, {}, now),
+            (True, 'name', now, None, {}, 'perlnk', {}, {}, now),
+            (True, 'name', now, 'desc', {}, 'perlnk', {}, {}, now),
+            (True, 'name', now, 'desc', {}, None, {}, {}, now),
+            (True, 'name', now, 'desc', {}, 'perlnk', {}, {}, now),
+            (True, 'name', now, 'desc', {}, 'perlnk', {}, None, now),
+            (True, 'name', now, 'desc', {}, 'perlnk', {}, {}, None),
+            (True, 'name', now, 'desc', {}, 'perlnk', {}, {}, now),
+        ]
+    )
+    def test_service_model_creation(self, service_storage, is_valid, name, created_at, description,
+                                    inputs, permalink, outputs, scaling_groups, updated_at):
+        service = _test_model(
+            is_valid=is_valid,
+            storage=service_storage,
+            model_cls=Service,
+            model_kwargs=dict(
+                name=name,
+                service_template=service_storage.service_template.list()[0],
+                created_at=created_at,
+                description=description,
+                inputs=inputs,
+                permalink=permalink,
+                outputs=outputs,
+                scaling_groups=scaling_groups,
+                updated_at=updated_at
+            ))
+        if is_valid:
+            assert service.service_template == \
+                   service_storage.service_template.list()[0]
+
+
+class TestExecution(object):
+
+    @pytest.mark.parametrize(
+        'is_valid, created_at, started_at, ended_at, error, is_system_workflow, parameters, '
+        'status, workflow_name',
+        [
+            (False, m_cls, now, now, 'error', False, {}, Execution.STARTED, 'wf_name'),
+            (False, now, m_cls, now, 'error', False, {}, Execution.STARTED, 'wf_name'),
+            (False, now, now, m_cls, 'error', False, {}, Execution.STARTED, 'wf_name'),
+            (False, now, now, now, m_cls, False, {}, Execution.STARTED, 'wf_name'),
+            (False, now, now, now, 'error', False, m_cls, Execution.STARTED, 'wf_name'),
+            (False, now, now, now, 'error', False, {}, m_cls, 'wf_name'),
+            (False, now, now, now, 'error', False, {}, Execution.STARTED, m_cls),
+
+            (True, now, now, now, 'error', False, {}, Execution.STARTED, 'wf_name'),
+            (True, now, None, now, 'error', False, {}, Execution.STARTED, 'wf_name'),
+            (True, now, now, None, 'error', False, {}, Execution.STARTED, 'wf_name'),
+            (True, now, now, now, None, False, {}, Execution.STARTED, 'wf_name'),
+            (True, now, now, now, 'error', False, None, Execution.STARTED, 'wf_name'),
+        ]
+    )
+    def test_execution_model_creation(self, service_storage, is_valid, created_at, started_at,
+                                      ended_at, error, is_system_workflow, parameters, status,
+                                      workflow_name):
+        execution = _test_model(
+            is_valid=is_valid,
+            storage=service_storage,
+            model_cls=Execution,
+            model_kwargs=dict(
+                service=service_storage.service.list()[0],
+                created_at=created_at,
+                started_at=started_at,
+                ended_at=ended_at,
+                error=error,
+                is_system_workflow=is_system_workflow,
+                parameters=parameters,
+                status=status,
+                workflow_name=workflow_name,
+            ))
+        if is_valid:
+            assert execution.service == service_storage.service.list()[0]
+            assert execution.service_template == service_storage.service_template.list()[0]
+
+    def test_execution_status_transition(self):
+        def create_execution(status):
+            execution = Execution(
+                id='e_id',
+                workflow_name='w_name',
+                status=status,
+                parameters={},
+                created_at=now,
+            )
+            return execution
+
+        valid_transitions = {
+            Execution.PENDING: [Execution.STARTED,
+                                Execution.CANCELLED,
+                                Execution.PENDING],
+            Execution.STARTED: [Execution.FAILED,
+                                Execution.TERMINATED,
+                                Execution.CANCELLED,
+                                Execution.CANCELLING,
+                                Execution.STARTED],
+            Execution.CANCELLING: [Execution.FAILED,
+                                   Execution.TERMINATED,
+                                   Execution.CANCELLED,
+                                   Execution.CANCELLING],
+            Execution.FAILED: [Execution.FAILED],
+            Execution.TERMINATED: [Execution.TERMINATED],
+            Execution.CANCELLED: [Execution.CANCELLED]
+        }
+
+        invalid_transitions = {
+            Execution.PENDING: [Execution.FAILED,
+                                Execution.TERMINATED,
+                                Execution.CANCELLING],
+            Execution.STARTED: [Execution.PENDING],
+            Execution.CANCELLING: [Execution.PENDING,
+                                   Execution.STARTED],
+            Execution.FAILED: [Execution.PENDING,
+                               Execution.STARTED,
+                               Execution.TERMINATED,
+                               Execution.CANCELLED,
+                               Execution.CANCELLING],
+            Execution.TERMINATED: [Execution.PENDING,
+                                   Execution.STARTED,
+                                   Execution.FAILED,
+                                   Execution.CANCELLED,
+                                   Execution.CANCELLING],
+            Execution.CANCELLED: [Execution.PENDING,
+                                  Execution.STARTED,
+                                  Execution.FAILED,
+                                  Execution.TERMINATED,
+                                  Execution.CANCELLING],
+        }
+
+        for current_status, valid_transitioned_statues in valid_transitions.items():
+            for transitioned_status in valid_transitioned_statues:
+                execution = create_execution(current_status)
+                execution.status = transitioned_status
+
+        for current_status, invalid_transitioned_statues in invalid_transitions.items():
+            for transitioned_status in invalid_transitioned_statues:
+                execution = create_execution(current_status)
+                with pytest.raises(ValueError):
+                    execution.status = transitioned_status
+
+
+class TestServiceUpdate(object):
+    @pytest.mark.parametrize(
+        'is_valid, created_at, service_plan, service_update_nodes, '
+        'service_update_service, service_update_node_templates, '
+        'modified_entity_ids, state',
+        [
+            (False, m_cls, {}, {}, {}, [], {}, 'state'),
+            (False, now, m_cls, {}, {}, [], {}, 'state'),
+            (False, now, {}, m_cls, {}, [], {}, 'state'),
+            (False, now, {}, {}, m_cls, [], {}, 'state'),
+            (False, now, {}, {}, {}, m_cls, {}, 'state'),
+            (False, now, {}, {}, {}, [], m_cls, 'state'),
+            (False, now, {}, {}, {}, [], {}, m_cls),
+
+            (True, now, {}, {}, {}, [], {}, 'state'),
+            (True, now, {}, None, {}, [], {}, 'state'),
+            (True, now, {}, {}, None, [], {}, 'state'),
+            (True, now, {}, {}, {}, None, {}, 'state'),
+            (True, now, {}, {}, {}, [], None, 'state'),
+            (True, now, {}, {}, {}, [], {}, None),
+        ]
+    )
+    def test_service_update_model_creation(self, service_storage, is_valid, created_at,
+                                           service_plan, service_update_nodes,
+                                           service_update_service, service_update_node_templates,
+                                           modified_entity_ids, state):
+        service_update = _test_model(
+            is_valid=is_valid,
+            storage=service_storage,
+            model_cls=ServiceUpdate,
+            model_kwargs=dict(
+                service=service_storage.service.list()[0],
+                created_at=created_at,
+                service_plan=service_plan,
+                service_update_nodes=service_update_nodes,
+                service_update_service=service_update_service,
+                service_update_node_templates=service_update_node_templates,
+                modified_entity_ids=modified_entity_ids,
+                state=state
+            ))
+        if is_valid:
+            assert service_update.service == \
+                   service_storage.service.list()[0]
+
+
+class TestServiceUpdateStep(object):
+
+    @pytest.mark.parametrize(
+        'is_valid, action, entity_id, entity_type',
+        [
+            (False, m_cls, 'id', ServiceUpdateStep.ENTITY_TYPES.NODE),
+            (False, ServiceUpdateStep.ACTION_TYPES.ADD, m_cls,
+             ServiceUpdateStep.ENTITY_TYPES.NODE),
+            (False, ServiceUpdateStep.ACTION_TYPES.ADD, 'id', m_cls),
+
+            (True, ServiceUpdateStep.ACTION_TYPES.ADD, 'id',
+             ServiceUpdateStep.ENTITY_TYPES.NODE)
+        ]
+    )
+    def test_service_update_step_model_creation(self, service_update_storage, is_valid, action,
+                                                entity_id, entity_type):
+        service_update_step = _test_model(
+            is_valid=is_valid,
+            storage=service_update_storage,
+            model_cls=ServiceUpdateStep,
+            model_kwargs=dict(
+                service_update=
+                service_update_storage.service_update.list()[0],
+                action=action,
+                entity_id=entity_id,
+                entity_type=entity_type
+            ))
+        if is_valid:
+            assert service_update_step.service_update == \
+                   service_update_storage.service_update.list()[0]
+
+    def test_service_update_step_order(self):
+        add_node = ServiceUpdateStep(
+            id='add_step',
+            action='add',
+            entity_type='node',
+            entity_id='node_id')
+
+        modify_node = ServiceUpdateStep(
+            id='modify_step',
+            action='modify',
+            entity_type='node',
+            entity_id='node_id')
+
+        remove_node = ServiceUpdateStep(
+            id='remove_step',
+            action='remove',
+            entity_type='node',
+            entity_id='node_id')
+
+        for step in (add_node, modify_node, remove_node):
+            assert hash((step.id, step.entity_id)) == hash(step)
+
+        assert remove_node < modify_node < add_node
+        assert not remove_node > modify_node > add_node
+
+        add_rel = ServiceUpdateStep(
+            id='add_step',
+            action='add',
+            entity_type='relationship',
+            entity_id='relationship_id')
+
+        remove_rel = ServiceUpdateStep(
+            id='remove_step',
+            action='remove',
+            entity_type='relationship',
+            entity_id='relationship_id')
+
+        assert remove_rel < remove_node < add_node < add_rel
+        assert not add_node < None
+
+
+class TestServiceModification(object):
+    @pytest.mark.parametrize(
+        'is_valid, context, created_at, ended_at, modified_node_templates, nodes, status',
+        [
+            (False, m_cls, now, now, {}, {}, ServiceModification.STARTED),
+            (False, {}, m_cls, now, {}, {}, ServiceModification.STARTED),
+            (False, {}, now, m_cls, {}, {}, ServiceModification.STARTED),
+            (False, {}, now, now, m_cls, {}, ServiceModification.STARTED),
+            (False, {}, now, now, {}, m_cls, ServiceModification.STARTED),
+            (False, {}, now, now, {}, {}, m_cls),
+
+            (True, {}, now, now, {}, {}, ServiceModification.STARTED),
+            (True, {}, now, None, {}, {}, ServiceModification.STARTED),
+            (True, {}, now, now, None, {}, ServiceModification.STARTED),
+            (True, {}, now, now, {}, None, ServiceModification.STARTED),
+        ]
+    )
+    def test_service_modification_model_creation(self, service_storage, is_valid, context,
+                                                 created_at, ended_at, modified_node_templates,
+                                                 nodes, status):
+        service_modification = _test_model(
+            is_valid=is_valid,
+            storage=service_storage,
+            model_cls=ServiceModification,
+            model_kwargs=dict(
+                service=service_storage.service.list()[0],
+                context=context,
+                created_at=created_at,
+                ended_at=ended_at,
+                modified_node_templates=modified_node_templates,
+                nodes=nodes,
+                status=status,
+            ))
+        if is_valid:
+            assert service_modification.service == \
+                   service_storage.service.list()[0]
+
+
+class TestNodeTemplate(object):
+    @pytest.mark.parametrize(
+        'is_valid, name, default_instances, max_instances, min_instances, plugin_specifications, '
+        'properties',
+        [
+            (False, m_cls, 1, 1, 1, {}, {}),
+            (False, 'name', m_cls, 1, 1, {}, {}),
+            (False, 'name', 1, m_cls, 1, {}, {}),
+            (False, 'name', 1, 1, m_cls, {}, {}),
+            (False, 'name', 1, 1, 1, m_cls, {}),
+            (False, 'name', 1, 1, 1, None, {}),
+
+            (True, 'name', 1, 1, 1, {}, {}),
+        ]
+    )
+    def test_node_template_model_creation(self, service_storage, is_valid, name, default_instances,
+                                          max_instances, min_instances, plugin_specifications,
+                                          properties):
+        node_template = _test_model(
+            is_valid=is_valid,
+            storage=service_storage,
+            model_cls=NodeTemplate,
+            model_kwargs=dict(
+                name=name,
+                type=service_storage.type.list()[0],
+                default_instances=default_instances,
+                max_instances=max_instances,
+                min_instances=min_instances,
+                plugin_specifications=plugin_specifications,
+                properties=properties,
+                service_template=service_storage.service_template.list()[0]
+            ))
+        if is_valid:
+            assert node_template.service_template == \
+                   service_storage.service_template.list()[0]
+
+
+class TestNode(object):
+    @pytest.mark.parametrize(
+        'is_valid, name, runtime_properties, scaling_groups, state, version',
+        [
+            (False, m_cls, {}, [], 'state', 1),
+            (False, 'name', m_cls, [], 'state', 1),
+            (False, 'name', {}, m_cls, 'state', 1),
+            (False, 'name', {}, [], m_cls, 1),
+            (False, m_cls, {}, [], 'state', m_cls),
+
+            (True, 'name', {}, [], 'state', 1),
+            (True, None, {}, [], 'state', 1),
+            (True, 'name', None, [], 'state', 1),
+            (True, 'name', {}, None, 'state', 1),
+            (True, 'name', {}, [], 'state', None),
+        ]
+    )
+    def test_node_model_creation(self, node_template_storage, is_valid, name, runtime_properties,
+                                 scaling_groups, state, version):
+        node = _test_model(
+            is_valid=is_valid,
+            storage=node_template_storage,
+            model_cls=Node,
+            model_kwargs=dict(
+                node_template=node_template_storage.node_template.list()[0],
+                type=node_template_storage.type.list()[0],
+                name=name,
+                runtime_properties=runtime_properties,
+                scaling_groups=scaling_groups,
+                state=state,
+                version=version,
+                service=node_template_storage.service.list()[0]
+            ))
+        if is_valid:
+            assert node.node_template == node_template_storage.node_template.list()[0]
+            assert node.service == \
+                   node_template_storage.service.list()[0]
+
+
+class TestNodeIP(object):
+
+    ip = '1.1.1.1'
+
+    def test_ip_on_none_hosted_node(self, service_storage):
+        node_template = self._node_template(service_storage, ip='not considered')
+        node = self._node(service_storage,
+                          node_template,
+                          is_host=False,
+                          ip='not considered')
+        assert node.ip is None
+
+    def test_property_ip_on_host_node(self, service_storage):
+        node_template = self._node_template(service_storage, ip=self.ip)
+        node = self._node(service_storage, node_template, is_host=True, ip=None)
+        assert node.ip == self.ip
+
+    def test_runtime_property_ip_on_host_node(self, service_storage):
+        node_template = self._node_template(service_storage, ip='not considered')
+        node = self._node(service_storage, node_template, is_host=True, ip=self.ip)
+        assert node.ip == self.ip
+
+    def test_no_ip_configured_on_host_node(self, service_storage):
+        node_template = self._node_template(service_storage, ip=None)
+        node = self._node(service_storage, node_template, is_host=True, ip=None)
+        assert node.ip is None
+
+    def test_runtime_property_on_hosted_node(self, service_storage):
+        host_node_template = self._node_template(service_storage, ip=None)
+        host_node = self._node(service_storage,
+                               host_node_template,
+                               is_host=True,
+                               ip=self.ip)
+        node_template = self._node_template(service_storage, ip=None)
+        node = self._node(service_storage,
+                          node_template,
+                          is_host=False,
+                          ip=None,
+                          host_fk=host_node.id)
+        assert node.ip == self.ip
+
+    def _node_template(self, storage, ip):
+        kwargs = dict(
+            name='node_template',
+            type=storage.type.list()[0],
+            default_instances=1,
+            max_instances=1,
+            min_instances=1,
+            service_template=storage.service_template.list()[0]
+        )
+        if ip:
+            kwargs['properties'] = {'ip': Parameter.wrap('ip', ip)}
+        node = NodeTemplate(**kwargs)
+        storage.node_template.put(node)
+        return node
+
+    def _node(self, storage, node, is_host, ip, host_fk=None):
+        kwargs = dict(
+            name='node',
+            node_template=node,
+            type=storage.type.list()[0],
+            runtime_properties={},
+            state='',
+            service=storage.service.list()[0]
+        )
+        if ip:
+            kwargs['runtime_properties']['ip'] = ip
+        if is_host:
+            kwargs['host_fk'] = 1
+        elif host_fk:
+            kwargs['host_fk'] = host_fk
+        node = Node(**kwargs)
+        storage.node.put(node)
+        return node
+
+
+class TestRelationship(object):
+    @pytest.mark.parametrize(
+        'is_valid, source_position, target_position',
+        [
+            (False, m_cls, 0),
+            (False, 0, m_cls),
+
+            (True, 0, 0),
+            (True, None, 0),
+            (True, 0, None),
+        ]
+    )
+    def test_relationship_model_creation(self, node_storage, is_valid, source_position,
+                                         target_position):
+        nodes = node_storage.node
+        source_node = nodes.get_by_name(mock.models.DEPENDENT_NODE_NAME)
+        target_node = nodes.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+        _test_model(is_valid=is_valid,
+                    storage=node_storage,
+                    model_cls=Relationship,
+                    model_kwargs=dict(
+                        source_node=source_node,
+                        target_node=target_node,
+                        source_position=source_position,
+                        target_position=target_position
+                    ))
+
+
+class TestPlugin(object):
+    @pytest.mark.parametrize(
+        'is_valid, archive_name, distribution, distribution_release, '
+        'distribution_version, package_name, package_source, '
+        'package_version, supported_platform, supported_py_versions, uploaded_at, wheels',
+        [
+            (False, m_cls, 'dis_name', 'dis_rel', 'dis_ver', 'pak_name', 'pak_src', 'pak_ver',
+             'sup_plat', [], now, []),
+            (False, 'arc_name', m_cls, 'dis_rel', 'dis_ver', 'pak_name', 'pak_src', 'pak_ver',
+             'sup_plat', [], now, []),
+            (False, 'arc_name', 'dis_name', m_cls, 'dis_ver', 'pak_name', 'pak_src', 'pak_ver',
+             'sup_plat', [], now, []),
+            (False, 'arc_name', 'dis_name', 'dis_rel', m_cls, 'pak_name', 'pak_src', 'pak_ver',
+             'sup_plat', [], now, []),
+            (False, 'arc_name', 'dis_name', 'dis_rel', 'dis_ver', m_cls, 'pak_src', 'pak_ver',
+             'sup_plat', [], now, []),
+            (False, 'arc_name', 'dis_name', 'dis_rel', 'dis_ver', 'pak_name', m_cls, 'pak_ver',
+             'sup_plat', [], now, []),
+            (False, 'arc_name', 'dis_name', 'dis_rel', 'dis_ver', 'pak_name', 'pak_src', m_cls,
+             'sup_plat', [], now, []),
+            (False, 'arc_name', 'dis_name', 'dis_rel', 'dis_ver', 'pak_name', 'pak_src',
+             'pak_ver', m_cls, [], now, []),
+            (False, 'arc_name', 'dis_name', 'dis_rel', 'dis_ver', 'pak_name', 'pak_src',
+             'pak_ver', 'sup_plat', m_cls, now, []),
+            (False, 'arc_name', 'dis_name', 'dis_rel', 'dis_ver', 'pak_name', 'pak_src',
+             'pak_ver', 'sup_plat', [], m_cls, []),
+            (False, 'arc_name', 'dis_name', 'dis_rel', 'dis_ver', 'pak_name', 'pak_src',
+             'pak_ver', 'sup_plat', [], now, m_cls),
+
+            (True, 'arc_name', 'dis_name', 'dis_rel', 'dis_ver', 'pak_name', 'pak_src', 'pak_ver',
+             'sup_plat', [], now, []),
+            (True, 'arc_name', None, 'dis_rel', 'dis_ver', 'pak_name', 'pak_src', 'pak_ver',
+             'sup_plat', [], now, []),
+            (True, 'arc_name', 'dis_name', None, 'dis_ver', 'pak_name', 'pak_src', 'pak_ver',
+             'sup_plat', [], now, []),
+            (True, 'arc_name', 'dis_name', 'dis_rel', None, 'pak_name', 'pak_src', 'pak_ver',
+             'sup_plat', [], now, []),
+            (True, 'arc_name', 'dis_name', 'dis_rel', 'dis_ver', 'pak_name', 'pak_src',
+             'pak_ver', 'sup_plat', [], now, []),
+            (True, 'arc_name', 'dis_name', 'dis_rel', 'dis_ver', 'pak_name', None, 'pak_ver',
+             'sup_plat', [], now, []),
+            (True, 'arc_name', 'dis_name', 'dis_rel', 'dis_ver', 'pak_name', 'pak_src', None,
+             'sup_plat', [], now, []),
+            (True, 'arc_name', 'dis_name', 'dis_rel', 'dis_ver', 'pak_name', 'pak_src',
+             'pak_ver', None, [], now, []),
+            (True, 'arc_name', 'dis_name', 'dis_rel', 'dis_ver', 'pak_name', 'pak_src',
+             'pak_ver', 'sup_plat', None, now, []),
+            (True, 'arc_name', 'dis_name', 'dis_rel', 'dis_ver', 'pak_name', 'pak_src',
+             'pak_ver', 'sup_plat', [], now, []),
+        ]
+    )
+    def test_plugin_model_creation(self, empty_storage, is_valid, archive_name, distribution,
+                                   distribution_release, distribution_version, package_name,
+                                   package_source, package_version, supported_platform,
+                                   supported_py_versions, uploaded_at, wheels):
+        _test_model(is_valid=is_valid,
+                    storage=empty_storage,
+                    model_cls=Plugin,
+                    model_kwargs=dict(
+                        archive_name=archive_name,
+                        distribution=distribution,
+                        distribution_release=distribution_release,
+                        distribution_version=distribution_version,
+                        package_name=package_name,
+                        package_source=package_source,
+                        package_version=package_version,
+                        supported_platform=supported_platform,
+                        supported_py_versions=supported_py_versions,
+                        uploaded_at=uploaded_at,
+                        wheels=wheels,
+                    ))
+
+
+class TestTask(object):
+
+    @pytest.mark.parametrize(
+        'is_valid, status, due_at, started_at, ended_at, max_attempts, retry_count, '
+        'retry_interval, ignore_failure, name, operation_mapping, inputs, plugin_id',
+        [
+            (False, m_cls, now, now, now, 1, 1, 1, True, 'name', 'map', {}, '1'),
+            (False, Task.STARTED, m_cls, now, now, 1, 1, 1, True, 'name', 'map', {}, '1'),
+            (False, Task.STARTED, now, m_cls, now, 1, 1, 1, True, 'name', 'map', {}, '1'),
+            (False, Task.STARTED, now, now, m_cls, 1, 1, 1, True, 'name', 'map', {}, '1'),
+            (False, Task.STARTED, now, now, now, m_cls, 1, 1, True, 'name', 'map', {}, '1'),
+            (False, Task.STARTED, now, now, now, 1, m_cls, 1, True, 'name', 'map', {}, '1'),
+            (False, Task.STARTED, now, now, now, 1, 1, m_cls, True, 'name', 'map', {}, '1'),
+            (False, Task.STARTED, now, now, now, 1, 1, 1, True, m_cls, 'map', {}, '1'),
+            (False, Task.STARTED, now, now, now, 1, 1, 1, True, 'name', m_cls, {}, '1'),
+            (False, Task.STARTED, now, now, now, 1, 1, 1, True, 'name', 'map', m_cls, '1'),
+            (False, Task.STARTED, now, now, now, 1, 1, 1, True, 'name', 'map', {}, m_cls),
+            (False, Task.STARTED, now, now, now, 1, 1, 1, True, 'name', 'map', None, '1'),
+
+            (True, Task.STARTED, now, now, now, 1, 1, 1, True, 'name', 'map', {}, '1'),
+            (True, Task.STARTED, None, now, now, 1, 1, 1, True, 'name', 'map', {}, '1'),
+            (True, Task.STARTED, now, None, now, 1, 1, 1, True, 'name', 'map', {}, '1'),
+            (True, Task.STARTED, now, now, None, 1, 1, 1, True, 'name', 'map', {}, '1'),
+            (True, Task.STARTED, now, now, now, 1, None, 1, True, 'name', 'map', {}, '1'),
+            (True, Task.STARTED, now, now, now, 1, 1, None, True, 'name', 'map', {}, '1'),
+            (True, Task.STARTED, now, now, now, 1, 1, 1, None, 'name', 'map', {}, '1'),
+            (True, Task.STARTED, now, now, now, 1, 1, 1, True, None, 'map', {}, '1'),
+            (True, Task.STARTED, now, now, now, 1, 1, 1, True, 'name', None, {}, '1'),
+            (True, Task.STARTED, now, now, now, 1, 1, 1, True, 'name', 'map', {}, None),
+        ]
+    )
+    def test_task_model_creation(self, execution_storage, is_valid, status, due_at, started_at,
+                                 ended_at, max_attempts, retry_count, retry_interval,
+                                 ignore_failure, name, operation_mapping, inputs, plugin_id):
+        task = _test_model(
+            is_valid=is_valid,
+            storage=execution_storage,
+            model_cls=Task,
+            model_kwargs=dict(
+                status=status,
+                execution=execution_storage.execution.list()[0],
+                due_at=due_at,
+                started_at=started_at,
+                ended_at=ended_at,
+                max_attempts=max_attempts,
+                retry_count=retry_count,
+                retry_interval=retry_interval,
+                ignore_failure=ignore_failure,
+                name=name,
+                implementation=operation_mapping,
+                inputs=inputs,
+                plugin_fk=plugin_id,
+            ))
+        if is_valid:
+            assert task.execution == execution_storage.execution.list()[0]
+            if task.plugin:
+                assert task.plugin == execution_storage.plugin.list()[0]
+
+    def test_task_max_attempts_validation(self):
+        def create_task(max_attempts):
+            Task(execution_fk='eid',
+                 name='name',
+                 implementation='',
+                 inputs={},
+                 max_attempts=max_attempts)
+        create_task(max_attempts=1)
+        create_task(max_attempts=2)
+        create_task(max_attempts=Task.INFINITE_RETRIES)
+        with pytest.raises(ValueError):
+            create_task(max_attempts=0)
+        with pytest.raises(ValueError):
+            create_task(max_attempts=-2)
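
The whole module above follows a single pattern: one parametrize table per model, where rows flagged True must persist through the storage API and rows flagged False must raise on put() (see _test_model near the top of the file). A minimal, self-contained sketch of the same idea, using a hypothetical Record class and plain pytest rather than ARIA's storage layer:

import pytest

class Record(object):
    """Hypothetical model: 'name' must be a string or None."""
    def __init__(self, name=None):
        if name is not None and not isinstance(name, str):
            raise TypeError('name must be a string or None')
        self.name = name

def _check_model(is_valid, model_cls, **kwargs):
    # mirrors _test_model: valid rows construct cleanly, invalid rows raise
    if is_valid:
        return model_cls(**kwargs)
    with pytest.raises(TypeError):
        model_cls(**kwargs)

@pytest.mark.parametrize('is_valid, name', [
    (True, 'dependency_node'),
    (True, None),
    (False, 42),  # wrong type must be rejected
])
def test_record_creation(is_valid, name):
    record = _check_model(is_valid, Record, name=name)
    if is_valid:
        assert record.name == name

The real tests differ only in that the constructed model also has to survive getattr(storage, model_cls.__modelname__).put(...), which is where StorageError and ValueFormatException join the expected-exceptions tuple.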

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/9841ca4a/tests/orchestrator/context/__init__.py
----------------------------------------------------------------------
diff --git a/tests/orchestrator/context/__init__.py b/tests/orchestrator/context/__init__.py
index ea0fea9..4fde0a7 100644
--- a/tests/orchestrator/context/__init__.py
+++ b/tests/orchestrator/context/__init__.py
@@ -23,10 +23,6 @@ def op_path(func, module_path=None):
     return '{0}.{1}'.format(module_path, func.__name__)
 
 
-def op_name(actor, operation_name):
-    return '{name}.{actor.id}'.format(name=operation_name, actor=actor)
-
-
 def execute(workflow_func, workflow_context, executor):
     graph = workflow_func(ctx=workflow_context)
     eng = engine.Engine(executor=executor, workflow_context=workflow_context, tasks_graph=graph)

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/9841ca4a/tests/orchestrator/context/test_operation.py
----------------------------------------------------------------------
diff --git a/tests/orchestrator/context/test_operation.py b/tests/orchestrator/context/test_operation.py
index b49b1cb..05c9656 100644
--- a/tests/orchestrator/context/test_operation.py
+++ b/tests/orchestrator/context/test_operation.py
@@ -31,7 +31,6 @@ import tests
 from tests import mock, storage
 from . import (
     op_path,
-    op_name,
     execute,
 )
 
@@ -67,40 +66,54 @@ def thread_executor():
 
 
 def test_node_operation_task_execution(ctx, thread_executor):
-    operation_name = 'aria.interfaces.lifecycle.create'
+    interface_name = 'Standard'
+    operation_name = 'create'
 
-    node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_INSTANCE_NAME)
-    interface = mock.models.get_interface(
+    node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+    interface = mock.models.create_interface(
+        node.service,
+        interface_name,
         operation_name,
         operation_kwargs=dict(implementation=op_path(basic_operation, module_path=__name__))
     )
-    node.interfaces = [interface]
+    node.interfaces[interface.name] = interface
     ctx.model.node.update(node)
     inputs = {'putput': True}
 
     @workflow
     def basic_workflow(graph, **_):
         graph.add_tasks(
-            api.task.OperationTask.node(
-                name=operation_name,
-                instance=node,
+            api.task.OperationTask.for_node(
+                interface_name=interface_name,
+                operation_name=operation_name,
+                node=node,
                 inputs=inputs
             )
         )
 
     execute(workflow_func=basic_workflow, workflow_context=ctx, executor=thread_executor)
 
-    operation_context = global_test_holder[op_name(node, operation_name)]
+    operation_context = global_test_holder[api.task.OperationTask.NAME_FORMAT.format(
+        type='node',
+        name=node.name,
+        interface=interface_name,
+        operation=operation_name
+    )]
 
     assert isinstance(operation_context, context.operation.NodeOperationContext)
 
     # Task-based assertions
     assert operation_context.task.actor == node
-    assert operation_context.task.name == op_name(node, operation_name)
-    operations = interface.operations.filter_by(name=operation_name)                                # pylint: disable=no-member
-    assert operations.count() == 1
-    assert operation_context.task.implementation == operations[0].implementation
-    assert operation_context.task.inputs == inputs
+    assert operation_context.task.name == api.task.OperationTask.NAME_FORMAT.format(
+        type='node',
+        name=node.name,
+        interface=interface_name,
+        operation=operation_name
+    )
+    operations = interface.operations
+    assert len(operations) == 1
+    assert operation_context.task.implementation == operations.values()[0].implementation           # pylint: disable=no-member
+    assert operation_context.task.inputs['putput'].value is True
 
     # Context based attributes (sugaring)
     assert operation_context.node_template == node.node_template
@@ -108,49 +121,57 @@ def test_node_operation_task_execution(ctx, thread_executor):
 
 
 def test_relationship_operation_task_execution(ctx, thread_executor):
-    operation_name = 'aria.interfaces.relationship_lifecycle.post_configure'
-    relationship = ctx.model.relationship.list()[0]
+    interface_name = 'Configure'
+    operation_name = 'post_configure'
 
-    interface = mock.models.get_interface(
-        operation_name=operation_name,
+    relationship = ctx.model.relationship.list()[0]
+    interface = mock.models.create_interface(
+        relationship.source_node.service,
+        interface_name,
+        operation_name,
         operation_kwargs=dict(implementation=op_path(basic_operation, module_path=__name__)),
-        edge='source'
     )
 
-    relationship.interfaces = [interface]
+    relationship.interfaces[interface.name] = interface
     ctx.model.relationship.update(relationship)
     inputs = {'putput': True}
 
     @workflow
     def basic_workflow(graph, **_):
         graph.add_tasks(
-            api.task.OperationTask.relationship(
-                instance=relationship,
-                name=operation_name,
-                inputs=inputs,
-                edge='source'
+            api.task.OperationTask.for_relationship(
+                relationship=relationship,
+                interface_name=interface_name,
+                operation_name=operation_name,
+                inputs=inputs
             )
         )
 
     execute(workflow_func=basic_workflow, workflow_context=ctx, executor=thread_executor)
 
-    operation_context = global_test_holder[op_name(relationship,
-                                                   operation_name)]
+    operation_context = global_test_holder[api.task.OperationTask.NAME_FORMAT.format(
+        type='relationship',
+        name=relationship.name,
+        interface=interface_name,
+        operation=operation_name
+    )]
 
     assert isinstance(operation_context, context.operation.RelationshipOperationContext)
 
     # Task-based assertions
     assert operation_context.task.actor == relationship
-    assert operation_context.task.name.startswith(operation_name)
-    operation = interface.operations.filter_by(name=operation_name)                                 # pylint: disable=no-member
-    assert operation_context.task.implementation == operation.all()[0].implementation
-    assert operation_context.task.inputs == inputs
+    assert interface_name in operation_context.task.name
+    operations = interface.operations
+    assert operation_context.task.implementation == operations.values()[0].implementation           # pylint: disable=no-member
+    assert operation_context.task.inputs['putput'].value is True
 
     # Context based attributes (sugaring)
-    dependency_node_template = ctx.model.node_template.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
-    dependency_node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_INSTANCE_NAME)
-    dependent_node_template = ctx.model.node_template.get_by_name(mock.models.DEPENDENT_NODE_NAME)
-    dependent_node = ctx.model.node.get_by_name(mock.models.DEPENDENT_NODE_INSTANCE_NAME)
+    dependency_node_template = ctx.model.node_template.get_by_name(
+        mock.models.DEPENDENCY_NODE_TEMPLATE_NAME)
+    dependency_node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+    dependent_node_template = ctx.model.node_template.get_by_name(
+        mock.models.DEPENDENT_NODE_TEMPLATE_NAME)
+    dependent_node = ctx.model.node.get_by_name(mock.models.DEPENDENT_NODE_NAME)
 
     assert operation_context.target_node_template == dependency_node_template
     assert operation_context.target_node == dependency_node
@@ -162,47 +183,65 @@ def test_relationship_operation_task_execution(ctx, thread_executor):
 def test_invalid_task_operation_id(ctx, thread_executor):
     """
     Checks that the right id is used. The task created with id == 1, thus running the task on
-    node_instance with id == 2. will check that indeed the node_instance uses the correct id.
+    a node with id == 2; the test verifies that the operation indeed receives the node's id.
     :param ctx:
     :param thread_executor:
     :return:
     """
-    operation_name = 'aria.interfaces.lifecycle.create'
+    interface_name = 'Standard'
+    operation_name = 'create'
+
     other_node, node = ctx.model.node.list()
     assert other_node.id == 1
     assert node.id == 2
 
-    interface = mock.models.get_interface(
+    interface = mock.models.create_interface(
+        node.service,
+        interface_name=interface_name,
         operation_name=operation_name,
-        operation_kwargs=dict(implementation=op_path(get_node_instance_id, module_path=__name__))
+        operation_kwargs=dict(implementation=op_path(get_node_id, module_path=__name__))
     )
-    node.interfaces = [interface]
+    node.interfaces[interface.name] = interface
     ctx.model.node.update(node)
 
     @workflow
     def basic_workflow(graph, **_):
         graph.add_tasks(
-            api.task.OperationTask.node(name=operation_name, instance=node)
+            api.task.OperationTask.for_node(node=node,
+                                            interface_name=interface_name,
+                                            operation_name=operation_name)
         )
 
     execute(workflow_func=basic_workflow, workflow_context=ctx, executor=thread_executor)
 
-    op_node_instance_id = global_test_holder[op_name(node, operation_name)]
-    assert op_node_instance_id == node.id
-    assert op_node_instance_id != other_node.id
+    op_node_id = global_test_holder[api.task.OperationTask.NAME_FORMAT.format(
+        type='node',
+        name=node.name,
+        interface=interface_name,
+        operation=operation_name
+    )]
+    assert op_node_id == node.id
+    assert op_node_id != other_node.id
 
 
 def test_plugin_workdir(ctx, thread_executor, tmpdir):
-    op = 'test.op'
-    plugin_name = 'mock_plugin'
-    node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_INSTANCE_NAME)
-    node.interfaces = [mock.models.get_interface(
-        op,
+    interface_name = 'Standard'
+    operation_name = 'create'
+
+    plugin = mock.models.create_plugin()
+    ctx.model.plugin.put(plugin)
+    plugin_specification = mock.models.create_plugin_specification()
+    node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+    interface = mock.models.create_interface(
+        node.service,
+        interface_name,
+        operation_name,
         operation_kwargs=dict(
             implementation='{0}.{1}'.format(__name__, _test_plugin_workdir.__name__),
-            plugin=plugin_name)
-    )]
-    node.plugins = [{'name': plugin_name}]
+            plugin_specification=plugin_specification)
+    )
+    node.interfaces[interface.name] = interface
+    node.plugin_specifications[plugin_specification.name] = plugin_specification
     ctx.model.node.update(node)
 
     filename = 'test_file'
@@ -211,12 +250,14 @@ def test_plugin_workdir(ctx, thread_executor, tmpdir):
 
     @workflow
     def basic_workflow(graph, **_):
-        graph.add_tasks(api.task.OperationTask.node(
-            name=op, instance=node, inputs=inputs))
+        graph.add_tasks(api.task.OperationTask.for_node(node=node,
+                                                        interface_name=interface_name,
+                                                        operation_name=operation_name,
+                                                        inputs=inputs))
 
     execute(workflow_func=basic_workflow, workflow_context=ctx, executor=thread_executor)
-    expected_file = tmpdir.join('workdir', 'plugins', str(ctx.service_instance.id),
-                                plugin_name,
+    expected_file = tmpdir.join('workdir', 'plugins', str(ctx.service.id),
+                                plugin.name,
                                 filename)
     assert expected_file.read() == content
 
@@ -235,14 +276,17 @@ def executor(request):
 
 
 def test_node_operation_logging(ctx, executor):
-    operation_name = mock.operations.NODE_OPERATIONS_INSTALL[0]
+    interface_name, operation_name = mock.operations.NODE_OPERATIONS_INSTALL[0]
 
-    node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_INSTANCE_NAME)
-    interface = mock.models.get_interface(
+    node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+    interface = mock.models.create_interface(
+        node.service,
+        interface_name,
         operation_name,
-        operation_kwargs=dict(implementation=op_path(logged_operation, module_path=__name__))
+        operation_kwargs=dict(
+            implementation=op_path(logged_operation, module_path=__name__))
     )
-    node.interfaces = [interface]
+    node.interfaces[interface.name] = interface
     ctx.model.node.update(node)
 
     inputs = {
@@ -253,9 +297,10 @@ def test_node_operation_logging(ctx, executor):
     @workflow
     def basic_workflow(graph, **_):
         graph.add_tasks(
-            api.task.OperationTask.node(
-                name=operation_name,
-                instance=node,
+            api.task.OperationTask.for_node(
+                interface_name=interface_name,
+                operation_name=operation_name,
+                node=node,
                 inputs=inputs
             )
         )
@@ -265,14 +310,16 @@ def test_node_operation_logging(ctx, executor):
 
 
 def test_relationship_operation_logging(ctx, executor):
-    operation_name = mock.operations.RELATIONSHIP_OPERATIONS_INSTALL[0].rsplit('_', 1)[0]
+    interface_name, operation_name = mock.operations.RELATIONSHIP_OPERATIONS_INSTALL[0]
 
     relationship = ctx.model.relationship.list()[0]
-    relationship.interfaces = [mock.models.get_interface(
+    interface = mock.models.create_interface(
+        relationship.source_node.service,
+        interface_name,
         operation_name,
-        operation_kwargs=dict(implementation=op_path(logged_operation, module_path=__name__)),
-        edge='source'
-    )]
+        operation_kwargs=dict(implementation=op_path(logged_operation, module_path=__name__))
+    )
+    relationship.interfaces[interface.name] = interface
     ctx.model.relationship.update(relationship)
 
     inputs = {
@@ -283,11 +330,11 @@ def test_relationship_operation_logging(ctx, executor):
     @workflow
     def basic_workflow(graph, **_):
         graph.add_tasks(
-            api.task.OperationTask.relationship(
-                name=operation_name,
-                instance=relationship,
-                inputs=inputs,
-                edge='source',
+            api.task.OperationTask.for_relationship(
+                interface_name=interface_name,
+                operation_name=operation_name,
+                relationship=relationship,
+                inputs=inputs
             )
         )
 
@@ -316,11 +363,13 @@ def _assert_loggins(ctx, inputs):
     assert all(l.execution == execution for l in logs)
     assert all(l in logs and l.task == task for l in task.logs)
 
-    op_start_log = [l for l in logs if inputs['op_start'] in l.msg and l.level.lower() == 'info']
+    op_start_log = [l for l in logs if
+                    inputs['op_start'].value in l.msg and l.level.lower() == 'info']
     assert len(op_start_log) == 1
     op_start_log = op_start_log[0]
 
-    op_end_log = [l for l in logs if inputs['op_end'] in l.msg and l.level.lower() == 'debug']
+    op_end_log = [l for l in logs
+                  if inputs['op_end'].value in l.msg and l.level.lower() == 'debug']
     assert len(op_end_log) == 1
     op_end_log = op_end_log[0]
 
@@ -341,7 +390,7 @@ def basic_operation(ctx, **_):
 
 
 @operation
-def get_node_instance_id(ctx, **_):
+def get_node_id(ctx, **_):
     global_test_holder[ctx.name] = ctx.node.id
 
 

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/9841ca4a/tests/orchestrator/context/test_resource_render.py
----------------------------------------------------------------------
diff --git a/tests/orchestrator/context/test_resource_render.py b/tests/orchestrator/context/test_resource_render.py
index aef439c..696e9b3 100644
--- a/tests/orchestrator/context/test_resource_render.py
+++ b/tests/orchestrator/context/test_resource_render.py
@@ -17,7 +17,7 @@ import pytest
 
 from tests import mock, storage
 
-_IMPLICIT_CTX_TEMPLATE = '{{ctx.service_instance.name}}'
+_IMPLICIT_CTX_TEMPLATE = '{{ctx.service.name}}'
 _IMPLICIT_CTX_TEMPLATE_PATH = 'implicit-ctx.template'
 _VARIABLES_TEMPLATE = '{{variable}}'
 _VARIABLES_TEMPLATE_PATH = 'variables.template'
@@ -25,7 +25,7 @@ _VARIABLES_TEMPLATE_PATH = 'variables.template'
 
 def test_get_resource_and_render_implicit_ctx_no_variables(ctx):
     content = ctx.get_resource_and_render(_IMPLICIT_CTX_TEMPLATE_PATH)
-    assert content == mock.models.DEPLOYMENT_NAME
+    assert content == mock.models.SERVICE_NAME
 
 
 def test_get_resource_and_render_provided_variables(ctx):
@@ -39,7 +39,7 @@ def test_download_resource_and_render_implicit_ctx_no_variables(tmpdir, ctx):
     destination = tmpdir.join('destination')
     ctx.download_resource_and_render(destination=str(destination),
                                      path=_IMPLICIT_CTX_TEMPLATE_PATH)
-    assert destination.read() == mock.models.DEPLOYMENT_NAME
+    assert destination.read() == mock.models.SERVICE_NAME
 
 
 def test_download_resource_and_render_provided_variables(tmpdir, ctx):
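
For reference, the implicit-ctx templates exercised here ({{ctx.service.name}}, {{variable}}) are plain Jinja-style expressions. A standalone illustration of what get_resource_and_render is being asked to do, rendered with jinja2 directly (an assumption about ARIA's underlying template engine) and with hypothetical stand-in objects:

import collections
import jinja2

Service = collections.namedtuple('Service', 'name')  # stand-in for the service model
Ctx = collections.namedtuple('Ctx', 'service')        # stand-in for the workflow context

def render(template_source, **variables):
    # 'variables' may carry the implicit ctx object or explicit template variables
    return jinja2.Template(template_source).render(**variables)

ctx = Ctx(service=Service(name='test_service'))
assert render('{{ctx.service.name}}', ctx=ctx) == 'test_service'
assert render('{{variable}}', variable='value') == 'value'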

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/9841ca4a/tests/orchestrator/context/test_serialize.py
----------------------------------------------------------------------
diff --git a/tests/orchestrator/context/test_serialize.py b/tests/orchestrator/context/test_serialize.py
index 03f9529..9a1250e 100644
--- a/tests/orchestrator/context/test_serialize.py
+++ b/tests/orchestrator/context/test_serialize.py
@@ -42,14 +42,20 @@ def test_serialize_operation_context(context, executor, tmpdir):
 
 @workflow
 def _mock_workflow(ctx, graph):
-    node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_INSTANCE_NAME)
-    plugin_name = 'mock_plugin'
-    node.interfaces = [mock.models.get_interface(
-        'test.op',
-        operation_kwargs=dict(implementation=_operation_mapping(), plugin=plugin_name)
-    )]
-    node.plugins = [{'name': plugin_name}]
-    task = api.task.OperationTask.node(instance=node, name='test.op')
+    node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+    plugin = mock.models.create_plugin()
+    ctx.model.plugin.put(plugin)
+    plugin_specification = mock.models.create_plugin_specification()
+    interface = mock.models.create_interface(
+        node.service,
+        'test',
+        'op',
+        operation_kwargs=dict(implementation=_operation_mapping(),
+                              plugin_specification=plugin_specification)
+    )
+    node.interfaces[interface.name] = interface
+    node.plugin_specifications[plugin_specification.name] = plugin_specification
+    task = api.task.OperationTask.for_node(node=node, interface_name='test', operation_name='op')
     graph.add_tasks(task)
     return graph
 
@@ -61,12 +67,12 @@ def _mock_operation(ctx):
     # a correct ctx.task.implementation tells us we kept the correct task_id
     assert ctx.task.implementation == _operation_mapping()
     # a correct ctx.node.name tells us we kept the correct actor_id
-    assert ctx.node.name == mock.models.DEPENDENCY_NODE_INSTANCE_NAME
+    assert ctx.node.name == mock.models.DEPENDENCY_NODE_NAME
     # a correct ctx.name tells us we kept the correct name
     assert ctx.name is not None
     assert ctx.name == ctx.task.name
     # a correct ctx.deployment.name tells us we kept the correct deployment_id
-    assert ctx.service_instance.name == mock.models.DEPLOYMENT_NAME
+    assert ctx.service.name == mock.models.SERVICE_NAME
     # Here we test that the resource storage was properly re-created
     test_file_content = ctx.resource.blueprint.read(TEST_FILE_ENTRY_ID, TEST_FILE_NAME)
     assert test_file_content == TEST_FILE_CONTENT

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/9841ca4a/tests/orchestrator/context/test_toolbelt.py
----------------------------------------------------------------------
diff --git a/tests/orchestrator/context/test_toolbelt.py b/tests/orchestrator/context/test_toolbelt.py
index 28bd3d3..cf82127 100644
--- a/tests/orchestrator/context/test_toolbelt.py
+++ b/tests/orchestrator/context/test_toolbelt.py
@@ -24,7 +24,6 @@ from aria.orchestrator.context.toolbelt import RelationshipToolBelt
 from tests import mock, storage
 from . import (
     op_path,
-    op_name,
     execute,
 )
 
@@ -49,22 +48,22 @@ def executor():
 
 def _get_elements(workflow_context):
     dependency_node_template = workflow_context.model.node_template.get_by_name(
-        mock.models.DEPENDENCY_NODE_NAME)
+        mock.models.DEPENDENCY_NODE_TEMPLATE_NAME)
     dependency_node_template.host = dependency_node_template
     workflow_context.model.node.update(dependency_node_template)
 
     dependency_node = workflow_context.model.node.get_by_name(
-        mock.models.DEPENDENCY_NODE_INSTANCE_NAME)
+        mock.models.DEPENDENCY_NODE_NAME)
     dependency_node.host_fk = dependency_node.id
     workflow_context.model.node.update(dependency_node)
 
     dependent_node_template = workflow_context.model.node_template.get_by_name(
-        mock.models.DEPENDENT_NODE_NAME)
+        mock.models.DEPENDENT_NODE_TEMPLATE_NAME)
     dependent_node_template.host = dependency_node_template
     workflow_context.model.node_template.update(dependent_node_template)
 
     dependent_node = workflow_context.model.node.get_by_name(
-        mock.models.DEPENDENT_NODE_INSTANCE_NAME)
+        mock.models.DEPENDENT_NODE_NAME)
     dependent_node.host = dependent_node
     workflow_context.model.node.update(dependent_node)
 
@@ -74,21 +73,26 @@ def _get_elements(workflow_context):
 
 
 def test_host_ip(workflow_context, executor):
-    operation_name = 'aria.interfaces.lifecycle.create'
+    interface_name = 'Standard'
+    operation_name = 'create'
     _, dependency_node, _, _, _ = _get_elements(workflow_context)
-    dependency_node.interfaces = [mock.models.get_interface(
-        operation_name,
+    interface = mock.models.create_interface(
+        dependency_node.service,
+        interface_name=interface_name,
+        operation_name=operation_name,
         operation_kwargs=dict(implementation=op_path(host_ip, module_path=__name__))
-    )]
+    )
+    dependency_node.interfaces[interface.name] = interface
     workflow_context.model.node.update(dependency_node)
     inputs = {'putput': True}
 
     @workflow
     def basic_workflow(graph, **_):
         graph.add_tasks(
-            api.task.OperationTask.node(
-                instance=dependency_node,
-                name=operation_name,
+            api.task.OperationTask.for_node(
+                node=dependency_node,
+                interface_name=interface_name,
+                operation_name=operation_name,
                 inputs=inputs
             )
         )
@@ -99,15 +103,16 @@ def test_host_ip(workflow_context, executor):
 
 
 def test_relationship_tool_belt(workflow_context, executor):
-    operation_name = 'aria.interfaces.relationship_lifecycle.post_configure'
+    interface_name = 'Configure'
+    operation_name = 'post_configure'
     _, _, _, _, relationship = _get_elements(workflow_context)
-    relationship.interfaces = [
-        mock.models.get_interface(
-            operation_name,
-            operation_kwargs=dict(
-                implementation=op_path(relationship_operation, module_path=__name__)),
-            edge='source')
-    ]
+    interface = mock.models.create_interface(
+        relationship.source_node.service,
+        interface_name=interface_name,
+        operation_name=operation_name,
+        operation_kwargs=dict(implementation=op_path(relationship_operation, module_path=__name__))
+    )
+    relationship.interfaces[interface.name] = interface
     workflow_context.model.relationship.update(relationship)
 
     inputs = {'putput': True}
@@ -115,18 +120,22 @@ def test_relationship_tool_belt(workflow_context, executor):
     @workflow
     def basic_workflow(graph, **_):
         graph.add_tasks(
-            api.task.OperationTask.relationship(
-                instance=relationship,
-                name=operation_name,
-                edge='source',
+            api.task.OperationTask.for_relationship(
+                relationship=relationship,
+                interface_name=interface_name,
+                operation_name=operation_name,
                 inputs=inputs
             )
         )
 
     execute(workflow_func=basic_workflow, workflow_context=workflow_context, executor=executor)
 
-    assert isinstance(global_test_holder.get(op_name(relationship, operation_name)),
-                      RelationshipToolBelt)
+    assert isinstance(global_test_holder.get(api.task.OperationTask.NAME_FORMAT.format(
+        type='relationship',
+        name=relationship.name,
+        interface=interface_name,
+        operation=operation_name
+    )), RelationshipToolBelt)
 
 
 def test_wrong_model_toolbelt():
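
The relationship side follows the same shape, and task names are now rendered from OperationTask.NAME_FORMAT. A sketch with hypothetical helper names, reusing the 'Configure'/'post_configure' pair and a placeholder implementation path:

    from aria.orchestrator.workflows import api
    from tests import mock

    def build_post_configure_task(relationship):
        interface = mock.models.create_interface(
            relationship.source_node.service,
            interface_name='Configure',
            operation_name='post_configure',
            operation_kwargs=dict(implementation='some.module.operation'))  # placeholder path
        relationship.interfaces[interface.name] = interface
        return api.task.OperationTask.for_relationship(
            relationship=relationship,
            interface_name='Configure',
            operation_name='post_configure')

    def toolbelt_key(relationship):
        # Registry lookups now key on type/name/interface/operation rather than
        # the old '<operation>.<id>' string.
        return api.task.OperationTask.NAME_FORMAT.format(
            type='relationship', name=relationship.name,
            interface='Configure', operation='post_configure')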

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/9841ca4a/tests/orchestrator/context/test_workflow.py
----------------------------------------------------------------------
diff --git a/tests/orchestrator/context/test_workflow.py b/tests/orchestrator/context/test_workflow.py
index 4e8eca4..fa1f387 100644
--- a/tests/orchestrator/context/test_workflow.py
+++ b/tests/orchestrator/context/test_workflow.py
@@ -29,11 +29,11 @@ class TestWorkflowContext(object):
     def test_execution_creation_on_workflow_context_creation(self, storage):
         ctx = self._create_ctx(storage)
         execution = storage.execution.get(ctx.execution.id)             # pylint: disable=no-member
-        assert execution.service_instance == storage.service_instance.get_by_name(
-            models.DEPLOYMENT_NAME)
+        assert execution.service == storage.service.get_by_name(
+            models.SERVICE_NAME)
         assert execution.workflow_name == models.WORKFLOW_NAME
         assert execution.service_template == storage.service_template.get_by_name(
-            models.BLUEPRINT_NAME)
+            models.SERVICE_TEMPLATE_NAME)
         assert execution.status == storage.execution.model_cls.PENDING
         assert execution.parameters == {}
         assert execution.created_at <= datetime.utcnow()
@@ -53,7 +53,7 @@ class TestWorkflowContext(object):
             name='simple_context',
             model_storage=storage,
             resource_storage=None,
-            service_instance_id=storage.service_instance.get_by_name(models.DEPLOYMENT_NAME).id,
+            service_id=storage.service.get_by_name(models.SERVICE_NAME).id,
             workflow_name=models.WORKFLOW_NAME,
             task_max_attempts=models.TASK_MAX_ATTEMPTS,
             task_retry_interval=models.TASK_RETRY_INTERVAL
@@ -64,8 +64,8 @@ class TestWorkflowContext(object):
 def storage():
     workflow_storage = application_model_storage(
         sql_mapi.SQLAlchemyModelAPI, initiator=test_storage.init_inmemory_model_storage)
-    workflow_storage.service_template.put(models.get_blueprint())
-    blueprint = workflow_storage.service_template.get_by_name(models.BLUEPRINT_NAME)
-    workflow_storage.service_instance.put(models.get_deployment(blueprint))
+    workflow_storage.service_template.put(models.create_service_template())
+    service_template = workflow_storage.service_template.get_by_name(models.SERVICE_TEMPLATE_NAME)
+    workflow_storage.service.put(models.create_service(service_template))
     yield workflow_storage
     test_storage.release_sqlite_storage(workflow_storage)
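
For reference, the renamed bootstrap helpers in this fixture read as a standalone sequence; the imports below are inferred from the names used here, so treat them as assumptions:

    from aria import application_model_storage
    from aria.storage import sql_mapi
    from tests import mock
    from tests import storage as test_storage

    workflow_storage = application_model_storage(
        sql_mapi.SQLAlchemyModelAPI, initiator=test_storage.init_inmemory_model_storage)
    workflow_storage.service_template.put(mock.models.create_service_template())
    service_template = workflow_storage.service_template.get_by_name(
        mock.models.SERVICE_TEMPLATE_NAME)
    workflow_storage.service.put(mock.models.create_service(service_template))
    # ... exercise workflow_storage.service / .service_template here ...
    test_storage.release_sqlite_storage(workflow_storage)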

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/9841ca4a/tests/orchestrator/execution_plugin/test_common.py
----------------------------------------------------------------------
diff --git a/tests/orchestrator/execution_plugin/test_common.py b/tests/orchestrator/execution_plugin/test_common.py
index 151b996..dd1e9fb 100644
--- a/tests/orchestrator/execution_plugin/test_common.py
+++ b/tests/orchestrator/execution_plugin/test_common.py
@@ -18,7 +18,7 @@ from collections import namedtuple
 import requests
 import pytest
 
-from aria.storage.modeling import model
+from aria.modeling import models
 from aria.orchestrator import exceptions
 from aria.orchestrator.execution_plugin import common
 
@@ -35,7 +35,7 @@ class TestDownloadScript(object):
 
     def _test_url(self, url):
         class Ctx(object):
-            task = model.Task
+            task = models.Task
 
         script_path = url
         result = common.download_script(Ctx, script_path)
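
The only substantive change in this file is the relocated modeling package; for quick reference:

    # before:  from aria.storage.modeling import model
    # after:
    from aria.modeling import models

    task_cls = models.Task  # model classes keep their names under the new package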

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/9841ca4a/tests/orchestrator/execution_plugin/test_local.py
----------------------------------------------------------------------
diff --git a/tests/orchestrator/execution_plugin/test_local.py b/tests/orchestrator/execution_plugin/test_local.py
index a94fc83..e3612cf 100644
--- a/tests/orchestrator/execution_plugin/test_local.py
+++ b/tests/orchestrator/execution_plugin/test_local.py
@@ -463,7 +463,7 @@ if __name__ == '__main__':
         script_path = os.path.basename(local_script_path) if local_script_path else None
         if script_path:
             workflow_context.resource.deployment.upload(
-                entry_id=str(workflow_context.service_instance.id),
+                entry_id=str(workflow_context.service.id),
                 source=local_script_path,
                 path=script_path)
 
@@ -476,17 +476,20 @@ if __name__ == '__main__':
 
         @workflow
         def mock_workflow(ctx, graph):
-            op = 'test.op'
-            node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_INSTANCE_NAME)
-            node.interfaces = [mock.models.get_interface(
-                op,
+            node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+            interface = mock.models.create_interface(
+                node.service,
+                'test',
+                'op',
                 operation_kwargs=dict(implementation='{0}.{1}'.format(
                     operations.__name__,
                     operations.run_script_locally.__name__))
-            )]
-            graph.add_tasks(api.task.OperationTask.node(
-                instance=node,
-                name=op,
+            )
+            node.interfaces[interface.name] = interface
+            graph.add_tasks(api.task.OperationTask.for_node(
+                node=node,
+                interface_name='test',
+                operation_name='op',
                 inputs=inputs))
             return graph
         tasks_graph = mock_workflow(ctx=workflow_context)  # pylint: disable=no-value-for-parameter
@@ -496,7 +499,7 @@ if __name__ == '__main__':
             tasks_graph=tasks_graph)
         eng.execute()
         return workflow_context.model.node.get_by_name(
-            mock.models.DEPENDENCY_NODE_INSTANCE_NAME).runtime_properties
+            mock.models.DEPENDENCY_NODE_NAME).runtime_properties
 
     @pytest.fixture
     def executor(self):
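
One detail from this hunk worth keeping in mind: deployment-scoped resources are now keyed by the service id. A small sketch (the helper name is hypothetical; argument names follow the test):

    def upload_script(workflow_context, local_script_path, script_path):
        # entry_id is derived from the service rather than the old service instance id.
        workflow_context.resource.deployment.upload(
            entry_id=str(workflow_context.service.id),
            source=local_script_path,
            path=script_path)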

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/9841ca4a/tests/orchestrator/execution_plugin/test_ssh.py
----------------------------------------------------------------------
diff --git a/tests/orchestrator/execution_plugin/test_ssh.py b/tests/orchestrator/execution_plugin/test_ssh.py
index ad577f0..dd36466 100644
--- a/tests/orchestrator/execution_plugin/test_ssh.py
+++ b/tests/orchestrator/execution_plugin/test_ssh.py
@@ -24,7 +24,7 @@ import fabric.api
 from fabric.contrib import files
 from fabric import context_managers
 
-from aria.storage.modeling import model
+from aria.modeling import models
 from aria.orchestrator import events
 from aria.orchestrator import workflow
 from aria.orchestrator.workflows import api
@@ -124,10 +124,10 @@ class TestWithActualSSHServer(object):
 
     def test_run_script_download_resource_and_render(self, tmpdir):
         resource = tmpdir.join('resource')
-        resource.write('{{ctx.service_instance.name}}')
+        resource.write('{{ctx.service.name}}')
         self._upload(str(resource), 'test_resource')
         props = self._execute()
-        assert props['test_value'] == self._workflow_context.service_instance.name
+        assert props['test_value'] == self._workflow_context.service.name
 
     @pytest.mark.parametrize('value', ['string-value', [1, 2, 3], {'key': 'value'}])
     def test_run_script_inputs_as_env_variables_no_override(self, value):
@@ -216,15 +216,20 @@ class TestWithActualSSHServer(object):
 
         @workflow
         def mock_workflow(ctx, graph):
-            op = 'test.op'
-            node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_INSTANCE_NAME)
-            node.interfaces = [mock.models.get_interface(
-                op,
-                dict(implementation='{0}.{1}'.format(operations.__name__, operation.__name__))
-            )]
-            graph.sequence(*[api.task.OperationTask.node(
-                instance=node,
-                name=op,
+            node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+            interface = mock.models.create_interface(
+                node.service,
+                'test',
+                'op',
+                operation_kwargs=dict(implementation='{0}.{1}'.format(
+                    operations.__name__,
+                    operation.__name__))
+            )
+            node.interfaces[interface.name] = interface
+            graph.sequence(*[api.task.OperationTask.for_node(
+                node=node,
+                interface_name='test',
+                operation_name='op',
                 inputs={
                     'script_path': script_path,
                     'fabric_env': _FABRIC_ENV,
@@ -243,7 +248,7 @@ class TestWithActualSSHServer(object):
             tasks_graph=tasks_graph)
         eng.execute()
         return self._workflow_context.model.node.get_by_name(
-            mock.models.DEPENDENCY_NODE_INSTANCE_NAME).runtime_properties
+            mock.models.DEPENDENCY_NODE_NAME).runtime_properties
 
     def _execute_and_get_task_exception(self, *args, **kwargs):
         signal = events.on_failure_task_signal
@@ -254,7 +259,7 @@ class TestWithActualSSHServer(object):
 
     def _upload(self, source, path):
         self._workflow_context.resource.deployment.upload(
-            entry_id=str(self._workflow_context.service_instance.id),
+            entry_id=str(self._workflow_context.service.id),
             source=source,
             path=path)
 
@@ -407,7 +412,7 @@ class TestFabricEnvHideGroupsAndRunCommands(object):
         class Stub(object):
             @staticmethod
             def abort(message=None):
-                model.Task.abort(message)
+                models.Task.abort(message)
             ip = None
         task = Stub
         task.runs_on = Stub

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/9841ca4a/tests/orchestrator/test_runner.py
----------------------------------------------------------------------
diff --git a/tests/orchestrator/test_runner.py b/tests/orchestrator/test_runner.py
index 17230df..74e98ad 100644
--- a/tests/orchestrator/test_runner.py
+++ b/tests/orchestrator/test_runner.py
@@ -49,8 +49,9 @@ def test_runner_tasks():
     def workflow_fn(ctx, graph):
         for node in ctx.model.node:
             graph.add_tasks(
-                OperationTask.node(instance=node,
-                                   name='tosca.interfaces.node.lifecycle.Standard.create'))
+                OperationTask.for_node(node=node,
+                                       interface_name='Standard',
+                                       operation_name='create'))
 
     _test_runner(workflow_fn)
 
@@ -69,5 +70,5 @@ def _test_runner(workflow_fn):
                     workflow_fn=workflow_fn,
                     inputs={},
                     initialize_model_storage_fn=_initialize_model_storage_fn,
-                    service_instance_id=1)
+                    service_id_fn=lambda: 1)
     runner.run()

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/9841ca4a/tests/orchestrator/workflows/api/test_task.py
----------------------------------------------------------------------
diff --git a/tests/orchestrator/workflows/api/test_task.py b/tests/orchestrator/workflows/api/test_task.py
index 79618c1..b635a88 100644
--- a/tests/orchestrator/workflows/api/test_task.py
+++ b/tests/orchestrator/workflows/api/test_task.py
@@ -18,7 +18,8 @@ import pytest
 
 from aria.orchestrator import context
 from aria.orchestrator.workflows import api
-from aria.storage.modeling import model
+from aria.modeling import models
+
 from tests import mock, storage
 
 
@@ -30,7 +31,7 @@ def ctx(tmpdir):
     :return:
     """
     simple_context = mock.context.simple(str(tmpdir), inmemory=False)
-    simple_context.model.execution.put(mock.models.get_execution(simple_context.service_instance))
+    simple_context.model.execution.put(mock.models.create_execution(simple_context.service))
     yield simple_context
     storage.release_sqlite_storage(simple_context.model)
 
@@ -38,133 +39,183 @@ def ctx(tmpdir):
 class TestOperationTask(object):
 
     def test_node_operation_task_creation(self, ctx):
-        operation_name = 'aria.interfaces.lifecycle.create'
-        interface = mock.models.get_interface(
+        interface_name = 'test_interface'
+        operation_name = 'create'
+
+        plugin = mock.models.create_plugin('package', '0.1')
+        ctx.model.node.update(plugin)
+
+        plugin_specification = mock.models.create_plugin_specification('package', '0.1')
+
+        interface = mock.models.create_interface(
+            ctx.service,
+            interface_name,
             operation_name,
-            operation_kwargs=dict(plugin='plugin', implementation='op_path'))
-
-        node = ctx.model.node.get_by_name(mock.models.DEPENDENT_NODE_INSTANCE_NAME)
-        node.interfaces = [interface]
-        node.plugins = [{'name': 'plugin',
-                         'package_name': 'package',
-                         'package_version': '0.1'}]
-        ctx.model.node_template.update(node)
-        inputs = {'name': True}
+            operation_kwargs=dict(plugin_specification=plugin_specification,
+                                  implementation='op_path'))
+
+        node = ctx.model.node.get_by_name(mock.models.DEPENDENT_NODE_NAME)
+        node.interfaces[interface_name] = interface
+        node.plugin_specifications[plugin_specification.name] = plugin_specification
+        ctx.model.node.update(node)
+        inputs = {'test_input': True}
         max_attempts = 10
         retry_interval = 10
         ignore_failure = True
 
         with context.workflow.current.push(ctx):
-            api_task = api.task.OperationTask.node(
-                name=operation_name,
-                instance=node,
+            api_task = api.task.OperationTask.for_node(
+                node=node,
+                interface_name=interface_name,
+                operation_name=operation_name,
                 inputs=inputs,
                 max_attempts=max_attempts,
                 retry_interval=retry_interval,
                 ignore_failure=ignore_failure)
 
-        assert api_task.name == '{0}.{1}'.format(operation_name, node.id)
+        assert api_task.name == api.task.OperationTask.NAME_FORMAT.format(
+            type='node',
+            name=node.name,
+            interface=interface_name,
+            operation=operation_name
+        )
         assert api_task.implementation == 'op_path'
         assert api_task.actor == node
-        assert api_task.inputs == inputs
+        assert api_task.inputs['test_input'].value is True
         assert api_task.retry_interval == retry_interval
         assert api_task.max_attempts == max_attempts
         assert api_task.ignore_failure == ignore_failure
-        assert api_task.plugin == {'name': 'plugin',
-                                   'package_name': 'package',
-                                   'package_version': '0.1'}
-        assert api_task.runs_on == model.Task.RUNS_ON_NODE_INSTANCE
+        assert api_task.plugin.name == 'test_plugin'
+        assert api_task.runs_on == models.Task.RUNS_ON_NODE
 
     def test_source_relationship_operation_task_creation(self, ctx):
-        operation_name = 'aria.interfaces.relationship_lifecycle.preconfigure'
+        interface_name = 'test_interface'
+        operation_name = 'preconfigure'
 
-        interface = mock.models.get_interface(
+        plugin = mock.models.create_plugin('package', '0.1')
+        ctx.model.node.update(plugin)
+
+        plugin_specification = mock.models.create_plugin_specification('package', '0.1')
+
+        interface = mock.models.create_interface(
+            ctx.service,
+            interface_name,
             operation_name,
-            operation_kwargs=dict(implementation='op_path', plugin='plugin'),
-            edge='source'
+            operation_kwargs=dict(plugin_specification=plugin_specification,
+                                  implementation='op_path')
         )
 
         relationship = ctx.model.relationship.list()[0]
-        relationship.interfaces = [interface]
-        relationship.source_node.plugins = [{'name': 'plugin',
-                                             'package_name': 'package',
-                                             'package_version': '0.1'}]
-        inputs = {'name': True}
+        relationship.interfaces[interface.name] = interface
+        relationship.source_node.plugin_specifications[plugin_specification.name] = \
+            plugin_specification
+        inputs = {'test_input': True}
         max_attempts = 10
         retry_interval = 10
 
         with context.workflow.current.push(ctx):
-            api_task = api.task.OperationTask.relationship(
-                name=operation_name,
-                instance=relationship,
-                edge='source',
+            api_task = api.task.OperationTask.for_relationship(
+                relationship=relationship,
+                interface_name=interface_name,
+                operation_name=operation_name,
                 inputs=inputs,
                 max_attempts=max_attempts,
                 retry_interval=retry_interval)
 
-        assert api_task.name == '{0}.{1}'.format(operation_name, relationship.id)
+        assert api_task.name == api.task.OperationTask.NAME_FORMAT.format(
+            type='relationship',
+            name=relationship.name,
+            interface=interface_name,
+            operation=operation_name
+        )
         assert api_task.implementation == 'op_path'
         assert api_task.actor == relationship
-        assert api_task.inputs == inputs
+        assert api_task.inputs['test_input'].value is True
         assert api_task.retry_interval == retry_interval
         assert api_task.max_attempts == max_attempts
-        assert api_task.plugin == {'name': 'plugin',
-                                   'package_name': 'package',
-                                   'package_version': '0.1'}
-        assert api_task.runs_on == model.Task.RUNS_ON_SOURCE
+        assert api_task.plugin.name == 'test_plugin'
+        assert api_task.runs_on == models.Task.RUNS_ON_SOURCE
 
     def test_target_relationship_operation_task_creation(self, ctx):
-        operation_name = 'aria.interfaces.relationship_lifecycle.preconfigure'
-        interface = mock.models.get_interface(
+        interface_name = 'test_interface'
+        operation_name = 'preconfigure'
+
+        plugin = mock.models.create_plugin('package', '0.1')
+        ctx.model.node.update(plugin)
+
+        plugin_specification = mock.models.create_plugin_specification('package', '0.1')
+
+        interface = mock.models.create_interface(
+            ctx.service,
+            interface_name,
             operation_name,
-            operation_kwargs=dict(implementation='op_path', plugin='plugin'),
-            edge='target'
+            operation_kwargs=dict(plugin_specification=plugin_specification,
+                                  implementation='op_path')
         )
 
         relationship = ctx.model.relationship.list()[0]
-        relationship.interfaces = [interface]
-        relationship.target_node.plugins = [{'name': 'plugin',
-                                             'package_name': 'package',
-                                             'package_version': '0.1'}]
-        inputs = {'name': True}
+        relationship.interfaces[interface.name] = interface
+        relationship.target_node.plugin_specifications[plugin_specification.name] = \
+            plugin_specification
+        inputs = {'test_input': True}
         max_attempts = 10
         retry_interval = 10
 
         with context.workflow.current.push(ctx):
-            api_task = api.task.OperationTask.relationship(
-                name=operation_name,
-                instance=relationship,
-                edge='target',
+            api_task = api.task.OperationTask.for_relationship(
+                relationship=relationship,
+                interface_name=interface_name,
+                operation_name=operation_name,
                 inputs=inputs,
                 max_attempts=max_attempts,
-                retry_interval=retry_interval)
+                retry_interval=retry_interval,
+                runs_on=models.Task.RUNS_ON_TARGET)
 
-        assert api_task.name == '{0}.{1}'.format(operation_name, relationship.id)
+        assert api_task.name == api.task.OperationTask.NAME_FORMAT.format(
+            type='relationship',
+            name=relationship.name,
+            interface=interface_name,
+            operation=operation_name
+        )
         assert api_task.implementation == 'op_path'
         assert api_task.actor == relationship
-        assert api_task.inputs == inputs
+        assert api_task.inputs['test_input'].value is True
         assert api_task.retry_interval == retry_interval
         assert api_task.max_attempts == max_attempts
-        assert api_task.plugin == {'name': 'plugin',
-                                   'package_name': 'package',
-                                   'package_version': '0.1'}
-        assert api_task.runs_on == model.Task.RUNS_ON_TARGET
+        assert api_task.plugin.name == 'test_plugin'
+        assert api_task.runs_on == models.Task.RUNS_ON_TARGET
 
     def test_operation_task_default_values(self, ctx):
-        dependency_node_instance = ctx.model.node.get_by_name(
-            mock.models.DEPENDENCY_NODE_INSTANCE_NAME)
+        interface_name = 'test_interface'
+        operation_name = 'create'
+
+        plugin = mock.models.create_plugin('package', '0.1')
+        ctx.model.node.update(plugin)
+
+        plugin_specification = mock.models.create_plugin_specification('package', '0.1')
+
+        dependency_node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+
+        interface = mock.models.create_interface(
+            ctx.service,
+            interface_name,
+            operation_name,
+            operation_kwargs=dict(plugin_specification=plugin_specification,
+                                  implementation='op_path'))
+        dependency_node.interfaces[interface_name] = interface
+
         with context.workflow.current.push(ctx):
-            task = api.task.OperationTask(
-                name='stub',
-                implementation='',
-                actor=dependency_node_instance)
+            task = api.task.OperationTask.for_node(
+                node=dependency_node,
+                interface_name=interface_name,
+                operation_name=operation_name)
 
         assert task.inputs == {}
         assert task.retry_interval == ctx._task_retry_interval
         assert task.max_attempts == ctx._task_max_attempts
         assert task.ignore_failure == ctx._task_ignore_failure
-        assert task.plugin == {}
-        assert task.runs_on is None
+        assert task.plugin is plugin
+        assert task.runs_on == models.Task.RUNS_ON_NODE
 
 
 class TestWorkflowTask(object):
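
The updated assertions in this file also document what a built api task now exposes. Collected into one hypothetical checker (names follow the tests above):

    from aria.modeling import models
    from aria.orchestrator.workflows import api

    def check_node_task(api_task, node, interface_name, operation_name):
        assert api_task.name == api.task.OperationTask.NAME_FORMAT.format(
            type='node', name=node.name,
            interface=interface_name, operation=operation_name)
        # Inputs are wrapped as Parameter models, hence the .value comparison.
        assert api_task.inputs['test_input'].value is True
        # The plugin is resolved from the plugin specification attached to the node,
        # so the task carries a Plugin model rather than a plain dict.
        assert api_task.plugin.name == 'test_plugin'
        assert api_task.runs_on == models.Task.RUNS_ON_NODE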

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/9841ca4a/tests/orchestrator/workflows/builtin/test_execute_operation.py
----------------------------------------------------------------------
diff --git a/tests/orchestrator/workflows/builtin/test_execute_operation.py b/tests/orchestrator/workflows/builtin/test_execute_operation.py
index cc9a8a8..360e17d 100644
--- a/tests/orchestrator/workflows/builtin/test_execute_operation.py
+++ b/tests/orchestrator/workflows/builtin/test_execute_operation.py
@@ -18,28 +18,33 @@ import pytest
 from aria.orchestrator.workflows.api import task
 from aria.orchestrator.workflows.builtin.execute_operation import execute_operation
 
-from tests import mock
-from tests import storage
+from tests import mock, storage
 
 
 @pytest.fixture
 def ctx(tmpdir):
-    context = mock.context.simple(str(tmpdir))
+    context = mock.context.simple(str(tmpdir), inmemory=False)
     yield context
     storage.release_sqlite_storage(context.model)
 
 
 def test_execute_operation(ctx):
-    node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_INSTANCE_NAME)
-    node.interfaces = [mock.models.get_interface(mock.operations.NODE_OPERATIONS_INSTALL[0])]
+    node = ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+    interface_name, operation_name = mock.operations.NODE_OPERATIONS_INSTALL[0]
+    interface = mock.models.create_interface(
+        ctx.service,
+        interface_name,
+        operation_name
+    )
+    node.interfaces[interface.name] = interface
     ctx.model.node.update(node)
-    operation_name = mock.operations.NODE_OPERATIONS_INSTALL[0]
 
     execute_tasks = list(
         task.WorkflowTask(
             execute_operation,
             ctx=ctx,
-            operation=operation_name,
+            interface_name=interface_name,
+            operation_name=operation_name,
             operation_kwargs={},
             allow_kwargs_override=False,
             run_by_dependency_order=False,
@@ -50,8 +55,12 @@ def test_execute_operation(ctx):
     )
 
     assert len(execute_tasks) == 1
-    assert execute_tasks[0].name == '{0}.{1}'.format(operation_name, node.id)
-
+    assert execute_tasks[0].name == task.OperationTask.NAME_FORMAT.format(
+        type='node',
+        name=node.name,
+        interface=interface_name,
+        operation=operation_name
+    )
 
 
 # TODO: add more scenarios
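
Also visible in this hunk: the mock operation lists now yield (interface_name, operation_name) pairs rather than dotted operation strings, so callers unpack the pair before building the interface. A hypothetical helper capturing that:

    from tests import mock

    def attach_install_interface(ctx, node):
        interface_name, operation_name = mock.operations.NODE_OPERATIONS_INSTALL[0]
        interface = mock.models.create_interface(ctx.service, interface_name, operation_name)
        node.interfaces[interface.name] = interface
        ctx.model.node.update(node)
        return interface_name, operation_name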

http://git-wip-us.apache.org/repos/asf/incubator-ariatosca/blob/9841ca4a/tests/orchestrator/workflows/builtin/test_heal.py
----------------------------------------------------------------------
diff --git a/tests/orchestrator/workflows/builtin/test_heal.py b/tests/orchestrator/workflows/builtin/test_heal.py
index b553049..92fa7ea 100644
--- a/tests/orchestrator/workflows/builtin/test_heal.py
+++ b/tests/orchestrator/workflows/builtin/test_heal.py
@@ -33,11 +33,11 @@ def ctx(tmpdir):
 
 @pytest.mark.skip(reason='heal is not implemented for now')
 def test_heal_dependent_node(ctx):
-    dependent_node_instance = \
-        ctx.model.node.get_by_name(mock.models.DEPENDENT_NODE_INSTANCE_NAME)
-    dependent_node_instance.host_fk = dependent_node_instance.id
-    ctx.model.node.update(dependent_node_instance)
-    heal_graph = task.WorkflowTask(heal, ctx=ctx, node_instance_id=dependent_node_instance.id)
+    dependent_node = \
+        ctx.model.node.get_by_name(mock.models.DEPENDENT_NODE_NAME)
+    dependent_node.host_fk = dependent_node.id
+    ctx.model.node.update(dependent_node)
+    heal_graph = task.WorkflowTask(heal, ctx=ctx, node_id=dependent_node.id)
 
     assert len(list(heal_graph.tasks)) == 2
     uninstall_subgraph, install_subgraph = list(heal_graph.topological_order(reverse=True))
@@ -63,11 +63,11 @@ def test_heal_dependent_node(ctx):
 
 @pytest.mark.skip(reason='heal is not implemented for now')
 def test_heal_dependency_node(ctx):
-    dependency_node_instance = \
-        ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_INSTANCE_NAME)
-    dependency_node_instance.host_fk = dependency_node_instance.id
-    ctx.model.node.update(dependency_node_instance)
-    heal_graph = task.WorkflowTask(heal, ctx=ctx, node_instance_id=dependency_node_instance.id)
+    dependency_node = \
+        ctx.model.node.get_by_name(mock.models.DEPENDENCY_NODE_NAME)
+    dependency_node.host_fk = dependency_node.id
+    ctx.model.node.update(dependency_node)
+    heal_graph = task.WorkflowTask(heal, ctx=ctx, node_id=dependency_node.id)
     # both subgraphs should contain un\install for both the dependent and the dependency
     assert len(list(heal_graph.tasks)) == 2
     uninstall_subgraph, install_subgraph = list(heal_graph.topological_order(reverse=True))