Posted to commits@openwhisk.apache.org by ra...@apache.org on 2020/10/22 18:11:30 UTC

[openwhisk-runtime-python] branch master updated: Remove non-action loop proxies. (#93)

This is an automated email from the ASF dual-hosted git repository.

rabbah pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/openwhisk-runtime-python.git


The following commit(s) were added to refs/heads/master by this push:
     new 8dd54c7  Remove non-action loop proxies. (#93)
8dd54c7 is described below

commit 8dd54c76056d32382c4be945fe5b6e45588b083a
Author: rodric rabbah <ro...@gmail.com>
AuthorDate: Thu Oct 22 14:11:18 2020 -0400

    Remove non-action loop proxies. (#93)
    
    * Remove old proxies.
    * Update README.
---
 .travis.yml                                        |   2 +-
 README.md                                          | 118 ++-
 core/python3Action/CHANGELOG.md                    |  89 ---
 core/python3Action/Dockerfile                      |  51 --
 core/python3Action/build.gradle                    |  19 -
 core/python3Action/pythonrunner.py                 | 100 ---
 core/python3AiAction/Dockerfile                    |  61 --
 core/python3AiAction/README.md                     |  85 --
 core/python3AiAction/build.gradle                  |  19 -
 core/python3AiAction/pythonrunner.py               | 100 ---
 core/python3AiAction/requirements.txt              |  27 -
 .../samples/smart-body-crop/.gitignore             |   5 -
 .../samples/smart-body-crop/common.py              | 332 --------
 .../samples/smart-body-crop/crop.ipynb             | 872 ---------------------
 .../samples/smart-body-crop/fashion-men-1.jpg      | Bin 2471074 -> 0 bytes
 .../samples/smart-body-crop/inference.py           | 246 ------
 core/python3AiActionLoop/README.md                 |   6 +-
 settings.gradle                                    |   2 -
 .../Python3AiActionContainerTests.scala            |  94 ---
 .../Python3AiActionLoopContainerTests.scala        |   8 +-
 .../PythonActionContainerTests.scala               |  22 +-
 .../PythonActionLoopExtraTests.scala               |   1 -
 tools/travis/publish.sh                            |   6 +-
 23 files changed, 76 insertions(+), 2189 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 217a680..b1f39b6 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -45,7 +45,7 @@ deploy:
       all_branches: true
       repo: apache/openwhisk-runtime-python
   - provider: script
-    script: "./tools/travis/publish.sh openwhisk 3 nightly && ./tools/travis/publish.sh openwhisk 3-ai nightly && ./tools/travis/publish.sh openwhisk 3-loop nightly && ./tools/travis/publish.sh openwhisk 3-loopai nightly"
+    script: "./tools/travis/publish.sh openwhisk 3 nightly && ./tools/travis/publish.sh openwhisk 3-ai nightly"
     on:
       branch: master
       repo: apache/openwhisk-runtime-python
diff --git a/README.md b/README.md
index 9bfcf6e..38b0834 100644
--- a/README.md
+++ b/README.md
@@ -21,96 +21,82 @@
 [![License](https://img.shields.io/badge/license-Apache--2.0-blue.svg)](http://www.apache.org/licenses/LICENSE-2.0)
 [![Build Status](https://travis-ci.org/apache/openwhisk-runtime-python.svg?branch=master)](https://travis-ci.org/apache/openwhisk-runtime-python)
 
+## Build Runtimes
+
+The runtimes are built using Gradle.
+The file [settings.gradle](settings.gradle) lists the images that are built by default.
+To build all those images, run the following command.
 
-### Give it a try today
-To use as a docker action using python 3
 ```
-wsk action update myAction myAction.py --docker openwhisk/python3action:1.0.2
+./gradlew distDocker
 ```
 
-### To use on deployment that contains the rutime as a kind
-To use as a kind action using python 3
+You can optionally build a specific image by modifying the Gradle command. For example:
 ```
-wsk action update myAction myAction.py --kind python:3
+./gradlew core:python3ActionLoop:distDocker
 ```
-Replace `python:3` with `python:2` to use python 2.
 
+The build will produce Docker images such as `actionloop-python-v3.7`
+and will also tag the same image with the `whisk/` prefix. The latter
+is a convenience: if you're testing with a local OpenWhisk stack, it
+lets you skip pushing the image to Docker Hub.
 
-### Python 3 AI Action
-This action enables developers to create AI Services with OpenWhisk. It comes with preinstalled libraries useful for running machine learning and deep learning inferences. See more about [python3aiaction](./core/python3AiAction).
+The image will need to be pushed to Docker Hub if you want to test it
+with a hosted OpenWhisk installation.
 
-### Local development
-```
-./gradlew core:pythonAction:distDocker
-```
-This will produce the image `whisk/python3action`
+### Using Gradle to push to a Docker Registry
 
-Build and Push image
-```
-docker login
-./gradlew core:pythonAction:distDocker -PdockerImagePrefix=$prefix-user -PdockerRegistry=docker.io
-```
+The Gradle build parameters `dockerImagePrefix` and `dockerRegistry`
+can be configured for your Docker registry. Make sure you are logged
+in first with the `docker` CLI.
 
-Deploy OpenWhisk using ansible environment that contains the kind `python:3` and `python:2`
-Assuming you have OpenWhisk already deploy locally and `OPENWHISK_HOME` pointing to root directory of OpenWhisk core repository.
+- Use the `docker` CLI to log in. The following assumes you will substitute `$DOCKER_USER` with an appropriate value.
+  ```
+  docker login --username $DOCKER_USER
+  ```
 
-Set `ROOTDIR` to the root directory of this repository.
+- Now build, tag and push the image accordingly.
+  ```
+  ./gradlew distDocker -PdockerImagePrefix=$DOCKER_USER -PdockerRegistry=docker.io
+  ```
 
-Redeploy OpenWhisk
-```
-cd $OPENWHISK_HOME/ansible
-ANSIBLE_CMD="ansible-playbook -i ${ROOTDIR}/ansible/environments/local"
-$ANSIBLE_CMD setup.yml
-$ANSIBLE_CMD couchdb.yml
-$ANSIBLE_CMD initdb.yml
-$ANSIBLE_CMD wipe.yml
-$ANSIBLE_CMD openwhisk.yml
-```
+### Using Your Image as an OpenWhisk Action
 
-Or you can use `wskdev` and create a soft link to the target ansible environment, for example:
-```
-ln -s ${ROOTDIR}/ansible/environments/local ${OPENWHISK_HOME}/ansible/environments/local-python
-wskdev fresh -t local-python
-```
+You can now use this image as an OpenWhisk action. For example, to use
+the image `actionloop-python-v3.7` as an action runtime, you would run
+the following command.
 
-Use the `docker` commands to tag the image and push it into your own Docker Hub account
-```
-docker tag whisk/python3action $user_prefix/python3action
-docker push $user_prefix/python3action
-```
-Then create the action using your image from dockerhub
 ```
-wsk action update myAction myAction.py --docker $user_prefix/python3action
+wsk action update myAction myAction.py --docker $DOCKER_USER/actionloop-python-v3.7
 ```
-The `$user_prefix` is usually your dockerhub user id.
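
For reference, the `myAction.py` in the command above could be as minimal as the following sketch (a hypothetical example; the runtime invokes the function named `main` by default and expects a dictionary as the result):

```
# myAction.py -- a minimal, hypothetical OpenWhisk Python action.
# The platform passes the action's parameters as a dict and expects
# a dict back.
def main(args):
    name = args.get("name", "stranger")
    return {"greeting": "Hello " + name + "!"}
```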
 
-### Testing
-Install dependencies from the root directory on $OPENWHISK_HOME repository
+## Test Runtimes
+
+There are suites of tests that are generic for all runtimes, and some that are specific to a runtime version.
+Running all of the tests takes two steps.
+
+First, you need to create an OpenWhisk snapshot release. Do this from your OpenWhisk home directory.
 ```
 ./gradlew install
 ```
 
-Using gradle for the ActionContainer tests you need to use a proxy if running on Mac, if Linux then don't use proxy options
-You can pass the flags `-Dhttp.proxyHost=localhost -Dhttp.proxyPort=3128` directly in gradle command.
-Or save in your `$HOME/.gradle/gradle.properties`
-```
-systemProp.http.proxyHost=localhost
-systemProp.http.proxyPort=3128
-```
-Using gradle to run all tests
-```
-./gradlew :tests:test
-```
-Using gradle to run some tests
+Now you can build and run the tests in this repository.
 ```
-./gradlew :tests:test --tests *ActionContainerTests*
+./gradlew tests:test
 ```
-Using IntelliJ:
-- Import project as gradle project.
-- Make sure working directory is root of the project/repo
-- Add the following Java VM properties in ScalaTests Run Configuration, easiest is to change the Defaults for all ScalaTests to use this VM properties
+
+Gradle allows you to selectively run tests. For example, the following
+command runs tests which match the given pattern and excludes all
+others.
 ```
--Dhttp.proxyHost=localhost
--Dhttp.proxyPort=3128
+./gradlew :tests:test --tests *ActionLoopContainerTests*
 ```
 
+## Python 3 AI Runtime
+This action runtime enables developers to create AI services with OpenWhisk. It comes with preinstalled libraries that are useful for running machine learning and deep learning inference. [Read more about this runtime here](./core/python3AiActionLoop).
+
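As a minimal sketch of what such an action can look like (hypothetical, and assuming `numpy` is among the preinstalled packages, as in the image described above), the bundled libraries can be imported directly with no zip packaging or virtualenv:

```
# A hypothetical action for the AI runtime: numpy is assumed to be
# preinstalled in the image, so the action ships no dependencies.
import numpy as np

def main(args):
    values = args.get("values", [1, 2, 3])
    arr = np.asarray(values, dtype=float)
    # Cast to plain floats so the result stays JSON-serializable.
    return {"mean": float(arr.mean()), "std": float(arr.std())}
```
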
+## Import Project into IntelliJ
+
+Follow these steps to import the project into your IntelliJ IDE.
+- Import the project as a Gradle project.
+- Make sure the working directory is the root of the project/repo.
diff --git a/core/python3Action/CHANGELOG.md b/core/python3Action/CHANGELOG.md
deleted file mode 100644
index 1f1a4ca..0000000
--- a/core/python3Action/CHANGELOG.md
+++ /dev/null
@@ -1,89 +0,0 @@
-<!--
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
--->
-
-# Python 3 OpenWhisk Runtime Container
-
-## 1.15.0
-  - Build proxy from 1.16.0 release of openwhisk-runtime-go
-  - Update to golang:1.15 and buster. (#90)
-  - Ambiguous name changed to python3ActionLoop (#89)
-  - Updated Python runtimes to use "Action Loop" Proxy with new async handshake (#82)
-
-## 1.14.0
-  - Update base image to openwhisk/dockerskeleton:1.14.0
-  - Support for __OW_ACTION_VERSION (openwhisk/4761)
-
-## 1.0.3
-Changes:
-  - Update base image to openwhisk/dockerskeleton:1.3.3
-
-## 1.0.2
-Changes:
-  - Update base image to openwhisk/dockerskeleton:1.3.2
-
-## 1.0.1
-Changes:
-  - Update base image to openwhisk/dockerskeleton:1.3.1
-
-## 1.0.0
-Initial release.
-
-Python version = 3.6.1
-
-- asn1crypto (0.23.0)
-- attrs (17.2.0)
-- Automat (0.6.0)
-- beautifulsoup4 (4.5.3)
-- cffi (1.11.1)
-- click (6.7)
-- constantly (15.1.0)
-- cryptography (2.0.3)
-- cssselect (1.0.1)
-- Flask (0.12)
-- gevent (1.2.1)
-- greenlet (0.4.12)
-- httplib2 (0.10.3)
-- idna (2.6)
-- incremental (17.5.0)
-- itsdangerous (0.24)
-- Jinja2 (2.9.6)
-- kafka-python (1.3.4)
-- lxml (3.7.3)
-- MarkupSafe (1.0)
-- parsel (1.2.0)
-- pip (9.0.1)
-- pyasn1 (0.3.7)
-- pyasn1-modules (0.1.4)
-- pycparser (2.18)
-- PyDispatcher (2.0.5)
-- pyOpenSSL (17.3.0)
-- python-dateutil (2.6.0)
-- queuelib (1.4.2)
-- requests (2.13.0)
-- Scrapy (1.3.3)
-- service-identity (17.0.0)
-- setuptools (36.5.0)
-- simplejson (3.10.0)
-- six (1.11.0)
-- Twisted (17.1.0)
-- virtualenv (15.1.0)
-- w3lib (1.18.0)
-- Werkzeug (0.12.2)
-- wheel (0.29.0)
-- zope.interface (4.4.3)
diff --git a/core/python3Action/Dockerfile b/core/python3Action/Dockerfile
deleted file mode 100644
index 4ac8cc2..0000000
--- a/core/python3Action/Dockerfile
+++ /dev/null
@@ -1,51 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Dockerfile for python actions, overrides and extends ActionRunner from actionProxy
-FROM openwhisk/dockerskeleton:1.14.0
-
-RUN apk add --no-cache \
-        bzip2-dev \
-        gcc \
-        libc-dev \
-        libxslt-dev \
-        libxml2-dev \
-        libffi-dev \
-        linux-headers \
-        openssl-dev
-
-# Install common modules for python
-RUN pip install \
-    beautifulsoup4==4.6.3 \
-    httplib2==0.11.3 \
-    kafka_python==1.4.3 \
-    lxml==4.2.5 \
-    python-dateutil==2.7.3 \
-    requests==2.19.1 \
-    scrapy==1.5.1 \
-    simplejson==3.16.0 \
-    virtualenv==16.0.0 \
-    twisted==18.7.0
-
-ENV FLASK_PROXY_PORT 8080
-
-RUN mkdir -p /pythonAction
-ADD pythonrunner.py /pythonAction/
-RUN rm -rf /action
-RUN mkdir /action
-
-CMD ["/bin/bash", "-c", "cd pythonAction && python -u pythonrunner.py"]
diff --git a/core/python3Action/build.gradle b/core/python3Action/build.gradle
deleted file mode 100644
index f120d86..0000000
--- a/core/python3Action/build.gradle
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-ext.dockerImageName = 'python3action'
-apply from: '../../gradle/docker.gradle'
diff --git a/core/python3Action/pythonrunner.py b/core/python3Action/pythonrunner.py
deleted file mode 100644
index 70df3fc..0000000
--- a/core/python3Action/pythonrunner.py
+++ /dev/null
@@ -1,100 +0,0 @@
-"""Executable Python script for running Python actions.
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-"""
-
-import os
-import sys
-import codecs
-import traceback
-sys.path.append('../actionProxy')
-from actionproxy import ActionRunner, main, setRunner
-
-
-class PythonRunner(ActionRunner):
-
-    def __init__(self):
-        ActionRunner.__init__(self, '/action/__main__.py')
-        self.fn = None
-        self.mainFn = 'main'
-        self.global_context = {}
-
-    def initCodeFromString(self, message):
-        # do nothing, defer to build step
-        return True
-
-    def build(self, message):
-        binary = message['binary'] if 'binary' in message else False
-        if not binary:
-            code = message['code']
-            filename = 'action'
-        elif os.path.isfile(self.source):
-            with codecs.open(self.source, 'r', 'utf-8') as m:
-                code = m.read()
-            workdir = os.path.dirname(self.source)
-            sys.path.insert(0, workdir)
-            os.chdir(workdir)
-        else:
-            sys.stderr.write('Zip file does not include ' + os.path.basename(self.source) + '\n')
-            return False
-
-        try:
-            filename = os.path.basename(self.source)
-            self.fn = compile(code, filename=filename, mode='exec')
-            if 'main' in message:
-                self.mainFn = message['main']
-
-            # if the directory 'virtualenv' is extracted out of a zip file
-            path_to_virtualenv = os.path.dirname(self.source) + '/virtualenv'
-            if os.path.isdir(path_to_virtualenv):
-                # activate the virtualenv using activate_this.py contained in the virtualenv
-                activate_this_file = path_to_virtualenv + '/bin/activate_this.py'
-                if os.path.exists(activate_this_file):
-                    with open(activate_this_file) as f:
-                        code = compile(f.read(), activate_this_file, 'exec')
-                        exec(code, dict(__file__=activate_this_file))
-                else:
-                    sys.stderr.write('Invalid virtualenv. Zip file does not include /virtualenv/bin/' + os.path.basename(activate_this_file) + '\n')
-                    return False
-            exec(self.fn, self.global_context)
-            return True
-        except Exception:
-            traceback.print_exc(file=sys.stderr, limit=0)
-            return False
-
-    def verify(self):
-        return self.fn is not None
-
-    def run(self, args, env):
-        result = None
-        try:
-            os.environ = env
-            self.global_context['param'] = args
-            exec('fun = %s(param)' % self.mainFn, self.global_context)
-            result = self.global_context['fun']
-        except Exception:
-            traceback.print_exc(file=sys.stderr)
-
-        if result and isinstance(result, dict):
-            return (200, result)
-        else:
-            return (502, {'error': 'The action did not return a dictionary.'})
-
-if __name__ == '__main__':
-    setRunner(PythonRunner())
-    main()
diff --git a/core/python3AiAction/Dockerfile b/core/python3AiAction/Dockerfile
deleted file mode 100644
index eae6c7c..0000000
--- a/core/python3AiAction/Dockerfile
+++ /dev/null
@@ -1,61 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Dockerfile for python AI actions, overrides and extends ActionRunner from actionProxy
-FROM tensorflow/tensorflow:1.11.0-py3
-
-ENV FLASK_PROXY_PORT 8080
-ENV PYTHONIOENCODING "UTF-8"
-
-RUN apt-get update && apt-get upgrade -y && apt-get install -y \
-        gcc \
-        libc-dev \
-        libxslt-dev \
-        libxml2-dev \
-        libffi-dev \
-        libssl-dev \
-        zip \
-        unzip \
-        vim \
-        && rm -rf /var/lib/apt/lists/*
-
-RUN apt-cache search linux-headers-generic
-
-# PyTorch
-RUN pip3 install http://download.pytorch.org/whl/cpu/torch-0.4.1-cp35-cp35m-linux_x86_64.whl \
-    && pip3 install torchvision==0.2.1
-# Caffe
-# RUN apt-get update && apt-get upgrade -y \
-#     && apt-get install -y \
-#     build-essential cmake git pkg-config \
-#     libprotobuf-dev libleveldb-dev libsnappy-dev libopencv-dev libhdf5-serial-dev protobuf-compiler \
-#     && apt-get install -y --no-install-recommends libboost-all-dev
-
-RUN curl -L https://downloads.rclone.org/rclone-current-linux-amd64.deb -o rclone.deb \
-    && dpkg -i rclone.deb \
-    && rm rclone.deb
-
-COPY requirements.txt requirements.txt
-RUN pip3 install --upgrade pip six && pip3 install --no-cache-dir -r requirements.txt
-
-RUN mkdir -p /actionProxy
-ADD https://raw.githubusercontent.com/apache/openwhisk-runtime-docker/dockerskeleton%401.3.3/core/actionProxy/actionproxy.py /actionProxy/actionproxy.py
-
-RUN mkdir -p /pythonAction
-COPY pythonrunner.py /pythonAction/pythonrunner.py
-
-CMD ["/bin/bash", "-c", "cd /pythonAction && python -u pythonrunner.py"]
diff --git a/core/python3AiAction/README.md b/core/python3AiAction/README.md
deleted file mode 100644
index 55684d8..0000000
--- a/core/python3AiAction/README.md
+++ /dev/null
@@ -1,85 +0,0 @@
-<!--
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
--->
-
-# AI Action
-
-This image contains libraries and frameworks useful for running AI Services.
-
-Bellow are the versions for the included libraries:
-
-| Image Version | Package | Notes |
-| ------------- | ------- | ----- |
-| 1.1.0      | Tensorflow 1.11.0, PyTorch 0.4.1 | Based on Ubuntu 16.04.5, Python 3.5.2.
-
-### Opening Notebooks
-
-This image has Jupyter Notebook installed. You may find useful to run quick Notebooks directly on the image which may run the actual code. To start Jupyter Notebook execute:
-
-```bash
-$ docker run -it -p 8888:8888 --rm --entrypoint jupyter-notebook openwhisk/python3aiaction  --notebook-dir=/notebooks --ip 0.0.0.0 --no-browser --allow-root
-```
-
-#### AI Action Sample
-
-To view an example with this AI Action check the [samples/smart-body-crop notebook](./samples/smart-body-crop/crop.ipynb) and follow the instructions.
-
-### 1.1.0 Details
-#### Available python packages
-
-| Package               | Version               |
-| --------------------- | --------------------- |
-| tensorboard           | 1.11.0                |
-| tensorflow            | 1.11.0                |
-| torch                 | 0.4.1                 |
-| torchvision           | 0.2.1                 |
-| scikit-learn          | 0.19.2                |
-| scipy                 | 1.1.0                 |
-| sklearn               | 0.0                   |
-| numpy                 | 1.15.2                |
-| pandas                | 0.23.4                |
-| Pillow                | 5.2.0                 |
-| Cython                | 0.28.5                |
-| ipykernel             | 4.9.0                 |
-| ipython               | 6.5.0                 |
-| ipywidgets            | 7.4.2                 |
-| jupyter               | 1.0.0                 |
-| jupyter-client        | 5.2.3                 |
-| jupyter-console       | 5.2.0                 |
-| jupyter-core          | 4.4.0                 |
-| Keras                 | 2.2.2                 |
-| Keras-Applications    | 1.0.4                 |
-| Keras-Preprocessing   | 1.0.2                 |
-| matplotlib            | 3.0.0                 |
-| notebook              | 5.7.0                 |
-| opencv-contrib-python | 3.4.2.17              |
-| protobuf              | 3.6.1                 |
-
-For a complete list execute:
-
-```bash
-$ docker run --rm --entrypoint pip openwhisk/python3aiaction list
-```
-
-#### Available Ubuntu packages
-
-For a complete list execute:
-
-```bash
-$ docker run --rm --entrypoint apt openwhisk/python3aiaction list --installed
-```
diff --git a/core/python3AiAction/build.gradle b/core/python3AiAction/build.gradle
deleted file mode 100644
index ea7d8ad..0000000
--- a/core/python3AiAction/build.gradle
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-ext.dockerImageName = 'python3aiaction'
-apply from: '../../gradle/docker.gradle'
diff --git a/core/python3AiAction/pythonrunner.py b/core/python3AiAction/pythonrunner.py
deleted file mode 100644
index 70df3fc..0000000
--- a/core/python3AiAction/pythonrunner.py
+++ /dev/null
@@ -1,100 +0,0 @@
-"""Executable Python script for running Python actions.
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-"""
-
-import os
-import sys
-import codecs
-import traceback
-sys.path.append('../actionProxy')
-from actionproxy import ActionRunner, main, setRunner
-
-
-class PythonRunner(ActionRunner):
-
-    def __init__(self):
-        ActionRunner.__init__(self, '/action/__main__.py')
-        self.fn = None
-        self.mainFn = 'main'
-        self.global_context = {}
-
-    def initCodeFromString(self, message):
-        # do nothing, defer to build step
-        return True
-
-    def build(self, message):
-        binary = message['binary'] if 'binary' in message else False
-        if not binary:
-            code = message['code']
-            filename = 'action'
-        elif os.path.isfile(self.source):
-            with codecs.open(self.source, 'r', 'utf-8') as m:
-                code = m.read()
-            workdir = os.path.dirname(self.source)
-            sys.path.insert(0, workdir)
-            os.chdir(workdir)
-        else:
-            sys.stderr.write('Zip file does not include ' + os.path.basename(self.source) + '\n')
-            return False
-
-        try:
-            filename = os.path.basename(self.source)
-            self.fn = compile(code, filename=filename, mode='exec')
-            if 'main' in message:
-                self.mainFn = message['main']
-
-            # if the directory 'virtualenv' is extracted out of a zip file
-            path_to_virtualenv = os.path.dirname(self.source) + '/virtualenv'
-            if os.path.isdir(path_to_virtualenv):
-                # activate the virtualenv using activate_this.py contained in the virtualenv
-                activate_this_file = path_to_virtualenv + '/bin/activate_this.py'
-                if os.path.exists(activate_this_file):
-                    with open(activate_this_file) as f:
-                        code = compile(f.read(), activate_this_file, 'exec')
-                        exec(code, dict(__file__=activate_this_file))
-                else:
-                    sys.stderr.write('Invalid virtualenv. Zip file does not include /virtualenv/bin/' + os.path.basename(activate_this_file) + '\n')
-                    return False
-            exec(self.fn, self.global_context)
-            return True
-        except Exception:
-            traceback.print_exc(file=sys.stderr, limit=0)
-            return False
-
-    def verify(self):
-        return self.fn is not None
-
-    def run(self, args, env):
-        result = None
-        try:
-            os.environ = env
-            self.global_context['param'] = args
-            exec('fun = %s(param)' % self.mainFn, self.global_context)
-            result = self.global_context['fun']
-        except Exception:
-            traceback.print_exc(file=sys.stderr)
-
-        if result and isinstance(result, dict):
-            return (200, result)
-        else:
-            return (502, {'error': 'The action did not return a dictionary.'})
-
-if __name__ == '__main__':
-    setRunner(PythonRunner())
-    main()
diff --git a/core/python3AiAction/requirements.txt b/core/python3AiAction/requirements.txt
deleted file mode 100644
index 88dd1da..0000000
--- a/core/python3AiAction/requirements.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-# Setup modules
-gevent == 1.2.2
-flask == 1.0.2
-
-# default available packages for python3action
-beautifulsoup4 == 4.6.3
-httplib2 == 0.11.3
-kafka_python == 1.4.3
-lxml == 4.2.4
-python-dateutil == 2.7.3
-requests == 2.19.1
-scrapy == 1.5.1
-simplejson == 3.16.0
-virtualenv == 16.0.0
-twisted == 18.7.0
-netifaces == 0.10.7
-# package to sync from a variety of cloud blob storage
-python-rclone == 0.0.2
-
-# more ML/DL packages
-keras == 2.2.2
-opencv-contrib-python == 3.4.2.17
-Cython == 0.28.5
-tools == 0.1.9
-scikit-image == 0.14.1
-
-nltk == 3.3
diff --git a/core/python3AiAction/samples/smart-body-crop/.gitignore b/core/python3AiAction/samples/smart-body-crop/.gitignore
deleted file mode 100644
index e5d0af6..0000000
--- a/core/python3AiAction/samples/smart-body-crop/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-models
-action.zip
-action_package
-.ipynb_checkpoints
-__pycache__
diff --git a/core/python3AiAction/samples/smart-body-crop/common.py b/core/python3AiAction/samples/smart-body-crop/common.py
deleted file mode 100644
index 4c6ece1..0000000
--- a/core/python3AiAction/samples/smart-body-crop/common.py
+++ /dev/null
@@ -1,332 +0,0 @@
-"""Executable Python script for running Python actions.
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-"""
-
-'''
-Some code is based on Ildoo Kim's code (https://github.com/ildoonet/tf-openpose) and https://gist.github.com/alesolano/b073d8ec9603246f766f9f15d002f4f4
-and derived from the OpenPose Library (https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/LICENSE)
-'''
-
-from collections import defaultdict
-from enum import Enum
-import math
-import numpy as np
-import itertools
-from scipy.ndimage.filters import maximum_filter
-from PIL import Image, ImageDraw
-
-
-class CocoPart(Enum):
-    Nose = 0
-    Neck = 1
-    RShoulder = 2
-    RElbow = 3
-    RWrist = 4
-    LShoulder = 5
-    LElbow = 6
-    LWrist = 7
-    RHip = 8
-    RKnee = 9
-    RAnkle = 10
-    LHip = 11
-    LKnee = 12
-    LAnkle = 13
-    REye = 14
-    LEye = 15
-    REar = 16
-    LEar = 17
-    Background = 18
-
-
-parts_dict = {'Nose': [0], 'Neck': [1], 'Shoulders': [2, 5], 'Elbows': [3, 6], 'Wrists': [
-    4, 7], 'Hips': [8, 11], 'Knees': [9, 12], 'Ankles': [10, 13], 'Eyes': [14, 15], 'Ears': [16, 17]}
-# parts_if_notfound_upper={'Eyes':'Ears','Ears':'Eyes','Nose':'Ears','Neck':'Nose','Shoulders':'Neck','Elbows':'Shoulders','Wrists':'Elbows','Hips':'Wrists#','Knees':'Hips'}
-# parts_if_notfound_lower=#{'Ears':'Nose','Nose':'Neck','Neck':'Shoulders','Shoulders':'Elbows','Elbows':'Wrists','Wrists':'Hips','Hips':'Knees',
-#                  'Knees':'Ankles','Ankles':'Knees'}
-body_parts_dict = {0: 'Eyes', 1: 'Ears', 2: 'Nose', 3: 'Neck', 4: 'Shoulders',
-                   5: 'Elbows', 6: 'Wrists', 7: 'Hips', 8: 'Knees', 9: 'Ankles'}
-
-CocoPairs = [
-    (1, 2), (1, 5), (2, 3), (3, 4), (5, 6), (6,
-                                             7), (1, 8), (8, 9), (9, 10), (1, 11),
-    (11, 12), (12, 13), (1, 0), (0, 14), (14,
-                                          16), (0, 15), (15, 17), (2, 16), (5, 17)
-]   # = 19
-CocoPairsRender = CocoPairs[:-2]
-CocoPairsNetwork = [
-    (12, 13), (20, 21), (14, 15), (16, 17), (22,
-                                             23), (24, 25), (0, 1), (2, 3), (4, 5),
-    (6, 7), (8, 9), (10, 11), (28, 29), (30, 31), (34,
-                                                   35), (32, 33), (36, 37), (18, 19), (26, 27)
-]  # = 19
-
-CocoColors = [(255, 0, 0), (255, 85, 0), (255, 170, 0), (255, 255, 0), (170, 255, 0), (85, 255, 0), (0, 255, 0),
-              (0, 255, 85), (0, 255, 170), (0, 255, 255), (0,
-                                                           170, 255), (0, 85, 255), (0, 0, 255), (85, 0, 255),
-              (170, 0, 255), (255, 0, 255), (255, 0, 170), (255, 0, 85)]
-
-
-NMS_Threshold = 0.1
-InterMinAbove_Threshold = 6
-Inter_Threashold = 0.1
-Min_Subset_Cnt = 4
-Min_Subset_Score = 0.8
-Max_Human = 96
-
-
-def human_conns_to_human_parts(human_conns, heatMat):
-    human_parts = defaultdict(lambda: None)
-    for conn in human_conns:
-        human_parts[conn['partIdx'][0]] = (
-            conn['partIdx'][0],  # part index
-            (conn['coord_p1'][0] / heatMat.shape[2], conn['coord_p1']
-             [1] / heatMat.shape[1]),  # relative coordinates
-            heatMat[conn['partIdx'][0], conn['coord_p1']
-                    [1], conn['coord_p1'][0]]  # score
-        )
-        human_parts[conn['partIdx'][1]] = (
-            conn['partIdx'][1],
-            (conn['coord_p2'][0] / heatMat.shape[2],
-             conn['coord_p2'][1] / heatMat.shape[1]),
-            heatMat[conn['partIdx'][1], conn['coord_p2']
-                    [1], conn['coord_p2'][0]]
-        )
-    return human_parts
-
-
-def non_max_suppression(heatmap, window_size=3, threshold=NMS_Threshold):
-    heatmap[heatmap < threshold] = 0  # set low values to 0
-    part_candidates = heatmap * \
-        (heatmap == maximum_filter(heatmap, footprint=np.ones((window_size, window_size))))
-    return part_candidates
-
-
-def estimate_pose(heatMat, pafMat):
-    if heatMat.shape[2] == 19:
-        # transform from [height, width, n_parts] to [n_parts, height, width]
-        heatMat = np.rollaxis(heatMat, 2, 0)
-    if pafMat.shape[2] == 38:
-        # transform from [height, width, 2*n_pairs] to [2*n_pairs, height, width]
-        pafMat = np.rollaxis(pafMat, 2, 0)
-
-    # reliability issue.
-    heatMat = heatMat - heatMat.min(axis=1).min(axis=1).reshape(19, 1, 1)
-    heatMat = heatMat - heatMat.min(axis=2).reshape(19, heatMat.shape[1], 1)
-
-    _NMS_Threshold = max(np.average(heatMat) * 4.0, NMS_Threshold)
-    _NMS_Threshold = min(_NMS_Threshold, 0.3)
-
-    coords = []  # for each part index, it stores coordinates of candidates
-    for heatmap in heatMat[:-1]:  # remove background
-        part_candidates = non_max_suppression(heatmap, 5, _NMS_Threshold)
-        coords.append(np.where(part_candidates >= _NMS_Threshold))
-
-    # all connections detected. no information about what humans they belong to
-    connection_all = []
-    for (idx1, idx2), (paf_x_idx, paf_y_idx) in zip(CocoPairs, CocoPairsNetwork):
-        connection = estimate_pose_pair(
-            coords, idx1, idx2, pafMat[paf_x_idx], pafMat[paf_y_idx])
-        connection_all.extend(connection)
-
-    conns_by_human = dict()
-    for idx, c in enumerate(connection_all):
-        # at first, all connections belong to different humans
-        conns_by_human['human_%d' % idx] = [c]
-
-    no_merge_cache = defaultdict(list)
-    empty_set = set()
-    while True:
-        is_merged = False
-        for h1, h2 in itertools.combinations(conns_by_human.keys(), 2):
-            if h1 == h2:
-                continue
-            if h2 in no_merge_cache[h1]:
-                continue
-            for c1, c2 in itertools.product(conns_by_human[h1], conns_by_human[h2]):
-                # if two humans share a part (same part idx and coordinates), merge those humans
-                if set(c1['uPartIdx']) & set(c2['uPartIdx']) != empty_set:
-                    is_merged = True
-                    # extend human1 connectios with human2 connections
-                    conns_by_human[h1].extend(conns_by_human[h2])
-                    conns_by_human.pop(h2)  # delete human2
-                    break
-            if is_merged:
-                no_merge_cache.pop(h1, None)
-                break
-            else:
-                no_merge_cache[h1].append(h2)
-
-        if not is_merged:  # if no more mergings are possible, then break
-            break
-
-    conns_by_human = {h: conns for (
-        h, conns) in conns_by_human.items() if len(conns) >= Min_Subset_Cnt}
-    conns_by_human = {h: conns for (h, conns) in conns_by_human.items() if max(
-        [conn['score'] for conn in conns]) >= Min_Subset_Score}
-
-    humans = [human_conns_to_human_parts(
-        human_conns, heatMat) for human_conns in conns_by_human.values()]
-    return humans
-
-
-def estimate_pose_pair(coords, partIdx1, partIdx2, pafMatX, pafMatY):
-    connection_temp = []  # all possible connections
-    peak_coord1, peak_coord2 = coords[partIdx1], coords[partIdx2]
-
-    for idx1, (y1, x1) in enumerate(zip(peak_coord1[0], peak_coord1[1])):
-        for idx2, (y2, x2) in enumerate(zip(peak_coord2[0], peak_coord2[1])):
-            score, count = get_score(x1, y1, x2, y2, pafMatX, pafMatY)
-            if (partIdx1, partIdx2) in [(2, 3), (3, 4), (5, 6), (6, 7)]:  # arms
-                if count < InterMinAbove_Threshold // 2 or score <= 0.0:
-                    continue
-            elif count < InterMinAbove_Threshold or score <= 0.0:
-                continue
-            connection_temp.append({
-                'score': score,
-                'coord_p1': (x1, y1),
-                'coord_p2': (x2, y2),
-                'idx': (idx1, idx2),  # connection candidate identifier
-                'partIdx': (partIdx1, partIdx2),
-                'uPartIdx': ('{}-{}-{}'.format(x1, y1, partIdx1), '{}-{}-{}'.format(x2, y2, partIdx2))
-            })
-
-    connection = []
-    used_idx1, used_idx2 = [], []
-    for conn_candidate in sorted(connection_temp, key=lambda x: x['score'], reverse=True):
-        if conn_candidate['idx'][0] in used_idx1 or conn_candidate['idx'][1] in used_idx2:
-            continue
-        connection.append(conn_candidate)
-        used_idx1.append(conn_candidate['idx'][0])
-        used_idx2.append(conn_candidate['idx'][1])
-
-    return connection
-
-
-def get_score(x1, y1, x2, y2, pafMatX, pafMatY):
-    num_inter = 10
-    dx, dy = x2 - x1, y2 - y1
-    normVec = math.sqrt(dx ** 2 + dy ** 2)
-
-    if normVec < 1e-4:
-        return 0.0, 0
-
-    vx, vy = dx / normVec, dy / normVec
-
-    xs = np.arange(
-        x1, x2, dx / num_inter) if x1 != x2 else np.full((num_inter, ), x1)
-    ys = np.arange(
-        y1, y2, dy / num_inter) if y1 != y2 else np.full((num_inter, ), y1)
-    xs = (xs + 0.5).astype(np.int8)
-    ys = (ys + 0.5).astype(np.int8)
-
-    # without vectorization
-    pafXs = np.zeros(num_inter)
-    pafYs = np.zeros(num_inter)
-    for idx, (mx, my) in enumerate(zip(xs, ys)):
-        pafXs[idx] = pafMatX[my][mx]
-        pafYs[idx] = pafMatY[my][mx]
-
-    local_scores = pafXs * vx + pafYs * vy
-    thidxs = local_scores > Inter_Threashold
-
-    return sum(local_scores * thidxs), sum(thidxs)
-
-
-def draw_humans(img1_raw, human_list):
-    img = np.asarray(img1_raw)
-    img_copied = np.copy(img)
-    image_h, image_w = img_copied.shape[:2]
-    centers = {}
-    c = 10
-    for human in human_list:
-        part_idxs = human.keys()
-
-        # draw point
-        draw = ImageDraw.Draw(img1_raw)
-        for i in range(CocoPart.Background.value):
-            if i not in part_idxs:
-                continue
-            part_coord = human[i][1]
-            center = (int(part_coord[0] * image_w + 0.5),
-                      int(part_coord[1] * image_h + 0.5))
-            centers[i] = center
-            bbox = (center[0] - c, center[1] - c, center[0] + c, center[1] + c)
-            draw.ellipse(bbox, fill=CocoColors[i])
-
-        # draw line
-        ctr = 1
-        for pair_order, pair in enumerate(CocoPairsRender):
-            if pair[0] not in part_idxs or pair[1] not in part_idxs:
-                continue
-            draw.line((centers[pair[0]][0], centers[pair[0]][1], centers[pair[1]]
-                       [0], centers[pair[1]][1]), fill=CocoColors[pair_order], width=5)
-    img1_raw = np.asarray(img1_raw)
-    del draw
-    return img1_raw
-
-
-def crop_image(img, humans_list, upper_body, lower_body):
-    upper_coord = 0.0
-    upper_coord_x = 0.0
-    lower_coord = 0.0
-    lower_coord_x = 0.0
-
-    img = np.asarray(img)
-    image_h, image_w = img.shape[:2]
-
-    if upper_body == 'Ankles' or lower_body == 'Eyes':
-        raise NameError('Body parts not consistent')
-
-    for human in humans_list:
-        parts = human.keys()
-        inte = parts_dict[upper_body]  # could be [1] or [2,3]
-
-        if upper_body == 'Nose' or upper_body == 'Neck':
-            upper_coord = human[inte[0]][1][1]  # interested only in heights.
-            upper_coord_x = human[inte[0]][1][0]
-        else:
-            upper_coord = (human[inte[0]][1][1] + human[inte[1]][1][1])/2
-            upper_coord_x = (human[inte[0]][1][0] + human[inte[1]][1][0])/2
-
-        inte = parts_dict[lower_body]
-        if lower_body == 'Nose' or lower_body == 'Neck':
-            lower_coord = human[inte[0]][1][1]  # interested only in heights.
-            lower_coord_x = human[inte[0]][1][0]
-        else:
-            lower_coord = (human[inte[0]][1][1] + human[inte[1]][1][1])/2
-            lower_coord_x = (human[inte[0]][1][0] + human[inte[1]][1][0])/2
-
-    image_h_u = int(upper_coord * image_h)
-    image_h_l = int(lower_coord * image_h)
-
-    image_w_left = int(upper_coord_x * image_w)
-    image_w_right = int(lower_coord_x * image_w)
-    aspect_ratio = image_h / image_w
-    image_w = int((image_w_left + image_w_right)/2)
-
-    img = img[image_h_u:image_h_l]
-    wid = int((img.shape[0]/aspect_ratio)/2)
-    img = img.transpose(1, 0, 2)
-    img = img[image_w-2*wid:image_w+2*wid]
-    img = img.transpose(1, 0, 2)
-
-    crop_position = (image_w-2*wid, image_h_u)
-    crop_size = (img.shape[1], img.shape[0])
-
-    return img, crop_position, crop_size
diff --git a/core/python3AiAction/samples/smart-body-crop/crop.ipynb b/core/python3AiAction/samples/smart-body-crop/crop.ipynb
deleted file mode 100644
index 3ea6633..0000000
--- a/core/python3AiAction/samples/smart-body-crop/crop.ipynb
+++ /dev/null
@@ -1,872 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# AI Action example: Smart Body Crop "
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "This notebook illustrates how a ML engineer develops an algorithm and deploys it in a serverless environment directly from the notebook itself. \n",
-    "\n",
-    "To make it faster to run, the training is skipped. This example reuses a pre-trained OpenPose model to identify a person in a picture, and then crops the body to highlight the desired clothing item.\n",
-    "\n",
-    "### Running the notebook locally\n",
-    "\n",
-    "Simply execute:\n",
-    "        \n",
-    "        $ docker run -it -p 8888:8888 -e OPENWHISK_AUTH=`cat ~/.wskprops | grep ^AUTH= | awk -F= '{print $2}'` -e OPENWHISK_APIHOST=`cat ~/.wskprops | grep ^APIHOST= | awk -F= '{print $2}'` --rm -v `pwd`:/notebooks/sf  --entrypoint jupyter-notebook adobeapiplatform/openwhisk-python3aiaction:0.11.0  --notebook-dir=/notebooks --ip 0.0.0.0 --no-browser --allow-root\n",
-    "\n",
-    "> This command reads the local `~/.wskprops` and uses the Apache OpenWhisk credentials within."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 1,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from matplotlib import pyplot as plt\n",
-    "import matplotlib.patches as patches\n",
-    "\n",
-    "%matplotlib inline\n",
-    "from inference import SmartBodyCrop\n",
-    "from PIL import Image\n",
-    "import numpy as np"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 2,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "#model_url = \"https://s3.amazonaws.com/rt-dev-public-models/openpose/2dw1oz9l9hi9avg/optimized_openpose.pb\"\n",
-    "model_url = \"models/optimized_openpose.pb\"\n",
-    "inf = SmartBodyCrop(model_url = model_url)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Download the deep learning (open pose) model\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 4,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current\n",
-      "                                 Dload  Upload   Total   Spent    Left  Speed\n",
-      "  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0\n",
-      "  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0\n",
-      "100  1204    0  1204    0     0    983      0 --:--:--  0:00:01 --:--:-- 1175k\n",
-      "100  199M  100  199M    0     0  13.8M      0  0:00:14  0:00:14 --:--:-- 19.5M\n"
-     ]
-    }
-   ],
-   "source": [
-    "!mkdir -p models\n",
-    "# Comment the line bellow downloading the model, once you have it locally.\n",
-    "!curl -L https://www.dropbox.com/s/2dw1oz9l9hi9avg/optimized_openpose.pb -o models/optimized_openpose.pb"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 23,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "245\n"
-     ]
-    },
-    {
-     "data": {
-      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAOAAAAEyCAYAAADjrNxxAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4wLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvqOYd8AAAIABJREFUeJzsvWmMpdlZJvicG3dfYs3IiMi1sjbXYuPyboQFRUMzNBpsDxbQRqLNDMJIuH+MhBCeXyCNWuIH02DRqC1bTbctD7SRcYMFlqGxaOzBdrnKlKtcpF1bZlZmRmVmrDfi7us3PyKfE+994z3f/W5UFb41k690db/lfGd/3u1sLooi3KE7dIe+P5T6fmfgDt2h/z/THQDeoTv0faQ7ALxDd+j7SHcAeIfu0PeR7gDwDt2h7yPdAeAdukPfR3rNAOic+0nn3LPOuReccx99rdK5 [...]
-      "text/plain": [
-       "<Figure size 720x360 with 1 Axes>"
-      ]
-     },
-     "metadata": {
-      "needs_background": "light"
-     },
-     "output_type": "display_data"
-    }
-   ],
-   "source": [
-    "image = Image.open('fashion-men-1.jpg')\n",
-    "image.thumbnail( (368,368) )\n",
-    "print(image.size[0])\n",
-    "image = np.asarray(image)\n",
-    "plt.figure(figsize = (10,5))\n",
-    "plt.imshow(image)\n",
-    "plt.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## How algorithm sees the body"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 18,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "image loaded in:      0.1135\n",
-      "Loading the model...\n",
-      "model imported in :     1.5586\n",
-      "tf session executed in:      5.4359\n",
-      "pose estimated in:      0.0048\n"
-     ]
-    },
-    {
-     "data": {
-      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAOAAAAEyCAYAAADjrNxxAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4wLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvqOYd8AAAIABJREFUeJzsvWmQZNd1Jvbd3NfKWru7eu/GwgaanAYFbhIgCeDikRRkkBwZrSXCQ40miLFFWZKt8Ij8YVPhiJmQ7bHGntHEhKkwNaRojgTQJqUQGRQXU9JoOFxAgqBALASIbnR3dXdV15JVWZV75vOPrHPrvJPn3vdedYNMhPtEZOR799393u+cc8/dTBAEuE236Tb9aCj1o87AbbpN/3+m2wC8TbfpR0i3AXibbtOPkG4D8Dbdph8h3QbgbbpNP0K6DcDbdJt+hPSKAdAY8zPGmOeN [...]
-      "text/plain": [
-       "<Figure size 720x360 with 1 Axes>"
-      ]
-     },
-     "metadata": {
-      "needs_background": "light"
-     },
-     "output_type": "display_data"
-    }
-   ],
-   "source": [
-    "x = inf.detect_parts('fashion-men-1.jpg')\n",
-    "#x = inf.detect_parts('https://cdn.shopify.com/s/files/1/1970/6605/products/Pioneer-Camp-2017-spring-new-fashion-men-shirt-long-sleeve-brand-clothing-quality-cotton-soft-shirt_e262fa2c-a279-4190-9cf7-707982189e9e.jpg?v=1501310825')\n",
-    "plt.figure(figsize=(10,5))\n",
-    "plt.imshow(x)\n",
-    "plt.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Bodycrop based on detected body parts"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 20,
-   "metadata": {
-    "scrolled": false
-   },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "image (3840x5760) loaded in:      0.1047\n",
-      "Loading the model...\n",
-      "model imported in :     1.4202\n",
-      "tf session executed in:      5.4498\n",
-      "pose estimated in:      0.0051\n",
-      "image cropped in:      0.0002\n"
-     ]
-    },
-    {
-     "data": {
-      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAUEAAAD8CAYAAADpLRYuAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4wLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvqOYd8AAAIABJREFUeJztnXuwbVdV5n+TG94Y8uTm5t48LiRFKjwSrolEiYAGFWyUUCKiFk0LXfnHRrSsUmj/sLrKKttqS6CraLujIHRXikdDeAi0PCKUUqUhSScGSEhIbl73ksclD0BQIbL6j3O+s8ceZ6x55lp7n3327T2+qlNn773WmnOuudZe+xtjfGPM0nUdiUQisap4zE4PIJFIJHYS+RBMJBIrjXwIJhKJlUY+BBOJxEojH4KJRGKlkQ/BRCKx0siHYCKRWGnM9BAspby0lHJLKeW2Usqb [...]
-      "text/plain": [
-       "<Figure size 576x288 with 1 Axes>"
-      ]
-     },
-     "metadata": {
-      "needs_background": "light"
-     },
-     "output_type": "display_data"
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "CPU times: user 8.79 s, sys: 1.87 s, total: 10.7 s\n",
-      "Wall time: 5.78 s\n"
-     ]
-    }
-   ],
-   "source": [
-    "%%time\n",
-    "x, coordinates, imgpath = inf.infer('fashion-men-1.jpg','Eyes','Hips')\n",
-    "plt.figure(figsize = (8,4))\n",
-    "plt.imshow(x)\n",
-    "plt.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Draw the crop coordinates on the original image"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 22,
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAZwAAAJCCAYAAAD0nXH7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4wLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvqOYd8AAAIABJREFUeJzsvUtzXNl17/k/efKdCeQDCYB4EeCbVWSV6mFVWQ7Zcjh05Q5PenpveODZjXBEf4D+Av0l7syzjh560OEeeOCwwrYsyVKpqshiFd8EQbwTyAfynacHqd/KladQKqo7usRw40QwSAKZ5+yz93r813+tvXYQRZEurovr4rq4Lq6L6//rK/GHHsDFdXFdXBfXxfX/j+vC4VxcF9fFdXFdXN/JdeFwLq6L6+K6uC6u7+S6cDgX18V1cV1cF9d3cl04nIvr4rq4Lq6L6zu5LhzO [...]
-      "text/plain": [
-       "<Figure size 1008x720 with 1 Axes>"
-      ]
-     },
-     "metadata": {
-      "needs_background": "light"
-     },
-     "output_type": "display_data"
-    }
-   ],
-   "source": [
-    "img = Image.open('fashion-men-1.jpg')\n",
-    "\n",
-    "# Create figure and axes\n",
-    "fig,ax = plt.subplots(1,figsize=(14,10))\n",
-    "ax.imshow(img)\n",
-    "\n",
-    "# Create a Rectangle patch\n",
-    "rect = patches.Rectangle(\n",
-    "    (coordinates.get('x'),coordinates.get('y')),\n",
-    "    coordinates.get('width'),coordinates.get('height'),\n",
-    "    linewidth = 3, \n",
-    "    edgecolor = 'r',\n",
-    "    facecolor = 'none')\n",
-    "\n",
-    "# Add the patch to the Axes\n",
-    "ax.add_patch(rect)\n",
-    "plt.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Test with a remote image "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 24,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "image downloaded in :     0.3233\n",
-      "image (2192x4299) loaded in:      0.4292\n",
-      "Loading the model...\n",
-      "model imported in :     1.6012\n",
-      "tf session executed in:      5.5260\n",
-      "pose estimated in:      0.0030\n",
-      "image cropped in:      0.0001\n"
-     ]
-    },
-    {
-     "data": {
-      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQUAAAD8CAYAAAB+fLH0AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4wLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvqOYd8AAAIABJREFUeJzsvVvMbUt2HvRV1byuy3/b971Pn3O63afdbZu0jU0sgRRFWCgQLIwQskiQ5YClfgIFAcIOT7wgmRfA4iFSi4CMFCkJYGEeokTIOFEcQtPdtsGOu0/36XPfl7Mv/21d56Vq8FBjVNVc//r35VzaO9Ia0t5r/WvOWbPmnDWrxvjGN8ZQRISd7GQnOxHRf9od2MlOdvJyyW5S2MlOdjKQ3aSwk53sZCC7SWEnO9nJQHaTwk52spOB7CaFnexkJwP5zCYFpdS/qpR6Uyn1llLq [...]
-      "text/plain": [
-       "<Figure size 576x288 with 1 Axes>"
-      ]
-     },
-     "metadata": {
-      "needs_background": "light"
-     },
-     "output_type": "display_data"
-    }
-   ],
-   "source": [
-    "# https://i.pinimg.com/736x/eb/61/fa/eb61fa047dcd0a20001392c13da93709--mens-fashion-blog-mens-fashion-styles.jpg\n",
-    "# 2192x4299 - https://i.pinimg.com/originals/9c/96/87/9c968732595e965619ef7b0b7e4807e0.jpg\n",
-    "x, coordinates, imgpath = inf.infer('https://i.pinimg.com/originals/9c/96/87/9c968732595e965619ef7b0b7e4807e0.jpg',\n",
-    "                           'Eyes',\n",
-    "                           'Hips')\n",
-    "plt.figure(figsize = (8,4))\n",
-    "plt.imshow(x)\n",
-    "plt.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# Deploy the algorithm as a function"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## 1. Write the function\n",
-    "\n",
-    "The function expects the following parameters as input:\n",
-    "* `model_url` - the location of the model\n",
-    "* `image` - the image location. It can be given as a request object, or a location string (provided no extra authorization headers are required to read the file). I.e. \n",
-    "  ```javascript\n",
-    "  {\n",
-    "      uri: \"https://...\",\n",
-    "      headers: {\n",
-    "          \"Authorization\": \"Bearer ...\",\n",
-    "      }\n",
-    "  }\n",
-    "  ```\n",
-    "* `from_upper` - the upper part of the body to start the crop from. I.e. _Eyes_, _Nose_, _Neck_\n",
-    "* `to_lower` - the lower part of the body to stop the crop at. I.e. _Hip_, _Knees_, _Ankles_\n",
-    "\n",
-    "For flexibility, this function returns only the information needed to crop the body. I.e. \n",
-    "```javascript\n",
-    "{\n",
-    "  X: 100,\n",
-    "  Y: 100,\n",
-    "  W: 200,\n",
-    "  H: 100\n",
-    "}\n",
-    "```\n",
-    "\n",
-    "On the premise that the cropped image may exceed the max response size of an action, the actual cropping may be performed by another action, which should upload the cropped image to a blob storage. Bellow is the code that can crop the image based on the coordinates \n",
-    "\n",
-    "```python\n",
-    "from PIL import Image\n",
-    "import os\n",
-    "\n",
-    "img_crop = Image.open(local_image_path)\n",
-    "\n",
-    "img_crop = img_crop.crop(\n",
-    "    (coordinates.get('X'),                              # left \n",
-    "     coordinates.get('Y'),                              # upper\n",
-    "     coordinates.get('X') + coordinates.get('W'),       # right\n",
-    "     coordinates.get('Y') + coordinates.get('H')))      # lower\n",
-    "     \n",
-    "img_crop_filename = (os.environ.get('__OW_ACTIVATION_ID') or '_local') + \".jpg\"\n",
-    "img_crop_path = '/tmp/' + img_crop_filename\n",
-    "\n",
-    "img_crop.save(img_crop_path, \"JPEG\", optimize=True)\n",
-    "\n",
-    "print(\"The cropped image has been saved in:\", img_crop_path)\n",
-    "```"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 25,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Overwriting smart_body_crop.py\n"
-     ]
-    }
-   ],
-   "source": [
-    "%%writefile smart_body_crop.py\n",
-    "\n",
-    "from inference import SmartBodyCrop\n",
-    "import os\n",
-    "\n",
-    "def action_handler(args):\n",
-    "    print(args)\n",
-    "    model_url = args.get('model_url')\n",
-    "    \n",
-    "    body_crop = SmartBodyCrop(model_url = model_url)\n",
-    "    print(\"SmartBodyCrop.initialized=\", SmartBodyCrop.initialized)\n",
-    "    \n",
-    "    crop_img, crop_coordinates, local_image_path = body_crop.infer(\n",
-    "                    args.get('image'), \n",
-    "                    args.get('from_upper'), \n",
-    "                    args.get('to_lower'))\n",
-    "    \n",
-    "    # if you want to crop the image, you can insert the code demonstrated above\n",
-    "    # then return the image as a base64 encoded string in the response body\n",
-    "    \n",
-    "    return {\n",
-    "        'X': crop_coordinates.get('x'),\n",
-    "        'Y': crop_coordinates.get('y'),\n",
-    "        'W': crop_coordinates.get('width'),\n",
-    "        'H': crop_coordinates.get('height')\n",
-    "    }\n",
-    "    "
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Test the function locally"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 26,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "{'model_url': 'models/optimized_openpose.pb', 'image': 'https://i.pinimg.com/originals/9c/96/87/9c968732595e965619ef7b0b7e4807e0.jpg', 'from_upper': 'Eyes', 'to_lower': 'Elbows'}\n",
-      "SmartBodyCrop.initialized= False\n",
-      "image downloaded in :     0.1114\n",
-      "image (2192x4299) loaded in:      0.2350\n",
-      "Loading the model...\n",
-      "model imported in :     1.5238\n",
-      "tf session executed in:      5.0700\n",
-      "pose estimated in:      0.0056\n",
-      "image cropped in:      0.0002\n",
-      "{'H': 1028.0217391304348, 'Y': 467.2826086956522, 'W': 1031.5294117647059, 'X': 550.9304812834225}\n",
-      "CPU times: user 6.18 s, sys: 1.88 s, total: 8.06 s\n",
-      "Wall time: 5.35 s\n"
-     ]
-    }
-   ],
-   "source": [
-    "%%time\n",
-    "from smart_body_crop import action_handler\n",
-    "action_response = action_handler({ \n",
-    "    'model_url': model_url,\n",
-    "    'image': \"https://i.pinimg.com/originals/9c/96/87/9c968732595e965619ef7b0b7e4807e0.jpg\",\n",
-    "    'from_upper': 'Eyes',\n",
-    "    'to_lower': 'Elbows'})\n",
-    "\n",
-    "print(action_response)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Verify that the returned coordinates are correct"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 27,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "image downloaded in :     0.1307\n",
-      "(2192, 4299)\n"
-     ]
-    },
-    {
-     "data": {
-      "image/png": "iVBORw0KGgoAAAANSUhEUgAAANkAAADGCAYAAABfPiU4AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4wLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvqOYd8AAAIABJREFUeJzsvdmvbNl93/dZa+255jrzPXfq2+yBTYqjJWowHUGSJVuwID8EgoIgsIMAfkj8ECAPcf4DI0gCGwkgQEEerCSIbcEZnEhJ4EgiZIkmKVFmk+xu9u3ue2/f6cx1atzjGvKwa9epc/uSajbFVoO4P6Bw6uzau2rvtdZv/v5+SzjneEbP6Bn98Ej+Zd/AM3pGP+r0jMme0TP6IdMzJntGz+iHTM+Y7Bk9ox8yPWOyZ/SMfsj0jMme0TP6IdOHzmRCiL8hhHhTCPG2EOIffNi/ [...]
-      "text/plain": [
-       "<Figure size 504x216 with 1 Axes>"
-      ]
-     },
-     "metadata": {
-      "needs_background": "light"
-     },
-     "output_type": "display_data"
-    }
-   ],
-   "source": [
-    "imgpath = inf._download_image('https://i.pinimg.com/originals/9c/96/87/9c968732595e965619ef7b0b7e4807e0.jpg')\n",
-    "image = Image.open(imgpath)\n",
-    "print(image.size)\n",
-    "\n",
-    "img_crop = image.crop(\n",
-    "    (action_response.get('X'),                              # left \n",
-    "     action_response.get('Y'),                              # upper\n",
-    "     action_response.get('X') + action_response.get('W'),       # right\n",
-    "     action_response.get('Y') + action_response.get('H')))      # lower\n",
-    "\n",
-    "image = np.asarray(img_crop)\n",
-    "plt.figure(figsize = (7,3))\n",
-    "plt.imshow(image)\n",
-    "plt.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## 2. Configure Apache OpenWhisk as the FaaS Provider "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 28,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Installing wsk CLI ...\n",
-      "wsk\n",
-      "NOTICE.txt\n",
-      "README.md\n",
-      "LICENSE.txt\n"
-     ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current\n",
-      "                                 Dload  Upload   Total   Spent    Left  Speed\n",
-      "\r",
-      "  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0\r",
-      "  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0\r",
-      "100   626    0   626    0     0   1501      0 --:--:-- --:--:-- --:--:--  1538\n",
-      "\r",
-      "  3 3845k    3  135k    0     0   115k      0  0:00:33  0:00:01  0:00:32  115k\r",
-      " 58 3845k   58 2260k    0     0  1050k      0  0:00:03  0:00:02  0:00:01 2172k\r",
-      "100 3845k  100 3845k    0     0  1344k      0  0:00:02  0:00:02 --:--:-- 2202k\n"
-     ]
-    }
-   ],
-   "source": [
-    "%%bash\n",
-    "which wsk && exit\n",
-    "echo \"Installing wsk CLI ...\"\n",
-    "curl -L https://github.com/apache/incubator-openwhisk-cli/releases/download/latest/OpenWhisk_CLI-latest-linux-amd64.tgz -o /tmp/wsk.tgz \n",
-    "tar xvfz /tmp/wsk.tgz -C /tmp/\n",
-    "mv /tmp/wsk /usr/local/bin"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Configure Apache OpenWhisk credentials\n",
-    "\n",
-    "Use `OPENWHISK_AUTH` and `OPENWHISK_APIHOST` environment variables."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 29,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from pathlib import Path\n",
-    "import os\n",
-    "home = str(Path.home())\n",
-    "file = open(home + \"/.wskprops\",\"w\") \n",
-    "file.write('AUTH=' + os.environ.get('OPENWHISK_AUTH') + \"\\n\")\n",
-    "file.write('APIHOST=' + os.environ.get('OPENWHISK_APIHOST') + \"\\n\")\n",
-    "file.close()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## 3. Deploy the function"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "The function must ZIP the other dependent python scripts used to train the model. The action code must be placed in a file called `__main__.py`."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 30,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "  adding: smart_body_crop.py (deflated 58%)\n",
-      "  adding: common.py (deflated 68%)\n",
-      "  adding: inference.py (deflated 73%)\n",
-      "  adding: __main__.py (deflated 58%)\n"
-     ]
-    }
-   ],
-   "source": [
-    "%%bash\n",
-    "mkdir -p action_package\n",
-    "\n",
-    "cp smart_body_crop.py action_package/__main__.py\n",
-    "cp *.py action_package/\n",
-    "cd action_package && zip -9 -r ../action.zip ./"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 31,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "model_url = \"https://s3.amazonaws.com/rt-dev-public-models/openpose/2dw1oz9l9hi9avg/optimized_openpose.pb\""
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 32,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "ok: updated action smart_body_crop\n"
-     ]
-    }
-   ],
-   "source": [
-    "%%bash -s \"$model_url\"\n",
-    "\n",
-    "wsk action update smart_body_crop action.zip --main action_handler  \\\n",
-    "    --param model_url \"$1\" \\\n",
-    "    --param from_upper Eyes \\\n",
-    "    --param to_lower Hips \\\n",
-    "    --memory 3891 \\\n",
-    "    --docker adobeapiplatform/openwhisk-python3aiaction:0.11.0"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 33,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# !wsk action get smart_body_crop"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## 4. Invoke the function"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 34,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\u001b[32mok:\u001b[0m invoked /\u001b[1m_\u001b[0m/\u001b[1msmart_body_crop\u001b[0m with id \u001b[1m6c1536170686492a9536170686692a59\u001b[0m\r\n"
-     ]
-    }
-   ],
-   "source": [
-    "!wsk action invoke smart_body_crop --param image \"https://i.pinimg.com/originals/9c/96/87/9c968732595e965619ef7b0b7e4807e0.jpg\" \\\n",
-    "  --param from_upper Eyes --param to_lower Elbows\n",
-    "# !wsk action invoke smart_body_crop --param image \"https://i.pinimg.com/236x/17/1c/a6/171ca6b06111529aa6f10b1f4e418339--style-men-my-style.jpg\" \\\n",
-    "#   --param from_upper Eyes --param to_lower Elbows"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Note on first run \n",
-    "On initial run the function has to:\n",
-    "* download the model\n",
-    "* initialize tensorflow \n",
-    "\n",
-    "These steps will take a few seconds."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 35,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\u001b[32mok:\u001b[0m got activation \u001b[1m6c1536170686492a9536170686692a59\u001b[0m\r\n",
-      "{\r\n",
-      "    \"namespace\": \"bladerunner-test\",\r\n",
-      "    \"name\": \"smart_body_crop\",\r\n",
-      "    \"version\": \"0.0.20\",\r\n",
-      "    \"subject\": \"bladerunner-test\",\r\n",
-      "    \"activationId\": \"6c1536170686492a9536170686692a59\",\r\n",
-      "    \"start\": 1545333346300,\r\n",
-      "    \"end\": 1545333353046,\r\n",
-      "    \"duration\": 6746,\r\n",
-      "    \"response\": {\r\n",
-      "        \"status\": \"success\",\r\n",
-      "        \"statusCode\": 0,\r\n",
-      "        \"success\": true,\r\n",
-      "        \"result\": {\r\n",
-      "            \"H\": 1028.0217391304348,\r\n",
-      "            \"W\": 1031.5294117647059,\r\n",
-      "            \"X\": 550.9304812834225,\r\n",
-      "            \"Y\": 467.2826086956522\r\n",
-      "        }\r\n",
-      "    },\r\n",
-      "    \"logs\": [],\r\n",
-      "    \"annotations\": [\r\n",
-      "        {\r\n",
-      "            \"key\": \"path\",\r\n",
-      "            \"value\": \"bladerunner-test/smart_body_crop\"\r\n",
-      "        },\r\n",
-      "        {\r\n",
-      "            \"key\": \"waitTime\",\r\n",
-      "            \"value\": 2787\r\n",
-      "        },\r\n",
-      "        {\r\n",
-      "            \"key\": \"kind\",\r\n",
-      "            \"value\": \"blackbox\"\r\n",
-      "        },\r\n",
-      "        {\r\n",
-      "            \"key\": \"limits\",\r\n",
-      "            \"value\": {\r\n",
-      "                \"concurrency\": 1,\r\n",
-      "                \"logs\": 10,\r\n",
-      "                \"memory\": 3891,\r\n",
-      "                \"timeout\": 60000\r\n",
-      "            }\r\n",
-      "        },\r\n",
-      "        {\r\n",
-      "            \"key\": \"initTime\",\r\n",
-      "            \"value\": 19\r\n",
-      "        }\r\n",
-      "    ],\r\n",
-      "    \"publish\": false\r\n",
-      "}\r\n"
-     ]
-    }
-   ],
-   "source": [
-    "!wsk activation get 6c1536170686492a9536170686692a59"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Invoke the action again\n",
-    "\n",
-    "This time it should respond much faster as it has been pre-warmed."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 36,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\u001b[32mok:\u001b[0m invoked /\u001b[1m_\u001b[0m/\u001b[1msmart_body_crop\u001b[0m with id \u001b[1mfd729b5d47e6415ab29b5d47e6915aa9\u001b[0m\n",
-      "{\n",
-      "    \"activationId\": \"fd729b5d47e6415ab29b5d47e6915aa9\",\n",
-      "    \"annotations\": [\n",
-      "        {\n",
-      "            \"key\": \"limits\",\n",
-      "            \"value\": {\n",
-      "                \"concurrency\": 1,\n",
-      "                \"logs\": 10,\n",
-      "                \"memory\": 3891,\n",
-      "                \"timeout\": 60000\n",
-      "            }\n",
-      "        },\n",
-      "        {\n",
-      "            \"key\": \"path\",\n",
-      "            \"value\": \"bladerunner-test/smart_body_crop\"\n",
-      "        },\n",
-      "        {\n",
-      "            \"key\": \"kind\",\n",
-      "            \"value\": \"blackbox\"\n",
-      "        },\n",
-      "        {\n",
-      "            \"key\": \"waitTime\",\n",
-      "            \"value\": 6\n",
-      "        }\n",
-      "    ],\n",
-      "    \"duration\": 2160,\n",
-      "    \"end\": 1545333364902,\n",
-      "    \"logs\": [],\n",
-      "    \"name\": \"smart_body_crop\",\n",
-      "    \"namespace\": \"bladerunner-test\",\n",
-      "    \"publish\": false,\n",
-      "    \"response\": {\n",
-      "        \"result\": {\n",
-      "            \"H\": 1028.0217391304348,\n",
-      "            \"W\": 1031.5294117647059,\n",
-      "            \"X\": 550.9304812834225,\n",
-      "            \"Y\": 467.2826086956522\n",
-      "        },\n",
-      "        \"status\": \"success\",\n",
-      "        \"success\": true\n",
-      "    },\n",
-      "    \"start\": 1545333362742,\n",
-      "    \"subject\": \"bladerunner-test\",\n",
-      "    \"version\": \"0.0.20\"\n",
-      "}\n",
-      "CPU times: user 100 ms, sys: 140 ms, total: 240 ms\n",
-      "Wall time: 3.42 s\n"
-     ]
-    }
-   ],
-   "source": [
-    "%%time\n",
-    "!wsk action invoke smart_body_crop --param image \"https://i.pinimg.com/originals/9c/96/87/9c968732595e965619ef7b0b7e4807e0.jpg\" \\\n",
-    "  --param from_upper Eyes --param to_lower Elbows -b"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.5.2"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
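The notebook above suggests performing the actual crop in a follow-up action that
returns the image base64-encoded in the response body. Below is a minimal sketch of
such an action, assuming Pillow is available in the image; every parameter name is
illustrative rather than part of the runtime.

```python
# Minimal sketch of the follow-up crop action described in the notebook above.
# Assumes the crop rectangle (X, Y, W, H) and a local image path arrive as
# action parameters; these names are illustrative, not a runtime contract.
import base64
import io

from PIL import Image

def main(args):
    img = Image.open(args["local_image_path"])
    # PIL expects an integer (left, upper, right, lower) box.
    left, upper = args["X"], args["Y"]
    box = tuple(int(v) for v in (left, upper, left + args["W"], upper + args["H"]))
    cropped = img.crop(box)

    # Return the cropped image as a base64-encoded JPEG in the response body.
    buf = io.BytesIO()
    cropped.save(buf, "JPEG", optimize=True)
    return {"body": base64.b64encode(buf.getvalue()).decode("ascii")}
```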
diff --git a/core/python3AiAction/samples/smart-body-crop/fashion-men-1.jpg b/core/python3AiAction/samples/smart-body-crop/fashion-men-1.jpg
deleted file mode 100644
index 8d440e3..0000000
Binary files a/core/python3AiAction/samples/smart-body-crop/fashion-men-1.jpg and /dev/null differ
diff --git a/core/python3AiAction/samples/smart-body-crop/inference.py b/core/python3AiAction/samples/smart-body-crop/inference.py
deleted file mode 100644
index dbb5f87..0000000
--- a/core/python3AiAction/samples/smart-body-crop/inference.py
+++ /dev/null
@@ -1,246 +0,0 @@
-"""Executable Python script for running Python actions.
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-"""
-
-'''
-Some of this code is based on Ildoo Kim's code (https://github.com/ildoonet/tf-openpose) and https://gist.github.com/alesolano/b073d8ec9603246f766f9f15d002f4f4,
-and is derived from the OpenPose Library (https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/LICENSE)
-'''
-
-import tensorflow as tf
-import numpy as np
-from PIL import Image
-from tensorflow.core.framework import graph_pb2
-import urllib3
-import certifi
-import os
-import shutil
-
-from common import estimate_pose, crop_image, draw_humans
-
-import time
-
-
-def print_time(message, start):
-    print(message, "{:10.4f}".format(time.time() - start))
-    return time.time()
-
-
-class SmartBodyCrop:
-    initialized = False
-    tmp_path = '/tmp/'
-    tmpfs_path = '/mnt/action/'
-
-    def __init__(self, model_url):
-        self.model_url = model_url
-
-    def read_img(self, imgpath, width, height):
-        img = Image.open(imgpath)
-        orig_width, orig_height = img.size
-        # resize the image to match openpose's training data
-        # https://github.com/ildoonet/tf-pose-estimation#inference-time
-        img.thumbnail((width, height))
-        thumbnail_w, thumbnail_h = img.size
-        #val_img = val_img.resize((width, height))
-        val_img = np.asarray(img, dtype=np.float32)
-        val_img = val_img.reshape([1, thumbnail_h, thumbnail_w, 3])
-        # val_img = val_img.astype(float)
-        val_img = val_img * (2.0 / 255.0) - 1.0  # normalization
-
-        return val_img, img, orig_width, orig_height
-
-    def _download_model(self):
-        # check if the model is a ref to local file path
-        if type(self.model_url) is str:
-            if not self.model_url.startswith('http'):
-                return self.model_url
-
-        start = time.time()
-        local_model_path = SmartBodyCrop.tmp_path + 'optimized_openpose.pb'
-        tmpfs_model_path = SmartBodyCrop.tmpfs_path + 'optimized_openpose.pb'
-
-        if (os.path.isfile(local_model_path)):
-            print_time("model was found in the local storage: " +
-                       local_model_path, start)
-            return local_model_path
-
-        # check if this model was downloaded by another invocation in the tmpfs path
-        if (os.path.isfile(tmpfs_model_path)):
-            print_time("model was found in the tmpfs storage: " +
-                       tmpfs_model_path, start)
-            shutil.copy(tmpfs_model_path, local_model_path)
-            print_time("model copied FROM tmpfs:" + tmpfs_model_path, start)
-            return local_model_path
-
-        http = urllib3.PoolManager(
-            cert_reqs='CERT_REQUIRED',
-            ca_certs=certifi.where(),
-            headers={
-                'Accept': 'application/octet-stream',
-                'Content-Type': 'application/octet-stream'
-            })
-        urllib3.disable_warnings()
-
-        r = http.request('GET', self.model_url,
-                         preload_content=False,
-                         retries=urllib3.Retry(5, redirect=5))
-
-        with open(local_model_path, 'wb') as out:
-            while True:
-                data = r.read(8192)  # stream the model in 8 KiB chunks
-                if not data:
-                    break
-                out.write(data)
-
-        r.release_conn()
-        print_time("model downloaded in :", start)
-
-        # copy the file to the tmpfs_model_path to be reused by other actions
-        # this seems to work concurrently as per: https://stackoverflow.com/questions/35605463/why-is-concurrent-copy-of-a-file-not-failing
-        if (os.path.isdir(SmartBodyCrop.tmpfs_path)):
-            shutil.copy(local_model_path, tmpfs_model_path)
-            print_time("model copied to tmpfs:" + tmpfs_model_path, start)
-
-        return local_model_path
-
-    def _download_image(self, image):
-        start = time.time()
-        headers = {}
-        image_url = image
-        local_image_path = SmartBodyCrop.tmp_path + 'image'
-        if type(image) is dict:
-            headers = image.get('headers')
-            image_url = image.get('uri')
-        # check if the image is a local file path
-        if type(image) is str:
-            if not image.startswith('http'):
-                return image
-
-        http = urllib3.PoolManager(
-            cert_reqs='CERT_REQUIRED',
-            ca_certs=certifi.where(),
-            headers=headers)
-        urllib3.disable_warnings()
-
-        r = http.request('GET', image_url,
-                         preload_content=False,
-                         retries=urllib3.Retry(5, redirect=5))
-
-        with open(local_image_path, 'wb') as out:
-            while True:
-                data = r.read(1024)  # stream the image in 1 KiB chunks
-                if not data:
-                    break
-                out.write(data)
-
-        r.release_conn()
-        print_time("image downloaded in :", start)
-        return local_image_path
-
-    def load_graph_def(self):
-        start = time.time()
-
-        local_model_path = self._download_model()
-
-        tf.reset_default_graph()
-        graph_def = graph_pb2.GraphDef()
-        with open(local_model_path, 'rb') as f:
-            graph_def.ParseFromString(f.read())
-        tf.import_graph_def(graph_def, name='')
-
-        start = print_time("model imported in :", start)
-        start = time.time()
-
-        # SmartBodyCrop.initialized = True
-
-    def infer(self, image, upper_body, lower_body):
-        start = time.time()
-
-        imgpath = self._download_image(image)
-        image, thumbnail, input_width, input_height = self.read_img(
-            imgpath, 368, 368)
-        start = print_time("image (" + str(input_width) +
-                           "x" + str(input_height) + ") loaded in: ", start)
-
-        if not SmartBodyCrop.initialized:
-            print("Loading the model...")
-            self.load_graph_def()
-
-        with tf.Session() as sess:
-            inputs = tf.get_default_graph().get_tensor_by_name('inputs:0')
-            heatmaps_tensor = tf.get_default_graph().get_tensor_by_name(
-                'Mconv7_stage6_L2/BiasAdd:0')
-            pafs_tensor = tf.get_default_graph().get_tensor_by_name(
-                'Mconv7_stage6_L1/BiasAdd:0')
-
-            heatMat, pafMat = sess.run(
-                [heatmaps_tensor, pafs_tensor], feed_dict={inputs: image})
-
-            start = print_time("tf session executed in: ", start)
-
-            humans = estimate_pose(heatMat[0], pafMat[0])
-            start = print_time("pose estimated in: ", start)
-            # send the thumbnail to render an initial crop
-            img, crop_position, crop_size = crop_image(
-                thumbnail, humans, upper_body, lower_body)
-            # scale back the crop_coordinates to match the original picture size
-            scale_factor_w = input_width / thumbnail.size[0]
-            scale_factor_h = input_height / thumbnail.size[1]
-            crop_coordinates = {
-                'x':      crop_position[0] * scale_factor_w,
-                'y':      crop_position[1] * scale_factor_h,
-                'width':  crop_size[0] * scale_factor_w,
-                'height': crop_size[1] * scale_factor_h
-            }
-
-            start = print_time("image cropped in: ", start)
-
-            sess.close()
-            return img, crop_coordinates, imgpath
-
-    def detect_parts(self, image):
-        start = time.time()
-
-        imgpath = self._download_image(image)
-        image, thumbnail, input_width, input_height = self.read_img(
-            imgpath, 368, 368)
-        start = print_time("image loaded in: ", start)
-
-        if not SmartBodyCrop.initialized:
-            print("Loading the model...")
-            self.load_graph_def()
-
-        with tf.Session() as sess:
-            inputs = tf.get_default_graph().get_tensor_by_name('inputs:0')
-            heatmaps_tensor = tf.get_default_graph().get_tensor_by_name(
-                'Mconv7_stage6_L2/BiasAdd:0')
-            pafs_tensor = tf.get_default_graph().get_tensor_by_name(
-                'Mconv7_stage6_L1/BiasAdd:0')
-
-            heatMat, pafMat = sess.run(
-                [heatmaps_tensor, pafs_tensor], feed_dict={inputs: image})
-
-            start = print_time("tf session executed in: ", start)
-
-            humans = estimate_pose(heatMat[0], pafMat[0])
-            start = print_time("pose estimated in: ", start)
-
-            # display
-            img1 = draw_humans(thumbnail, humans)
-            return img1
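For reference, a minimal usage sketch of the removed `SmartBodyCrop` class follows;
the model and image URLs below are placeholders, not artifacts shipped with the runtime.

```python
# Minimal sketch, assuming the removed inference.py and common.py are importable
# and a frozen OpenPose graph is reachable at the (placeholder) model URL.
from inference import SmartBodyCrop

cropper = SmartBodyCrop(model_url="https://example.com/optimized_openpose.pb")

# `image` may be a URL string, a local path, or a dict {"uri": ..., "headers": {...}}.
img, coords, local_path = cropper.infer(
    "https://example.com/person.jpg",
    "Eyes",   # upper body part the crop starts from
    "Hips")   # lower body part the crop ends at

# coords is the crop rectangle scaled back to the original image size,
# e.g. {"x": ..., "y": ..., "width": ..., "height": ...}.
print(coords)
```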
diff --git a/core/python3AiActionLoop/README.md b/core/python3AiActionLoop/README.md
index c27663d..50b7657 100644
--- a/core/python3AiActionLoop/README.md
+++ b/core/python3AiActionLoop/README.md
@@ -32,7 +32,7 @@ Below are the versions for the included libraries:
 This image has Jupyter Notebook installed. You may find it useful to run quick notebooks directly on the image that runs the actual action code. To start Jupyter Notebook, execute:
 
 ```bash
-$ docker run -it -p 8888:8888 --rm --entrypoint jupyter-notebook openwhisk/actionloop-python-v3.7ai  --notebook-dir=/notebooks --ip 0.0.0.0 --no-browser --allow-root
+$ docker run -it -p 8888:8888 --rm --entrypoint jupyter-notebook openwhisk/actionloop-python-v3.6ai  --notebook-dir=/notebooks --ip 0.0.0.0 --no-browser --allow-root
 ```
 
 #### AI Action Sample
@@ -73,7 +73,7 @@ To view an example with this AI Action check the [samples/smart-body-crop notebo
 For a complete list execute:
 
 ```bash
-$ docker run --rm --entrypoint pip openwhisk/python3aiaction list
+$ docker run --rm --entrypoint pip openwhisk/actionloop-python-v3.6ai list
 ```
 
 #### Available Ubuntu packages
@@ -81,5 +81,5 @@ $ docker run --rm --entrypoint pip openwhisk/python3aiaction list
 For a complete list execute:
 
 ```bash
-$ docker run --rm --entrypoint apt openwhisk/python3aiaction list --installed
+$ docker run --rm --entrypoint apt openwhisk/actionloop-python-v3.6ai list --installed
 ```
diff --git a/settings.gradle b/settings.gradle
index 35888bc..1445790 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -18,9 +18,7 @@
 include 'tests'
 
 include 'core:python2ActionLoop'
-include 'core:python3Action'
 include 'core:python3ActionLoop'
-include 'core:python3AiAction'
 include 'core:python3AiActionLoop'
 
 rootProject.name = 'runtime-python'
diff --git a/tests/src/test/scala/runtime/actionContainers/Python3AiActionContainerTests.scala b/tests/src/test/scala/runtime/actionContainers/Python3AiActionContainerTests.scala
deleted file mode 100644
index e5c4075..0000000
--- a/tests/src/test/scala/runtime/actionContainers/Python3AiActionContainerTests.scala
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package runtime.actionContainers
-
-import org.junit.runner.RunWith
-import org.scalatest.junit.JUnitRunner
-import common.WskActorSystem
-import spray.json._
-import DefaultJsonProtocol._
-
-@RunWith(classOf[JUnitRunner])
-class Python3AiActionContainerTests extends PythonActionContainerTests with WskActorSystem {
-
-  override lazy val imageName = "python3aiaction"
-
-  it should "run tensorflow" in {
-    val (out, err) = withActionContainer() { c =>
-      val code =
-        """
-          |import tensorflow as tf
-          |def main(args):
-          |   # Initialize two constants
-          |   x1 = tf.constant([1,2,3,4])
-          |   x2 = tf.constant([5,6,7,8])
-          |
-          |   # Multiply
-          |   result = tf.multiply(x1, x2)
-          |
-          |   # Initialize Session and run `result`
-          |   with tf.Session() as sess:
-          |       output = sess.run(result)
-          |       print(output)
-          |       return { "response": output.tolist() }
-        """.stripMargin
-
-      val (initCode, res) = c.init(initPayload(code))
-      initCode should be(200)
-
-      val (runCode, runRes) = c.run(runPayload(JsObject()))
-      runCode should be(200)
-
-      runRes shouldBe defined
-      runRes should be(Some(JsObject("response" -> List(5, 12, 21, 32).toJson)))
-    }
-  }
-
-  it should "run pytorch" in {
-    val (out, err) = withActionContainer() { c =>
-      val code =
-        """
-          |import torch
-          |import torchvision
-          |import torch.nn as nn
-          |import numpy as np
-          |import torchvision.transforms as transforms
-          |def main(args):
-          |   # Create a numpy array.
-          |   x = np.array([1,2,3,4])
-          |
-          |   # Convert the numpy array to a torch tensor.
-          |   y = torch.from_numpy(x)
-          |
-          |   # Convert the torch tensor to a numpy array.
-          |   z = y.numpy()
-          |   return { "response": z.tolist()}
-        """.stripMargin
-
-      val (initCode, res) = c.init(initPayload(code))
-      initCode should be(200)
-
-      val (runCode, runRes) = c.run(runPayload(JsObject()))
-      runCode should be(200)
-
-      runRes shouldBe defined
-      runRes should be(Some(JsObject("response" -> List(1, 2, 3, 4).toJson)))
-    }
-  }
-
-}
diff --git a/tests/src/test/scala/runtime/actionContainers/Python3AiActionLoopContainerTests.scala b/tests/src/test/scala/runtime/actionContainers/Python3AiActionLoopContainerTests.scala
index 8f40869..456f3b7 100644
--- a/tests/src/test/scala/runtime/actionContainers/Python3AiActionLoopContainerTests.scala
+++ b/tests/src/test/scala/runtime/actionContainers/Python3AiActionLoopContainerTests.scala
@@ -111,14 +111,12 @@ class Python3AiActionLoopContainerTests
       // action loop detects those errors at init time
       val (initCode, initRes) = c.init(initPayload(code))
       initCode should be(200)
-      println(initCode, initRes)
 
       val (runCode, runRes) = c.run(runPayload(JsObject()))
       runCode should be(200)
       runRes.get.fields.get("array") should not be empty
     }
-    println(out)
-    println(err)
+
     checkStreams(out, err, {
       case (o, e) =>
         o shouldBe empty
@@ -139,13 +137,11 @@ class Python3AiActionLoopContainerTests
       // action loop detects those errors at init time
       val (initCode, initRes) = c.init(initPayload(code))
       initCode should be(200)
-      println(initCode, initRes)
 
       val (runCode, _) = c.run(runPayload(JsObject()))
       runCode should be(400)
     }
-    println(out)
-    println(err)
+
     checkStreams(out, err, {
       case (o, e) =>
         o shouldBe empty
diff --git a/tests/src/test/scala/runtime/actionContainers/PythonActionContainerTests.scala b/tests/src/test/scala/runtime/actionContainers/PythonActionContainerTests.scala
index 0c11bc3..be810d1 100644
--- a/tests/src/test/scala/runtime/actionContainers/PythonActionContainerTests.scala
+++ b/tests/src/test/scala/runtime/actionContainers/PythonActionContainerTests.scala
@@ -17,8 +17,6 @@
 
 package runtime.actionContainers
 
-import org.junit.runner.RunWith
-import org.scalatest.junit.JUnitRunner
 import spray.json.DefaultJsonProtocol._
 import spray.json._
 import common.WskActorSystem
@@ -26,10 +24,9 @@ import actionContainers.{ActionContainer, BasicActionRunnerTests}
 import actionContainers.ActionContainer.withContainer
 import actionContainers.ResourceHelpers.ZipBuilder
 
-@RunWith(classOf[JUnitRunner])
-class PythonActionContainerTests extends BasicActionRunnerTests with WskActorSystem {
+abstract class PythonActionContainerTests extends BasicActionRunnerTests with WskActorSystem {
 
-  lazy val imageName = "python3action"
+  val imageName: String
 
   /** actionLoop does not return an error code on failed run */
   lazy val errorCodeOnRun = true
@@ -98,6 +95,21 @@ class PythonActionContainerTests extends BasicActionRunnerTests with WskActorSys
         |    }
       """.stripMargin.trim)
 
+  override val testEnvParameters =
+    TestConfig("""
+        |import os
+        |somevar = os.environ['SOME_VAR']
+        |another = os.environ['ANOTHER_VAR']
+        |
+        |def main(dict):
+        |    global somevar
+        |    global another
+        |    return {
+        |       "SOME_VAR": somevar,
+        |       "ANOTHER_VAR": another
+        |    }
+      """.stripMargin)
+
   override val testLargeInput =
     TestConfig("""
         |def main(args):
diff --git a/tests/src/test/scala/runtime/actionContainers/PythonActionLoopExtraTests.scala b/tests/src/test/scala/runtime/actionContainers/PythonActionLoopExtraTests.scala
index 93a98ed..6a75c0a 100644
--- a/tests/src/test/scala/runtime/actionContainers/PythonActionLoopExtraTests.scala
+++ b/tests/src/test/scala/runtime/actionContainers/PythonActionLoopExtraTests.scala
@@ -36,7 +36,6 @@ trait PythonActionLoopExtraTests {
 
       val (runCode, runRes) = c.run(runPayload(JsObject()))
       runCode should be(400)
-      println(runCode, runRes)
       runRes.get.fields.get("error").get.toString() should include("command exited")
     }
     checkStreams(out, err, {
diff --git a/tools/travis/publish.sh b/tools/travis/publish.sh
index f393664..c93c537 100755
--- a/tools/travis/publish.sh
+++ b/tools/travis/publish.sh
@@ -31,12 +31,8 @@ RUNTIME_VERSION=$2
 IMAGE_TAG=$3
 
 if [ ${RUNTIME_VERSION} == "3" ]; then
-  RUNTIME="pythonAction"
-elif [ ${RUNTIME_VERSION} == "3-ai" ]; then
-  RUNTIME="python3AiAction"
-elif [ ${RUNTIME_VERSION} == "3-loop" ]; then
   RUNTIME="python3ActionLoop"
-elif [ ${RUNTIME_VERSION} == "3-loopai" ]; then
+elif [ ${RUNTIME_VERSION} == "3-ai" ]; then
   RUNTIME="python3AiActionLoop"
 fi
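
With the non-action-loop proxies removed, the publish script recognizes only the
"3" and "3-ai" runtime versions. A hedged usage sketch follows; the first argument
(image prefix) and the tag are assumptions inferred from the script's positional
parameters, not values fixed by the script itself.

```bash
# $2 selects the runtime, $3 the image tag; $1 is assumed to be the Docker prefix.
./tools/travis/publish.sh openwhisk 3 nightly      # publishes python3ActionLoop
./tools/travis/publish.sh openwhisk 3-ai nightly   # publishes python3AiActionLoop
```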