Posted to commits@openwhisk.apache.org by st...@apache.org on 2023/05/17 02:31:04 UTC

[openwhisk-runtime-python] branch master updated: remove Python 3.6 based runtime (#143)

This is an automated email from the ASF dual-hosted git repository.

style95 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/openwhisk-runtime-python.git


The following commit(s) were added to refs/heads/master by this push:
     new e507a01  remove Python 3.6 based runtime (#143)
e507a01 is described below

commit e507a016f3ac446a2644b83bac7a0e6aeec88475
Author: David Grove <dg...@users.noreply.github.com>
AuthorDate: Tue May 16 22:30:59 2023 -0400

    remove Python 3.6 based runtime (#143)
    
    Python 3.6 is past its end of life; therefore this runtime is no
    longer receiving security updates.
---
 .github/workflows/ci.yaml                          |   4 -
 README.md                                          |   3 -
 core/python36AiAction/Dockerfile                   |  94 ---
 core/python36AiAction/README.md                    |  85 --
 core/python36AiAction/build.gradle                 |  45 --
 core/python36AiAction/requirements.txt             |  32 -
 .../samples/smart-body-crop/.gitignore             |   5 -
 .../samples/smart-body-crop/common.py              | 332 --------
 .../samples/smart-body-crop/crop.ipynb             | 872 ---------------------
 .../samples/smart-body-crop/fashion-men-1.jpg      | Bin 2471074 -> 0 bytes
 .../samples/smart-body-crop/inference.py           | 246 ------
 settings.gradle                                    |   1 -
 .../runtime/actionContainers/Python36AiTests.scala | 149 ----
 tutorials/local_build.md                           |   2 +-
 tutorials/local_build.sh                           |  24 +-
 15 files changed, 13 insertions(+), 1881 deletions(-)

diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 45c4c66..c9e22fa 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -90,8 +90,6 @@ jobs:
           SHORT_COMMIT=$(git rev-parse --short "$GITHUB_SHA")
           ./gradlew :core:python3Action:distDocker -PdockerRegistry=docker.io -PdockerImagePrefix=openwhisk -PdockerImageTag=nightly
           ./gradlew :core:python3Action:distDocker -PdockerRegistry=docker.io -PdockerImagePrefix=openwhisk -PdockerImageTag=$SHORT_COMMIT
-          ./gradlew :core:python36AiAction:distDocker -PdockerRegistry=docker.io -PdockerImagePrefix=openwhisk -PdockerImageTag=nightly
-          ./gradlew :core:python36AiAction:distDocker -PdockerRegistry=docker.io -PdockerImagePrefix=openwhisk -PdockerImageTag=$SHORT_COMMIT
           ./gradlew :core:python39Action:distDocker -PdockerRegistry=docker.io -PdockerImagePrefix=openwhisk -PdockerImageTag=nightly
           ./gradlew :core:python39Action:distDocker -PdockerRegistry=docker.io -PdockerImagePrefix=openwhisk -PdockerImageTag=$SHORT_COMMIT
           ./gradlew :core:python310Action:distDocker -PdockerRegistry=docker.io -PdockerImagePrefix=openwhisk -PdockerImageTag=nightly
@@ -106,8 +104,6 @@ jobs:
           IMAGE_TAG=${GITHUB_REF_NAME##*@}
           if [ ${RUNTIME_VERSION} == "3" ]; then
             RUNTIME="python3Action"
-          elif [ ${RUNTIME_VERSION} == "3-ai" ]; then
-            RUNTIME="python36AiAction"
           elif [ ${RUNTIME_VERSION} == "39" ]; then
             RUNTIME="python39Action"
           elif [ ${RUNTIME_VERSION} == "310" ]; then
diff --git a/README.md b/README.md
index f329f5f..7bb71f5 100644
--- a/README.md
+++ b/README.md
@@ -150,9 +150,6 @@ others.
 ./gradlew :tests:test --tests Python*Tests
 ```
 
-## Python 3 AI Runtime
-This action runtime enables developers to create AI Services with OpenWhisk. It comes with preinstalled libraries useful for running Machine Learning and Deep Learning inferences. [Read more about this runtime here](./core/python36AiAction).
-
 ## Using additional python libraries
 
 If you need more libraries for your Python action,  you can include a virtualenv in the zip file of the action.
diff --git a/core/python36AiAction/Dockerfile b/core/python36AiAction/Dockerfile
deleted file mode 100644
index c269c86..0000000
--- a/core/python36AiAction/Dockerfile
+++ /dev/null
@@ -1,94 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# build go proxy from source
-FROM golang:1.18 AS builder_source
-ARG GO_PROXY_GITHUB_USER=apache
-ARG GO_PROXY_GITHUB_BRANCH=master
-RUN git clone --branch ${GO_PROXY_GITHUB_BRANCH} \
-   https://github.com/${GO_PROXY_GITHUB_USER}/openwhisk-runtime-go /src ;\
-   cd /src ; env GO111MODULE=on CGO_ENABLED=0 go build main/proxy.go && \
-   mv proxy /bin/proxy
-
-# or build it from a release
-FROM golang:1.18 AS builder_release
-ARG GO_PROXY_RELEASE_VERSION=1.18@1.20.0
-RUN curl -sL \
-  https://github.com/apache/openwhisk-runtime-go/archive/${GO_PROXY_RELEASE_VERSION}.tar.gz\
-  | tar xzf -\
-  && cd openwhisk-runtime-go-*/main\
-  && GO111MODULE=on CGO_ENABLED=0 go build -o /bin/proxy
-
-# Dockerfile for python AI actions, overrides and extends ActionRunner from actionProxy
-FROM tensorflow/tensorflow:1.15.2-py3-jupyter
-
-# select the builder to use
-ARG GO_PROXY_BUILD_FROM=release
-
-RUN apt-get update && apt-get upgrade -y && apt-get install -y \
-            curl \
-            gcc \
-            libc-dev \
-            libxslt-dev \
-            libxml2-dev \
-            libffi-dev \
-            libssl-dev \
-            zip \
-            unzip \
-            vim \
-            && rm -rf /var/lib/apt/lists/*
-
-# PyTorch
-# retry the install until it succeeds, as it often fails
-RUN while ! pip list | grep torch ;\
-    do pip --no-cache-dir install torch ; done ;\
-    while ! pip list | grep torchvision ;\
-    do pip install torchvision ; done
-
-# rclone
-RUN curl -L https://downloads.rclone.org/rclone-current-linux-amd64.deb -o rclone.deb \
-    && dpkg -i rclone.deb \
-    && rm rclone.deb
-
-COPY requirements_common.txt requirements_common.txt
-COPY requirements.txt requirements.txt
-RUN pip3 install --upgrade pip six wheel &&\
-    pip3 install --no-cache-dir -r requirements.txt &&\
-    ln -sf /usr/bin/python3 /usr/local/bin/python
-
-RUN mkdir -p /action
-WORKDIR /
-
-COPY --from=builder_source /bin/proxy /bin/proxy_source
-COPY --from=builder_release /bin/proxy /bin/proxy_release
-RUN mv /bin/proxy_${GO_PROXY_BUILD_FROM} /bin/proxy
-
-ADD bin/compile /bin/compile
-ADD lib/launcher.py /lib/launcher.py
-
-# log initialization errors
-ENV OW_LOG_INIT_ERROR=1
-# the launcher must wait for an ack
-ENV OW_WAIT_FOR_ACK=1
-# using the runtime name to identify the execution environment
-ENV OW_EXECUTION_ENV=openwhisk/action-python-v3.6-ai
-# compiler script
-ENV OW_COMPILER=/bin/compile
-# use utf-8
-ENV PYTHONIOENCODING=UTF-8
-
-ENTRYPOINT ["/bin/proxy"]
diff --git a/core/python36AiAction/README.md b/core/python36AiAction/README.md
deleted file mode 100644
index 50b7657..0000000
--- a/core/python36AiAction/README.md
+++ /dev/null
@@ -1,85 +0,0 @@
-<!--
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
--->
-
-# AI Action
-
-This image contains libraries and frameworks useful for running AI Services.
-
-Below are the versions of the included libraries:
-
-| Image Version | Package | Notes |
-| ------------- | ------- | ----- |
-| 1.1.0      | Tensorflow 1.11.0, PyTorch 0.4.1 | Based on Ubuntu 16.04.5, Python 3.5.2. |
-
-### Opening Notebooks
-
-This image has Jupyter Notebook installed. You may find it useful to run quick notebooks directly on the image, which can run the actual code. To start Jupyter Notebook, execute:
-
-```bash
-$ docker run -it -p 8888:8888 --rm --entrypoint jupyter-notebook openwhisk/actionloop-python-v3.6ai  --notebook-dir=/notebooks --ip 0.0.0.0 --no-browser --allow-root
-```
-
-#### AI Action Sample
-
-To view an example that uses this AI Action, check the [samples/smart-body-crop notebook](./samples/smart-body-crop/crop.ipynb) and follow the instructions.
-
-### 1.1.0 Details
-#### Available python packages
-
-| Package               | Version               |
-| --------------------- | --------------------- |
-| tensorboard           | 1.11.0                |
-| tensorflow            | 1.11.0                |
-| torch                 | 0.4.1                 |
-| torchvision           | 0.2.1                 |
-| scikit-learn          | 0.19.2                |
-| scipy                 | 1.1.0                 |
-| sklearn               | 0.0                   |
-| numpy                 | 1.15.2                |
-| pandas                | 0.23.4                |
-| Pillow                | 5.2.0                 |
-| Cython                | 0.28.5                |
-| ipykernel             | 4.9.0                 |
-| ipython               | 6.5.0                 |
-| ipywidgets            | 7.4.2                 |
-| jupyter               | 1.0.0                 |
-| jupyter-client        | 5.2.3                 |
-| jupyter-console       | 5.2.0                 |
-| jupyter-core          | 4.4.0                 |
-| Keras                 | 2.2.2                 |
-| Keras-Applications    | 1.0.4                 |
-| Keras-Preprocessing   | 1.0.2                 |
-| matplotlib            | 3.0.0                 |
-| notebook              | 5.7.0                 |
-| opencv-contrib-python | 3.4.2.17              |
-| protobuf              | 3.6.1                 |
-
-For a complete list execute:
-
-```bash
-docker run --rm --entrypoint pip actionloop-python-v3.6-ai list
-```
-
-#### Available Ubuntu packages
-
-For a complete list execute:
-
-```bash
-docker run --rm --entrypoint apt actionloop-python-v3.6-ai list --installed
-```
diff --git a/core/python36AiAction/build.gradle b/core/python36AiAction/build.gradle
deleted file mode 100644
index c11c238..0000000
--- a/core/python36AiAction/build.gradle
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-ext.dockerImageName = 'action-python-v3.6-ai'
-apply from: '../../gradle/docker.gradle'
-
-distDocker.dependsOn 'copyLib'
-distDocker.dependsOn 'copyBin'
-distDocker.dependsOn 'copyReqrCommon'
-distDocker.finalizedBy('cleanup')
-
-task copyLib(type: Copy) {
-    from '../python3Action/lib'
-    into './lib'
-}
-
-task copyBin(type: Copy) {
-    from '../python3Action/bin'
-    into './bin'
-}
-
-task copyReqrCommon(type: Copy) {
-    from '../requirements_common.txt'
-    into './'
-}
-
-task cleanup(type: Delete) {
-    delete 'bin'
-    delete 'lib'
-    delete 'requirements_common.txt'
-}
diff --git a/core/python36AiAction/requirements.txt b/core/python36AiAction/requirements.txt
deleted file mode 100644
index 122027f..0000000
--- a/core/python36AiAction/requirements.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-# default packages available for action-python-v3.6-ai
--r requirements_common.txt
-
-# package to sync from a variety of cloud blob storage
-python-rclone == 0.0.2
-
-# more ML/DL packages
-keras==2.6.0
-opencv-contrib-python==4.5.3.56
-Cython==0.29.24
-tools==0.1.9
-scikit-image==0.17.2
-nltk==3.6.2
-
-# packages for numerics
-numpy==1.19.5
-scikit-learn==0.24.2
-scipy==1.5.4
-pandas==1.1.5
-
-# packages for image processing
-Pillow==8.3.2
-
-# Etc
-PyJWT==1.7.1
-pymongo==3.12.0
-redis==3.5.3
-pika==1.2.0
-elasticsearch==7.14.1
-cassandra-driver==3.25.0
-etcd3==0.12.0
-twilio==6.63.2
diff --git a/core/python36AiAction/samples/smart-body-crop/.gitignore b/core/python36AiAction/samples/smart-body-crop/.gitignore
deleted file mode 100644
index e5d0af6..0000000
--- a/core/python36AiAction/samples/smart-body-crop/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-models
-action.zip
-action_package
-.ipynb_checkpoints
-__pycache__
diff --git a/core/python36AiAction/samples/smart-body-crop/common.py b/core/python36AiAction/samples/smart-body-crop/common.py
deleted file mode 100644
index 4c6ece1..0000000
--- a/core/python36AiAction/samples/smart-body-crop/common.py
+++ /dev/null
@@ -1,332 +0,0 @@
-"""Executable Python script for running Python actions.
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-"""
-
-'''
-Some code is based on Ildoo Kim's code (https://github.com/ildoonet/tf-openpose) and https://gist.github.com/alesolano/b073d8ec9603246f766f9f15d002f4f4
-and derived from the OpenPose Library (https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/LICENSE)
-'''
-
-from collections import defaultdict
-from enum import Enum
-import math
-import numpy as np
-import itertools
-from scipy.ndimage.filters import maximum_filter
-from PIL import Image, ImageDraw
-
-
-class CocoPart(Enum):
-    Nose = 0
-    Neck = 1
-    RShoulder = 2
-    RElbow = 3
-    RWrist = 4
-    LShoulder = 5
-    LElbow = 6
-    LWrist = 7
-    RHip = 8
-    RKnee = 9
-    RAnkle = 10
-    LHip = 11
-    LKnee = 12
-    LAnkle = 13
-    REye = 14
-    LEye = 15
-    REar = 16
-    LEar = 17
-    Background = 18
-
-
-parts_dict = {'Nose': [0], 'Neck': [1], 'Shoulders': [2, 5], 'Elbows': [3, 6], 'Wrists': [
-    4, 7], 'Hips': [8, 11], 'Knees': [9, 12], 'Ankles': [10, 13], 'Eyes': [14, 15], 'Ears': [16, 17]}
-# parts_if_notfound_upper={'Eyes':'Ears','Ears':'Eyes','Nose':'Ears','Neck':'Nose','Shoulders':'Neck','Elbows':'Shoulders','Wrists':'Elbows','Hips':'Wrists#','Knees':'Hips'}
-# parts_if_notfound_lower=#{'Ears':'Nose','Nose':'Neck','Neck':'Shoulders','Shoulders':'Elbows','Elbows':'Wrists','Wrists':'Hips','Hips':'Knees',
-#                  'Knees':'Ankles','Ankles':'Knees'}
-body_parts_dict = {0: 'Eyes', 1: 'Ears', 2: 'Nose', 3: 'Neck', 4: 'Shoulders',
-                   5: 'Elbows', 6: 'Wrists', 7: 'Hips', 8: 'Knees', 9: 'Ankles'}
-
-CocoPairs = [
-    (1, 2), (1, 5), (2, 3), (3, 4), (5, 6), (6,
-                                             7), (1, 8), (8, 9), (9, 10), (1, 11),
-    (11, 12), (12, 13), (1, 0), (0, 14), (14,
-                                          16), (0, 15), (15, 17), (2, 16), (5, 17)
-]   # = 19
-CocoPairsRender = CocoPairs[:-2]
-CocoPairsNetwork = [
-    (12, 13), (20, 21), (14, 15), (16, 17), (22,
-                                             23), (24, 25), (0, 1), (2, 3), (4, 5),
-    (6, 7), (8, 9), (10, 11), (28, 29), (30, 31), (34,
-                                                   35), (32, 33), (36, 37), (18, 19), (26, 27)
-]  # = 19
-
-CocoColors = [(255, 0, 0), (255, 85, 0), (255, 170, 0), (255, 255, 0), (170, 255, 0), (85, 255, 0), (0, 255, 0),
-              (0, 255, 85), (0, 255, 170), (0, 255, 255), (0,
-                                                           170, 255), (0, 85, 255), (0, 0, 255), (85, 0, 255),
-              (170, 0, 255), (255, 0, 255), (255, 0, 170), (255, 0, 85)]
-
-
-NMS_Threshold = 0.1
-InterMinAbove_Threshold = 6
-Inter_Threashold = 0.1
-Min_Subset_Cnt = 4
-Min_Subset_Score = 0.8
-Max_Human = 96
-
-
-def human_conns_to_human_parts(human_conns, heatMat):
-    human_parts = defaultdict(lambda: None)
-    for conn in human_conns:
-        human_parts[conn['partIdx'][0]] = (
-            conn['partIdx'][0],  # part index
-            (conn['coord_p1'][0] / heatMat.shape[2], conn['coord_p1']
-             [1] / heatMat.shape[1]),  # relative coordinates
-            heatMat[conn['partIdx'][0], conn['coord_p1']
-                    [1], conn['coord_p1'][0]]  # score
-        )
-        human_parts[conn['partIdx'][1]] = (
-            conn['partIdx'][1],
-            (conn['coord_p2'][0] / heatMat.shape[2],
-             conn['coord_p2'][1] / heatMat.shape[1]),
-            heatMat[conn['partIdx'][1], conn['coord_p2']
-                    [1], conn['coord_p2'][0]]
-        )
-    return human_parts
-
-
-def non_max_suppression(heatmap, window_size=3, threshold=NMS_Threshold):
-    heatmap[heatmap < threshold] = 0  # set low values to 0
-    part_candidates = heatmap * \
-        (heatmap == maximum_filter(heatmap, footprint=np.ones((window_size, window_size))))
-    return part_candidates
-
-
-def estimate_pose(heatMat, pafMat):
-    if heatMat.shape[2] == 19:
-        # transform from [height, width, n_parts] to [n_parts, height, width]
-        heatMat = np.rollaxis(heatMat, 2, 0)
-    if pafMat.shape[2] == 38:
-        # transform from [height, width, 2*n_pairs] to [2*n_pairs, height, width]
-        pafMat = np.rollaxis(pafMat, 2, 0)
-
-    # reliability issue.
-    heatMat = heatMat - heatMat.min(axis=1).min(axis=1).reshape(19, 1, 1)
-    heatMat = heatMat - heatMat.min(axis=2).reshape(19, heatMat.shape[1], 1)
-
-    _NMS_Threshold = max(np.average(heatMat) * 4.0, NMS_Threshold)
-    _NMS_Threshold = min(_NMS_Threshold, 0.3)
-
-    coords = []  # for each part index, it stores coordinates of candidates
-    for heatmap in heatMat[:-1]:  # remove background
-        part_candidates = non_max_suppression(heatmap, 5, _NMS_Threshold)
-        coords.append(np.where(part_candidates >= _NMS_Threshold))
-
-    # all connections detected. no information about what humans they belong to
-    connection_all = []
-    for (idx1, idx2), (paf_x_idx, paf_y_idx) in zip(CocoPairs, CocoPairsNetwork):
-        connection = estimate_pose_pair(
-            coords, idx1, idx2, pafMat[paf_x_idx], pafMat[paf_y_idx])
-        connection_all.extend(connection)
-
-    conns_by_human = dict()
-    for idx, c in enumerate(connection_all):
-        # at first, all connections belong to different humans
-        conns_by_human['human_%d' % idx] = [c]
-
-    no_merge_cache = defaultdict(list)
-    empty_set = set()
-    while True:
-        is_merged = False
-        for h1, h2 in itertools.combinations(conns_by_human.keys(), 2):
-            if h1 == h2:
-                continue
-            if h2 in no_merge_cache[h1]:
-                continue
-            for c1, c2 in itertools.product(conns_by_human[h1], conns_by_human[h2]):
-                # if two humans share a part (same part idx and coordinates), merge those humans
-                if set(c1['uPartIdx']) & set(c2['uPartIdx']) != empty_set:
-                    is_merged = True
-                    # extend human1 connections with human2 connections
-                    conns_by_human[h1].extend(conns_by_human[h2])
-                    conns_by_human.pop(h2)  # delete human2
-                    break
-            if is_merged:
-                no_merge_cache.pop(h1, None)
-                break
-            else:
-                no_merge_cache[h1].append(h2)
-
-        if not is_merged:  # if no more mergings are possible, then break
-            break
-
-    conns_by_human = {h: conns for (
-        h, conns) in conns_by_human.items() if len(conns) >= Min_Subset_Cnt}
-    conns_by_human = {h: conns for (h, conns) in conns_by_human.items() if max(
-        [conn['score'] for conn in conns]) >= Min_Subset_Score}
-
-    humans = [human_conns_to_human_parts(
-        human_conns, heatMat) for human_conns in conns_by_human.values()]
-    return humans
-
-
-def estimate_pose_pair(coords, partIdx1, partIdx2, pafMatX, pafMatY):
-    connection_temp = []  # all possible connections
-    peak_coord1, peak_coord2 = coords[partIdx1], coords[partIdx2]
-
-    for idx1, (y1, x1) in enumerate(zip(peak_coord1[0], peak_coord1[1])):
-        for idx2, (y2, x2) in enumerate(zip(peak_coord2[0], peak_coord2[1])):
-            score, count = get_score(x1, y1, x2, y2, pafMatX, pafMatY)
-            if (partIdx1, partIdx2) in [(2, 3), (3, 4), (5, 6), (6, 7)]:  # arms
-                if count < InterMinAbove_Threshold // 2 or score <= 0.0:
-                    continue
-            elif count < InterMinAbove_Threshold or score <= 0.0:
-                continue
-            connection_temp.append({
-                'score': score,
-                'coord_p1': (x1, y1),
-                'coord_p2': (x2, y2),
-                'idx': (idx1, idx2),  # connection candidate identifier
-                'partIdx': (partIdx1, partIdx2),
-                'uPartIdx': ('{}-{}-{}'.format(x1, y1, partIdx1), '{}-{}-{}'.format(x2, y2, partIdx2))
-            })
-
-    connection = []
-    used_idx1, used_idx2 = [], []
-    for conn_candidate in sorted(connection_temp, key=lambda x: x['score'], reverse=True):
-        if conn_candidate['idx'][0] in used_idx1 or conn_candidate['idx'][1] in used_idx2:
-            continue
-        connection.append(conn_candidate)
-        used_idx1.append(conn_candidate['idx'][0])
-        used_idx2.append(conn_candidate['idx'][1])
-
-    return connection
-
-
-def get_score(x1, y1, x2, y2, pafMatX, pafMatY):
-    num_inter = 10
-    dx, dy = x2 - x1, y2 - y1
-    normVec = math.sqrt(dx ** 2 + dy ** 2)
-
-    if normVec < 1e-4:
-        return 0.0, 0
-
-    vx, vy = dx / normVec, dy / normVec
-
-    xs = np.arange(
-        x1, x2, dx / num_inter) if x1 != x2 else np.full((num_inter, ), x1)
-    ys = np.arange(
-        y1, y2, dy / num_inter) if y1 != y2 else np.full((num_inter, ), y1)
-    xs = (xs + 0.5).astype(np.int8)
-    ys = (ys + 0.5).astype(np.int8)
-
-    # without vectorization
-    pafXs = np.zeros(num_inter)
-    pafYs = np.zeros(num_inter)
-    for idx, (mx, my) in enumerate(zip(xs, ys)):
-        pafXs[idx] = pafMatX[my][mx]
-        pafYs[idx] = pafMatY[my][mx]
-
-    local_scores = pafXs * vx + pafYs * vy
-    thidxs = local_scores > Inter_Threashold
-
-    return sum(local_scores * thidxs), sum(thidxs)
-
-
-def draw_humans(img1_raw, human_list):
-    img = np.asarray(img1_raw)
-    img_copied = np.copy(img)
-    image_h, image_w = img_copied.shape[:2]
-    centers = {}
-    c = 10
-    for human in human_list:
-        part_idxs = human.keys()
-
-        # draw point
-        draw = ImageDraw.Draw(img1_raw)
-        for i in range(CocoPart.Background.value):
-            if i not in part_idxs:
-                continue
-            part_coord = human[i][1]
-            center = (int(part_coord[0] * image_w + 0.5),
-                      int(part_coord[1] * image_h + 0.5))
-            centers[i] = center
-            bbox = (center[0] - c, center[1] - c, center[0] + c, center[1] + c)
-            draw.ellipse(bbox, fill=CocoColors[i])
-
-        # draw line
-        ctr = 1
-        for pair_order, pair in enumerate(CocoPairsRender):
-            if pair[0] not in part_idxs or pair[1] not in part_idxs:
-                continue
-            draw.line((centers[pair[0]][0], centers[pair[0]][1], centers[pair[1]]
-                       [0], centers[pair[1]][1]), fill=CocoColors[pair_order], width=5)
-    img1_raw = np.asarray(img1_raw)
-    del draw
-    return img1_raw
-
-
-def crop_image(img, humans_list, upper_body, lower_body):
-    upper_coord = 0.0
-    upper_coord_x = 0.0
-    lower_coord = 0.0
-    lower_coord_x = 0.0
-
-    img = np.asarray(img)
-    image_h, image_w = img.shape[:2]
-
-    if upper_body == 'Ankles' or lower_body == 'Eyes':
-        raise NameError('Body parts not consistent')
-
-    for human in humans_list:
-        parts = human.keys()
-        inte = parts_dict[upper_body]  # could be [1] or [2,3]
-
-        if upper_body == 'Nose' or upper_body == 'Neck':
-            upper_coord = human[inte[0]][1][1]  # interested only in heights.
-            upper_coord_x = human[inte[0]][1][0]
-        else:
-            upper_coord = (human[inte[0]][1][1] + human[inte[1]][1][1])/2
-            upper_coord_x = (human[inte[0]][1][0] + human[inte[1]][1][0])/2
-
-        inte = parts_dict[lower_body]
-        if lower_body == 'Nose' or lower_body == 'Neck':
-            lower_coord = human[inte[0]][1][1]  # interested only in heights.
-            lower_coord_x = human[inte[0]][1][0]
-        else:
-            lower_coord = (human[inte[0]][1][1] + human[inte[1]][1][1])/2
-            lower_coord_x = (human[inte[0]][1][0] + human[inte[1]][1][0])/2
-
-    image_h_u = int(upper_coord * image_h)
-    image_h_l = int(lower_coord * image_h)
-
-    image_w_left = int(upper_coord_x * image_w)
-    image_w_right = int(lower_coord_x * image_w)
-    aspect_ratio = image_h / image_w
-    image_w = int((image_w_left + image_w_right)/2)
-
-    img = img[image_h_u:image_h_l]
-    wid = int((img.shape[0]/aspect_ratio)/2)
-    img = img.transpose(1, 0, 2)
-    img = img[image_w-2*wid:image_w+2*wid]
-    img = img.transpose(1, 0, 2)
-
-    crop_position = (image_w-2*wid, image_h_u)
-    crop_size = (img.shape[1], img.shape[0])
-
-    return img, crop_position, crop_size
diff --git a/core/python36AiAction/samples/smart-body-crop/crop.ipynb b/core/python36AiAction/samples/smart-body-crop/crop.ipynb
deleted file mode 100644
index 3ea6633..0000000
--- a/core/python36AiAction/samples/smart-body-crop/crop.ipynb
+++ /dev/null
@@ -1,872 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# AI Action example: Smart Body Crop "
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "This notebook illustrates how a ML engineer develops an algorithm and deploys it in a serverless environment directly from the notebook itself. \n",
-    "\n",
-    "To make it faster to run, the training is skipped. This example reuses a pre-trained OpenPose model to identify a person in a picture, and then crops the body to highlight the desired clothing item.\n",
-    "\n",
-    "### Running the notebook locally\n",
-    "\n",
-    "Simply execute:\n",
-    "        \n",
-    "        $ docker run -it -p 8888:8888 -e OPENWHISK_AUTH=`cat ~/.wskprops | grep ^AUTH= | awk -F= '{print $2}'` -e OPENWHISK_APIHOST=`cat ~/.wskprops | grep ^APIHOST= | awk -F= '{print $2}'` --rm -v `pwd`:/notebooks/sf  --entrypoint jupyter-notebook adobeapiplatform/openwhisk-python3aiaction:0.11.0  --notebook-dir=/notebooks --ip 0.0.0.0 --no-browser --allow-root\n",
-    "\n",
-    "> This command reads the local `~/.wskprops` and uses the Apache OpenWhisk credentials within."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 1,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from matplotlib import pyplot as plt\n",
-    "import matplotlib.patches as patches\n",
-    "\n",
-    "%matplotlib inline\n",
-    "from inference import SmartBodyCrop\n",
-    "from PIL import Image\n",
-    "import numpy as np"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 2,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "#model_url = \"https://s3.amazonaws.com/rt-dev-public-models/openpose/2dw1oz9l9hi9avg/optimized_openpose.pb\"\n",
-    "model_url = \"models/optimized_openpose.pb\"\n",
-    "inf = SmartBodyCrop(model_url = model_url)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Download the deep learning (open pose) model\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 4,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current\n",
-      "                                 Dload  Upload   Total   Spent    Left  Speed\n",
-      "  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0\n",
-      "  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0\n",
-      "100  1204    0  1204    0     0    983      0 --:--:--  0:00:01 --:--:-- 1175k\n",
-      "100  199M  100  199M    0     0  13.8M      0  0:00:14  0:00:14 --:--:-- 19.5M\n"
-     ]
-    }
-   ],
-   "source": [
-    "!mkdir -p models\n",
-    "# Comment the line bellow downloading the model, once you have it locally.\n",
-    "!curl -L https://www.dropbox.com/s/2dw1oz9l9hi9avg/optimized_openpose.pb -o models/optimized_openpose.pb"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 23,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "245\n"
-     ]
-    },
-    {
-     "data": {
-      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAOAAAAEyCAYAAADjrNxxAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4wLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvqOYd8AAAIABJREFUeJzsvWmMpdlZJvicG3dfYs3IiMi1sjbXYuPyboQFRUMzNBpsDxbQRqLNDMJIuH+MhBCeXyCNWuIH02DRqC1bTbctD7SRcYMFlqGxaOzBdrnKlKtcpF1bZlZmRmVmrDfi7us3PyKfE+994z3f/W5UFb41k690db/lfGd/3u1sLooi3KE7dIe+P5T6fmfgDt2h/z/THQDeoTv0faQ7ALxDd+j7SHcAeIfu0PeR7gDwDt2h7yPdAeAdukPfR3rNAOic+0nn3LPOuReccx99rdK5 [...]
-      "text/plain": [
-       "<Figure size 720x360 with 1 Axes>"
-      ]
-     },
-     "metadata": {
-      "needs_background": "light"
-     },
-     "output_type": "display_data"
-    }
-   ],
-   "source": [
-    "image = Image.open('fashion-men-1.jpg')\n",
-    "image.thumbnail( (368,368) )\n",
-    "print(image.size[0])\n",
-    "image = np.asarray(image)\n",
-    "plt.figure(figsize = (10,5))\n",
-    "plt.imshow(image)\n",
-    "plt.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## How algorithm sees the body"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 18,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "image loaded in:      0.1135\n",
-      "Loading the model...\n",
-      "model imported in :     1.5586\n",
-      "tf session executed in:      5.4359\n",
-      "pose estimated in:      0.0048\n"
-     ]
-    },
-    {
-     "data": {
-      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAOAAAAEyCAYAAADjrNxxAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4wLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvqOYd8AAAIABJREFUeJzsvWmQZNd1Jvbd3NfKWru7eu/GwgaanAYFbhIgCeDikRRkkBwZrSXCQ40miLFFWZKt8Ij8YVPhiJmQ7bHGntHEhKkwNaRojgTQJqUQGRQXU9JoOFxAgqBALASIbnR3dXdV15JVWZV75vOPrHPrvJPn3vdedYNMhPtEZOR799393u+cc8/dTBAEuE236Tb9aCj1o87AbbpN/3+m2wC8TbfpR0i3AXibbtOPkG4D8Dbdph8h3QbgbbpNP0K6DcDbdJt+hPSKAdAY8zPGmOeN [...]
-      "text/plain": [
-       "<Figure size 720x360 with 1 Axes>"
-      ]
-     },
-     "metadata": {
-      "needs_background": "light"
-     },
-     "output_type": "display_data"
-    }
-   ],
-   "source": [
-    "x = inf.detect_parts('fashion-men-1.jpg')\n",
-    "#x = inf.detect_parts('https://cdn.shopify.com/s/files/1/1970/6605/products/Pioneer-Camp-2017-spring-new-fashion-men-shirt-long-sleeve-brand-clothing-quality-cotton-soft-shirt_e262fa2c-a279-4190-9cf7-707982189e9e.jpg?v=1501310825')\n",
-    "plt.figure(figsize=(10,5))\n",
-    "plt.imshow(x)\n",
-    "plt.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Bodycrop based on detected body parts"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 20,
-   "metadata": {
-    "scrolled": false
-   },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "image (3840x5760) loaded in:      0.1047\n",
-      "Loading the model...\n",
-      "model imported in :     1.4202\n",
-      "tf session executed in:      5.4498\n",
-      "pose estimated in:      0.0051\n",
-      "image cropped in:      0.0002\n"
-     ]
-    },
-    {
-     "data": {
-      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAUEAAAD8CAYAAADpLRYuAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4wLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvqOYd8AAAIABJREFUeJztnXuwbVdV5n+TG94Y8uTm5t48LiRFKjwSrolEiYAGFWyUUCKiFk0LXfnHRrSsUmj/sLrKKttqS6CraLujIHRXikdDeAi0PCKUUqUhSScGSEhIbl73ksclD0BQIbL6j3O+s8ceZ6x55lp7n3327T2+qlNn773WmnOuudZe+xtjfGPM0nUdiUQisap4zE4PIJFIJHYS+RBMJBIrjXwIJhKJlUY+BBOJxEojH4KJRGKlkQ/BRCKx0siHYCKRWGnM9BAspby0lHJLKeW2Usqb [...]
-      "text/plain": [
-       "<Figure size 576x288 with 1 Axes>"
-      ]
-     },
-     "metadata": {
-      "needs_background": "light"
-     },
-     "output_type": "display_data"
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "CPU times: user 8.79 s, sys: 1.87 s, total: 10.7 s\n",
-      "Wall time: 5.78 s\n"
-     ]
-    }
-   ],
-   "source": [
-    "%%time\n",
-    "x, coordinates, imgpath = inf.infer('fashion-men-1.jpg','Eyes','Hips')\n",
-    "plt.figure(figsize = (8,4))\n",
-    "plt.imshow(x)\n",
-    "plt.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Draw the crop coordinates on the original image"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 22,
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAZwAAAJCCAYAAAD0nXH7AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4wLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvqOYd8AAAIABJREFUeJzsvUtzXNl17/k/efKdCeQDCYB4EeCbVWSV6mFVWQ7Zcjh05Q5PenpveODZjXBEf4D+Av0l7syzjh560OEeeOCwwrYsyVKpqshiFd8EQbwTyAfynacHqd/KladQKqo7usRw40QwSAKZ5+yz93r813+tvXYQRZEurovr4rq4Lq6L6//rK/GHHsDFdXFdXBfXxfX/j+vC4VxcF9fFdXFdXN/JdeFwLq6L6+K6uC6u7+S6cDgX18V1cV1cF9d3cl04nIvr4rq4Lq6L6zu5LhzO [...]
-      "text/plain": [
-       "<Figure size 1008x720 with 1 Axes>"
-      ]
-     },
-     "metadata": {
-      "needs_background": "light"
-     },
-     "output_type": "display_data"
-    }
-   ],
-   "source": [
-    "img = Image.open('fashion-men-1.jpg')\n",
-    "\n",
-    "# Create figure and axes\n",
-    "fig,ax = plt.subplots(1,figsize=(14,10))\n",
-    "ax.imshow(img)\n",
-    "\n",
-    "# Create a Rectangle patch\n",
-    "rect = patches.Rectangle(\n",
-    "    (coordinates.get('x'),coordinates.get('y')),\n",
-    "    coordinates.get('width'),coordinates.get('height'),\n",
-    "    linewidth = 3, \n",
-    "    edgecolor = 'r',\n",
-    "    facecolor = 'none')\n",
-    "\n",
-    "# Add the patch to the Axes\n",
-    "ax.add_patch(rect)\n",
-    "plt.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Test with a remote image "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 24,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "image downloaded in :     0.3233\n",
-      "image (2192x4299) loaded in:      0.4292\n",
-      "Loading the model...\n",
-      "model imported in :     1.6012\n",
-      "tf session executed in:      5.5260\n",
-      "pose estimated in:      0.0030\n",
-      "image cropped in:      0.0001\n"
-     ]
-    },
-    {
-     "data": {
-      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQUAAAD8CAYAAAB+fLH0AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4wLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvqOYd8AAAIABJREFUeJzsvVvMbUt2HvRV1byuy3/b971Pn3O63afdbZu0jU0sgRRFWCgQLIwQskiQ5YClfgIFAcIOT7wgmRfA4iFSi4CMFCkJYGEeokTIOFEcQtPdtsGOu0/36XPfl7Mv/21d56Vq8FBjVNVc//r35VzaO9Ia0t5r/WvOWbPmnDWrxvjGN8ZQRISd7GQnOxHRf9od2MlOdvJyyW5S2MlOdjKQ3aSwk53sZCC7SWEnO9nJQHaTwk52spOB7CaFnexkJwP5zCYFpdS/qpR6Uyn1llLq [...]
-      "text/plain": [
-       "<Figure size 576x288 with 1 Axes>"
-      ]
-     },
-     "metadata": {
-      "needs_background": "light"
-     },
-     "output_type": "display_data"
-    }
-   ],
-   "source": [
-    "# https://i.pinimg.com/736x/eb/61/fa/eb61fa047dcd0a20001392c13da93709--mens-fashion-blog-mens-fashion-styles.jpg\n",
-    "# 2192x4299 - https://i.pinimg.com/originals/9c/96/87/9c968732595e965619ef7b0b7e4807e0.jpg\n",
-    "x, coordinates, imgpath = inf.infer('https://i.pinimg.com/originals/9c/96/87/9c968732595e965619ef7b0b7e4807e0.jpg',\n",
-    "                           'Eyes',\n",
-    "                           'Hips')\n",
-    "plt.figure(figsize = (8,4))\n",
-    "plt.imshow(x)\n",
-    "plt.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# Deploy the algorithm as a function"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## 1. Write the function\n",
-    "\n",
-    "The function expects the following parameters as input:\n",
-    "* `model_url` - the location of the model\n",
-    "* `image` - the image location. It can be given as a request object, or a location string (provided no extra authorization headers are required to read the file). I.e. \n",
-    "  ```javascript\n",
-    "  {\n",
-    "      uri: \"https://...\",\n",
-    "      headers: {\n",
-    "          \"Authorization\": \"Bearer ...\",\n",
-    "      }\n",
-    "  }\n",
-    "  ```\n",
-    "* `from_upper` - the upper part of the body to start the crop from. I.e. _Eyes_, _Nose_, _Neck_\n",
-    "* `to_lower` - the lower part of the body to stop the crop at. I.e. _Hip_, _Knees_, _Ankles_\n",
-    "\n",
-    "For flexibility, this function returns only the information needed to crop the body. I.e. \n",
-    "```javascript\n",
-    "{\n",
-    "  X: 100,\n",
-    "  Y: 100,\n",
-    "  W: 200,\n",
-    "  H: 100\n",
-    "}\n",
-    "```\n",
-    "\n",
-    "On the premise that the cropped image may exceed the max response size of an action, the actual cropping may be performed by another action, which should upload the cropped image to a blob storage. Bellow is the code that can crop the image based on the coordinates \n",
-    "\n",
-    "```python\n",
-    "from PIL import Image\n",
-    "import os\n",
-    "\n",
-    "img_crop = Image.open(local_image_path)\n",
-    "\n",
-    "img_crop = img_crop.crop(\n",
-    "    (coordinates.get('X'),                              # left \n",
-    "     coordinates.get('Y'),                              # upper\n",
-    "     coordinates.get('X') + coordinates.get('W'),       # right\n",
-    "     coordinates.get('Y') + coordinates.get('H')))      # lower\n",
-    "     \n",
-    "img_crop_filename = (os.environ.get('__OW_ACTIVATION_ID') or '_local') + \".jpg\"\n",
-    "img_crop_path = '/tmp/' + img_crop_filename\n",
-    "\n",
-    "img_crop.save(img_crop_path, \"JPEG\", optimize=True)\n",
-    "\n",
-    "print(\"The cropped image has been saved in:\", img_crop_path)\n",
-    "```"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 25,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Overwriting smart_body_crop.py\n"
-     ]
-    }
-   ],
-   "source": [
-    "%%writefile smart_body_crop.py\n",
-    "\n",
-    "from inference import SmartBodyCrop\n",
-    "import os\n",
-    "\n",
-    "def action_handler(args):\n",
-    "    print(args)\n",
-    "    model_url = args.get('model_url')\n",
-    "    \n",
-    "    body_crop = SmartBodyCrop(model_url = model_url)\n",
-    "    print(\"SmartBodyCrop.initialized=\", SmartBodyCrop.initialized)\n",
-    "    \n",
-    "    crop_img, crop_coordinates, local_image_path = body_crop.infer(\n",
-    "                    args.get('image'), \n",
-    "                    args.get('from_upper'), \n",
-    "                    args.get('to_lower'))\n",
-    "    \n",
-    "    # if you want to crop the image, you can insert the code demonstrated above\n",
-    "    # then return the image as a base64 encoded string in the response body\n",
-    "    \n",
-    "    return {\n",
-    "        'X': crop_coordinates.get('x'),\n",
-    "        'Y': crop_coordinates.get('y'),\n",
-    "        'W': crop_coordinates.get('width'),\n",
-    "        'H': crop_coordinates.get('height')\n",
-    "    }\n",
-    "    "
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Test the function locally"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 26,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "{'model_url': 'models/optimized_openpose.pb', 'image': 'https://i.pinimg.com/originals/9c/96/87/9c968732595e965619ef7b0b7e4807e0.jpg', 'from_upper': 'Eyes', 'to_lower': 'Elbows'}\n",
-      "SmartBodyCrop.initialized= False\n",
-      "image downloaded in :     0.1114\n",
-      "image (2192x4299) loaded in:      0.2350\n",
-      "Loading the model...\n",
-      "model imported in :     1.5238\n",
-      "tf session executed in:      5.0700\n",
-      "pose estimated in:      0.0056\n",
-      "image cropped in:      0.0002\n",
-      "{'H': 1028.0217391304348, 'Y': 467.2826086956522, 'W': 1031.5294117647059, 'X': 550.9304812834225}\n",
-      "CPU times: user 6.18 s, sys: 1.88 s, total: 8.06 s\n",
-      "Wall time: 5.35 s\n"
-     ]
-    }
-   ],
-   "source": [
-    "%%time\n",
-    "from smart_body_crop import action_handler\n",
-    "action_response = action_handler({ \n",
-    "    'model_url': model_url,\n",
-    "    'image': \"https://i.pinimg.com/originals/9c/96/87/9c968732595e965619ef7b0b7e4807e0.jpg\",\n",
-    "    'from_upper': 'Eyes',\n",
-    "    'to_lower': 'Elbows'})\n",
-    "\n",
-    "print(action_response)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Verify that the returned coordinates are correct"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 27,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "image downloaded in :     0.1307\n",
-      "(2192, 4299)\n"
-     ]
-    },
-    {
-     "data": {
-      "image/png": "iVBORw0KGgoAAAANSUhEUgAAANkAAADGCAYAAABfPiU4AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4wLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvqOYd8AAAIABJREFUeJzsvdmvbNl93/dZa+255jrzPXfq2+yBTYqjJWowHUGSJVuwID8EgoIgsIMAfkj8ECAPcf4DI0gCGwkgQEEerCSIbcEZnEhJ4EgiZIkmKVFmk+xu9u3ue2/f6cx1atzjGvKwa9epc/uSajbFVoO4P6Bw6uzau2rvtdZv/v5+SzjneEbP6Bn98Ej+Zd/AM3pGP+r0jMme0TP6IdMzJntGz+iHTM+Y7Bk9ox8yPWOyZ/SMfsj0jMme0TP6IdOHzmRCiL8hhHhTCPG2EOIffNi/ [...]
-      "text/plain": [
-       "<Figure size 504x216 with 1 Axes>"
-      ]
-     },
-     "metadata": {
-      "needs_background": "light"
-     },
-     "output_type": "display_data"
-    }
-   ],
-   "source": [
-    "imgpath = inf._download_image('https://i.pinimg.com/originals/9c/96/87/9c968732595e965619ef7b0b7e4807e0.jpg')\n",
-    "image = Image.open(imgpath)\n",
-    "print(image.size)\n",
-    "\n",
-    "img_crop = image.crop(\n",
-    "    (action_response.get('X'),                              # left \n",
-    "     action_response.get('Y'),                              # upper\n",
-    "     action_response.get('X') + action_response.get('W'),       # right\n",
-    "     action_response.get('Y') + action_response.get('H')))      # lower\n",
-    "\n",
-    "image = np.asarray(img_crop)\n",
-    "plt.figure(figsize = (7,3))\n",
-    "plt.imshow(image)\n",
-    "plt.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## 2. Configure Apache OpenWhisk as the FaaS Provider "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 28,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Installing wsk CLI ...\n",
-      "wsk\n",
-      "NOTICE.txt\n",
-      "README.md\n",
-      "LICENSE.txt\n"
-     ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current\n",
-      "                                 Dload  Upload   Total   Spent    Left  Speed\n",
-      "\r",
-      "  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0\r",
-      "  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0\r",
-      "100   626    0   626    0     0   1501      0 --:--:-- --:--:-- --:--:--  1538\n",
-      "\r",
-      "  3 3845k    3  135k    0     0   115k      0  0:00:33  0:00:01  0:00:32  115k\r",
-      " 58 3845k   58 2260k    0     0  1050k      0  0:00:03  0:00:02  0:00:01 2172k\r",
-      "100 3845k  100 3845k    0     0  1344k      0  0:00:02  0:00:02 --:--:-- 2202k\n"
-     ]
-    }
-   ],
-   "source": [
-    "%%bash\n",
-    "which wsk && exit\n",
-    "echo \"Installing wsk CLI ...\"\n",
-    "curl -L https://github.com/apache/incubator-openwhisk-cli/releases/download/latest/OpenWhisk_CLI-latest-linux-amd64.tgz -o /tmp/wsk.tgz \n",
-    "tar xvfz /tmp/wsk.tgz -C /tmp/\n",
-    "mv /tmp/wsk /usr/local/bin"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Configure Apache OpenWhisk credentials\n",
-    "\n",
-    "Use `OPENWHISK_AUTH` and `OPENWHISK_APIHOST` environment variables."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 29,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from pathlib import Path\n",
-    "import os\n",
-    "home = str(Path.home())\n",
-    "file = open(home + \"/.wskprops\",\"w\") \n",
-    "file.write('AUTH=' + os.environ.get('OPENWHISK_AUTH') + \"\\n\")\n",
-    "file.write('APIHOST=' + os.environ.get('OPENWHISK_APIHOST') + \"\\n\")\n",
-    "file.close()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## 3. Deploy the function"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "The function must ZIP the other dependent python scripts used to train the model. The action code must be placed in a file called `__main__.py`."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 30,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "  adding: smart_body_crop.py (deflated 58%)\n",
-      "  adding: common.py (deflated 68%)\n",
-      "  adding: inference.py (deflated 73%)\n",
-      "  adding: __main__.py (deflated 58%)\n"
-     ]
-    }
-   ],
-   "source": [
-    "%%bash\n",
-    "mkdir -p action_package\n",
-    "\n",
-    "cp smart_body_crop.py action_package/__main__.py\n",
-    "cp *.py action_package/\n",
-    "cd action_package && zip -9 -r ../action.zip ./"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 31,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "model_url = \"https://s3.amazonaws.com/rt-dev-public-models/openpose/2dw1oz9l9hi9avg/optimized_openpose.pb\""
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 32,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "ok: updated action smart_body_crop\n"
-     ]
-    }
-   ],
-   "source": [
-    "%%bash -s \"$model_url\"\n",
-    "\n",
-    "wsk action update smart_body_crop action.zip --main action_handler  \\\n",
-    "    --param model_url \"$1\" \\\n",
-    "    --param from_upper Eyes \\\n",
-    "    --param to_lower Hips \\\n",
-    "    --memory 3891 \\\n",
-    "    --docker adobeapiplatform/openwhisk-python3aiaction:0.11.0"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 33,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# !wsk action get smart_body_crop"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## 4. Invoke the function"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 34,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\u001b[32mok:\u001b[0m invoked /\u001b[1m_\u001b[0m/\u001b[1msmart_body_crop\u001b[0m with id \u001b[1m6c1536170686492a9536170686692a59\u001b[0m\r\n"
-     ]
-    }
-   ],
-   "source": [
-    "!wsk action invoke smart_body_crop --param image \"https://i.pinimg.com/originals/9c/96/87/9c968732595e965619ef7b0b7e4807e0.jpg\" \\\n",
-    "  --param from_upper Eyes --param to_lower Elbows\n",
-    "# !wsk action invoke smart_body_crop --param image \"https://i.pinimg.com/236x/17/1c/a6/171ca6b06111529aa6f10b1f4e418339--style-men-my-style.jpg\" \\\n",
-    "#   --param from_upper Eyes --param to_lower Elbows"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Note on first run \n",
-    "On initial run the function has to:\n",
-    "* download the model\n",
-    "* initialize tensorflow \n",
-    "\n",
-    "These steps will take a few seconds."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 35,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\u001b[32mok:\u001b[0m got activation \u001b[1m6c1536170686492a9536170686692a59\u001b[0m\r\n",
-      "{\r\n",
-      "    \"namespace\": \"bladerunner-test\",\r\n",
-      "    \"name\": \"smart_body_crop\",\r\n",
-      "    \"version\": \"0.0.20\",\r\n",
-      "    \"subject\": \"bladerunner-test\",\r\n",
-      "    \"activationId\": \"6c1536170686492a9536170686692a59\",\r\n",
-      "    \"start\": 1545333346300,\r\n",
-      "    \"end\": 1545333353046,\r\n",
-      "    \"duration\": 6746,\r\n",
-      "    \"response\": {\r\n",
-      "        \"status\": \"success\",\r\n",
-      "        \"statusCode\": 0,\r\n",
-      "        \"success\": true,\r\n",
-      "        \"result\": {\r\n",
-      "            \"H\": 1028.0217391304348,\r\n",
-      "            \"W\": 1031.5294117647059,\r\n",
-      "            \"X\": 550.9304812834225,\r\n",
-      "            \"Y\": 467.2826086956522\r\n",
-      "        }\r\n",
-      "    },\r\n",
-      "    \"logs\": [],\r\n",
-      "    \"annotations\": [\r\n",
-      "        {\r\n",
-      "            \"key\": \"path\",\r\n",
-      "            \"value\": \"bladerunner-test/smart_body_crop\"\r\n",
-      "        },\r\n",
-      "        {\r\n",
-      "            \"key\": \"waitTime\",\r\n",
-      "            \"value\": 2787\r\n",
-      "        },\r\n",
-      "        {\r\n",
-      "            \"key\": \"kind\",\r\n",
-      "            \"value\": \"blackbox\"\r\n",
-      "        },\r\n",
-      "        {\r\n",
-      "            \"key\": \"limits\",\r\n",
-      "            \"value\": {\r\n",
-      "                \"concurrency\": 1,\r\n",
-      "                \"logs\": 10,\r\n",
-      "                \"memory\": 3891,\r\n",
-      "                \"timeout\": 60000\r\n",
-      "            }\r\n",
-      "        },\r\n",
-      "        {\r\n",
-      "            \"key\": \"initTime\",\r\n",
-      "            \"value\": 19\r\n",
-      "        }\r\n",
-      "    ],\r\n",
-      "    \"publish\": false\r\n",
-      "}\r\n"
-     ]
-    }
-   ],
-   "source": [
-    "!wsk activation get 6c1536170686492a9536170686692a59"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Invoke the action again\n",
-    "\n",
-    "This time it should respond much faster as it has been pre-warmed."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 36,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\u001b[32mok:\u001b[0m invoked /\u001b[1m_\u001b[0m/\u001b[1msmart_body_crop\u001b[0m with id \u001b[1mfd729b5d47e6415ab29b5d47e6915aa9\u001b[0m\n",
-      "{\n",
-      "    \"activationId\": \"fd729b5d47e6415ab29b5d47e6915aa9\",\n",
-      "    \"annotations\": [\n",
-      "        {\n",
-      "            \"key\": \"limits\",\n",
-      "            \"value\": {\n",
-      "                \"concurrency\": 1,\n",
-      "                \"logs\": 10,\n",
-      "                \"memory\": 3891,\n",
-      "                \"timeout\": 60000\n",
-      "            }\n",
-      "        },\n",
-      "        {\n",
-      "            \"key\": \"path\",\n",
-      "            \"value\": \"bladerunner-test/smart_body_crop\"\n",
-      "        },\n",
-      "        {\n",
-      "            \"key\": \"kind\",\n",
-      "            \"value\": \"blackbox\"\n",
-      "        },\n",
-      "        {\n",
-      "            \"key\": \"waitTime\",\n",
-      "            \"value\": 6\n",
-      "        }\n",
-      "    ],\n",
-      "    \"duration\": 2160,\n",
-      "    \"end\": 1545333364902,\n",
-      "    \"logs\": [],\n",
-      "    \"name\": \"smart_body_crop\",\n",
-      "    \"namespace\": \"bladerunner-test\",\n",
-      "    \"publish\": false,\n",
-      "    \"response\": {\n",
-      "        \"result\": {\n",
-      "            \"H\": 1028.0217391304348,\n",
-      "            \"W\": 1031.5294117647059,\n",
-      "            \"X\": 550.9304812834225,\n",
-      "            \"Y\": 467.2826086956522\n",
-      "        },\n",
-      "        \"status\": \"success\",\n",
-      "        \"success\": true\n",
-      "    },\n",
-      "    \"start\": 1545333362742,\n",
-      "    \"subject\": \"bladerunner-test\",\n",
-      "    \"version\": \"0.0.20\"\n",
-      "}\n",
-      "CPU times: user 100 ms, sys: 140 ms, total: 240 ms\n",
-      "Wall time: 3.42 s\n"
-     ]
-    }
-   ],
-   "source": [
-    "%%time\n",
-    "!wsk action invoke smart_body_crop --param image \"https://i.pinimg.com/originals/9c/96/87/9c968732595e965619ef7b0b7e4807e0.jpg\" \\\n",
-    "  --param from_upper Eyes --param to_lower Elbows -b"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.5.2"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/core/python36AiAction/samples/smart-body-crop/fashion-men-1.jpg b/core/python36AiAction/samples/smart-body-crop/fashion-men-1.jpg
deleted file mode 100644
index 8d440e3..0000000
Binary files a/core/python36AiAction/samples/smart-body-crop/fashion-men-1.jpg and /dev/null differ
diff --git a/core/python36AiAction/samples/smart-body-crop/inference.py b/core/python36AiAction/samples/smart-body-crop/inference.py
deleted file mode 100644
index dbb5f87..0000000
--- a/core/python36AiAction/samples/smart-body-crop/inference.py
+++ /dev/null
@@ -1,246 +0,0 @@
-"""Executable Python script for running Python actions.
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-"""
-
-'''
-Some is based on Ildoo Kim's code (https://github.com/ildoonet/tf-openpose) and https://gist.github.com/alesolano/b073d8ec9603246f766f9f15d002f4f4
-and derived from the OpenPose Library (https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/LICENSE)
-'''
-
-import tensorflow as tf
-import numpy as np
-from PIL import Image
-from tensorflow.core.framework import graph_pb2
-import urllib3
-import certifi
-import os
-import shutil
-
-from common import estimate_pose, crop_image, draw_humans
-
-import time
-
-
-def print_time(message, start):
-    print(message, "{:10.4f}".format(time.time() - start))
-    return time.time()
-
-
-class SmartBodyCrop:
-    initialized = False
-    tmp_path = '/tmp/'
-    tmpfs_path = '/mnt/action/'
-
-    def __init__(self, model_url):
-        self.model_url = model_url
-
-    def read_img(self, imgpath, width, height):
-        img = Image.open(imgpath)
-        orig_width, orig_height = img.size
-        # resize the image to match openpose's training data
-        # https://github.com/ildoonet/tf-pose-estimation#inference-time
-        img.thumbnail((width, height))
-        thumbnail_w, thumbnail_h = img.size
-        #val_img = val_img.resize((width, height))
-        val_img = np.asarray(img, dtype=np.float32)
-        val_img = val_img.reshape([1, thumbnail_h, thumbnail_w, 3])
-        # val_img = val_img.astype(float)
-        val_img = val_img * (2.0 / 255.0) - 1.0  # normalization
-
-        return val_img, img, orig_width, orig_height
-
-    def _download_model(self):
-        # check if the model is a ref to local file path
-        if type(self.model_url) is str:
-            if not self.model_url.startswith('http'):
-                return self.model_url
-
-        start = time.time()
-        local_model_path = SmartBodyCrop.tmp_path + 'optimized_openpose.pb'
-        tmpfs_model_path = SmartBodyCrop.tmpfs_path + 'optimized_openpose.pb'
-
-        if (os.path.isfile(local_model_path)):
-            print_time("model was found in the local storage: " +
-                       local_model_path, start)
-            return local_model_path
-
-        # check if this model was downloaded by another invocation in the tmpfs path
-        if (os.path.isfile(tmpfs_model_path)):
-            print_time("model was found in the tmpfs storage: " +
-                       tmpfs_model_path, start)
-            shutil.copy(tmpfs_model_path, local_model_path)
-            print_time("model copied FROM tmpfs:" + tmpfs_model_path, start)
-            return local_model_path
-
-        http = urllib3.PoolManager(
-            cert_reqs='CERT_REQUIRED',
-            ca_certs=certifi.where(),
-            headers={
-                'Accept': 'application/octet-stream',
-                'Content-Type': 'application/octet-stream'
-            })
-        urllib3.disable_warnings()
-
-        r = http.request('GET', self.model_url,
-                         preload_content=False,
-                         retries=urllib3.Retry(5, redirect=5))
-
-        with open(local_model_path, 'wb') as out:
-            while True:
-                data = r.read(8192)  # 64 # 8192
-                if not data:
-                    break
-                out.write(data)
-
-        r.release_conn()
-        print_time("model downloaded in :", start)
-
-        # copy the file to the tmpfs_model_path to be reused by other actions
-        # this seems to work concurrently as per: https://stackoverflow.com/questions/35605463/why-is-concurrent-copy-of-a-file-not-failing
-        if (os.path.isdir(SmartBodyCrop.tmpfs_path)):
-            shutil.copy(local_model_path, tmpfs_model_path)
-            print_time("model copied to tmpfs:" + tmpfs_model_path, start)
-
-        return local_model_path
-
-    def _download_image(self, image):
-        start = time.time()
-        headers = {}
-        image_url = image
-        local_image_path = SmartBodyCrop.tmp_path + 'image'
-        if type(image) is dict:
-            headers = image.get('headers')
-            image_url = image.get('uri')
-        # check if the image is a local file path
-        if type(image) is str:
-            if not image.startswith('http'):
-                return image
-
-        http = urllib3.PoolManager(
-            cert_reqs='CERT_REQUIRED',
-            ca_certs=certifi.where(),
-            headers=headers)
-        urllib3.disable_warnings()
-
-        r = http.request('GET', image_url,
-                         preload_content=False,
-                         retries=urllib3.Retry(5, redirect=5))
-
-        with open(local_image_path, 'wb') as out:
-            while True:
-                data = r.read(1024)  # 8192
-                if not data:
-                    break
-                out.write(data)
-
-        r.release_conn()
-        print_time("image downloaded in :", start)
-        return local_image_path
-
-    def load_graph_def(self):
-        start = time.time()
-
-        local_model_path = self._download_model()
-
-        tf.reset_default_graph()
-        graph_def = graph_pb2.GraphDef()
-        with open(local_model_path, 'rb') as f:
-            graph_def.ParseFromString(f.read())
-        tf.import_graph_def(graph_def, name='')
-
-        start = print_time("model imported in :", start)
-        start = time.time()
-
-        # SmartBodyCrop.initialized = True
-
-    def infer(self, image, upper_body, lower_body):
-        start = time.time()
-
-        imgpath = self._download_image(image)
-        image, thumbnail, input_width, input_height = self.read_img(
-            imgpath, 368, 368)
-        start = print_time("image (" + str(input_width) +
-                           "x" + str(input_height) + ") loaded in: ", start)
-
-        if not SmartBodyCrop.initialized:
-            print("Loading the model...")
-            self.load_graph_def()
-
-        with tf.Session() as sess:
-            inputs = tf.get_default_graph().get_tensor_by_name('inputs:0')
-            heatmaps_tensor = tf.get_default_graph().get_tensor_by_name(
-                'Mconv7_stage6_L2/BiasAdd:0')
-            pafs_tensor = tf.get_default_graph().get_tensor_by_name(
-                'Mconv7_stage6_L1/BiasAdd:0')
-
-            heatMat, pafMat = sess.run(
-                [heatmaps_tensor, pafs_tensor], feed_dict={inputs: image})
-
-            start = print_time("tf session executed in: ", start)
-
-            humans = estimate_pose(heatMat[0], pafMat[0])
-            start = print_time("pose estimated in: ", start)
-            # send the thumbnail to render an initial crop
-            img, crop_position, crop_size = crop_image(
-                thumbnail, humans, upper_body, lower_body)
-            # scale back the crop_coordinates to match the original picture size
-            scale_factor_w = input_width / thumbnail.size[0]
-            scale_factor_h = input_height / thumbnail.size[1]
-            crop_coordinates = {
-                'x':      crop_position[0] * scale_factor_w,
-                'y':      crop_position[1] * scale_factor_h,
-                'width':  crop_size[0] * scale_factor_w,
-                'height': crop_size[1] * scale_factor_h
-            }
-
-            start = print_time("image cropped in: ", start)
-
-            sess.close()
-            return img, crop_coordinates, imgpath
-
-    def detect_parts(self, image):
-        start = time.time()
-
-        imgpath = self._download_image(image)
-        image, thumbnail, input_width, input_height = self.read_img(
-            imgpath, 368, 368)
-        start = print_time("image loaded in: ", start)
-
-        if not SmartBodyCrop.initialized:
-            print("Loading the model...")
-            self.load_graph_def()
-
-        with tf.Session() as sess:
-            inputs = tf.get_default_graph().get_tensor_by_name('inputs:0')
-            heatmaps_tensor = tf.get_default_graph().get_tensor_by_name(
-                'Mconv7_stage6_L2/BiasAdd:0')
-            pafs_tensor = tf.get_default_graph().get_tensor_by_name(
-                'Mconv7_stage6_L1/BiasAdd:0')
-
-            heatMat, pafMat = sess.run(
-                [heatmaps_tensor, pafs_tensor], feed_dict={inputs: image})
-
-            start = print_time("tf session executed in: ", start)
-
-            humans = estimate_pose(heatMat[0], pafMat[0])
-            start = print_time("pose estimated in: ", start)
-
-            # display
-            img1 = draw_humans(thumbnail, humans)
-            return img1
diff --git a/settings.gradle b/settings.gradle
index 55fd2e4..2976fbe 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -18,7 +18,6 @@
 include 'tests'
 
 include 'core:python3Action'
-include 'core:python36AiAction'
 include 'core:python39Action'
 include 'core:python310Action'
 include 'core:python311Action'
diff --git a/tests/src/test/scala/runtime/actionContainers/Python36AiTests.scala b/tests/src/test/scala/runtime/actionContainers/Python36AiTests.scala
deleted file mode 100644
index 931489f..0000000
--- a/tests/src/test/scala/runtime/actionContainers/Python36AiTests.scala
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package runtime.actionContainers
-
-import org.junit.runner.RunWith
-import org.scalatest.junit.JUnitRunner
-import spray.json._
-import DefaultJsonProtocol._
-
-@RunWith(classOf[JUnitRunner])
-class Python36AiTests extends Python37Tests {
-
-  override lazy val imageName = "action-python-v3.6-ai"
-
-  override lazy val zipPrefix = "python-v3.6-ai"
-
-  override lazy val errorCodeOnRun = false
-
-  override val testNoSource = TestConfig("", hasCodeStub = false)
-
-  it should "run tensorflow" in {
-    val (out, err) = withActionContainer() { c =>
-      val code =
-        """
-          |import tensorflow as tf
-          |def main(args):
-          |   # Initialize two constants
-          |   x1 = tf.constant([1,2,3,4])
-          |   x2 = tf.constant([5,6,7,8])
-          |
-          |   # Multiply
-          |   result = tf.multiply(x1, x2)
-          |
-          |   # Initialize Session and run `result`
-          |   with tf.Session() as sess:
-          |       output = sess.run(result)
-          |       print(output)
-          |       return { "response": output.tolist() }
-        """.stripMargin
-
-      val (initCode, res) = c.init(initPayload(code))
-      initCode should be(200)
-
-      val (runCode, runRes) = c.run(runPayload(JsObject()))
-      runCode should be(200)
-
-      runRes shouldBe defined
-      runRes should be(Some(JsObject("response" -> List(5, 12, 21, 32).toJson)))
-    }
-  }
-
-  it should "run pytorch" in {
-    val (out, err) = withActionContainer() { c =>
-      val code =
-        """
-          |import torch
-          |import torchvision
-          |import torch.nn as nn
-          |import numpy as np
-          |import torchvision.transforms as transforms
-          |def main(args):
-          |   # Create a numpy array.
-          |   x = np.array([1,2,3,4])
-          |
-          |   # Convert the numpy array to a torch tensor.
-          |   y = torch.from_numpy(x)
-          |
-          |   # Convert the torch tensor to a numpy array.
-          |   z = y.numpy()
-          |   return { "response": z.tolist()}
-        """.stripMargin
-
-      val (initCode, res) = c.init(initPayload(code))
-      initCode should be(200)
-
-      val (runCode, runRes) = c.run(runPayload(JsObject()))
-      runCode should be(200)
-
-      runRes shouldBe defined
-      runRes should be(Some(JsObject("response" -> List(1, 2, 3, 4).toJson)))
-    }
-  }
-
-  it should "support numpy" in {
-    val (out, err) = withActionContainer() { c =>
-      val code =
-        """
-          |import numpy as np
-          |def main(args):
-          |   a = np.arange(15).reshape(3, 5).tolist()
-          |   return { "array": a }
-        """.stripMargin
-
-      // action loop detects those errors at init time
-      val (initCode, initRes) = c.init(initPayload(code))
-      initCode should be(200)
-
-      val (runCode, runRes) = c.run(runPayload(JsObject()))
-      runCode should be(200)
-      runRes.get.fields.get("array") should not be empty
-    }
-
-    checkStreams(out, err, {
-      case (o, e) =>
-        o shouldBe empty
-        e shouldBe empty
-    })
-  }
-
-  it should "detect numpy failures" in {
-    val (out, err) = withActionContainer() { c =>
-      val code =
-        """
-          |import numpy as np
-          |def main(args):
-          |   a = np.arange(15).reshape(3, 5)
-          |   return { "array": a }
-        """.stripMargin
-
-      // action loop detects those errors at init time
-      val (initCode, initRes) = c.init(initPayload(code))
-      initCode should be(200)
-
-      val (runCode, _) = c.run(runPayload(JsObject()))
-      runCode should be(400)
-    }
-
-    checkStreams(out, err, {
-      case (o, e) =>
-        o shouldBe empty
-        e should include("Object of type 'ndarray' is not JSON serializable")
-    })
-  }
-}
diff --git a/tutorials/local_build.md b/tutorials/local_build.md
index 51e0655..ab0c33f 100644
--- a/tutorials/local_build.md
+++ b/tutorials/local_build.md
@@ -34,7 +34,7 @@ cd openwhisk-runtime-python
 
 Build docker image using Python 3.7 (recommended). This tutorial assumes you're building with python 3.7.
 Run `local_build.sh` to build docker. This script takes two parameters as input
-- `-r` Specific runtime image folder name to be built, it can be one of `python3Action`, `python36AiAction`, `python39Action` or `python310Action`
+- `-r` Specific runtime image folder name to be built, it can be one of `python3Action`, `python39Action`, `python310Action`, or `python311Action`
 - `-t` The name for docker image and tag used for building the docker image. Example: `action-python-v3.7:1.0-SNAPSHOT`
 
 ```
diff --git a/tutorials/local_build.sh b/tutorials/local_build.sh
index 3dd334c..d6f16ea 100755
--- a/tutorials/local_build.sh
+++ b/tutorials/local_build.sh
@@ -20,7 +20,7 @@ helperInstructions()
 {
    echo ""
    echo "Usage: $0 -r runtimeParameter -t dockerImageTag"
-   echo -e "\t-r Specific runtime image folder name to be built, it can be one of python3Action, python36AiAction, python39Action or python310Action"
+   echo -e "\t-r Specific runtime image folder name to be built, it can be one of python3Action, python39Action, python310Action, or python311Action"
    echo -e "\t-t The name for docker image and tag used for building the docker image. Example: action-python-v3.7:1.0-SNAPSHOT"
    exit 1 #Exit script
 }
@@ -35,7 +35,7 @@ do
 done
 
 # Print helperInstructions in case parameters are empty
-if [ -z "$runtimeParameter" ] || [ -z "$dockerImageTag" ] || ( [[ "$runtimeParameter" != "python3Action" ]] && [[ "$runtimeParameter" != "python36AiAction" ]] && [[ "$runtimeParameter" != "python39Action" ]] && [[ "$runtimeParameter" != "python310Action" ]] )
+if [ -z "$runtimeParameter" ] || [ -z "$dockerImageTag" ] || ( [[ "$runtimeParameter" != "python3Action" ]] && [[ "$runtimeParameter" != "python39Action" ]] && [[ "$runtimeParameter" != "python310Action" ]] && [[ "$runtimeParameter" != "python311Action" ]] )
  then
    echo "Runtime parameter is empty or not supported";
    helperInstructions
@@ -48,16 +48,6 @@ if [[ "$runtimeParameter" == "python3Action" ]]
     cp $(pwd)/core/requirements_common.txt $(pwd)/core/python3Action/requirements_common.txt
     docker build -t "$dockerImageTag" $(pwd)/core/python3Action
     rm $(pwd)/core/python3Action/requirements_common.txt
-elif [[ "$runtimeParameter" == "python36AiAction" ]]
-  then
-    echo "Building docker for python36AiAction."
-    cp $(pwd)/core/requirements_common.txt $(pwd)/core/python36AiAction/requirements_common.txt
-    cp -r $(pwd)/core/python3Action/bin $(pwd)/core/python36AiAction/bin
-    cp -r $(pwd)/core/python3Action/lib $(pwd)/core/python36AiAction/lib
-    docker build -t "$dockerImageTag" $(pwd)/core/python36AiAction
-    rm $(pwd)/core/python36AiAction/requirements_common.txt
-    rm -r $(pwd)/core/python36AiAction/bin
-    rm -r $(pwd)/core/python36AiAction/lib
 elif [[ "$runtimeParameter" == "python39Action" ]]
   then
     echo "Building docker for python39Action."
@@ -78,4 +68,14 @@ elif [[ "$runtimeParameter" == "python310Action" ]]
     rm $(pwd)/core/python310Action/requirements_common.txt
     rm -r $(pwd)/core/python310Action/bin
     rm -r $(pwd)/core/python310Action/lib
+elif [[ "$runtimeParameter" == "python311Action" ]]
+  then
+    echo "Building docker for python311Action."
+    cp $(pwd)/core/requirements_common.txt $(pwd)/core/python311Action/requirements_common.txt
+    cp -r $(pwd)/core/python3Action/bin $(pwd)/core/python311Action/bin
+    cp -r $(pwd)/core/python3Action/lib $(pwd)/core/python311Action/lib
+    docker build -t "$dockerImageTag" $(pwd)/core/python311Action
+    rm $(pwd)/core/python311Action/requirements_common.txt
+    rm -r $(pwd)/core/python311Action/bin
+    rm -r $(pwd)/core/python311Action/lib
 fi
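
For reference, after this change `local_build.sh` accepts `python311Action` as a runtime target. A minimal invocation sketch follows, assuming it is run from the repository root (the script resolves paths via `$(pwd)`); the image tag shown is illustrative, not a published tag:

```
# Build the Python 3.11 runtime image locally.
# -r selects the runtime folder under core/, -t names the resulting Docker image and tag.
./tutorials/local_build.sh -r python311Action -t action-python-v3.11:1.0-SNAPSHOT
```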