Posted to commits@madlib.apache.org by ok...@apache.org on 2023/04/03 14:06:30 UTC

[madlib] 01/08: update: fix madlib support py3.

This is an automated email from the ASF dual-hosted git repository.

okislal pushed a commit to branch madlib2-master
in repository https://gitbox.apache.org/repos/asf/madlib.git

commit 3eaae9971d1b9f852ddec918ee77acfac5d402c7
Author: soarpenguin <so...@gmail.com>
AuthorDate: Sat Aug 7 09:17:40 2021 +0000

    update: fix madlib support py3.
---
 cmake/FindPackageHandleStandardArgs.cmake          |  606 ++
 cmake/FindPackageMessage.cmake                     |   49 +
 cmake/FindPostgreSQL_13.cmake                      |  318 +
 cmake/SelectLibraryConfigurations.cmake            |   81 +
 cmake/TestIfNoUTF8BOM.py                           |    4 +-
 .../hello_world/iterative/simple_logistic.py_in    |    2 +-
 .../hello_world/iterative/simple_logistic.sql_in   |    4 +-
 methods/array_ops/src/pg_gp/array_ops.sql_in       |    2 +-
 methods/sketch/src/pg_gp/countmin.py_in            |   10 +-
 methods/sketch/src/pg_gp/sketch.sql_in             |   12 +-
 methods/stemmer/src/pg_gp/porter_stemmer.sql_in    |    2 +-
 methods/svec_util/src/pg_gp/svec_util.sql_in       |    4 +-
 src/bin/madpack                                    |    2 +-
 src/madpack/argparse.py                            |    8 +-
 src/madpack/configyml.py                           |   53 +-
 src/madpack/create_changelist.py                   |   14 +-
 src/madpack/diff_udf.sql                           |    4 +-
 src/madpack/diff_udo.sql                           |    6 +-
 src/madpack/diff_udoc.sql                          |    6 +-
 src/madpack/diff_udt.sql                           |    6 +-
 src/madpack/madpack.py                             |   45 +-
 src/madpack/sort-module.py                         |    6 +-
 src/madpack/upgrade_util.py                        |   34 +-
 src/madpack/utilities.py                           |   20 +-
 src/madpack/yaml/__init__.py                       |   30 +-
 src/madpack/yaml/composer.py                       |   12 +-
 src/madpack/yaml/constructor.py                    |   98 +-
 src/madpack/yaml/cyaml.py                          |    8 +-
 src/madpack/yaml/dumper.py                         |    8 +-
 src/madpack/yaml/emitter.py                        |  292 +-
 src/madpack/yaml/error.py                          |    4 +-
 src/madpack/yaml/loader.py                         |   12 +-
 src/madpack/yaml/parser.py                         |   24 +-
 src/madpack/yaml/reader.py                         |   22 +-
 src/madpack/yaml/representer.py                    |  138 +-
 src/madpack/yaml/resolver.py                       |   66 +-
 src/madpack/yaml/scanner.py                        |  326 +-
 src/madpack/yaml/serializer.py                     |    8 +-
 .../plpy_mock.py_in => 13/CMakeLists.txt}          |   29 +-
 .../SQLCommon.m4_in => 13/madpack/SQLCommon.m4}    |   62 +-
 src/ports/postgres/CMakeLists.txt                  |   18 +-
 .../dbconnector/SystemInformation_impl.hpp         |    7 +-
 src/ports/postgres/extension/madlib.control_in     |    4 +-
 src/ports/postgres/madpack/SQLCommon.m4_in         |    2 +-
 .../modules/assoc_rules/assoc_rules.sql_in         |   12 +-
 src/ports/postgres/modules/bayes/bayes.py_in       |   40 +-
 src/ports/postgres/modules/bayes/bayes.sql_in      |   20 +-
 src/ports/postgres/modules/convex/lmf.sql_in       |    2 +-
 src/ports/postgres/modules/convex/mlp.sql_in       |   12 +-
 src/ports/postgres/modules/convex/mlp_igd.py_in    |    9 +-
 .../modules/convex/test/unit_tests/plpy_mock.py_in |    2 +-
 src/ports/postgres/modules/crf/crf.sql_in          |    4 +-
 .../postgres/modules/crf/crf_data_loader.sql_in    |   10 +-
 .../postgres/modules/crf/crf_feature_gen.sql_in    |    4 +-
 src/ports/postgres/modules/crf/viterbi.sql_in      |    4 +-
 src/ports/postgres/modules/dbscan/dbscan.sql_in    |   30 +-
 .../modules/dbscan/test/unit_tests/plpy_mock.py_in |    2 +-
 .../modules/deep_learning/gpu_info_from_tf.py_in   |    4 +-
 .../deep_learning/input_data_preprocessor.py_in    |   13 +-
 .../deep_learning/input_data_preprocessor.sql_in   |   16 +-
 .../deep_learning/keras_model_arch_table.py_in     |    4 +
 .../deep_learning/keras_model_arch_table.sql_in    |   29 +-
 .../modules/deep_learning/madlib_keras.py_in       |   43 +-
 .../modules/deep_learning/madlib_keras.sql_in      |   54 +-
 .../deep_learning/madlib_keras_automl.py_in        |   10 +-
 .../deep_learning/madlib_keras_automl.sql_in       |   15 +-
 .../madlib_keras_automl_hyperband.py_in            |   18 +-
 .../madlib_keras_automl_hyperopt.py_in             |   18 +-
 .../madlib_keras_custom_function.py_in             |   10 +
 .../madlib_keras_custom_function.sql_in            |   36 +-
 .../madlib_keras_fit_multiple_model.py_in          |   23 +-
 .../madlib_keras_fit_multiple_model.sql_in         |   10 +-
 .../deep_learning/madlib_keras_gpu_info.py_in      |    1 +
 .../deep_learning/madlib_keras_gpu_info.sql_in     |    8 +-
 .../deep_learning/madlib_keras_helper.py_in        |   24 +-
 .../madlib_keras_model_selection.py_in             |   35 +-
 .../madlib_keras_model_selection.sql_in            |   11 +-
 .../deep_learning/madlib_keras_predict.py_in       |   28 +-
 .../deep_learning/madlib_keras_serializer.py_in    |    4 +
 .../deep_learning/madlib_keras_validator.py_in     |   51 +-
 .../deep_learning/madlib_keras_wrapper.py_in       |   64 +-
 .../modules/deep_learning/model_arch_info.py_in    |    2 +-
 .../deep_learning/predict_input_params.py_in       |   15 +-
 .../test/keras_model_arch_table.sql_in             |    2 +-
 .../test/madlib_keras_custom_function.setup.sql_in |    8 +-
 .../deep_learning/test/madlib_keras_fit.sql_in     |    2 +-
 .../test/madlib_keras_fit_multiple.sql_in          |   20 +-
 .../test/madlib_keras_transfer_learning.sql_in     |    2 +-
 .../postgres/modules/elastic_net/elastic_net.py_in |   22 +-
 .../modules/elastic_net/elastic_net.sql_in         |    8 +-
 .../elastic_net/elastic_net_generate_result.py_in  |    6 +-
 .../modules/elastic_net/elastic_net_models.py_in   |    4 +-
 .../elastic_net/elastic_net_optimizer_fista.py_in  |   18 +-
 .../elastic_net/elastic_net_optimizer_igd.py_in    |   22 +-
 src/ports/postgres/modules/glm/glm.py_in           |    8 +-
 src/ports/postgres/modules/glm/glm.sql_in          |   10 +-
 src/ports/postgres/modules/glm/multinom.py_in      |    6 +-
 .../postgres/modules/glm/multiresponseglm.sql_in   |    8 +-
 src/ports/postgres/modules/glm/ordinal.py_in       |    6 +-
 src/ports/postgres/modules/glm/ordinal.sql_in      |    8 +-
 src/ports/postgres/modules/graph/apsp.py_in        |    6 +-
 src/ports/postgres/modules/graph/apsp.sql_in       |   16 +-
 src/ports/postgres/modules/graph/bfs.py_in         |    4 +-
 src/ports/postgres/modules/graph/bfs.sql_in        |   16 +-
 src/ports/postgres/modules/graph/graph_utils.py_in |    7 +-
 src/ports/postgres/modules/graph/hits.py_in        |   10 +-
 src/ports/postgres/modules/graph/hits.sql_in       |    8 +-
 src/ports/postgres/modules/graph/measures.py_in    |    2 +-
 src/ports/postgres/modules/graph/measures.sql_in   |   32 +-
 src/ports/postgres/modules/graph/pagerank.py_in    |   17 +-
 src/ports/postgres/modules/graph/pagerank.sql_in   |   10 +-
 src/ports/postgres/modules/graph/sssp.py_in        |    6 +-
 src/ports/postgres/modules/graph/sssp.sql_in       |   14 +-
 src/ports/postgres/modules/graph/wcc.py_in         |    8 +-
 src/ports/postgres/modules/graph/wcc.sql_in        |   32 +-
 src/ports/postgres/modules/kmeans/kmeans.sql_in    |   48 +-
 src/ports/postgres/modules/knn/knn.sql_in          |    8 +-
 src/ports/postgres/modules/lda/lda.py_in           |   18 +-
 src/ports/postgres/modules/lda/lda.sql_in          |   28 +-
 .../modules/linalg/matrix_help_message.py_in       |    2 +-
 src/ports/postgres/modules/linalg/matrix_ops.py_in |   14 +-
 .../postgres/modules/linalg/matrix_ops.sql_in      |  144 +-
 src/ports/postgres/modules/linalg/svd.py_in        |   10 +-
 src/ports/postgres/modules/linalg/svd.sql_in       |   18 +-
 .../linear_systems/dense_linear_systems.py_in      |    4 +-
 .../linear_systems/dense_linear_systems.sql_in     |    4 +-
 .../linear_systems/sparse_linear_systems.py_in     |    8 +-
 .../linear_systems/sparse_linear_systems.sql_in    |    4 +-
 src/ports/postgres/modules/pca/pca.py_in           |    4 +-
 src/ports/postgres/modules/pca/pca.sql_in          |   10 +-
 src/ports/postgres/modules/pca/pca_project.py_in   |    6 +-
 src/ports/postgres/modules/pca/pca_project.sql_in  |   10 +-
 src/ports/postgres/modules/pmml/binding.py_in      | 7498 ++++++++++----------
 src/ports/postgres/modules/pmml/pmml_builder.py_in |   28 +-
 .../postgres/modules/pmml/table_to_pmml.py_in      |   12 +-
 .../postgres/modules/pmml/table_to_pmml.sql_in     |    8 +-
 .../recursive_partitioning/decision_tree.py_in     |   44 +-
 .../recursive_partitioning/decision_tree.sql_in    |   30 +-
 .../recursive_partitioning/random_forest.py_in     |   36 +-
 .../recursive_partitioning/random_forest.sql_in    |   20 +-
 .../modules/regress/clustered_variance.sql_in      |   18 +-
 src/ports/postgres/modules/regress/linear.sql_in   |    6 +-
 src/ports/postgres/modules/regress/logistic.sql_in |    8 +-
 src/ports/postgres/modules/regress/marginal.py_in  |    8 +-
 src/ports/postgres/modules/regress/marginal.sql_in |   14 +-
 src/ports/postgres/modules/regress/margins.py_in   |    6 +-
 .../postgres/modules/regress/margins_builder.py_in |   50 +-
 .../postgres/modules/regress/multilogistic.sql_in  |   14 +-
 src/ports/postgres/modules/regress/robust.sql_in   |   14 +-
 .../postgres/modules/sample/balance_sample.py_in   |   30 +-
 .../postgres/modules/sample/balance_sample.sql_in  |   20 +-
 .../modules/sample/stratified_sample.sql_in        |   16 +-
 .../modules/sample/train_test_split.sql_in         |   20 +-
 .../modules/stats/clustered_variance_coxph.sql_in  |    6 +-
 src/ports/postgres/modules/stats/correlation.py_in |    5 +-
 .../postgres/modules/stats/correlation.sql_in      |   12 +-
 .../postgres/modules/stats/cox_prop_hazards.sql_in |   16 +-
 .../postgres/modules/stats/pred_metrics.sql_in     |   36 +-
 .../modules/stats/robust_variance_coxph.sql_in     |    4 +-
 .../postgres/modules/summary/Summarizer.py_in      |    2 +-
 src/ports/postgres/modules/summary/summary.py_in   |    2 +-
 src/ports/postgres/modules/summary/summary.sql_in  |    6 +-
 .../modules/svm/kernel_approximation.py_in         |    4 +-
 src/ports/postgres/modules/svm/svm.py_in           |   18 +-
 src/ports/postgres/modules/svm/svm.sql_in          |   18 +-
 src/ports/postgres/modules/tsa/arima.py_in         |    2 +-
 src/ports/postgres/modules/tsa/arima.sql_in        |   12 +-
 .../postgres/modules/tsa/arima_forecast.py_in      |   12 +-
 src/ports/postgres/modules/utilities/admin.py_in   |    2 +-
 .../postgres/modules/utilities/cols2vec.sql_in     |    4 +-
 src/ports/postgres/modules/utilities/control.py_in |    4 +-
 .../modules/utilities/control_composite.py_in      |    6 +-
 .../modules/utilities/create_indicators.py_in      |   20 +-
 .../modules/utilities/create_indicators.sql_in     |    6 +-
 .../modules/utilities/encode_categorical.py_in     |   22 +-
 .../modules/utilities/encode_categorical.sql_in    |   20 +-
 .../postgres/modules/utilities/group_control.py_in |    6 +-
 .../modules/utilities/in_mem_group_control.py_in   |   37 +-
 .../utilities/mean_std_dev_calculator.py_in        |    2 +-
 .../utilities/minibatch_preprocessing.py_in        |   36 +-
 .../utilities/minibatch_preprocessing.sql_in       |    6 +-
 .../modules/utilities/minibatch_validation.py_in   |    8 +-
 src/ports/postgres/modules/utilities/path.py_in    |   17 +-
 src/ports/postgres/modules/utilities/path.sql_in   |    4 +-
 src/ports/postgres/modules/utilities/pivot.py_in   |   26 +-
 src/ports/postgres/modules/utilities/pivot.sql_in  |   24 +-
 .../postgres/modules/utilities/sessionize.py_in    |    8 +-
 .../postgres/modules/utilities/sessionize.sql_in   |    4 +-
 .../modules/utilities/text_utilities.py_in         |    4 +-
 .../modules/utilities/text_utilities.sql_in        |    2 +-
 .../modules/utilities/transform_vec_cols.py_in     |   30 +-
 .../postgres/modules/utilities/utilities.py_in     |   30 +-
 .../postgres/modules/utilities/utilities.sql_in    |   22 +-
 .../postgres/modules/utilities/validate_args.py_in |    9 +-
 .../postgres/modules/utilities/vec2cols.sql_in     |    4 +-
 .../modules/validation/cross_validation.sql_in     |   10 +-
 .../validation/internal/cross_validation.py_in     |   12 +-
 197 files changed, 6735 insertions(+), 5558 deletions(-)

diff --git a/cmake/FindPackageHandleStandardArgs.cmake b/cmake/FindPackageHandleStandardArgs.cmake
new file mode 100644
index 00000000..54129d64
--- /dev/null
+++ b/cmake/FindPackageHandleStandardArgs.cmake
@@ -0,0 +1,606 @@
+# Distributed under the OSI-approved BSD 3-Clause License.  See accompanying
+# file Copyright.txt or https://cmake.org/licensing for details.
+
+#[=======================================================================[.rst:
+FindPackageHandleStandardArgs
+-----------------------------
+
+This module provides functions intended to be used in :ref:`Find Modules`
+implementing :command:`find_package(<PackageName>)` calls.
+
+.. command:: find_package_handle_standard_args
+
+  This command handles the ``REQUIRED``, ``QUIET`` and version-related
+  arguments of :command:`find_package`.  It also sets the
+  ``<PackageName>_FOUND`` variable.  The package is considered found if all
+  variables listed contain valid results, e.g. valid filepaths.
+
+  There are two signatures:
+
+  .. code-block:: cmake
+
+    find_package_handle_standard_args(<PackageName>
+      (DEFAULT_MSG|<custom-failure-message>)
+      <required-var>...
+      )
+
+    find_package_handle_standard_args(<PackageName>
+      [FOUND_VAR <result-var>]
+      [REQUIRED_VARS <required-var>...]
+      [VERSION_VAR <version-var>]
+      [HANDLE_VERSION_RANGE]
+      [HANDLE_COMPONENTS]
+      [CONFIG_MODE]
+      [NAME_MISMATCHED]
+      [REASON_FAILURE_MESSAGE <reason-failure-message>]
+      [FAIL_MESSAGE <custom-failure-message>]
+      )
+
+  The ``<PackageName>_FOUND`` variable will be set to ``TRUE`` if all
+  the variables ``<required-var>...`` are valid and any optional
+  constraints are satisfied, and ``FALSE`` otherwise.  A success or
+  failure message may be displayed based on the results and on
+  whether the ``REQUIRED`` and/or ``QUIET`` option was given to
+  the :command:`find_package` call.
+
+  The options are:
+
+  ``(DEFAULT_MSG|<custom-failure-message>)``
+    In the simple signature this specifies the failure message.
+    Use ``DEFAULT_MSG`` to ask for a default message to be computed
+    (recommended).  Not valid in the full signature.
+
+  ``FOUND_VAR <result-var>``
+    .. deprecated:: 3.3
+
+    Specifies either ``<PackageName>_FOUND`` or
+    ``<PACKAGENAME>_FOUND`` as the result variable.  This exists only
+    for compatibility with older versions of CMake and is now ignored.
+    Result variables of both names are always set for compatibility.
+
+  ``REQUIRED_VARS <required-var>...``
+    Specify the variables which are required for this package.
+    These may be named in the generated failure message asking the
+    user to set the missing variable values.  Therefore these should
+    typically be cache entries such as ``FOO_LIBRARY`` and not output
+    variables like ``FOO_LIBRARIES``.
+
+    .. versionchanged:: 3.18
+      If ``HANDLE_COMPONENTS`` is specified, this option can be omitted.
+
+  ``VERSION_VAR <version-var>``
+    Specify the name of a variable that holds the version of the package
+    that has been found.  This version will be checked against the
+    (potentially) specified required version given to the
+    :command:`find_package` call, including its ``EXACT`` option.
+    The default messages include information about the required
+    version and the version actually found, whether or not the
+    version is ok.
+
+  ``HANDLE_VERSION_RANGE``
+    .. versionadded:: 3.19
+
+    Enable handling of a version range, if one is specified. Without this
+    option, a developer warning will be displayed if a version range is
+    specified.
+
+  ``HANDLE_COMPONENTS``
+    Enable handling of package components.  In this case, the command
+    will report which components have been found and which are missing,
+    and the ``<PackageName>_FOUND`` variable will be set to ``FALSE``
+    if any of the required components (i.e. not the ones listed after
+    the ``OPTIONAL_COMPONENTS`` option of :command:`find_package`) are
+    missing.
+
+  ``CONFIG_MODE``
+    Specify that the calling find module is a wrapper around a
+    call to ``find_package(<PackageName> NO_MODULE)``.  This implies
+    a ``VERSION_VAR`` value of ``<PackageName>_VERSION``.  The command
+    will automatically check whether the package configuration file
+    was found.
+
+  ``REASON_FAILURE_MESSAGE <reason-failure-message>``
+    .. versionadded:: 3.16
+
+    Specify a custom message of the reason for the failure which will be
+    appended to the default generated message.
+
+  ``FAIL_MESSAGE <custom-failure-message>``
+    Specify a custom failure message instead of using the default
+    generated message.  Not recommended.
+
+  ``NAME_MISMATCHED``
+    .. versionadded:: 3.17
+
+    Indicate that the ``<PackageName>`` does not match
+    ``${CMAKE_FIND_PACKAGE_NAME}``. This is usually a mistake and raises a
+    warning, but it may be intentional for usage of the command for components
+    of a larger package.
+
+Example for the simple signature:
+
+.. code-block:: cmake
+
+  find_package_handle_standard_args(LibXml2 DEFAULT_MSG
+    LIBXML2_LIBRARY LIBXML2_INCLUDE_DIR)
+
+The ``LibXml2`` package is considered to be found if both
+``LIBXML2_LIBRARY`` and ``LIBXML2_INCLUDE_DIR`` are valid.
+Then also ``LibXml2_FOUND`` is set to ``TRUE``.  If it is not found
+and ``REQUIRED`` was used, it fails with a
+:command:`message(FATAL_ERROR)`, independent of whether ``QUIET``
+was used.  If it is found, success will be reported, including
+the content of the first ``<required-var>``.  On repeated CMake runs,
+the same message will not be printed again.
+
+.. note::
+
+  If ``<PackageName>`` does not match ``CMAKE_FIND_PACKAGE_NAME`` for the
+  calling module, a warning that there is a mismatch is given. The
+  ``FPHSA_NAME_MISMATCHED`` variable may be set to bypass the warning when
+  using the old signature, and the ``NAME_MISMATCHED`` argument when using
+  the new signature. To avoid forcing the caller to require newer versions
+  of CMake, the variable's value will be used if it is defined and the
+  ``NAME_MISMATCHED`` argument is not passed for the new signature (but
+  using both is an error).
+
+Example for the full signature:
+
+.. code-block:: cmake
+
+  find_package_handle_standard_args(LibArchive
+    REQUIRED_VARS LibArchive_LIBRARY LibArchive_INCLUDE_DIR
+    VERSION_VAR LibArchive_VERSION)
+
+In this case, the ``LibArchive`` package is considered to be found if
+both ``LibArchive_LIBRARY`` and ``LibArchive_INCLUDE_DIR`` are valid.
+Also the version of ``LibArchive`` will be checked by using the version
+contained in ``LibArchive_VERSION``.  Since no ``FAIL_MESSAGE`` is given,
+the default messages will be printed.
+
+Another example for the full signature:
+
+.. code-block:: cmake
+
+  find_package(Automoc4 QUIET NO_MODULE HINTS /opt/automoc4)
+  find_package_handle_standard_args(Automoc4  CONFIG_MODE)
+
+In this case, a ``FindAutomoc4.cmake`` module wraps a call to
+``find_package(Automoc4 NO_MODULE)`` and adds an additional search
+directory for ``automoc4``.  Then the call to
+``find_package_handle_standard_args`` produces a proper success/failure
+message.
+
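+Example for ``HANDLE_COMPONENTS`` (a minimal sketch; the ``Foo`` package and
+its variables are hypothetical, and each per-component result variable
+``Foo_<component>_FOUND`` would be set by the find module itself):
+
+.. code-block:: cmake
+
+  find_package_handle_standard_args(Foo
+    REQUIRED_VARS Foo_LIBRARY Foo_INCLUDE_DIR
+    HANDLE_COMPONENTS)
+
+With this, ``Foo_FOUND`` is set to ``FALSE`` whenever any component requested
+as required in the ``find_package(Foo COMPONENTS ...)`` call is missing.
+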
+.. command:: find_package_check_version
+
+  .. versionadded:: 3.19
+
+  Helper function which can be used to check if a ``<version>`` is valid
+  against version-related arguments of :command:`find_package`.
+
+  .. code-block:: cmake
+
+    find_package_check_version(<version> <result-var>
+      [HANDLE_VERSION_RANGE]
+      [RESULT_MESSAGE_VARIABLE <message-var>]
+      )
+
+  The ``<result-var>`` will hold a boolean value giving the result of the check.
+
+  The options are:
+
+  ``HANDLE_VERSION_RANGE``
+    Enable handling of a version range, if one is specified. Without this
+    option, a developer warning will be displayed if a version range is
+    specified.
+
+  ``RESULT_MESSAGE_VARIABLE <message-var>``
+    Specify a variable to get back a message describing the result of the check.
+
+Example for the usage:
+
+.. code-block:: cmake
+
+  find_package_check_version(1.2.3 result HANDLE_VERSION_RANGE
+    RESULT_MESSAGE_VARIABLE reason)
+  if (result)
+    message (STATUS "${reason}")
+  else()
+    message (FATAL_ERROR "${reason}")
+  endif()
+#]=======================================================================]
+
+include(${CMAKE_CURRENT_LIST_DIR}/FindPackageMessage.cmake)
+
+
+cmake_policy(PUSH)
+# numbers and boolean constants
+cmake_policy (SET CMP0012 NEW)
+# IN_LIST operator
+cmake_policy (SET CMP0057 NEW)
+
+
+# internal helper macro
+macro(_FPHSA_FAILURE_MESSAGE _msg)
+  set (__msg "${_msg}")
+  if (FPHSA_REASON_FAILURE_MESSAGE)
+    string(APPEND __msg "\n    Reason given by package: ${FPHSA_REASON_FAILURE_MESSAGE}\n")
+  endif()
+  if (${_NAME}_FIND_REQUIRED)
+    message(FATAL_ERROR "${__msg}")
+  else ()
+    if (NOT ${_NAME}_FIND_QUIETLY)
+      message(STATUS "${__msg}")
+    endif ()
+  endif ()
+endmacro()
+
+
+# internal helper macro to generate the failure message when used in CONFIG_MODE:
+macro(_FPHSA_HANDLE_FAILURE_CONFIG_MODE)
+  # <PackageName>_CONFIG is set but FOUND is false; this means that one of the other REQUIRED_VARS was not found:
+  if(${_NAME}_CONFIG)
+    _FPHSA_FAILURE_MESSAGE("${FPHSA_FAIL_MESSAGE}: missing:${MISSING_VARS} (found ${${_NAME}_CONFIG} ${VERSION_MSG})")
+  else()
+    # If _CONSIDERED_CONFIGS is set, the config-file has been found, but no suitable version.
+    # List them all in the error message:
+    if(${_NAME}_CONSIDERED_CONFIGS)
+      set(configsText "")
+      list(LENGTH ${_NAME}_CONSIDERED_CONFIGS configsCount)
+      math(EXPR configsCount "${configsCount} - 1")
+      foreach(currentConfigIndex RANGE ${configsCount})
+        list(GET ${_NAME}_CONSIDERED_CONFIGS ${currentConfigIndex} filename)
+        list(GET ${_NAME}_CONSIDERED_VERSIONS ${currentConfigIndex} version)
+        string(APPEND configsText "\n    ${filename} (version ${version})")
+      endforeach()
+      if (${_NAME}_NOT_FOUND_MESSAGE)
+        if (FPHSA_REASON_FAILURE_MESSAGE)
+          string(PREPEND FPHSA_REASON_FAILURE_MESSAGE "${${_NAME}_NOT_FOUND_MESSAGE}\n    ")
+        else()
+          set(FPHSA_REASON_FAILURE_MESSAGE "${${_NAME}_NOT_FOUND_MESSAGE}")
+        endif()
+      else()
+        string(APPEND configsText "\n")
+      endif()
+      _FPHSA_FAILURE_MESSAGE("${FPHSA_FAIL_MESSAGE} ${VERSION_MSG}, checked the following files:${configsText}")
+
+    else()
+      # Simple case: No Config-file was found at all:
+      _FPHSA_FAILURE_MESSAGE("${FPHSA_FAIL_MESSAGE}: found neither ${_NAME}Config.cmake nor ${_NAME_LOWER}-config.cmake ${VERSION_MSG}")
+    endif()
+  endif()
+endmacro()
+
+
+function(FIND_PACKAGE_CHECK_VERSION version result)
+  cmake_parse_arguments (PARSE_ARGV 2 FPCV "HANDLE_VERSION_RANGE;NO_AUTHOR_WARNING_VERSION_RANGE" "RESULT_MESSAGE_VARIABLE" "")
+
+  if (FPCV_UNPARSED_ARGUMENTS)
+    message (FATAL_ERROR "find_package_check_version(): ${FPCV_UNPARSED_ARGUMENTS}: unexpected arguments")
+  endif()
+  if ("RESULT_MESSAGE_VARIABLE" IN_LIST FPCV_KEYWORDS_MISSING_VALUES)
+    message (FATAL_ERROR "find_package_check_version(): RESULT_MESSAGE_VARIABLE expects an argument")
+  endif()
+
+  set (${result} FALSE PARENT_SCOPE)
+  if (FPCV_RESULT_MESSAGE_VARIABLE)
+    unset (${FPCV_RESULT_MESSAGE_VARIABLE} PARENT_SCOPE)
+  endif()
+
+  if (_CMAKE_FPHSA_PACKAGE_NAME)
+    set (package "${_CMAKE_FPHSA_PACKAGE_NAME}")
+  elseif (CMAKE_FIND_PACKAGE_NAME)
+    set (package "${CMAKE_FIND_PACKAGE_NAME}")
+  else()
+    message (FATAL_ERROR "find_package_check_version(): Cannot be used outside a 'Find Module'")
+  endif()
+
+  if (NOT FPCV_NO_AUTHOR_WARNING_VERSION_RANGE
+      AND ${package}_FIND_VERSION_RANGE AND NOT FPCV_HANDLE_VERSION_RANGE)
+    message(AUTHOR_WARNING
+      "`find_package()` specifies a version range but the option "
+      "`HANDLE_VERSION_RANGE` is not passed to `find_package_check_version()`. "
+      "Only the lower endpoint of the range will be used.")
+  endif()
+
+
+  set (version_ok FALSE)
+  unset (version_msg)
+
+  if (FPCV_HANDLE_VERSION_RANGE AND ${package}_FIND_VERSION_RANGE)
+    if ((${package}_FIND_VERSION_RANGE_MIN STREQUAL "INCLUDE"
+          AND version VERSION_GREATER_EQUAL ${package}_FIND_VERSION_MIN)
+        AND ((${package}_FIND_VERSION_RANGE_MAX STREQUAL "INCLUDE"
+            AND version VERSION_LESS_EQUAL ${package}_FIND_VERSION_MAX)
+          OR (${package}_FIND_VERSION_RANGE_MAX STREQUAL "EXCLUDE"
+            AND version VERSION_LESS ${package}_FIND_VERSION_MAX)))
+      set (version_ok TRUE)
+      set(version_msg "(found suitable version \"${version}\", required range is \"${${package}_FIND_VERSION_RANGE}\")")
+    else()
+      set(version_msg "Found unsuitable version \"${version}\", required range is \"${${package}_FIND_VERSION_RANGE}\"")
+    endif()
+  elseif (DEFINED ${package}_FIND_VERSION)
+    if(${package}_FIND_VERSION_EXACT)       # exact version required
+      # count the dots in the version string
+      string(REGEX REPLACE "[^.]" "" version_dots "${version}")
+      # add one dot because there is one dot more than there are components
+      string(LENGTH "${version_dots}." version_dots)
+      if (version_dots GREATER ${package}_FIND_VERSION_COUNT)
+        # Because of the C++ implementation of find_package() ${package}_FIND_VERSION_COUNT
+        # is at most 4 here. Therefore a simple lookup table is used.
+        if (${package}_FIND_VERSION_COUNT EQUAL 1)
+          set(version_regex "[^.]*")
+        elseif (${package}_FIND_VERSION_COUNT EQUAL 2)
+          set(version_regex "[^.]*\\.[^.]*")
+        elseif (${package}_FIND_VERSION_COUNT EQUAL 3)
+          set(version_regex "[^.]*\\.[^.]*\\.[^.]*")
+        else()
+          set(version_regex "[^.]*\\.[^.]*\\.[^.]*\\.[^.]*")
+        endif()
+        string(REGEX REPLACE "^(${version_regex})\\..*" "\\1" version_head "${version}")
+        if (NOT ${package}_FIND_VERSION VERSION_EQUAL version_head)
+          set(version_msg "Found unsuitable version \"${version}\", but required is exact version \"${${package}_FIND_VERSION}\"")
+        else ()
+          set(version_ok TRUE)
+          set(version_msg "(found suitable exact version \"${version}\")")
+        endif ()
+      else ()
+        if (NOT ${package}_FIND_VERSION VERSION_EQUAL version)
+          set(version_msg "Found unsuitable version \"${version}\", but required is exact version \"${${package}_FIND_VERSION}\"")
+        else ()
+          set(version_ok TRUE)
+          set(version_msg "(found suitable exact version \"${version}\")")
+        endif ()
+      endif ()
+    else()     # minimum version
+      if (${package}_FIND_VERSION VERSION_GREATER version)
+        set(version_msg "Found unsuitable version \"${version}\", but required is at least \"${${package}_FIND_VERSION}\"")
+      else()
+        set(version_ok TRUE)
+        set(version_msg "(found suitable version \"${version}\", minimum required is \"${${package}_FIND_VERSION}\")")
+      endif()
+    endif()
+  else ()
+    set(version_ok TRUE)
+    set(version_msg "(found version \"${version}\")")
+  endif()
+
+  set (${result} ${version_ok} PARENT_SCOPE)
+  if (FPCV_RESULT_MESSAGE_VARIABLE)
+    set (${FPCV_RESULT_MESSAGE_VARIABLE} "${version_msg}" PARENT_SCOPE)
+  endif()
+endfunction()
+
+
+function(FIND_PACKAGE_HANDLE_STANDARD_ARGS _NAME _FIRST_ARG)
+
+  # Set up the arguments for `cmake_parse_arguments`.
+  set(options  CONFIG_MODE  HANDLE_COMPONENTS NAME_MISMATCHED HANDLE_VERSION_RANGE)
+  set(oneValueArgs  FAIL_MESSAGE  REASON_FAILURE_MESSAGE VERSION_VAR  FOUND_VAR)
+  set(multiValueArgs REQUIRED_VARS)
+
+  # Check whether we are in 'simple' or 'extended' mode:
+  set(_KEYWORDS_FOR_EXTENDED_MODE  ${options} ${oneValueArgs} ${multiValueArgs} )
+  list(FIND _KEYWORDS_FOR_EXTENDED_MODE "${_FIRST_ARG}" INDEX)
+
+  unset(FPHSA_NAME_MISMATCHED_override)
+  if (DEFINED FPHSA_NAME_MISMATCHED)
+    # If the FPHSA_NAME_MISMATCHED variable is set, error if NAME_MISMATCHED
+    # is also passed as an argument. The variable is for old signatures, the
+    # argument is for new signatures.
+    list(FIND ARGN "NAME_MISMATCHED" name_mismatched_idx)
+    if (NOT name_mismatched_idx EQUAL "-1")
+      message(FATAL_ERROR
+        "The `NAME_MISMATCHED` argument may only be specified by the argument or "
+        "the variable, not both.")
+    endif ()
+
+    # But use the variable if it is not an argument to avoid forcing minimum
+    # CMake version bumps for calling modules.
+    set(FPHSA_NAME_MISMATCHED_override "${FPHSA_NAME_MISMATCHED}")
+  endif ()
+
+  if(${INDEX} EQUAL -1)
+    set(FPHSA_FAIL_MESSAGE ${_FIRST_ARG})
+    set(FPHSA_REQUIRED_VARS ${ARGN})
+    set(FPHSA_VERSION_VAR)
+  else()
+    cmake_parse_arguments(FPHSA "${options}" "${oneValueArgs}" "${multiValueArgs}"  ${_FIRST_ARG} ${ARGN})
+
+    if(FPHSA_UNPARSED_ARGUMENTS)
+      message(FATAL_ERROR "Unknown keywords given to FIND_PACKAGE_HANDLE_STANDARD_ARGS(): \"${FPHSA_UNPARSED_ARGUMENTS}\"")
+    endif()
+
+    if(NOT FPHSA_FAIL_MESSAGE)
+      set(FPHSA_FAIL_MESSAGE  "DEFAULT_MSG")
+    endif()
+
+    # In config-mode, we rely on the variable <PackageName>_CONFIG, which is set by find_package()
+    # when it successfully found the config-file, including version checking:
+    if(FPHSA_CONFIG_MODE)
+      list(INSERT FPHSA_REQUIRED_VARS 0 ${_NAME}_CONFIG)
+      list(REMOVE_DUPLICATES FPHSA_REQUIRED_VARS)
+      set(FPHSA_VERSION_VAR ${_NAME}_VERSION)
+    endif()
+
+    if(NOT FPHSA_REQUIRED_VARS AND NOT FPHSA_HANDLE_COMPONENTS)
+      message(FATAL_ERROR "No REQUIRED_VARS specified for FIND_PACKAGE_HANDLE_STANDARD_ARGS()")
+    endif()
+  endif()
+
+  if (DEFINED FPHSA_NAME_MISMATCHED_override)
+    set(FPHSA_NAME_MISMATCHED "${FPHSA_NAME_MISMATCHED_override}")
+  endif ()
+
+  if (DEFINED CMAKE_FIND_PACKAGE_NAME
+      AND NOT FPHSA_NAME_MISMATCHED
+      AND NOT _NAME STREQUAL CMAKE_FIND_PACKAGE_NAME)
+    message(AUTHOR_WARNING
+      "The package name passed to `find_package_handle_standard_args` "
+      "(${_NAME}) does not match the name of the calling package "
+      "(${CMAKE_FIND_PACKAGE_NAME}). This can lead to problems in calling "
+      "code that expects `find_package` result variables (e.g., `_FOUND`) "
+      "to follow a certain pattern.")
+  endif ()
+
+  if (${_NAME}_FIND_VERSION_RANGE AND NOT FPHSA_HANDLE_VERSION_RANGE)
+    message(AUTHOR_WARNING
+      "`find_package()` specifies a version range but the module ${_NAME} does "
+      "not support this capability. Only the lower endpoint of the range "
+      "will be used.")
+  endif()
+
+  # to propagate package name to FIND_PACKAGE_CHECK_VERSION
+  set(_CMAKE_FPHSA_PACKAGE_NAME "${_NAME}")
+
+  # now that we collected all arguments, process them
+
+  if("x${FPHSA_FAIL_MESSAGE}" STREQUAL "xDEFAULT_MSG")
+    set(FPHSA_FAIL_MESSAGE "Could NOT find ${_NAME}")
+  endif()
+
+  if (FPHSA_REQUIRED_VARS)
+    list(GET FPHSA_REQUIRED_VARS 0 _FIRST_REQUIRED_VAR)
+  endif()
+
+  string(TOUPPER ${_NAME} _NAME_UPPER)
+  string(TOLOWER ${_NAME} _NAME_LOWER)
+
+  if(FPHSA_FOUND_VAR)
+    set(_FOUND_VAR_UPPER ${_NAME_UPPER}_FOUND)
+    set(_FOUND_VAR_MIXED ${_NAME}_FOUND)
+    if(FPHSA_FOUND_VAR STREQUAL _FOUND_VAR_MIXED  OR  FPHSA_FOUND_VAR STREQUAL _FOUND_VAR_UPPER)
+      set(_FOUND_VAR ${FPHSA_FOUND_VAR})
+    else()
+      message(FATAL_ERROR "The argument for FOUND_VAR is \"${FPHSA_FOUND_VAR}\", but only \"${_FOUND_VAR_MIXED}\" and \"${_FOUND_VAR_UPPER}\" are valid names.")
+    endif()
+  else()
+    set(_FOUND_VAR ${_NAME_UPPER}_FOUND)
+  endif()
+
+  # collect all variables which were not found, so they can be printed, so the
+  # user knows better what went wrong (#6375)
+  set(MISSING_VARS "")
+  set(DETAILS "")
+  # check if all passed variables are valid
+  set(FPHSA_FOUND_${_NAME} TRUE)
+  foreach(_CURRENT_VAR ${FPHSA_REQUIRED_VARS})
+    if(NOT ${_CURRENT_VAR})
+      set(FPHSA_FOUND_${_NAME} FALSE)
+      string(APPEND MISSING_VARS " ${_CURRENT_VAR}")
+    else()
+      string(APPEND DETAILS "[${${_CURRENT_VAR}}]")
+    endif()
+  endforeach()
+  if(FPHSA_FOUND_${_NAME})
+    set(${_NAME}_FOUND TRUE)
+    set(${_NAME_UPPER}_FOUND TRUE)
+  else()
+    set(${_NAME}_FOUND FALSE)
+    set(${_NAME_UPPER}_FOUND FALSE)
+  endif()
+
+  # component handling
+  unset(FOUND_COMPONENTS_MSG)
+  unset(MISSING_COMPONENTS_MSG)
+
+  if(FPHSA_HANDLE_COMPONENTS)
+    foreach(comp ${${_NAME}_FIND_COMPONENTS})
+      if(${_NAME}_${comp}_FOUND)
+
+        if(NOT DEFINED FOUND_COMPONENTS_MSG)
+          set(FOUND_COMPONENTS_MSG "found components:")
+        endif()
+        string(APPEND FOUND_COMPONENTS_MSG " ${comp}")
+
+      else()
+
+        if(NOT DEFINED MISSING_COMPONENTS_MSG)
+          set(MISSING_COMPONENTS_MSG "missing components:")
+        endif()
+        string(APPEND MISSING_COMPONENTS_MSG " ${comp}")
+
+        if(${_NAME}_FIND_REQUIRED_${comp})
+          set(${_NAME}_FOUND FALSE)
+          string(APPEND MISSING_VARS " ${comp}")
+        endif()
+
+      endif()
+    endforeach()
+    set(COMPONENT_MSG "${FOUND_COMPONENTS_MSG} ${MISSING_COMPONENTS_MSG}")
+    string(APPEND DETAILS "[c${COMPONENT_MSG}]")
+  endif()
+
+  # version handling:
+  set(VERSION_MSG "")
+  set(VERSION_OK TRUE)
+
+  # check with DEFINED here as the requested or found version may be "0"
+  if (DEFINED ${_NAME}_FIND_VERSION)
+    if(DEFINED ${FPHSA_VERSION_VAR})
+      set(_FOUND_VERSION ${${FPHSA_VERSION_VAR}})
+      if (FPHSA_HANDLE_VERSION_RANGE)
+        set (FPCV_HANDLE_VERSION_RANGE HANDLE_VERSION_RANGE)
+      else()
+        set(FPCV_HANDLE_VERSION_RANGE NO_AUTHOR_WARNING_VERSION_RANGE)
+      endif()
+      find_package_check_version ("${_FOUND_VERSION}" VERSION_OK RESULT_MESSAGE_VARIABLE VERSION_MSG
+        ${FPCV_HANDLE_VERSION_RANGE})
+    else()
+      # if the package was not found, but a version was given, add that to the output:
+      if(${_NAME}_FIND_VERSION_EXACT)
+        set(VERSION_MSG "(Required is exact version \"${${_NAME}_FIND_VERSION}\")")
+      elseif (FPHSA_HANDLE_VERSION_RANGE AND ${_NAME}_FIND_VERSION_RANGE)
+        set(VERSION_MSG "(Required is version range \"${${_NAME}_FIND_VERSION_RANGE}\")")
+      else()
+        set(VERSION_MSG "(Required is at least version \"${${_NAME}_FIND_VERSION}\")")
+      endif()
+    endif()
+  else ()
+    # Check with DEFINED as the found version may be 0.
+    if(DEFINED ${FPHSA_VERSION_VAR})
+      set(VERSION_MSG "(found version \"${${FPHSA_VERSION_VAR}}\")")
+    endif()
+  endif ()
+
+  if(VERSION_OK)
+    string(APPEND DETAILS "[v${${FPHSA_VERSION_VAR}}(${${_NAME}_FIND_VERSION})]")
+  else()
+    set(${_NAME}_FOUND FALSE)
+  endif()
+
+
+  # print the result:
+  if (${_NAME}_FOUND)
+    FIND_PACKAGE_MESSAGE(${_NAME} "Found ${_NAME}: ${${_FIRST_REQUIRED_VAR}} ${VERSION_MSG} ${COMPONENT_MSG}" "${DETAILS}")
+  else ()
+
+    if(FPHSA_CONFIG_MODE)
+      _FPHSA_HANDLE_FAILURE_CONFIG_MODE()
+    else()
+      if(NOT VERSION_OK)
+        set(RESULT_MSG)
+        if (_FIRST_REQUIRED_VAR)
+          string (APPEND RESULT_MSG "found ${${_FIRST_REQUIRED_VAR}}")
+        endif()
+        if (COMPONENT_MSG)
+          if (RESULT_MSG)
+            string (APPEND RESULT_MSG ", ")
+          endif()
+          string (APPEND RESULT_MSG "${FOUND_COMPONENTS_MSG}")
+        endif()
+        _FPHSA_FAILURE_MESSAGE("${FPHSA_FAIL_MESSAGE}: ${VERSION_MSG} (${RESULT_MSG})")
+      else()
+        _FPHSA_FAILURE_MESSAGE("${FPHSA_FAIL_MESSAGE} (missing:${MISSING_VARS}) ${VERSION_MSG}")
+      endif()
+    endif()
+
+  endif ()
+
+  set(${_NAME}_FOUND ${${_NAME}_FOUND} PARENT_SCOPE)
+  set(${_NAME_UPPER}_FOUND ${${_NAME}_FOUND} PARENT_SCOPE)
+endfunction()
+
+
+cmake_policy(POP)
+
diff --git a/cmake/FindPackageMessage.cmake b/cmake/FindPackageMessage.cmake
new file mode 100644
index 00000000..fd2d60fd
--- /dev/null
+++ b/cmake/FindPackageMessage.cmake
@@ -0,0 +1,49 @@
+# Distributed under the OSI-approved BSD 3-Clause License.  See accompanying
+# file Copyright.txt or https://cmake.org/licensing for details.
+
+#[=======================================================================[.rst:
+FindPackageMessage
+------------------
+
+.. code-block:: cmake
+
+  find_package_message(<name> "message for user" "find result details")
+
+This function is intended to be used in FindXXX.cmake module files.
+It will print a message once for each unique find result.  This is
+useful for telling the user where a package was found.  The first
+argument specifies the name (XXX) of the package.  The second argument
+specifies the message to display.  The third argument lists details
+about the find result so that if they change the message will be
+displayed again.  The function also obeys the QUIET argument to the
+find_package command.
+
+Example:
+
+.. code-block:: cmake
+
+  if(X11_FOUND)
+    find_package_message(X11 "Found X11: ${X11_X11_LIB}"
+      "[${X11_X11_LIB}][${X11_INCLUDE_DIR}]")
+  else()
+   ...
+  endif()
+#]=======================================================================]
+
+function(find_package_message pkg msg details)
+  # Avoid printing a message repeatedly for the same find result.
+  if(NOT ${pkg}_FIND_QUIETLY)
+    string(REPLACE "\n" "" details "${details}")
+    set(DETAILS_VAR FIND_PACKAGE_MESSAGE_DETAILS_${pkg})
+    if(NOT "${details}" STREQUAL "${${DETAILS_VAR}}")
+      # The message has not yet been printed.
+      message(STATUS "${msg}")
+
+      # Save the find details in the cache to avoid printing the same
+      # message again.
+      set("${DETAILS_VAR}" "${details}"
+        CACHE INTERNAL "Details about finding ${pkg}")
+    endif()
+  endif()
+endfunction()
+
diff --git a/cmake/FindPostgreSQL_13.cmake b/cmake/FindPostgreSQL_13.cmake
new file mode 100644
index 00000000..02934ff0
--- /dev/null
+++ b/cmake/FindPostgreSQL_13.cmake
@@ -0,0 +1,318 @@
+# Distributed under the OSI-approved BSD 3-Clause License.  See accompanying
+# file Copyright.txt or https://cmake.org/licensing for details.
+
+#[=======================================================================[.rst:
+FindPostgreSQL
+--------------
+
+Find the PostgreSQL installation.
+
+IMPORTED Targets
+^^^^^^^^^^^^^^^^
+
+.. versionadded:: 3.14
+
+This module defines :prop_tgt:`IMPORTED` target ``PostgreSQL::PostgreSQL``
+if PostgreSQL has been found.
+
+Result Variables
+^^^^^^^^^^^^^^^^
+
+This module will set the following variables in your project:
+
+``PostgreSQL_FOUND``
+  True if PostgreSQL is found.
+``PostgreSQL_LIBRARIES``
+  the PostgreSQL libraries needed for linking
+``PostgreSQL_INCLUDE_DIRS``
+  the directories of the PostgreSQL headers
+``PostgreSQL_LIBRARY_DIRS``
+  the link directories for PostgreSQL libraries
+``PostgreSQL_VERSION_STRING``
+  the version of PostgreSQL found
+``PostgreSQL_TYPE_INCLUDE_DIR``
+  the directories of the PostgreSQL server headers
+
+Components
+^^^^^^^^^^
+
+This module contains an additional ``Server`` component that forcibly checks
+for the presence of server headers. Note that ``PostgreSQL_TYPE_INCLUDE_DIR``
+is set regardless of the presence of the ``Server`` component in the
+find_package call.
+
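+For example (a sketch; this assumes the module is loaded under the
+``PostgreSQL`` name):
+
+.. code-block:: cmake
+
+  find_package(PostgreSQL REQUIRED COMPONENTS Server)
+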
+#]=======================================================================]
+
+# ----------------------------------------------------------------------------
+# History:
+# This module is derived from the module originally found in the VTK source tree.
+#
+# ----------------------------------------------------------------------------
+# Note:
+# PostgreSQL_ADDITIONAL_VERSIONS is a variable that can be used to set the
+# version number of the implementation of PostgreSQL.
+# In Windows the default installation of PostgreSQL uses that as part of the path.
+# E.g. C:\Program Files\PostgreSQL\8.4.
+# Currently, the following version numbers are known to this module:
+# "13" "12" "11" "10" "9.6" "9.5" "9.4" "9.3" "9.2" "9.1" "9.0" "8.4" "8.3" "8.2" "8.1" "8.0"
+#
+# To use this variable just do something like this:
+# set(PostgreSQL_ADDITIONAL_VERSIONS "9.2" "8.4.4")
+# before calling find_package(PostgreSQL) in your CMakeLists.txt file.
+# This will mean that the versions you set here will be found first in the order
+# specified before the default ones are searched.
+#
+# ----------------------------------------------------------------------------
+# You may need to manually set:
+#  PostgreSQL_INCLUDE_DIR  - the path to where the PostgreSQL include files are
+#  PostgreSQL_LIBRARY_DIR  - the path to where the PostgreSQL library files are
+# if FindPostgreSQL.cmake cannot find the include files or the library files.
+#
+# ----------------------------------------------------------------------------
+# The following variables are set if PostgreSQL is found:
+#  PostgreSQL_FOUND         - Set to true when PostgreSQL is found.
+#  PostgreSQL_INCLUDE_DIRS  - Include directories for PostgreSQL
+#  PostgreSQL_LIBRARY_DIRS  - Link directories for PostgreSQL libraries
+#  PostgreSQL_LIBRARIES     - The PostgreSQL libraries.
+#
+# The ``PostgreSQL::PostgreSQL`` imported target is also created.
+#
+# ----------------------------------------------------------------------------
+# If you have installed PostgreSQL in a non-standard location, you have three
+# options. (Please note that in the following comments, it is assumed that
+# <Your Path> points to the root directory of the include directory of
+# PostgreSQL.)
+# 1) After CMake runs, set PostgreSQL_INCLUDE_DIR to <Your Path>/include and
+#    PostgreSQL_LIBRARY_DIR to wherever the library pq (or libpq in windows) is
+# 2) Use CMAKE_INCLUDE_PATH to set a path to <Your Path>/PostgreSQL<-version>. This will allow find_path()
+#    to locate PostgreSQL_INCLUDE_DIR by utilizing the PATH_SUFFIXES option. e.g. In your CMakeLists.txt file
+#    set(CMAKE_INCLUDE_PATH ${CMAKE_INCLUDE_PATH} "<Your Path>/include")
+# 3) Set an environment variable called PostgreSQL_ROOT that points to the root
+#    of where you have installed PostgreSQL, e.g. <Your Path>.
+#
+# ----------------------------------------------------------------------------
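+# For example, a minimal sketch of option 2 (the /opt/pgsql-13 prefix is
+# hypothetical):
+#
+#   set(CMAKE_INCLUDE_PATH ${CMAKE_INCLUDE_PATH} "/opt/pgsql-13/include")
+#   find_package(PostgreSQL)
+#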
+
+cmake_policy(PUSH)
+cmake_policy(SET CMP0057 NEW) # if IN_LIST
+
+set(PostgreSQL_INCLUDE_PATH_DESCRIPTION "top-level directory containing the PostgreSQL include directories. E.g /usr/local/include/PostgreSQL/8.4 or C:/Program Files/PostgreSQL/8.4/include")
+set(PostgreSQL_INCLUDE_DIR_MESSAGE "Set the PostgreSQL_INCLUDE_DIR cmake cache entry to the ${PostgreSQL_INCLUDE_PATH_DESCRIPTION}")
+set(PostgreSQL_LIBRARY_PATH_DESCRIPTION "top-level directory containing the PostgreSQL libraries.")
+set(PostgreSQL_LIBRARY_DIR_MESSAGE "Set the PostgreSQL_LIBRARY_DIR cmake cache entry to the ${PostgreSQL_LIBRARY_PATH_DESCRIPTION}")
+set(PostgreSQL_ROOT_DIR_MESSAGE "Set the PostgreSQL_ROOT system variable to where PostgreSQL is found on the machine E.g C:/Program Files/PostgreSQL/8.4")
+
+
+#set(CMAKE_INCLUDE_PATH ${CMAKE_INCLUDE_PATH} "/usr/include/postgresql/13/server/")
+# TODO (py3 port): the PostgreSQL 13 server include path is hard-coded here; make it configurable
+include_directories(/usr/include/postgresql/13/server/)
+set(PostgreSQL_KNOWN_VERSIONS ${PostgreSQL_ADDITIONAL_VERSIONS}
+    "13" "12" "11" "10" "9.6" "9.5" "9.4" "9.3" "9.2" "9.1" "9.0" "8.4" "8.3" "8.2" "8.1" "8.0")
+
+# Define additional search paths for root directories.
+set( PostgreSQL_ROOT_DIRECTORIES
+   ENV PostgreSQL_ROOT
+   ${PostgreSQL_ROOT}
+)
+foreach(suffix ${PostgreSQL_KNOWN_VERSIONS})
+  if(WIN32)
+    list(APPEND PostgreSQL_LIBRARY_ADDITIONAL_SEARCH_SUFFIXES
+        "PostgreSQL/${suffix}/lib")
+    list(APPEND PostgreSQL_INCLUDE_ADDITIONAL_SEARCH_SUFFIXES
+        "PostgreSQL/${suffix}/include")
+    list(APPEND PostgreSQL_TYPE_ADDITIONAL_SEARCH_SUFFIXES
+        "PostgreSQL/${suffix}/include/server")
+  endif()
+  if(UNIX)
+    list(APPEND PostgreSQL_LIBRARY_ADDITIONAL_SEARCH_SUFFIXES
+        "postgresql${suffix}"
+        "pgsql-${suffix}/lib")
+    list(APPEND PostgreSQL_INCLUDE_ADDITIONAL_SEARCH_SUFFIXES
+        "postgresql${suffix}"
+        "postgresql/${suffix}"
+        "pgsql-${suffix}/include")
+    list(APPEND PostgreSQL_TYPE_ADDITIONAL_SEARCH_SUFFIXES
+        "postgresql${suffix}/server"
+        "postgresql/${suffix}/server"
+        "pgsql-${suffix}/include/server")
+  endif()
+endforeach()
+
+#
+# Look for an installation.
+#
+find_path(PostgreSQL_INCLUDE_DIR
+  NAMES libpq-fe.h
+  PATHS
+   # Look in other places.
+   ${PostgreSQL_ROOT_DIRECTORIES}
+  PATH_SUFFIXES
+    pgsql
+    postgresql
+    include
+    ${PostgreSQL_INCLUDE_ADDITIONAL_SEARCH_SUFFIXES}
+  # Help the user find it if we cannot.
+  DOC "The ${PostgreSQL_INCLUDE_DIR_MESSAGE}"
+)
+
+find_path(PostgreSQL_TYPE_INCLUDE_DIR
+  NAMES catalog/pg_type.h
+  PATHS
+   # Look in other places.
+   ${PostgreSQL_ROOT_DIRECTORIES}
+  PATH_SUFFIXES
+    postgresql
+    pgsql/server
+    postgresql/server
+    include/server
+    ${PostgreSQL_TYPE_ADDITIONAL_SEARCH_SUFFIXES}
+  # Help the user find it if we cannot.
+  DOC "The ${PostgreSQL_INCLUDE_DIR_MESSAGE}"
+)
+
+# The PostgreSQL library.
+set (PostgreSQL_LIBRARY_TO_FIND pq)
+# Setting some more prefixes for the library
+set (PostgreSQL_LIB_PREFIX "")
+if ( WIN32 )
+  set (PostgreSQL_LIB_PREFIX ${PostgreSQL_LIB_PREFIX} "lib")
+  set (PostgreSQL_LIBRARY_TO_FIND ${PostgreSQL_LIB_PREFIX}${PostgreSQL_LIBRARY_TO_FIND})
+endif()
+
+function(__postgresql_find_library _name)
+  find_library(${_name}
+   NAMES ${ARGN}
+   PATHS
+     ${PostgreSQL_ROOT_DIRECTORIES}
+   PATH_SUFFIXES
+     lib
+     ${PostgreSQL_LIBRARY_ADDITIONAL_SEARCH_SUFFIXES}
+   # Help the user find it if we cannot.
+   DOC "The ${PostgreSQL_LIBRARY_DIR_MESSAGE}"
+  )
+endfunction()
+
+# For compatibility with versions prior to this multi-config search, honor
+# any PostgreSQL_LIBRARY that is already specified and skip the search.
+if(PostgreSQL_LIBRARY)
+  set(PostgreSQL_LIBRARIES "${PostgreSQL_LIBRARY}")
+  get_filename_component(PostgreSQL_LIBRARY_DIR "${PostgreSQL_LIBRARY}" PATH)
+else()
+  __postgresql_find_library(PostgreSQL_LIBRARY_RELEASE ${PostgreSQL_LIBRARY_TO_FIND})
+  __postgresql_find_library(PostgreSQL_LIBRARY_DEBUG ${PostgreSQL_LIBRARY_TO_FIND}d)
+  include(${CMAKE_CURRENT_LIST_DIR}/SelectLibraryConfigurations.cmake)
+  select_library_configurations(PostgreSQL)
+  mark_as_advanced(PostgreSQL_LIBRARY_RELEASE PostgreSQL_LIBRARY_DEBUG)
+  if(PostgreSQL_LIBRARY_RELEASE)
+    get_filename_component(PostgreSQL_LIBRARY_DIR "${PostgreSQL_LIBRARY_RELEASE}" PATH)
+  elseif(PostgreSQL_LIBRARY_DEBUG)
+    get_filename_component(PostgreSQL_LIBRARY_DIR "${PostgreSQL_LIBRARY_DEBUG}" PATH)
+  else()
+    set(PostgreSQL_LIBRARY_DIR "")
+  endif()
+endif()
+
+if (PostgreSQL_INCLUDE_DIR)
+  # Some platforms include multiple pg_config.h files for multi-lib configurations.
+  # This is a temporary workaround.  A better solution would be to compile
+  # a dummy C file and extract the value of the symbol.
+  file(GLOB _PG_CONFIG_HEADERS "${PostgreSQL_INCLUDE_DIR}/pg_config*.h")
+  foreach(_PG_CONFIG_HEADER ${_PG_CONFIG_HEADERS})
+    if(EXISTS "${_PG_CONFIG_HEADER}")
+      file(STRINGS "${_PG_CONFIG_HEADER}" pgsql_version_str
+           REGEX "^#define[\t ]+PG_VERSION_NUM[\t ]+.*")
+      if(pgsql_version_str)
+        string(REGEX REPLACE "^#define[\t ]+PG_VERSION_NUM[\t ]+([0-9]*).*"
+               "\\1" _PostgreSQL_VERSION_NUM "${pgsql_version_str}")
+        break()
+      endif()
+    endif()
+  endforeach()
+  if (_PostgreSQL_VERSION_NUM)
+    # 9.x and older encoding
+    if (_PostgreSQL_VERSION_NUM LESS 100000)
+      math(EXPR _PostgreSQL_major_version "${_PostgreSQL_VERSION_NUM} / 10000")
+      math(EXPR _PostgreSQL_minor_version "${_PostgreSQL_VERSION_NUM} % 10000 / 100")
+      math(EXPR _PostgreSQL_patch_version "${_PostgreSQL_VERSION_NUM} % 100")
+      set(PostgreSQL_VERSION_STRING "${_PostgreSQL_major_version}.${_PostgreSQL_minor_version}.${_PostgreSQL_patch_version}")
+      unset(_PostgreSQL_major_version)
+      unset(_PostgreSQL_minor_version)
+      unset(_PostgreSQL_patch_version)
+    else ()
+      math(EXPR _PostgreSQL_major_version "${_PostgreSQL_VERSION_NUM} / 10000")
+      math(EXPR _PostgreSQL_minor_version "${_PostgreSQL_VERSION_NUM} % 10000")
+      set(PostgreSQL_VERSION_STRING "${_PostgreSQL_major_version}.${_PostgreSQL_minor_version}")
+      unset(_PostgreSQL_major_version)
+      unset(_PostgreSQL_minor_version)
+    endif ()
+  else ()
+    foreach(_PG_CONFIG_HEADER ${_PG_CONFIG_HEADERS})
+      if(EXISTS "${_PG_CONFIG_HEADER}")
+        file(STRINGS "${_PG_CONFIG_HEADER}" pgsql_version_str
+             REGEX "^#define[\t ]+PG_VERSION[\t ]+\".*\"")
+        if(pgsql_version_str)
+          string(REGEX REPLACE "^#define[\t ]+PG_VERSION[\t ]+\"([^\"]*)\".*"
+                 "\\1" PostgreSQL_VERSION_STRING "${pgsql_version_str}")
+          break()
+        endif()
+      endif()
+    endforeach()
+  endif ()
+  unset(_PostgreSQL_VERSION_NUM)
+  unset(pgsql_version_str)
+endif()
+
+if("Server" IN_LIST PostgreSQL_FIND_COMPONENTS)
+  set(PostgreSQL_Server_FOUND TRUE)
+  if(NOT PostgreSQL_TYPE_INCLUDE_DIR)
+    set(PostgreSQL_Server_FOUND FALSE)
+  endif()
+endif()
+
+# Did we find anything?
+include(${CMAKE_CURRENT_LIST_DIR}/FindPackageHandleStandardArgs.cmake)
+
+# TODO find_package_handle_standard_args(PostgreSQL_13
+find_package_handle_standard_args(PostgreSQL_13
+                                  REQUIRED_VARS PostgreSQL_LIBRARY PostgreSQL_INCLUDE_DIR
+                                  HANDLE_COMPONENTS
+                                  VERSION_VAR PostgreSQL_VERSION_STRING)
+set(PostgreSQL_FOUND  ${POSTGRESQL_13_FOUND})
+
+function(__postgresql_import_library _target _var _config)
+  if(_config)
+    set(_config_suffix "_${_config}")
+  else()
+    set(_config_suffix "")
+  endif()
+
+  set(_lib "${${_var}${_config_suffix}}")
+  if(EXISTS "${_lib}")
+    if(_config)
+      set_property(TARGET ${_target} APPEND PROPERTY
+        IMPORTED_CONFIGURATIONS ${_config})
+    endif()
+    set_target_properties(${_target} PROPERTIES
+      IMPORTED_LOCATION${_config_suffix} "${_lib}")
+  endif()
+endfunction()
+
+# Now try to get the include and library path.
+if(PostgreSQL_FOUND)
+  set(PostgreSQL_INCLUDE_DIRS ${PostgreSQL_INCLUDE_DIR})
+  if(PostgreSQL_TYPE_INCLUDE_DIR)
+    list(APPEND PostgreSQL_INCLUDE_DIRS ${PostgreSQL_TYPE_INCLUDE_DIR})
+  endif()
+  set(PostgreSQL_LIBRARY_DIRS ${PostgreSQL_LIBRARY_DIR})
+  if (NOT TARGET PostgreSQL::PostgreSQL)
+    add_library(PostgreSQL::PostgreSQL UNKNOWN IMPORTED)
+    set_target_properties(PostgreSQL::PostgreSQL PROPERTIES
+      INTERFACE_INCLUDE_DIRECTORIES "${PostgreSQL_INCLUDE_DIRS}")
+    __postgresql_import_library(PostgreSQL::PostgreSQL PostgreSQL_LIBRARY "")
+    __postgresql_import_library(PostgreSQL::PostgreSQL PostgreSQL_LIBRARY "RELEASE")
+    __postgresql_import_library(PostgreSQL::PostgreSQL PostgreSQL_LIBRARY "DEBUG")
+  endif ()
+endif()
+
+mark_as_advanced(PostgreSQL_INCLUDE_DIR PostgreSQL_TYPE_INCLUDE_DIR)
+
+cmake_policy(POP)
+
diff --git a/cmake/SelectLibraryConfigurations.cmake b/cmake/SelectLibraryConfigurations.cmake
new file mode 100644
index 00000000..586a98ef
--- /dev/null
+++ b/cmake/SelectLibraryConfigurations.cmake
@@ -0,0 +1,81 @@
+# Distributed under the OSI-approved BSD 3-Clause License.  See accompanying
+# file Copyright.txt or https://cmake.org/licensing for details.
+
+#[=======================================================================[.rst:
+SelectLibraryConfigurations
+---------------------------
+
+.. code-block:: cmake
+
+  select_library_configurations(basename)
+
+This macro takes a library base name as an argument, and will choose
+good values for the variables
+
+::
+
+  basename_LIBRARY
+  basename_LIBRARIES
+  basename_LIBRARY_DEBUG
+  basename_LIBRARY_RELEASE
+
+depending on what has been found and set.
+
+If only ``basename_LIBRARY_RELEASE`` is defined, ``basename_LIBRARY`` will
+be set to the release value, and ``basename_LIBRARY_DEBUG`` will be set
+to ``basename_LIBRARY_DEBUG-NOTFOUND``.  If only ``basename_LIBRARY_DEBUG``
+is defined, then ``basename_LIBRARY`` will take the debug value, and
+``basename_LIBRARY_RELEASE`` will be set to ``basename_LIBRARY_RELEASE-NOTFOUND``.
+
+If the generator supports configuration types, then ``basename_LIBRARY``
+and ``basename_LIBRARIES`` will be set with debug and optimized flags
+specifying the library to be used for the given configuration.  If no
+build type has been set or the generator in use does not support
+configuration types, then ``basename_LIBRARY`` and ``basename_LIBRARIES``
+will take only the release value, or the debug value if the release one
+is not set.
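+
+Example (a minimal sketch; the ``Foo`` base name and both ``find_library``
+calls are hypothetical):
+
+.. code-block:: cmake
+
+  find_library(Foo_LIBRARY_RELEASE NAMES foo)
+  find_library(Foo_LIBRARY_DEBUG NAMES foo_d)
+  select_library_configurations(Foo)
+  # Foo_LIBRARY and Foo_LIBRARIES now hold the chosen values.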
+#]=======================================================================]
+
+# This macro was adapted from the FindQt4 CMake module and is maintained by Will
+# Dicharry <wd...@stellarscience.com>.
+
+macro(select_library_configurations basename)
+    if(NOT ${basename}_LIBRARY_RELEASE)
+        set(${basename}_LIBRARY_RELEASE "${basename}_LIBRARY_RELEASE-NOTFOUND" CACHE FILEPATH "Path to a library.")
+    endif()
+    if(NOT ${basename}_LIBRARY_DEBUG)
+        set(${basename}_LIBRARY_DEBUG "${basename}_LIBRARY_DEBUG-NOTFOUND" CACHE FILEPATH "Path to a library.")
+    endif()
+
+    get_property(_isMultiConfig GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
+    if( ${basename}_LIBRARY_DEBUG AND ${basename}_LIBRARY_RELEASE AND
+           NOT ${basename}_LIBRARY_DEBUG STREQUAL ${basename}_LIBRARY_RELEASE AND
+           ( _isMultiConfig OR CMAKE_BUILD_TYPE ) )
+        # if the generator is multi-config or if CMAKE_BUILD_TYPE is set for
+        # single-config generators, set optimized and debug libraries
+        set( ${basename}_LIBRARY "" )
+        foreach( _libname IN LISTS ${basename}_LIBRARY_RELEASE )
+            list( APPEND ${basename}_LIBRARY optimized "${_libname}" )
+        endforeach()
+        foreach( _libname IN LISTS ${basename}_LIBRARY_DEBUG )
+            list( APPEND ${basename}_LIBRARY debug "${_libname}" )
+        endforeach()
+    elseif( ${basename}_LIBRARY_RELEASE )
+        set( ${basename}_LIBRARY ${${basename}_LIBRARY_RELEASE} )
+    elseif( ${basename}_LIBRARY_DEBUG )
+        set( ${basename}_LIBRARY ${${basename}_LIBRARY_DEBUG} )
+    else()
+        set( ${basename}_LIBRARY "${basename}_LIBRARY-NOTFOUND")
+    endif()
+
+    set( ${basename}_LIBRARIES "${${basename}_LIBRARY}" )
+
+    if( ${basename}_LIBRARY )
+        set( ${basename}_FOUND TRUE )
+    endif()
+
+    mark_as_advanced( ${basename}_LIBRARY_RELEASE
+        ${basename}_LIBRARY_DEBUG
+    )
+endmacro()
+
diff --git a/cmake/TestIfNoUTF8BOM.py b/cmake/TestIfNoUTF8BOM.py
index 135a0c00..9b46a64a 100755
--- a/cmake/TestIfNoUTF8BOM.py
+++ b/cmake/TestIfNoUTF8BOM.py
@@ -5,7 +5,7 @@ import sys
 def detectBOM(inFileName):
     file = open(inFileName, 'r')
     file.seek(0)
-    head = map(ord, file.read(4))
+    head = list(map(ord, file.read(4)))
     if head == [0x00, 0x00, 0xFE, 0xFF]:
         return "utf_32_be"
     elif head == [0xFF, 0xFE, 0x00, 0x00]:
@@ -27,7 +27,7 @@ def main(argv=None):
     if BOM != "utf_8":
         return 0
     else:
-        print ('Detected byte-order mark (%s) in file "%s".' % (BOM, argv[1]))
+        print('Detected byte-order mark (%s) in file "%s".' % (BOM, argv[1]))
         return 1
 
 if __name__ == '__main__':
diff --git a/examples/hello_world/iterative/simple_logistic.py_in b/examples/hello_world/iterative/simple_logistic.py_in
index d19740c4..94cc0867 100644
--- a/examples/hello_world/iterative/simple_logistic.py_in
+++ b/examples/hello_world/iterative/simple_logistic.py_in
@@ -112,7 +112,7 @@ def __logregr_train_compute(schema_madlib, tbl_source, tbl_output, dep_col,
     state = None
     for it in range(0, max_iter):
         res_tuple = plpy.execute(update_plan, [state])
-        state = res_tuple[0].values()[0]
+        state = list(res_tuple[0].values())[0]
 
     output_table = plpy.prepare(
             """
diff --git a/examples/hello_world/iterative/simple_logistic.sql_in b/examples/hello_world/iterative/simple_logistic.sql_in
index a12da7e5..4c5d27cc 100644
--- a/examples/hello_world/iterative/simple_logistic.sql_in
+++ b/examples/hello_world/iterative/simple_logistic.sql_in
@@ -185,7 +185,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.logregr_simple_train (
     verbose             BOOLEAN
 ) RETURNS VOID AS $$
 PythonFunction(hello_world, simple_logistic, logregr_simple_train)
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 ------------------------------------------------------------------------
@@ -207,7 +207,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.logregr_simple_train(
     message    TEXT
 ) RETURNS TEXT AS $$
     PythonFunction(hello_world, simple_logistic, logregr_simple_help_msg)
-$$ LANGUAGE plpythonu IMMUTABLE
+$$ LANGUAGE plpython3u IMMUTABLE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.logregr_simple_train()
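
Every plpythonu function body in this commit is moved to plpython3u, the
PL/Python language backed by Python 3. The presence of the language can be
verified the same way madpack does later in this patch; a minimal sketch,
assuming it runs inside a PL/Python function where the plpy module is in
scope:

    # confirm the Python 3 flavor of PL/Python is installed
    rv = plpy.execute("SELECT count(*) AS cnt FROM pg_language "
                      "WHERE lanname = 'plpython3u'")
    assert int(rv[0]['cnt']) > 0, 'plpython3u not installed'
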
diff --git a/methods/array_ops/src/pg_gp/array_ops.sql_in b/methods/array_ops/src/pg_gp/array_ops.sql_in
index c1ec853f..f9a6b764 100644
--- a/methods/array_ops/src/pg_gp/array_ops.sql_in
+++ b/methods/array_ops/src/pg_gp/array_ops.sql_in
@@ -731,7 +731,7 @@ FROM (
 ) t
 ORDER BY 1,2;
         """.format(schema_madlib='MADLIB_SCHEMA')
-$$ LANGUAGE PLPYTHONU IMMUTABLE
+$$ LANGUAGE PLPYTHON3U IMMUTABLE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 m4_changequote(<!, !>)
diff --git a/methods/sketch/src/pg_gp/countmin.py_in b/methods/sketch/src/pg_gp/countmin.py_in
index 67a862ff..d8e89d9c 100644
--- a/methods/sketch/src/pg_gp/countmin.py_in
+++ b/methods/sketch/src/pg_gp/countmin.py_in
@@ -12,7 +12,7 @@ __numcounters = 1024 # magic mod of hash function
 __countmin_sz = __depth*__numcounters
 __numsketches = __ranges
 total_size = __numsketches * __countmin_sz
-__max_int64 = (1L << 63) - 1
+__max_int64 = (1 << 63) - 1
 __min_int64 = __max_int64 * (-1)
 
 def count(b64sketch, val):
@@ -84,7 +84,7 @@ def __find_ranges_recursive(bot, top, power, r):
         r = __find_ranges_recursive(0, top, power-1, r)
         return r
 
-    width = top - bot + 1L
+    width = top - bot + 1
 
     # account for the fact that MIN and MAX are 1 off the true power of 2
     if (top == __max_int64 or bot == __min_int64):
@@ -94,13 +94,13 @@ def __find_ranges_recursive(bot, top, power, r):
     if (dyad > 62):
         # dangerously big, so split.  we know that we don't span 0.
         sign = -1 if (top < 0) else 1
-        r = __find_ranges_recursive(bot, (1L << 62)*sign - 1, 62, r)
-        r = __find_ranges_recursive((1L << 62)*sign, top, 62, r)
+        r = __find_ranges_recursive(bot, (1 << 62)*sign - 1, 62, r)
+        r = __find_ranges_recursive((1 << 62)*sign, top, 62, r)
         return r
 
     # if we get here, we have a range of size 2 or greater.
     # Find the largest dyadic range width in this range.
-    pow_dyad = 1L << dyad
+    pow_dyad = 1 << dyad
 
     # if this is a complete dyad
     if (pow_dyad == width):
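
The 1L literals dropped here are Python 2 leftovers: Python 3 has a single
arbitrary-precision int type and rejects the L suffix as a SyntaxError, while
plain literals behave identically:

    >>> (1 << 63) - 1    # no long type needed; Python 3 ints are unbounded
    9223372036854775807
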
diff --git a/methods/sketch/src/pg_gp/sketch.sql_in b/methods/sketch/src/pg_gp/sketch.sql_in
index 2ae36a25..fb6bdc7a 100644
--- a/methods/sketch/src/pg_gp/sketch.sql_in
+++ b/methods/sketch/src/pg_gp/sketch.sql_in
@@ -497,7 +497,7 @@ RETURNS int8
 AS $$
     PythonFunctionBodyOnlyNoSchema(`sketch', `countmin')
     return countmin.count(sketches64, val)
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
 
 
@@ -514,7 +514,7 @@ RETURNS int8
 AS $$
     PythonFunctionBodyOnlyNoSchema(`sketch', `countmin')
     return countmin.rangecount(sketches64, bot, top)
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
 
 /**
@@ -531,7 +531,7 @@ RETURNS int8
 AS $$
     PythonFunctionBodyOnlyNoSchema(`sketch', `countmin')
     return countmin.centile(sketches64, centile, cnt)
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
 
 /**
@@ -547,7 +547,7 @@ RETURNS int8
 AS $$
     PythonFunctionBodyOnlyNoSchema(`sketch', `countmin')
     return countmin.centile(sketches64, 50, cnt)
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
 
 /**
@@ -560,7 +560,7 @@ AS $$
     PythonFunctionBodyOnlyNoSchema(`sketch', `countmin')
     # schema_madlib comes from PythonFunctionBodyOnly
     return countmin.width_histogram( sketches64, themin, themax, nbuckets)
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
 
 /** @brief <c>cmsketch_depth_histogram</c> is a UDA that takes a cmsketch and a number of buckets n, and produces an n-bucket histogram for the column where each bucket has approximately the same count. The output is a text string containing triples {lo, hi, count} representing the buckets; counts are approximate.  Note that an equi-depth histogram is equivalent to a spanning set of equi-spaced centiles.
@@ -571,7 +571,7 @@ RETURNS text
 AS $$
     PythonFunctionBodyOnlyNoSchema(`sketch', `countmin')
     return countmin.depth_histogram(sketches64, nbuckets)
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
 
 -- MFV Sketch functions
diff --git a/methods/stemmer/src/pg_gp/porter_stemmer.sql_in b/methods/stemmer/src/pg_gp/porter_stemmer.sql_in
index 43e28053..ba917b90 100644
--- a/methods/stemmer/src/pg_gp/porter_stemmer.sql_in
+++ b/methods/stemmer/src/pg_gp/porter_stemmer.sql_in
@@ -232,7 +232,7 @@ SELECT id, {schema_madlib}.stem_token(word) FROM token_tbl;
  -- array input
 SELECT {schema_madlib}.stem_token_arr(array_agg(word order by id)) FROM token_tbl;
         """.format(schema_madlib='MADLIB_SCHEMA')
-$$ LANGUAGE PLPYTHONU IMMUTABLE
+$$ LANGUAGE PLPYTHON3U IMMUTABLE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.stem_token_arr()
diff --git a/methods/svec_util/src/pg_gp/svec_util.sql_in b/methods/svec_util/src/pg_gp/svec_util.sql_in
index 31f8845b..3abeb277 100644
--- a/methods/svec_util/src/pg_gp/svec_util.sql_in
+++ b/methods/svec_util/src/pg_gp/svec_util.sql_in
@@ -35,7 +35,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.gen_doc_svecs(
     doc_term_info_col   TEXT
 ) RETURNS TEXT AS $$
 PythonFunction(svec_util, generate_svec, generate_doc_svecs)
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA');
 
 --! Helper function for MADLIB_SCHEMA.gen_doc_svec UDF.
@@ -43,7 +43,7 @@ m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA');
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.gen_doc_svecs()
 RETURNS TEXT AS $$
 PythonFunction(svec_util, generate_svec, generate_doc_svecs_help)
-$$ LANGUAGE plpythonu IMMUTABLE
+$$ LANGUAGE plpython3u IMMUTABLE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL');
 
 --! Basic floating point scalar operator: MIN.
diff --git a/src/bin/madpack b/src/bin/madpack
index e2298081..4f87b48e 100755
--- a/src/bin/madpack
+++ b/src/bin/madpack
@@ -13,7 +13,7 @@
 # 2. Pass all arguments to ../madpack/madpack.py
 
 PYTHON_PREFIX="python"
-PYTHON_VERSIONS="2.7 2.6"
+PYTHON_VERSIONS="3.8 2.7 2.6"
 
 # create absolute path to madpack.py
 pushd `dirname $0` > /dev/null
diff --git a/src/madpack/argparse.py b/src/madpack/argparse.py
index 334384ca..f006fd48 100644
--- a/src/madpack/argparse.py
+++ b/src/madpack/argparse.py
@@ -97,9 +97,9 @@ except NameError:
     from sets import Set as set
 
 try:
-    basestring
+    str
 except NameError:
-    basestring = str
+    str = str
 
 try:
     sorted
@@ -1729,7 +1729,7 @@ class ArgumentParser(_AttributeHolder, _ActionsContainer):
                 if not hasattr(namespace, action.dest):
                     if action.default is not SUPPRESS:
                         default = action.default
-                        if isinstance(action.default, basestring):
+                        if isinstance(action.default, str):
                             default = self._get_value(action, default)
                         setattr(namespace, action.dest, default)
 
@@ -2207,7 +2207,7 @@ class ArgumentParser(_AttributeHolder, _ActionsContainer):
                 value = action.const
             else:
                 value = action.default
-            if isinstance(value, basestring):
+            if isinstance(value, str):
                 value = self._get_value(action, value)
                 self._check_value(action, value)
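
basestring no longer exists in Python 3, where str is the only text type, so
the isinstance checks collapse to str. Note that the rewritten guard near the
top of the file (str = str inside try/except NameError) is now a no-op, since
str is always defined; a shim that keeps the two-version intent explicit might
look like this (string_types is a hypothetical name, not part of this patch):

    try:
        string_types = basestring       # Python 2
    except NameError:
        string_types = str              # Python 3

    isinstance("--flag", string_types)  # True on either version
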
 
diff --git a/src/madpack/configyml.py b/src/madpack/configyml.py
index 4ca7b002..b2a8bea5 100644
--- a/src/madpack/configyml.py
+++ b/src/madpack/configyml.py
@@ -18,6 +18,16 @@ class MadPackConfigError(Exception):
      def __str__(self):
          return repr(self.value)
 
+##
+# Convert dict key and value from bytes to str
+##
+def convert(data):
+    if isinstance(data, bytes):  return data.decode()
+    if isinstance(data, dict):   return dict(map(convert, data.items()))
+    if isinstance(data, tuple):  return tuple(map(convert, data))
+    if isinstance(data, list):   return list(map(convert, data))
+    return data
+
 ## 
 # Load version string from Version.yml file.
 # Typical Version.yml file:
@@ -29,35 +39,39 @@ def get_version(configdir):
     try:
         conf = yaml.load(open(configdir + '/Version.yml'))
     except:
-        print "configyml : ERROR : missing or malformed Version.yml"
+        print("configyml : ERROR : missing or malformed Version.yml")
         exit(2)
 
+    # XXX
+    conf = convert(conf)
     try:
         conf['version']
     except:
-        print "configyml : ERROR : malformed Version.yml"
+        print("configyml : ERROR : malformed Version.yml")
         exit(2)
         
-    return str( conf['version'])
+    return conf['version']
 
     
 ## 
 # Load Ports.yml file
-# @param configdir the directory where we can find Version.yml
+# @param configdir the directory where we can find Ports.yml
 ##
 def get_ports(configdir):
 
     try:
         conf = yaml.load(open(configdir + '/Ports.yml'))
     except:
-        print "configyml : ERROR : missing or malformed Ports.yml"
+        print("configyml : ERROR : missing or malformed Ports.yml")
         exit(2)
 
+    # XXX
+    conf = convert(conf)
     for port in conf:
         try:
             conf[port]['name']
         except:
-            print "configyml : ERROR : malformed Ports.yml: no name element for port " + port
+            print("configyml : ERROR : malformed Ports.yml: no name element for port " + port)
             exit(2)
         
     return conf
@@ -69,23 +83,25 @@ def get_ports(configdir):
 # @param id the ID of the specific DB port
 # @param src the directory of the source code for a specific port
 ##
-def get_modules( confdir):
+def get_modules(confdir):
 
     fname = "Modules.yml"
     
     try:
         conf = yaml.load( open( confdir + '/' + fname))
     except:
-        print "configyml : ERROR : missing or malformed " + confdir + '/' + fname
+        print("configyml : ERROR : missing or malformed " + confdir + '/' + fname)
         raise Exception
 
+    # XXX
+    conf = convert(conf)
     try:
         conf['modules']
     except:
-        print "configyml : ERROR : missing modules section in " + fname
+        print("configyml : ERROR : missing modules section in " + fname)
         raise Exception
-        
-    conf = topsort_modules( conf)
+
+    conf = topsort_modules(conf)
     
     return conf
 
@@ -110,9 +126,9 @@ def topsort(depdict):
         found = 0  # flag to check if we find anything new this iteration
         newdepdict = dict()
         # find the keys with no values
-        keynoval = filter(lambda t: t[1] == [], depdict.iteritems())
+        keynoval = [t for t in iter(depdict.items()) if t[1] == []]
         # find the values that are not keys
-        valnotkey = set(flatten(depdict.itervalues())) - set(depdict.iterkeys())
+        valnotkey = set(flatten(iter(depdict.values()))) - set(depdict.keys())
 
         candidates = set([k[0] for k in keynoval]) | valnotkey
         for c in candidates:
@@ -120,9 +136,9 @@ def topsort(depdict):
                 found += 1
                 out[c] = curlevel
 
-        for k in depdict.iterkeys():
+        for k in depdict.keys():
             if depdict[k] != []:
-                newdepdict[k] = filter(lambda v: v not in valnotkey, depdict[k])
+                newdepdict[k] = [v for v in depdict[k] if v not in valnotkey]
         # newdepdict = dict(newdepdict)
         if newdepdict == depdict:
             raise MadPackConfigError(str(depdict))
@@ -149,17 +165,18 @@ def topsort_modules(conf):
         module_dict = topsort(depdict)
     except MadPackConfigError as e:
         raise MadPackConfigError("invalid cyclic dependency between modules: " + e.value + "; check Modules.yml files")
+
     missing = set(module_dict.keys()) - set(depdict.keys())
     inverted = dict()
     if len(missing) > 0:
-        for k in depdict.iterkeys():
+        for k in depdict.keys():
             for v in depdict[k]:
                 if v not in inverted:
                     inverted[v] = set()
                 inverted[v].add(k)
-        print "configyml : ERROR : required modules missing from Modules.yml: " 
+        print("configyml : ERROR : required modules missing from Modules.yml: ")
         for m in missing:
-            print  "    " + m + " (required by " + str(list(inverted[m])) + ")"
+            print("    " + m + " (required by " + str(list(inverted[m])) + ")")
         exit(2)
     conf['modules'] = sorted(conf['modules'], key=lambda m:module_dict[m['name']])
     return conf
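
The new convert() helper recursively decodes bytes keys and values that the
vendored YAML loader can yield under Python 3, where bytes and str no longer
compare equal; the iteritems()/itervalues()/iterkeys() calls go away because
Python 3 keeps only the view-returning items()/values()/keys(). With the
definition above:

    >>> convert({b'version': b'2.0'})
    {'version': '2.0'}
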
diff --git a/src/madpack/create_changelist.py b/src/madpack/create_changelist.py
index 67ccdd39..d5a54920 100644
--- a/src/madpack/create_changelist.py
+++ b/src/madpack/create_changelist.py
@@ -42,27 +42,27 @@ new_vers = sys.argv[3]
 ch_filename = sys.argv[4]
 
 if os.path.exists(ch_filename):
-    print "{0} already exists".format(ch_filename)
+    print("{0} already exists".format(ch_filename))
     raise SystemExit
 
 err1 = os.system("""psql {0} -l > /dev/null""".format(database))
 if err1 != 0:
-    print "Database {0} does not exist".format(database)
+    print("Database {0} does not exist".format(database))
     raise SystemExit
 
 err1 = os.system("""psql {0} -c "select madlib_old_vers.version()" > /dev/null
                  """.format(database))
 if err1 != 0:
-    print "MADlib is not installed in the madlib_old_vers schema. Please refer to the Prequisites."
+    print("MADlib is not installed in the madlib_old_vers schema. Please refer to the Prequisites.")
     raise SystemExit
 
 err1 = os.system("""psql {0} -c "select madlib.version()" > /dev/null
                  """.format(database))
 if err1 != 0:
-    print "MADlib is not installed in the madlib schema. Please refer to the Prequisites."
+    print("MADlib is not installed in the madlib schema. Please refer to the Prequisites.")
     raise SystemExit
 
-print "Creating changelist {0}".format(ch_filename)
+print("Creating changelist {0}".format(ch_filename))
 os.system("""
     rm -f /tmp/madlib_tmp_nm.txt \
     /tmp/madlib_tmp_udf.txt \
@@ -77,7 +77,7 @@ try:
     # Find the new modules using the git diff
     err1 = os.system("git diff {old_vers} {new_vers} --name-only --diff-filter=A > /tmp/madlib_tmp_nm.txt".format(**locals()))
     if err1 != 0:
-        print "Git diff failed. Please ensure that branches/tags are fetched."
+        print("Git diff failed. Please ensure that branches/tags are fetched.")
         raise SystemExit
 
     f = open("/tmp/madlib_tmp_cl.yaml", "w")
@@ -322,7 +322,7 @@ try:
     os.system("cp /tmp/madlib_tmp_cl.yaml {0}".format(ch_filename))
 
 except:
-    print "Something went wrong! The changelist might be wrong/corrupted."
+    print("Something went wrong! The changelist might be wrong/corrupted.")
     raise
 finally:
     os.system("""
diff --git a/src/madpack/diff_udf.sql b/src/madpack/diff_udf.sql
index 3226409f..dc354635 100644
--- a/src/madpack/diff_udf.sql
+++ b/src/madpack/diff_udf.sql
@@ -8,7 +8,7 @@ RETURNS text AS $$
     if argstr is None:
         return "NULL"
     return argstr.replace(schema_name + ".", '')
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 
 CREATE OR REPLACE FUNCTION get_functions(table_name text, schema_name text,
@@ -84,7 +84,7 @@ plpy.execute("""
     WHERE retype LIKE '{type_filter}' OR retype LIKE '{type_filter}[]'
     """.format(table_name=table_name, schema_name=schema_name,
         type_filter=type_filter, proisagg_wrapper=proisagg_wrapper))
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 
 DROP TABLE IF EXISTS functions_madlib_old_version;
diff --git a/src/madpack/diff_udo.sql b/src/madpack/diff_udo.sql
index 2434f08e..da0c93de 100644
--- a/src/madpack/diff_udo.sql
+++ b/src/madpack/diff_udo.sql
@@ -25,14 +25,14 @@ RETURNS text AS $$
     if argstr is None:
         return "NULL"
     return argstr.replace(schema_name + ".", '')
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE OR REPLACE FUNCTION alter_schema(argstr text, schema_name text)
 RETURNS text AS $$
     if argstr is None:
         return "NULL"
     return argstr.replace(schema_name + ".", 'schema_madlib.')
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 
 CREATE OR REPLACE FUNCTION get_udos(table_name text, schema_name text,
@@ -63,7 +63,7 @@ $$
                   OR rettype LIKE 'schema_madlib.{type_filter}[]'
                   OR '{type_filter}' LIKE 'Full'
     """.format(table_name=table_name, schema_name=schema_name, type_filter=type_filter))
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 
 DROP TABLE if exists udo_madlib_old_version;
diff --git a/src/madpack/diff_udoc.sql b/src/madpack/diff_udoc.sql
index 1e18f8a1..579d0d8c 100644
--- a/src/madpack/diff_udoc.sql
+++ b/src/madpack/diff_udoc.sql
@@ -26,14 +26,14 @@ RETURNS text AS $$
     if argstr is None:
         return "NULL"
     return argstr.replace(schema_name + ".", '')
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE OR REPLACE FUNCTION alter_schema(argstr text, schema_name text)
 RETURNS text AS $$
     if argstr is None:
         return "NULL"
     return argstr.replace(schema_name + ".", 'schema_madlib.')
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE OR REPLACE FUNCTION get_udocs(table_name text, schema_name text,
                                          type_filter text)
@@ -62,7 +62,7 @@ $$
 	WHERE operators LIKE '%schema_madlib.{type_filter}%' OR '{type_filter}' LIKE 'Full'
 	""".format(table_name=table_name, schema_name=schema_name, type_filter=type_filter))
 
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 DROP TABLE if exists udoc_madlib_old_version;
 DROP TABLE if exists udoc_madlib_new_version;
diff --git a/src/madpack/diff_udt.sql b/src/madpack/diff_udt.sql
index 1f49cf11..50d7f10c 100644
--- a/src/madpack/diff_udt.sql
+++ b/src/madpack/diff_udt.sql
@@ -5,7 +5,7 @@ RETURNS text AS $$
     if argstr is None:
         return "NULL"
     return argstr.replace(schema_name + ".", '')
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE OR REPLACE FUNCTION get_types(schema_name text)
 RETURNS VOID AS
@@ -25,7 +25,7 @@ $$
           AND n.nspname ~ '^({schema_name})$'
         ORDER BY 1, 2;
         """.format(schema_name=schema_name))
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 
 CREATE OR REPLACE FUNCTION detect_changed_types(
@@ -72,7 +72,7 @@ $$
         if not res:
             changed_udt.append(name)
     return changed_udt
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 -- Get UDTs
 DROP TABLE IF EXISTS types_madlib;
diff --git a/src/madpack/madpack.py b/src/madpack/madpack.py
index d7982be5..3d585770 100755
--- a/src/madpack/madpack.py
+++ b/src/madpack/madpack.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
 # Main Madpack installation executable.
 # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
@@ -26,12 +26,12 @@ from utilities import run_query
 # Required Python version
 py_min_ver = [2, 6]
 
-# Check python version
-if sys.version_info[:2] < py_min_ver:
-    print("ERROR: python version too old ({0}). You need {1} or greater.".
-          format('.'.join(map(str, sys.version_info[:3])),
-                 '.'.join(map(str, py_min_ver))))
-    exit(1)
+# XXX py3 Check python version
+#if sys.version_info[:2] < py_min_ver:
+#    print(("ERROR: python version too old ({0}). You need {1} or greater.".
+#          format('.'.join(map(str, sys.version_info[:3])),
+#                 '.'.join(map(str, py_min_ver)))))
+#    exit(1)
 
 # Find MADlib root directory. This file is installed to
 # $MADLIB_ROOT/madpack/madpack.py, so to get $MADLIB_ROOT we need to go
@@ -54,6 +54,7 @@ maddir_lib = "libmadlib.so"  # C/C++ libraries
 
 # Read the config files
 ports = configyml.get_ports(maddir_conf)  # object made of Ports.yml
+# XXX py3
 new_madlib_ver = configyml.get_version(maddir_conf)  # MADlib OS-level version
 portid_list = []
 for port in ports:
@@ -82,7 +83,7 @@ def _make_dir(dir):
         try:
             os.makedirs(dir)
         except:
-            print "ERROR: can not create directory: %s. Check permissions." % dir
+            print("ERROR: can not create directory: %s. Check permissions." % dir)
             exit(1)
 # ------------------------------------------------------------------------------
 
@@ -328,9 +329,9 @@ def _parse_result_logfile(retval, logfile, sql_abspath,
 
     if is_install_check_logfile:
         # Output result
-        print "TEST CASE RESULT|Module: " + module + \
+        print("TEST CASE RESULT|Module: " + module + \
             "|" + os.path.basename(sql_filename) + "|" + result + \
-            "|Time: %d milliseconds" % (milliseconds)
+            "|Time: %d milliseconds" % (milliseconds))
 
     if result == 'FAIL':
         error_(this, "Failed executing %s" % sql_abspath, stop=False)
@@ -388,19 +389,21 @@ def _plpy_check(py_min_ver):
 
     # Check PL/Python existence
     rv = _internal_run_query("SELECT count(*) AS CNT FROM pg_language "
-                             "WHERE lanname = 'plpythonu'", True)
+                             "WHERE lanname = 'plpython3u'", True)
     if int(rv[0]['cnt']) > 0:
         info_(this, "> PL/Python already installed", verbose)
     else:
         info_(this, "> PL/Python not installed", verbose)
         info_(this, "> Creating language PL/Python...", True)
         try:
-            _internal_run_query("CREATE LANGUAGE plpythonu;", True)
+            # XXX py3
+            _internal_run_query("CREATE LANGUAGE plpython3u;", True)
         except:
-            error_(this, """Cannot create language plpythonu. Please check if you
+            error_(this, """Cannot create language plpython3u. Please check if you
                 have configured and installed portid (your platform) with
                 `--with-python` option. Stopping installation...""", False)
-            raise Exception
+            # XXX py3
+            #raise Exception
 
     # Check PL/Python version
     _internal_run_query("DROP FUNCTION IF EXISTS plpy_version_for_madlib();", False)
@@ -412,7 +415,7 @@ def _plpy_check(py_min_ver):
             # return '.'.join(str(item) for item in sys.version_info[:3])
             return str(sys.version_info[:3]).replace(',','.').replace(' ','').replace(')','').replace('(','')
         $$
-        LANGUAGE plpythonu;
+        LANGUAGE plpython3u;
     """, True)
     rv = _internal_run_query("SELECT plpy_version_for_madlib() AS ver;", True)
     python = rv[0]['ver']
@@ -643,6 +646,9 @@ def _process_py_sql_files_in_modules(modset, args_dict):
         else:
             maddir_mod_py = maddir + "/modules"
 
+        ### XXX PY3
+        # info_(this, "\ncalling_operation: %s, %s" % (calling_operation, maddir_mod_py), verbose)
+
         # Find the SQL module dir (platform specific or generic)
         if os.path.isdir(maddir + "/ports/" + portid + "/modules/" + module):
             maddir_mod_sql = maddir + "/ports/" + portid + "/modules"
@@ -1108,9 +1114,9 @@ def _append_uninstall_madlib_sqlfile(schema, db_madlib_ver, is_schema_in_db,
                   ao['column'] + ' : ' + ao['type'], True)
     info_(this, "***********************************************************************************", True)
     info_(this, "Would you like to continue? [Y/N]", True)
-    go = raw_input('>>> ').upper()
+    go = input('>>> ').upper()
     while (go not in ('Y', 'N', 'YES', 'NO')):
-        go = raw_input('Yes or No >>> ').upper()
+        go = input('Yes or No >>> ').upper()
 
     # 2) Do the uninstall/drop
     if go in ('N', 'NO'):
@@ -1338,7 +1344,7 @@ def main(argv):
     global tmpdir
     try:
         tmpdir = tempfile.mkdtemp('', 'madlib.', args.tmpdir)
-    except OSError, e:
+    except OSError as e:
         tmpdir = e.filename
         error_(this, "cannot create temporary directory: '%s'." % tmpdir, True)
 
@@ -1413,6 +1419,7 @@ def main(argv):
         supportedVersions = [dirItem for dirItem in os.listdir(portdir)
                              if os.path.isdir(os.path.join(portdir, dirItem)) and
                              re.match("^\d+", dirItem)]
+
         if dbver is None:
             dbver = ".".join(
                 map(str, max([versionStr.split('.')
@@ -1541,4 +1548,4 @@ if __name__ == "__main__":
     if not keeplogs:
         shutil.rmtree(tmpdir)
     else:
-        print "INFO: Log files saved in " + tmpdir
+        print("INFO: Log files saved in " + tmpdir)
diff --git a/src/madpack/sort-module.py b/src/madpack/sort-module.py
index 0d8d478a..b60620b0 100644
--- a/src/madpack/sort-module.py
+++ b/src/madpack/sort-module.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 
 """
 @file sort-module.py
@@ -16,7 +16,7 @@ so you need change directory first to run it.
 import re
 import sys
 
-import configyml
+from . import configyml
 
 
 def get_modules_in_order():
@@ -64,7 +64,7 @@ def main(file_paths):
                 is of the form: '.../modules/<module_name>/...'.
     """
     file_order = sorted(file_paths, key=find_order)
-    print " ".join(file_order)
+    print(" ".join(file_order))
 
 
 if __name__ == '__main__':
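
One caveat on from . import configyml: a relative import only resolves when
the module is loaded as part of a package, and this file's docstring describes
running it directly as a script, which raises "ImportError: attempted relative
import with no known parent package" on Python 3. A common fallback keeps both
entry points working:

    try:
        from . import configyml    # imported as part of the madpack package
    except ImportError:
        import configyml           # executed directly as a script
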
diff --git a/src/madpack/upgrade_util.py b/src/madpack/upgrade_util.py
index 2a44e8bc..4f971f5d 100644
--- a/src/madpack/upgrade_util.py
+++ b/src/madpack/upgrade_util.py
@@ -178,9 +178,9 @@ class ChangeHandler(UpgradeBase):
         _return_obj = defaultdict(list) if not output_config_dict else output_config_dict
         if config_iterable is not None:
             for each_config in config_iterable:
-                for obj_name, obj_details in each_config.iteritems():
+                for obj_name, obj_details in each_config.items():
                     formatted_obj = {}
-                    for k, v in obj_details.items():
+                    for k, v in list(obj_details.items()):
                         v = v.lower().replace('schema_madlib', self._schema) if v else ""
                         formatted_obj[k] = v
                     _return_obj[obj_name].append(formatted_obj)
@@ -195,7 +195,7 @@ class ChangeHandler(UpgradeBase):
         Iterable.
         """
         if src_dict:
-            for k, v in src_dict.items():
+            for k, v in list(src_dict.items()):
                 if k in dest_dict:
                     if (isinstance(dest_dict[k], Iterable) and isinstance(v, Iterable)):
                         dest_dict[k] += v
@@ -349,7 +349,7 @@ class ChangeHandler(UpgradeBase):
         ret = []
 
         changed_ops = set()
-        for op, li in self._udo.items():
+        for op, li in list(self._udo.items()):
             for e in li:
                 changed_ops.add((op, e['leftarg'], e['rightarg']))
 
@@ -375,7 +375,7 @@ class ChangeHandler(UpgradeBase):
         ret = []
 
         changed_opcs = set()
-        for opc, li in self._udoc.items():
+        for opc, li in list(self._udoc.items()):
             for e in li:
                 changed_opcs.add((opc, e['index']))
         gte_gpdb5 = (self._portid == 'greenplum' and
@@ -626,12 +626,12 @@ class ViewDependency(UpgradeBase):
     def _filter_recursive_view_dependency(self):
         # Get initial list
         checklist = []
-        checklist.extend(self._view2proc.keys())
-        checklist.extend(self._view2op.keys())
+        checklist.extend(list(self._view2proc.keys()))
+        checklist.extend(list(self._view2op.keys()))
 
         while True:
             new_checklist = []
-            for depender, dependeelist in self._view2view.iteritems():
+            for depender, dependeelist in self._view2view.items():
                 for dependee in dependeelist:
                     if dependee in checklist and depender not in checklist:
                         new_checklist.append(depender)
@@ -643,7 +643,7 @@ class ViewDependency(UpgradeBase):
 
         # Filter recursive dependencies not related with MADLib UDF/UDAs
         filtered_view2view = defaultdict(list)
-        for depender, dependeelist in self._view2view.iteritems():
+        for depender, dependeelist in self._view2view.items():
             filtered_dependeelist = [r for r in dependeelist if r in checklist]
             if len(filtered_dependeelist) > 0:
                 filtered_view2view[depender] = filtered_dependeelist
@@ -718,7 +718,7 @@ class ViewDependency(UpgradeBase):
         @brief Get the depended UDF/UDA signatures for comparison
         """
         res = {}
-        for procs in self._view2proc.values():
+        for procs in list(self._view2proc.values()):
             for proc in procs:
                 if proc[2] == tag and (self._schema, proc) not in res:
                     funcinfo = self._get_function_info(proc[1])
@@ -733,7 +733,7 @@ class ViewDependency(UpgradeBase):
         @brief Get the depended UDO OIDs for comparison
         """
         res = set()
-        for depended_ops in self._view2op.values():
+        for depended_ops in list(self._view2op.values()):
             for op_entry in depended_ops:
                 res.add(op_entry[1])
 
@@ -741,7 +741,7 @@ class ViewDependency(UpgradeBase):
 
     def get_proc_w_dependency(self, tag='UDA'):
         res = []
-        for procs in self._view2proc.values():
+        for procs in list(self._view2proc.values()):
             for proc in procs:
                 if proc[2] == tag and (self._schema, proc) not in res:
                     res.append((self._schema, proc))
@@ -935,7 +935,7 @@ class TableDependency(UpgradeBase):
         @brief Get the list of depended UDOC OIDs
         """
         res = set()
-        for depended_opcs in self._index2opclass.values():
+        for depended_opcs in list(self._index2opclass.values()):
             for opc_entry in depended_opcs:
                 res.add(opc_entry[0])
 
@@ -1070,7 +1070,7 @@ class ScriptCleaner(UpgradeBase):
         self._get_existing_udo()  # from the old version
         operator_patterns = []
         # for all, pass the changed ones, add others to ret
-        for each_udo, udo_details in self._existing_udo.items():
+        for each_udo, udo_details in list(self._existing_udo.items()):
             for each_item in udo_details:
                 if each_udo in self._ch.udo:
                     if each_item in self._ch.udo[each_udo]:
@@ -1097,7 +1097,7 @@ class ScriptCleaner(UpgradeBase):
         self._get_existing_udoc()  # from the old version
         opclass_patterns = []
         # for all, pass the changed ones, add others to ret
-        for each_udoc, udoc_details in self._existing_udoc.items():
+        for each_udoc, udoc_details in list(self._existing_udoc.items()):
             for each_item in udoc_details:
                 if each_udoc in self._ch.udoc:
                     if each_item in self._ch.udoc[each_udoc]:
@@ -1122,7 +1122,7 @@ class ScriptCleaner(UpgradeBase):
         self._get_existing_uda()
         aggregate_patterns = []
 
-        for each_uda, uda_details in self._existing_uda.iteritems():
+        for each_uda, uda_details in self._existing_uda.items():
             for each_item in uda_details:
                 if each_uda in self._ch.uda:
                     if each_item in self._ch.uda[each_uda]:
@@ -1343,7 +1343,7 @@ class TestChangeHandler(unittest.TestCase):
         ch = ChangeHandler(self._dummy_schema, self._dummy_portid,
                            self._dummy_con_args, self.maddir,
                            '1.9.1', upgrade_to=get_rev_num('1.12'))
-        self.assertEqual(ch.newmodule.keys(),
+        self.assertEqual(list(ch.newmodule.keys()),
                          ['knn', 'sssp', 'apsp', 'measures', 'stratified_sample',
                           'encode_categorical', 'bfs', 'mlp', 'pagerank',
                           'train_test_split', 'wcc'])
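
Most of these edits wrap dict views in list(). In Python 3,
items()/keys()/values() return lazy views tied to the dict, so code that
mutates the dict while iterating, or that indexes or compares the result as a
list (as the assertEqual on newmodule.keys() above does), must materialize
them first:

    >>> d = {'a': 1}
    >>> d.keys() == ['a']        # Python 3: False, a view is not a list
    False
    >>> list(d.keys()) == ['a']
    True
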
diff --git a/src/madpack/utilities.py b/src/madpack/utilities.py
index be84de2d..7012d064 100644
--- a/src/madpack/utilities.py
+++ b/src/madpack/utilities.py
@@ -21,13 +21,16 @@
 # Madpack utilities
 # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
 
-from itertools import izip_longest
+from itertools import zip_longest
 import os
 import re
 import sys
 import subprocess
 import unittest
 
+# Import MADlib python modules
+import configyml
+
 # Some read-only variables
 this = os.path.basename(sys.argv[0])    # name of this script
 
@@ -46,7 +49,7 @@ def error_(src_name, msg, stop=False):
         @param stop program exit flag
     """
     # Print to stdout
-    print("{0}: ERROR : {1}".format(src_name, msg))
+    print(("{0}: ERROR : {1}".format(src_name, msg)))
     # stack trace is not printed
     if stop:
         exit(2)
@@ -60,7 +63,7 @@ def info_(src_name, msg, verbose=True):
         @param verbose prints only if True (prevents caller from performing a check)
     """
     if verbose:
-        print("{0}: INFO : {1}".format(src_name, msg))
+        print(("{0}: INFO : {1}".format(src_name, msg)))
 # ------------------------------------------------------------------------------
 
 def remove_comments_from_sql(sql):
@@ -112,7 +115,8 @@ def run_query(sql, con_args, show_error=True):
     if err:
         if show_error:
             error_("SQL command failed: \nSQL: %s \n%s" % (sql, err), False)
-        if 'password' in err:
+        # XXX py3
+        if 'password' in err.decode():
             raise EnvironmentError
         else:
             raise Exception
@@ -121,6 +125,8 @@ def run_query(sql, con_args, show_error=True):
     results = []  # list of rows
     i = 0
     for line in std.splitlines():
+        # XXX py3
+        line = line.decode()
         if i == 0:
             cols = [name for name in line.split(delimiter)]
         else:
@@ -175,7 +181,7 @@ def get_db_madlib_version(con_args, schema):
 def get_dbver(con_args, portid):
     """ Read version number from database (of form X.Y) """
     try:
-        versionStr = run_query("SELECT pg_catalog.version()", con_args, True)[0]['version']
+        versionStr = run_query("SELECT pg_catalog.version();", con_args, True)[0]['version']
         if portid == 'postgres':
             match = re.search("PostgreSQL[a-zA-Z\s]*(\d+\.\d+)", versionStr)
         elif portid == 'greenplum':
@@ -223,7 +229,7 @@ def is_rev_gte(left, right):
     if all_numeric(left) and all_numeric(right):
         return left >= right
     else:
-        for i, (l_e, r_e) in enumerate(izip_longest(left, right)):
+        for i, (l_e, r_e) in enumerate(zip_longest(left, right)):
             if isinstance(l_e, int) and isinstance(r_e, int):
                 if l_e == r_e:
                     continue
@@ -272,7 +278,7 @@ def get_rev_num(rev):
         num += [0] * (3 - len(num))  # normalize num to be of length 3
         # get identifier part of the version string
         if len(rev_parts) > 1:
-            num.extend(map(str, rev_parts[1:]))
+            num.extend(list(map(str, rev_parts[1:])))
         if not num:
             num = [0]
         return num
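
Two runtime differences drive the utilities.py changes: itertools.izip_longest
was renamed to zip_longest, and subprocess pipes yield bytes under Python 3,
so output must be decoded before substring tests or splitting:

    import subprocess
    from itertools import zip_longest       # izip_longest in Python 2

    out = subprocess.check_output(["echo", "ok"])
    line = out.decode()                     # bytes -> str before parsing
    list(zip_longest([1, 2], [1]))          # [(1, 1), (2, None)]
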
diff --git a/src/madpack/yaml/__init__.py b/src/madpack/yaml/__init__.py
index bd233a87..27ead66a 100644
--- a/src/madpack/yaml/__init__.py
+++ b/src/madpack/yaml/__init__.py
@@ -1,15 +1,15 @@
 
-from error import *
+from .error import *
 
-from tokens import *
-from events import *
-from nodes import *
+from .tokens import *
+from .events import *
+from .nodes import *
 
-from loader import *
-from dumper import *
+from .loader import *
+from .dumper import *
 
 try:
-    from cyaml import *
+    from .cyaml import *
 except ImportError:
     pass
 
@@ -91,9 +91,9 @@ def emit(events, stream=None, Dumper=Dumper,
     getvalue = None
     if stream is None:
         try:
-            from cStringIO import StringIO
+            from io import StringIO
         except ImportError:
-            from StringIO import StringIO
+            from io import StringIO
         stream = StringIO()
         getvalue = stream.getvalue
     dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
@@ -115,9 +115,9 @@ def serialize_all(nodes, stream=None, Dumper=Dumper,
     getvalue = None
     if stream is None:
         try:
-            from cStringIO import StringIO
+            from io import StringIO
         except ImportError:
-            from StringIO import StringIO
+            from io import StringIO
         stream = StringIO()
         getvalue = stream.getvalue
     dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
@@ -151,9 +151,9 @@ def dump_all(documents, stream=None, Dumper=Dumper,
     getvalue = None
     if stream is None:
         try:
-            from cStringIO import StringIO
+            from io import StringIO
         except ImportError:
-            from StringIO import StringIO
+            from io import StringIO
         stream = StringIO()
         getvalue = stream.getvalue
     dumper = Dumper(stream, default_style=default_style,
@@ -258,13 +258,11 @@ class YAMLObjectMetaclass(type):
             cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
             cls.yaml_dumper.add_representer(cls, cls.to_yaml)
 
-class YAMLObject(object):
+class YAMLObject(object, metaclass=YAMLObjectMetaclass):
     """
     An object that can dump itself to a YAML stream
     and load itself from a YAML stream.
     """
-
-    __metaclass__ = YAMLObjectMetaclass
     __slots__ = ()  # no direct instantiation, so allow immutable subclasses
 
     yaml_loader = Loader
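
The vendored yaml package gets three Python 3 migrations: intra-package
imports must be explicit (from .error import *), cStringIO is gone
(io.StringIO now serves both branches, which is why the try/except imports
the same module twice), and the metaclass moves from a __metaclass__
attribute into the class header:

    class Meta(type):
        pass

    class Example(metaclass=Meta):    # was: __metaclass__ = Meta in the body
        pass
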
diff --git a/src/madpack/yaml/composer.py b/src/madpack/yaml/composer.py
index 9f5cd875..dd236a44 100644
--- a/src/madpack/yaml/composer.py
+++ b/src/madpack/yaml/composer.py
@@ -1,9 +1,9 @@
 
 __all__ = ['Composer', 'ComposerError']
 
-from error import MarkedYAMLError
-from events import *
-from nodes import *
+from .error import MarkedYAMLError
+from .events import *
+from .nodes import *
 
 class ComposerError(MarkedYAMLError):
     pass
@@ -67,7 +67,7 @@ class Composer(object):
     def compose_scalar_node(self, anchor):
         event = self.get_event()
         tag = event.tag
-        if tag is None or tag == u'!':
+        if tag is None or tag == '!':
             tag = self.resolve(ScalarNode, event.value, event.implicit)
         node = ScalarNode(tag, event.value,
                 event.start_mark, event.end_mark, style=event.style)
@@ -78,7 +78,7 @@ class Composer(object):
     def compose_sequence_node(self, anchor):
         start_event = self.get_event()
         tag = start_event.tag
-        if tag is None or tag == u'!':
+        if tag is None or tag == '!':
             tag = self.resolve(SequenceNode, None, start_event.implicit)
         node = SequenceNode(tag, [],
                 start_event.start_mark, None,
@@ -96,7 +96,7 @@ class Composer(object):
     def compose_mapping_node(self, anchor):
         start_event = self.get_event()
         tag = start_event.tag
-        if tag is None or tag == u'!':
+        if tag is None or tag == '!':
             tag = self.resolve(MappingNode, None, start_event.implicit)
         node = MappingNode(tag, [],
                 start_event.start_mark, None,
diff --git a/src/madpack/yaml/constructor.py b/src/madpack/yaml/constructor.py
index a1295c86..1fd5c399 100644
--- a/src/madpack/yaml/constructor.py
+++ b/src/madpack/yaml/constructor.py
@@ -2,8 +2,8 @@
 __all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
     'ConstructorError']
 
-from error import *
-from nodes import *
+from .error import *
+from .nodes import *
 
 import datetime
 
@@ -89,7 +89,7 @@ class BaseConstructor(object):
             data = constructor(self, tag_suffix, node)
         if isinstance(data, types.GeneratorType):
             generator = data
-            data = generator.next()
+            data = next(generator)
             if self.deep_construct:
                 for dummy in generator:
                     pass
@@ -126,7 +126,7 @@ class BaseConstructor(object):
             key = self.construct_object(key_node, deep=deep)
             try:
                 hash(key)
-            except TypeError, exc:
+            except TypeError as exc:
                 raise ConstructorError("while constructing a mapping", node.start_mark,
                         "found unacceptable key (%s)" % exc, key_node.start_mark)
             value = self.construct_object(value_node, deep=deep)
@@ -162,7 +162,7 @@ class SafeConstructor(BaseConstructor):
     def construct_scalar(self, node):
         if isinstance(node, MappingNode):
             for key_node, value_node in node.value:
-                if key_node.tag == u'tag:yaml.org,2002:value':
+                if key_node.tag == 'tag:yaml.org,2002:value':
                     return self.construct_scalar(value_node)
         return BaseConstructor.construct_scalar(self, node)
 
@@ -171,7 +171,7 @@ class SafeConstructor(BaseConstructor):
         index = 0
         while index < len(node.value):
             key_node, value_node = node.value[index]
-            if key_node.tag == u'tag:yaml.org,2002:merge':
+            if key_node.tag == 'tag:yaml.org,2002:merge':
                 del node.value[index]
                 if isinstance(value_node, MappingNode):
                     self.flatten_mapping(value_node)
@@ -193,8 +193,8 @@ class SafeConstructor(BaseConstructor):
                     raise ConstructorError("while constructing a mapping", node.start_mark,
                             "expected a mapping or list of mappings for merging, but found %s"
                             % value_node.id, value_node.start_mark)
-            elif key_node.tag == u'tag:yaml.org,2002:value':
-                key_node.tag = u'tag:yaml.org,2002:str'
+            elif key_node.tag == 'tag:yaml.org,2002:value':
+                key_node.tag = 'tag:yaml.org,2002:str'
                 index += 1
             else:
                 index += 1
@@ -211,12 +211,12 @@ class SafeConstructor(BaseConstructor):
         return None
 
     bool_values = {
-        u'yes':     True,
-        u'no':      False,
-        u'true':    True,
-        u'false':   False,
-        u'on':      True,
-        u'off':     False,
+        'yes':     True,
+        'no':      False,
+        'true':    True,
+        'false':   False,
+        'on':      True,
+        'off':     False,
     }
 
     def construct_yaml_bool(self, node):
@@ -284,12 +284,12 @@ class SafeConstructor(BaseConstructor):
         value = self.construct_scalar(node)
         try:
             return str(value).decode('base64')
-        except (binascii.Error, UnicodeEncodeError), exc:
+        except (binascii.Error, UnicodeEncodeError) as exc:
             raise ConstructorError(None, None,
                     "failed to decode base64 data: %s" % exc, node.start_mark) 
 
     timestamp_regexp = re.compile(
-            ur'''^(?P<year>[0-9][0-9][0-9][0-9])
+            r'''^(?P<year>[0-9][0-9][0-9][0-9])
                 -(?P<month>[0-9][0-9]?)
                 -(?P<day>[0-9][0-9]?)
                 (?:(?:[Tt]|[ \t]+)
@@ -410,51 +410,51 @@ class SafeConstructor(BaseConstructor):
                 node.start_mark)
 
 SafeConstructor.add_constructor(
-        u'tag:yaml.org,2002:null',
+        'tag:yaml.org,2002:null',
         SafeConstructor.construct_yaml_null)
 
 SafeConstructor.add_constructor(
-        u'tag:yaml.org,2002:bool',
+        'tag:yaml.org,2002:bool',
         SafeConstructor.construct_yaml_bool)
 
 SafeConstructor.add_constructor(
-        u'tag:yaml.org,2002:int',
+        'tag:yaml.org,2002:int',
         SafeConstructor.construct_yaml_int)
 
 SafeConstructor.add_constructor(
-        u'tag:yaml.org,2002:float',
+        'tag:yaml.org,2002:float',
         SafeConstructor.construct_yaml_float)
 
 SafeConstructor.add_constructor(
-        u'tag:yaml.org,2002:binary',
+        'tag:yaml.org,2002:binary',
         SafeConstructor.construct_yaml_binary)
 
 SafeConstructor.add_constructor(
-        u'tag:yaml.org,2002:timestamp',
+        'tag:yaml.org,2002:timestamp',
         SafeConstructor.construct_yaml_timestamp)
 
 SafeConstructor.add_constructor(
-        u'tag:yaml.org,2002:omap',
+        'tag:yaml.org,2002:omap',
         SafeConstructor.construct_yaml_omap)
 
 SafeConstructor.add_constructor(
-        u'tag:yaml.org,2002:pairs',
+        'tag:yaml.org,2002:pairs',
         SafeConstructor.construct_yaml_pairs)
 
 SafeConstructor.add_constructor(
-        u'tag:yaml.org,2002:set',
+        'tag:yaml.org,2002:set',
         SafeConstructor.construct_yaml_set)
 
 SafeConstructor.add_constructor(
-        u'tag:yaml.org,2002:str',
+        'tag:yaml.org,2002:str',
         SafeConstructor.construct_yaml_str)
 
 SafeConstructor.add_constructor(
-        u'tag:yaml.org,2002:seq',
+        'tag:yaml.org,2002:seq',
         SafeConstructor.construct_yaml_seq)
 
 SafeConstructor.add_constructor(
-        u'tag:yaml.org,2002:map',
+        'tag:yaml.org,2002:map',
         SafeConstructor.construct_yaml_map)
 
 SafeConstructor.add_constructor(None,
@@ -469,7 +469,7 @@ class Constructor(SafeConstructor):
         return self.construct_scalar(node)
 
     def construct_python_long(self, node):
-        return long(self.construct_yaml_int(node))
+        return int(self.construct_yaml_int(node))
 
     def construct_python_complex(self, node):
        return complex(self.construct_scalar(node))
@@ -483,7 +483,7 @@ class Constructor(SafeConstructor):
                     "expected non-empty name appended to the tag", mark)
         try:
             __import__(name)
-        except ImportError, exc:
+        except ImportError as exc:
             raise ConstructorError("while constructing a Python module", mark,
                     "cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark)
         return sys.modules[name]
@@ -492,7 +492,7 @@ class Constructor(SafeConstructor):
         if not name:
             raise ConstructorError("while constructing a Python object", mark,
                     "expected non-empty name appended to the tag", mark)
-        if u'.' in name:
+        if '.' in name:
             # Python 2.4 only
             #module_name, object_name = name.rsplit('.', 1)
             items = name.split('.')
@@ -503,7 +503,7 @@ class Constructor(SafeConstructor):
             object_name = name
         try:
             __import__(module_name)
-        except ImportError, exc:
+        except ImportError as exc:
             raise ConstructorError("while constructing a Python object", mark,
                     "cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark)
         module = sys.modules[module_name]
@@ -559,7 +559,7 @@ class Constructor(SafeConstructor):
                 instance.__dict__.update(state)
             elif state:
                 slotstate.update(state)
-            for key, value in slotstate.items():
+            for key, value in list(slotstate.items()):
                 setattr(object, key, value)
 
     def construct_python_object(self, suffix, node):
@@ -610,66 +610,66 @@ class Constructor(SafeConstructor):
         return self.construct_python_object_apply(suffix, node, newobj=True)
 
 Constructor.add_constructor(
-    u'tag:yaml.org,2002:python/none',
+    'tag:yaml.org,2002:python/none',
     Constructor.construct_yaml_null)
 
 Constructor.add_constructor(
-    u'tag:yaml.org,2002:python/bool',
+    'tag:yaml.org,2002:python/bool',
     Constructor.construct_yaml_bool)
 
 Constructor.add_constructor(
-    u'tag:yaml.org,2002:python/str',
+    'tag:yaml.org,2002:python/str',
     Constructor.construct_python_str)
 
 Constructor.add_constructor(
-    u'tag:yaml.org,2002:python/unicode',
+    'tag:yaml.org,2002:python/unicode',
     Constructor.construct_python_unicode)
 
 Constructor.add_constructor(
-    u'tag:yaml.org,2002:python/int',
+    'tag:yaml.org,2002:python/int',
     Constructor.construct_yaml_int)
 
 Constructor.add_constructor(
-    u'tag:yaml.org,2002:python/long',
+    'tag:yaml.org,2002:python/long',
     Constructor.construct_python_long)
 
 Constructor.add_constructor(
-    u'tag:yaml.org,2002:python/float',
+    'tag:yaml.org,2002:python/float',
     Constructor.construct_yaml_float)
 
 Constructor.add_constructor(
-    u'tag:yaml.org,2002:python/complex',
+    'tag:yaml.org,2002:python/complex',
     Constructor.construct_python_complex)
 
 Constructor.add_constructor(
-    u'tag:yaml.org,2002:python/list',
+    'tag:yaml.org,2002:python/list',
     Constructor.construct_yaml_seq)
 
 Constructor.add_constructor(
-    u'tag:yaml.org,2002:python/tuple',
+    'tag:yaml.org,2002:python/tuple',
     Constructor.construct_python_tuple)
 
 Constructor.add_constructor(
-    u'tag:yaml.org,2002:python/dict',
+    'tag:yaml.org,2002:python/dict',
     Constructor.construct_yaml_map)
 
 Constructor.add_multi_constructor(
-    u'tag:yaml.org,2002:python/name:',
+    'tag:yaml.org,2002:python/name:',
     Constructor.construct_python_name)
 
 Constructor.add_multi_constructor(
-    u'tag:yaml.org,2002:python/module:',
+    'tag:yaml.org,2002:python/module:',
     Constructor.construct_python_module)
 
 Constructor.add_multi_constructor(
-    u'tag:yaml.org,2002:python/object:',
+    'tag:yaml.org,2002:python/object:',
     Constructor.construct_python_object)
 
 Constructor.add_multi_constructor(
-    u'tag:yaml.org,2002:python/object/apply:',
+    'tag:yaml.org,2002:python/object/apply:',
     Constructor.construct_python_object_apply)
 
 Constructor.add_multi_constructor(
-    u'tag:yaml.org,2002:python/object/new:',
+    'tag:yaml.org,2002:python/object/new:',
     Constructor.construct_python_object_new)
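
The u'...' prefixes removed throughout are redundant in Python 3, where every
str is Unicode, and generator.next() gave way to the next() builtin. One
py2-ism does survive unchanged above: str(value).decode('base64') in
construct_yaml_binary fails on Python 3, since str has no decode() and the
base64 codec was dropped; the modern spelling goes through the base64 module:

    >>> next(iter([1, 2]))                  # replaces generator.next()
    1
    >>> import base64
    >>> base64.b64decode("aGVsbG8=")        # replaces str.decode('base64')
    b'hello'
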
 
diff --git a/src/madpack/yaml/cyaml.py b/src/madpack/yaml/cyaml.py
index 14acb07a..cc59e3e7 100644
--- a/src/madpack/yaml/cyaml.py
+++ b/src/madpack/yaml/cyaml.py
@@ -4,12 +4,12 @@ __all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
 
 from _yaml import CParser, CEmitter
 
-from constructor import *
+from .constructor import *
 
-from serializer import *
-from representer import *
+from .serializer import *
+from .representer import *
 
-from resolver import *
+from .resolver import *
 
 class CBaseLoader(CParser, BaseConstructor, BaseResolver):
 
diff --git a/src/madpack/yaml/dumper.py b/src/madpack/yaml/dumper.py
index 355c1e2f..f1b50254 100644
--- a/src/madpack/yaml/dumper.py
+++ b/src/madpack/yaml/dumper.py
@@ -1,10 +1,10 @@
 
 __all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
 
-from emitter import *
-from serializer import *
-from representer import *
-from resolver import *
+from .emitter import *
+from .serializer import *
+from .representer import *
+from .resolver import *
 
 class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
 
diff --git a/src/madpack/yaml/emitter.py b/src/madpack/yaml/emitter.py
index d9d1bf81..7aefca14 100644
--- a/src/madpack/yaml/emitter.py
+++ b/src/madpack/yaml/emitter.py
@@ -8,8 +8,8 @@
 
 __all__ = ['Emitter', 'EmitterError']
 
-from error import YAMLError
-from events import *
+from .error import YAMLError
+from .events import *
 
 import re
 
@@ -33,8 +33,8 @@ class ScalarAnalysis(object):
 class Emitter(object):
 
     DEFAULT_TAG_PREFIXES = {
-        u'!' : u'!',
-        u'tag:yaml.org,2002:' : u'!!',
+        '!' : '!',
+        'tag:yaml.org,2002:' : '!!',
     }
 
     def __init__(self, stream, canonical=None, indent=None, width=None,
@@ -87,8 +87,8 @@ class Emitter(object):
         self.best_width = 80
         if width and width > self.best_indent*2:
             self.best_width = width
-        self.best_line_break = u'\n'
-        if line_break in [u'\r', u'\n', u'\r\n']:
+        self.best_line_break = '\n'
+        if line_break in ['\r', '\n', '\r\n']:
             self.best_line_break = line_break
 
         # Tag prefixes.
@@ -176,7 +176,7 @@ class Emitter(object):
                 self.write_version_directive(version_text)
             self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
             if self.event.tags:
-                handles = self.event.tags.keys()
+                handles = list(self.event.tags.keys())
                 handles.sort()
                 for handle in handles:
                     prefix = self.event.tags[handle]
@@ -189,7 +189,7 @@ class Emitter(object):
                     and not self.check_empty_document())
             if not implicit:
                 self.write_indent()
-                self.write_indicator(u'---', True)
+                self.write_indicator('---', True)
                 if self.canonical:
                     self.write_indent()
             self.state = self.expect_document_root
@@ -204,7 +204,7 @@ class Emitter(object):
         if isinstance(self.event, DocumentEndEvent):
             self.write_indent()
             if self.event.explicit:
-                self.write_indicator(u'...', True)
+                self.write_indicator('...', True)
                 self.write_indent()
             self.flush_stream()
             self.state = self.expect_document_start
@@ -227,7 +227,7 @@ class Emitter(object):
         if isinstance(self.event, AliasEvent):
             self.expect_alias()
         elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
-            self.process_anchor(u'&')
+            self.process_anchor('&')
             self.process_tag()
             if isinstance(self.event, ScalarEvent):
                 self.expect_scalar()
@@ -249,7 +249,7 @@ class Emitter(object):
     def expect_alias(self):
         if self.event.anchor is None:
             raise EmitterError("anchor is not specified for alias")
-        self.process_anchor(u'*')
+        self.process_anchor('*')
         self.state = self.states.pop()
 
     def expect_scalar(self):
@@ -261,7 +261,7 @@ class Emitter(object):
     # Flow sequence handlers.
 
     def expect_flow_sequence(self):
-        self.write_indicator(u'[', True, whitespace=True)
+        self.write_indicator('[', True, whitespace=True)
         self.flow_level += 1
         self.increase_indent(flow=True)
         self.state = self.expect_first_flow_sequence_item
@@ -270,7 +270,7 @@ class Emitter(object):
         if isinstance(self.event, SequenceEndEvent):
             self.indent = self.indents.pop()
             self.flow_level -= 1
-            self.write_indicator(u']', False)
+            self.write_indicator(']', False)
             self.state = self.states.pop()
         else:
             if self.canonical or self.column > self.best_width:
@@ -283,12 +283,12 @@ class Emitter(object):
             self.indent = self.indents.pop()
             self.flow_level -= 1
             if self.canonical:
-                self.write_indicator(u',', False)
+                self.write_indicator(',', False)
                 self.write_indent()
-            self.write_indicator(u']', False)
+            self.write_indicator(']', False)
             self.state = self.states.pop()
         else:
-            self.write_indicator(u',', False)
+            self.write_indicator(',', False)
             if self.canonical or self.column > self.best_width:
                 self.write_indent()
             self.states.append(self.expect_flow_sequence_item)
@@ -297,7 +297,7 @@ class Emitter(object):
     # Flow mapping handlers.
 
     def expect_flow_mapping(self):
-        self.write_indicator(u'{', True, whitespace=True)
+        self.write_indicator('{', True, whitespace=True)
         self.flow_level += 1
         self.increase_indent(flow=True)
         self.state = self.expect_first_flow_mapping_key
@@ -306,7 +306,7 @@ class Emitter(object):
         if isinstance(self.event, MappingEndEvent):
             self.indent = self.indents.pop()
             self.flow_level -= 1
-            self.write_indicator(u'}', False)
+            self.write_indicator('}', False)
             self.state = self.states.pop()
         else:
             if self.canonical or self.column > self.best_width:
@@ -315,7 +315,7 @@ class Emitter(object):
                 self.states.append(self.expect_flow_mapping_simple_value)
                 self.expect_node(mapping=True, simple_key=True)
             else:
-                self.write_indicator(u'?', True)
+                self.write_indicator('?', True)
                 self.states.append(self.expect_flow_mapping_value)
                 self.expect_node(mapping=True)
 
@@ -324,31 +324,31 @@ class Emitter(object):
             self.indent = self.indents.pop()
             self.flow_level -= 1
             if self.canonical:
-                self.write_indicator(u',', False)
+                self.write_indicator(',', False)
                 self.write_indent()
-            self.write_indicator(u'}', False)
+            self.write_indicator('}', False)
             self.state = self.states.pop()
         else:
-            self.write_indicator(u',', False)
+            self.write_indicator(',', False)
             if self.canonical or self.column > self.best_width:
                 self.write_indent()
             if not self.canonical and self.check_simple_key():
                 self.states.append(self.expect_flow_mapping_simple_value)
                 self.expect_node(mapping=True, simple_key=True)
             else:
-                self.write_indicator(u'?', True)
+                self.write_indicator('?', True)
                 self.states.append(self.expect_flow_mapping_value)
                 self.expect_node(mapping=True)
 
     def expect_flow_mapping_simple_value(self):
-        self.write_indicator(u':', False)
+        self.write_indicator(':', False)
         self.states.append(self.expect_flow_mapping_key)
         self.expect_node(mapping=True)
 
     def expect_flow_mapping_value(self):
         if self.canonical or self.column > self.best_width:
             self.write_indent()
-        self.write_indicator(u':', True)
+        self.write_indicator(':', True)
         self.states.append(self.expect_flow_mapping_key)
         self.expect_node(mapping=True)
 
@@ -368,7 +368,7 @@ class Emitter(object):
             self.state = self.states.pop()
         else:
             self.write_indent()
-            self.write_indicator(u'-', True, indention=True)
+            self.write_indicator('-', True, indention=True)
             self.states.append(self.expect_block_sequence_item)
             self.expect_node(sequence=True)
 
@@ -391,18 +391,18 @@ class Emitter(object):
                 self.states.append(self.expect_block_mapping_simple_value)
                 self.expect_node(mapping=True, simple_key=True)
             else:
-                self.write_indicator(u'?', True, indention=True)
+                self.write_indicator('?', True, indention=True)
                 self.states.append(self.expect_block_mapping_value)
                 self.expect_node(mapping=True)
 
     def expect_block_mapping_simple_value(self):
-        self.write_indicator(u':', False)
+        self.write_indicator(':', False)
         self.states.append(self.expect_block_mapping_key)
         self.expect_node(mapping=True)
 
     def expect_block_mapping_value(self):
         self.write_indent()
-        self.write_indicator(u':', True, indention=True)
+        self.write_indicator(':', True, indention=True)
         self.states.append(self.expect_block_mapping_key)
         self.expect_node(mapping=True)
 
@@ -421,7 +421,7 @@ class Emitter(object):
             return False
         event = self.events[0]
         return (isinstance(event, ScalarEvent) and event.anchor is None
-                and event.tag is None and event.implicit and event.value == u'')
+                and event.tag is None and event.implicit and event.value == '')
 
     def check_simple_key(self):
         length = 0
@@ -466,7 +466,7 @@ class Emitter(object):
                 self.prepared_tag = None
                 return
             if self.event.implicit[0] and tag is None:
-                tag = u'!'
+                tag = '!'
                 self.prepared_tag = None
         else:
             if (not self.canonical or tag is None) and self.event.implicit:
@@ -529,17 +529,17 @@ class Emitter(object):
         major, minor = version
         if major != 1:
             raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
-        return u'%d.%d' % (major, minor)
+        return '%d.%d' % (major, minor)
 
     def prepare_tag_handle(self, handle):
         if not handle:
             raise EmitterError("tag handle must not be empty")
-        if handle[0] != u'!' or handle[-1] != u'!':
+        if handle[0] != '!' or handle[-1] != '!':
             raise EmitterError("tag handle must start and end with '!': %r"
                     % (handle.encode('utf-8')))
         for ch in handle[1:-1]:
-            if not (u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z'    \
-                    or ch in u'-_'):
+            if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'    \
+                    or ch in '-_'):
                 raise EmitterError("invalid character %r in the tag handle: %r"
                         % (ch.encode('utf-8'), handle.encode('utf-8')))
         return handle
@@ -549,12 +549,12 @@ class Emitter(object):
             raise EmitterError("tag prefix must not be empty")
         chunks = []
         start = end = 0
-        if prefix[0] == u'!':
+        if prefix[0] == '!':
             end = 1
         while end < len(prefix):
             ch = prefix[end]
-            if u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z'  \
-                    or ch in u'-;/?!:@&=+$,_.~*\'()[]':
+            if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'  \
+                    or ch in '-;/?!:@&=+$,_.~*\'()[]':
                 end += 1
             else:
                 if start < end:
@@ -562,30 +562,30 @@ class Emitter(object):
                 start = end = end+1
                 data = ch.encode('utf-8')
                 for ch in data:
-                    chunks.append(u'%%%02X' % ord(ch))
+                    chunks.append('%%%02X' % ord(ch))
         if start < end:
             chunks.append(prefix[start:end])
-        return u''.join(chunks)
+        return ''.join(chunks)
 
     def prepare_tag(self, tag):
         if not tag:
             raise EmitterError("tag must not be empty")
-        if tag == u'!':
+        if tag == '!':
             return tag
         handle = None
         suffix = tag
         for prefix in self.tag_prefixes:
             if tag.startswith(prefix)   \
-                    and (prefix == u'!' or len(prefix) < len(tag)):
+                    and (prefix == '!' or len(prefix) < len(tag)):
                 handle = self.tag_prefixes[prefix]
                 suffix = tag[len(prefix):]
         chunks = []
         start = end = 0
         while end < len(suffix):
             ch = suffix[end]
-            if u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z'  \
-                    or ch in u'-;/?:@&=+$,_.~*\'()[]'   \
-                    or (ch == u'!' and handle != u'!'):
+            if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'  \
+                    or ch in '-;/?:@&=+$,_.~*\'()[]'   \
+                    or (ch == '!' and handle != '!'):
                 end += 1
             else:
                 if start < end:
@@ -593,21 +593,21 @@ class Emitter(object):
                 start = end = end+1
                 data = ch.encode('utf-8')
                 for ch in data:
-                    chunks.append(u'%%%02X' % ord(ch))
+                    chunks.append('%%%02X' % ord(ch))
         if start < end:
             chunks.append(suffix[start:end])
-        suffix_text = u''.join(chunks)
+        suffix_text = ''.join(chunks)
         if handle:
-            return u'%s%s' % (handle, suffix_text)
+            return '%s%s' % (handle, suffix_text)
         else:
-            return u'!<%s>' % suffix_text
+            return '!<%s>' % suffix_text
 
     def prepare_anchor(self, anchor):
         if not anchor:
             raise EmitterError("anchor must not be empty")
         for ch in anchor:
-            if not (u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z'    \
-                    or ch in u'-_'):
+            if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'    \
+                    or ch in '-_'):
                 raise EmitterError("invalid character %r in the anchor: %r"
                         % (ch.encode('utf-8'), anchor.encode('utf-8')))
         return anchor
@@ -638,7 +638,7 @@ class Emitter(object):
         mixed_breaks_spaces = False    # anything else
 
         # Check document indicators.
-        if scalar.startswith(u'---') or scalar.startswith(u'...'):
+        if scalar.startswith('---') or scalar.startswith('...'):
             block_indicators = True
             flow_indicators = True
 
@@ -647,7 +647,7 @@ class Emitter(object):
 
         # Last character or followed by a whitespace.
         followed_by_space = (len(scalar) == 1 or
-                scalar[1] in u'\0 \t\r\n\x85\u2028\u2029')
+                scalar[1] in '\0 \t\r\n\x85\u2028\u2029')
 
         # The current series of whitespaces contain plain spaces.
         spaces = False
@@ -671,35 +671,35 @@ class Emitter(object):
 
             if index == 0:
                 # Leading indicators are special characters.
-                if ch in u'#,[]{}&*!|>\'\"%@`': 
+                if ch in '#,[]{}&*!|>\'\"%@`': 
                     flow_indicators = True
                     block_indicators = True
-                if ch in u'?:':
+                if ch in '?:':
                     flow_indicators = True
                     if followed_by_space:
                         block_indicators = True
-                if ch == u'-' and followed_by_space:
+                if ch == '-' and followed_by_space:
                     flow_indicators = True
                     block_indicators = True
             else:
                 # Some indicators cannot appear within a scalar as well.
-                if ch in u',?[]{}':
+                if ch in ',?[]{}':
                     flow_indicators = True
-                if ch == u':':
+                if ch == ':':
                     flow_indicators = True
                     if followed_by_space:
                         block_indicators = True
-                if ch == u'#' and preceeded_by_space:
+                if ch == '#' and preceeded_by_space:
                     flow_indicators = True
                     block_indicators = True
 
             # Check for line breaks, special, and unicode characters.
 
-            if ch in u'\n\x85\u2028\u2029':
+            if ch in '\n\x85\u2028\u2029':
                 line_breaks = True
-            if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'):
-                if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF'
-                        or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF':
+            if not (ch == '\n' or '\x20' <= ch <= '\x7E'):
+                if (ch == '\x85' or '\xA0' <= ch <= '\uD7FF'
+                        or '\uE000' <= ch <= '\uFFFD') and ch != '\uFEFF':
                     unicode_characters = True
                     if not self.allow_unicode:
                         special_characters = True
@@ -709,20 +709,20 @@ class Emitter(object):
             # Spaces, line breaks, and how they are mixed. State machine.
 
             # Start or continue series of whitespaces.
-            if ch in u' \n\x85\u2028\u2029':
+            if ch in ' \n\x85\u2028\u2029':
                 if spaces and breaks:
-                    if ch != u' ':      # break+ (space+ break+)    => mixed
+                    if ch != ' ':      # break+ (space+ break+)    => mixed
                         mixed = True
                 elif spaces:
-                    if ch != u' ':      # (space+ break+)   => mixed
+                    if ch != ' ':      # (space+ break+)   => mixed
                         breaks = True
                         mixed = True
                 elif breaks:
-                    if ch == u' ':      # break+ space+
+                    if ch == ' ':      # break+ space+
                         spaces = True
                 else:
                     leading = (index == 0)
-                    if ch == u' ':      # space+
+                    if ch == ' ':      # space+
                         spaces = True
                     else:               # break+
                         breaks = True
@@ -763,9 +763,9 @@ class Emitter(object):
 
             # Prepare for the next character.
             index += 1
-            preceeded_by_space = (ch in u'\0 \t\r\n\x85\u2028\u2029')
+            preceeded_by_space = (ch in '\0 \t\r\n\x85\u2028\u2029')
             followed_by_space = (index+1 >= len(scalar) or
-                    scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029')
+                    scalar[index+1] in '\0 \t\r\n\x85\u2028\u2029')
 
         # Let's decide what styles are allowed.
         allow_flow_plain = True
@@ -824,7 +824,7 @@ class Emitter(object):
     def write_stream_start(self):
         # Write BOM if needed.
         if self.encoding and self.encoding.startswith('utf-16'):
-            self.stream.write(u'\xFF\xFE'.encode(self.encoding))
+            self.stream.write('\xFF\xFE'.encode(self.encoding))
 
     def write_stream_end(self):
         self.flush_stream()
@@ -834,7 +834,7 @@ class Emitter(object):
         if self.whitespace or not need_whitespace:
             data = indicator
         else:
-            data = u' '+indicator
+            data = ' '+indicator
         self.whitespace = whitespace
         self.indention = self.indention and indention
         self.column += len(data)
@@ -849,7 +849,7 @@ class Emitter(object):
             self.write_line_break()
         if self.column < indent:
             self.whitespace = True
-            data = u' '*(indent-self.column)
+            data = ' '*(indent-self.column)
             self.column = indent
             if self.encoding:
                 data = data.encode(self.encoding)
@@ -867,14 +867,14 @@ class Emitter(object):
         self.stream.write(data)
 
     def write_version_directive(self, version_text):
-        data = u'%%YAML %s' % version_text
+        data = '%%YAML %s' % version_text
         if self.encoding:
             data = data.encode(self.encoding)
         self.stream.write(data)
         self.write_line_break()
 
     def write_tag_directive(self, handle_text, prefix_text):
-        data = u'%%TAG %s %s' % (handle_text, prefix_text)
+        data = '%%TAG %s %s' % (handle_text, prefix_text)
         if self.encoding:
             data = data.encode(self.encoding)
         self.stream.write(data)
@@ -883,7 +883,7 @@ class Emitter(object):
     # Scalar streams.
 
     def write_single_quoted(self, text, split=True):
-        self.write_indicator(u'\'', True)
+        self.write_indicator('\'', True)
         spaces = False
         breaks = False
         start = end = 0
@@ -892,7 +892,7 @@ class Emitter(object):
             if end < len(text):
                 ch = text[end]
             if spaces:
-                if ch is None or ch != u' ':
+                if ch is None or ch != ' ':
                     if start+1 == end and self.column > self.best_width and split   \
                             and start != 0 and end != len(text):
                         self.write_indent()
@@ -904,18 +904,18 @@ class Emitter(object):
                         self.stream.write(data)
                     start = end
             elif breaks:
-                if ch is None or ch not in u'\n\x85\u2028\u2029':
-                    if text[start] == u'\n':
+                if ch is None or ch not in '\n\x85\u2028\u2029':
+                    if text[start] == '\n':
                         self.write_line_break()
                     for br in text[start:end]:
-                        if br == u'\n':
+                        if br == '\n':
                             self.write_line_break()
                         else:
                             self.write_line_break(br)
                     self.write_indent()
                     start = end
             else:
-                if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'':
+                if ch is None or ch in ' \n\x85\u2028\u2029' or ch == '\'':
                     if start < end:
                         data = text[start:end]
                         self.column += len(data)
@@ -923,49 +923,49 @@ class Emitter(object):
                             data = data.encode(self.encoding)
                         self.stream.write(data)
                         start = end
-            if ch == u'\'':
-                data = u'\'\''
+            if ch == '\'':
+                data = '\'\''
                 self.column += 2
                 if self.encoding:
                     data = data.encode(self.encoding)
                 self.stream.write(data)
                 start = end + 1
             if ch is not None:
-                spaces = (ch == u' ')
-                breaks = (ch in u'\n\x85\u2028\u2029')
+                spaces = (ch == ' ')
+                breaks = (ch in '\n\x85\u2028\u2029')
             end += 1
-        self.write_indicator(u'\'', False)
+        self.write_indicator('\'', False)
 
     ESCAPE_REPLACEMENTS = {
-        u'\0':      u'0',
-        u'\x07':    u'a',
-        u'\x08':    u'b',
-        u'\x09':    u't',
-        u'\x0A':    u'n',
-        u'\x0B':    u'v',
-        u'\x0C':    u'f',
-        u'\x0D':    u'r',
-        u'\x1B':    u'e',
-        u'\"':      u'\"',
-        u'\\':      u'\\',
-        u'\x85':    u'N',
-        u'\xA0':    u'_',
-        u'\u2028':  u'L',
-        u'\u2029':  u'P',
+        '\0':      '0',
+        '\x07':    'a',
+        '\x08':    'b',
+        '\x09':    't',
+        '\x0A':    'n',
+        '\x0B':    'v',
+        '\x0C':    'f',
+        '\x0D':    'r',
+        '\x1B':    'e',
+        '\"':      '\"',
+        '\\':      '\\',
+        '\x85':    'N',
+        '\xA0':    '_',
+        '\u2028':  'L',
+        '\u2029':  'P',
     }
 
     def write_double_quoted(self, text, split=True):
-        self.write_indicator(u'"', True)
+        self.write_indicator('"', True)
         start = end = 0
         while end <= len(text):
             ch = None
             if end < len(text):
                 ch = text[end]
-            if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \
-                    or not (u'\x20' <= ch <= u'\x7E'
+            if ch is None or ch in '"\\\x85\u2028\u2029\uFEFF' \
+                    or not ('\x20' <= ch <= '\x7E'
                         or (self.allow_unicode
-                            and (u'\xA0' <= ch <= u'\uD7FF'
-                                or u'\uE000' <= ch <= u'\uFFFD'))):
+                            and ('\xA0' <= ch <= '\uD7FF'
+                                or '\uE000' <= ch <= '\uFFFD'))):
                 if start < end:
                     data = text[start:end]
                     self.column += len(data)
@@ -975,21 +975,21 @@ class Emitter(object):
                     start = end
                 if ch is not None:
                     if ch in self.ESCAPE_REPLACEMENTS:
-                        data = u'\\'+self.ESCAPE_REPLACEMENTS[ch]
-                    elif ch <= u'\xFF':
-                        data = u'\\x%02X' % ord(ch)
-                    elif ch <= u'\uFFFF':
-                        data = u'\\u%04X' % ord(ch)
+                        data = '\\'+self.ESCAPE_REPLACEMENTS[ch]
+                    elif ch <= '\xFF':
+                        data = '\\x%02X' % ord(ch)
+                    elif ch <= '\uFFFF':
+                        data = '\\u%04X' % ord(ch)
                     else:
-                        data = u'\\U%08X' % ord(ch)
+                        data = '\\U%08X' % ord(ch)
                     self.column += len(data)
                     if self.encoding:
                         data = data.encode(self.encoding)
                     self.stream.write(data)
                     start = end+1
-            if 0 < end < len(text)-1 and (ch == u' ' or start >= end)   \
+            if 0 < end < len(text)-1 and (ch == ' ' or start >= end)   \
                     and self.column+(end-start) > self.best_width and split:
-                data = text[start:end]+u'\\'
+                data = text[start:end]+'\\'
                 if start < end:
                     start = end
                 self.column += len(data)
@@ -999,30 +999,30 @@ class Emitter(object):
                 self.write_indent()
                 self.whitespace = False
                 self.indention = False
-                if text[start] == u' ':
-                    data = u'\\'
+                if text[start] == ' ':
+                    data = '\\'
                     self.column += len(data)
                     if self.encoding:
                         data = data.encode(self.encoding)
                     self.stream.write(data)
             end += 1
-        self.write_indicator(u'"', False)
+        self.write_indicator('"', False)
 
     def determine_chomp(self, text):
         tail = text[-2:]
         while len(tail) < 2:
-            tail = u' '+tail
-        if tail[-1] in u'\n\x85\u2028\u2029':
-            if tail[-2] in u'\n\x85\u2028\u2029':
-                return u'+'
+            tail = ' '+tail
+        if tail[-1] in '\n\x85\u2028\u2029':
+            if tail[-2] in '\n\x85\u2028\u2029':
+                return '+'
             else:
-                return u''
+                return ''
         else:
-            return u'-'
+            return '-'
 
     def write_folded(self, text):
         chomp = self.determine_chomp(text)
-        self.write_indicator(u'>'+chomp, True)
+        self.write_indicator('>'+chomp, True)
         self.write_indent()
         leading_space = False
         spaces = False
@@ -1033,13 +1033,13 @@ class Emitter(object):
             if end < len(text):
                 ch = text[end]
             if breaks:
-                if ch is None or ch not in u'\n\x85\u2028\u2029':
-                    if not leading_space and ch is not None and ch != u' '  \
-                            and text[start] == u'\n':
+                if ch is None or ch not in '\n\x85\u2028\u2029':
+                    if not leading_space and ch is not None and ch != ' '  \
+                            and text[start] == '\n':
                         self.write_line_break()
-                    leading_space = (ch == u' ')
+                    leading_space = (ch == ' ')
                     for br in text[start:end]:
-                        if br == u'\n':
+                        if br == '\n':
                             self.write_line_break()
                         else:
                             self.write_line_break(br)
@@ -1047,7 +1047,7 @@ class Emitter(object):
                         self.write_indent()
                     start = end
             elif spaces:
-                if ch != u' ':
+                if ch != ' ':
                     if start+1 == end and self.column > self.best_width:
                         self.write_indent()
                     else:
@@ -1058,7 +1058,7 @@ class Emitter(object):
                         self.stream.write(data)
                     start = end
             else:
-                if ch is None or ch in u' \n\x85\u2028\u2029':
+                if ch is None or ch in ' \n\x85\u2028\u2029':
                     data = text[start:end]
                     if self.encoding:
                         data = data.encode(self.encoding)
@@ -1067,13 +1067,13 @@ class Emitter(object):
                         self.write_line_break()
                     start = end
             if ch is not None:
-                breaks = (ch in u'\n\x85\u2028\u2029')
-                spaces = (ch == u' ')
+                breaks = (ch in '\n\x85\u2028\u2029')
+                spaces = (ch == ' ')
             end += 1
 
     def write_literal(self, text):
         chomp = self.determine_chomp(text)
-        self.write_indicator(u'|'+chomp, True)
+        self.write_indicator('|'+chomp, True)
         self.write_indent()
         breaks = False
         start = end = 0
@@ -1082,9 +1082,9 @@ class Emitter(object):
             if end < len(text):
                 ch = text[end]
             if breaks:
-                if ch is None or ch not in u'\n\x85\u2028\u2029':
+                if ch is None or ch not in '\n\x85\u2028\u2029':
                     for br in text[start:end]:
-                        if br == u'\n':
+                        if br == '\n':
                             self.write_line_break()
                         else:
                             self.write_line_break(br)
@@ -1092,7 +1092,7 @@ class Emitter(object):
                         self.write_indent()
                     start = end
             else:
-                if ch is None or ch in u'\n\x85\u2028\u2029':
+                if ch is None or ch in '\n\x85\u2028\u2029':
                     data = text[start:end]
                     if self.encoding:
                         data = data.encode(self.encoding)
@@ -1101,14 +1101,14 @@ class Emitter(object):
                         self.write_line_break()
                     start = end
             if ch is not None:
-                breaks = (ch in u'\n\x85\u2028\u2029')
+                breaks = (ch in '\n\x85\u2028\u2029')
             end += 1
 
     def write_plain(self, text, split=True):
         if not text:
             return
         if not self.whitespace:
-            data = u' '
+            data = ' '
             self.column += len(data)
             if self.encoding:
                 data = data.encode(self.encoding)
@@ -1123,7 +1123,7 @@ class Emitter(object):
             if end < len(text):
                 ch = text[end]
             if spaces:
-                if ch != u' ':
+                if ch != ' ':
                     if start+1 == end and self.column > self.best_width and split:
                         self.write_indent()
-                        self.writespace = False
+                        self.whitespace = False
@@ -1136,11 +1136,11 @@ class Emitter(object):
                         self.stream.write(data)
                     start = end
             elif breaks:
-                if ch not in u'\n\x85\u2028\u2029':
-                    if text[start] == u'\n':
+                if ch not in '\n\x85\u2028\u2029':
+                    if text[start] == '\n':
                         self.write_line_break()
                     for br in text[start:end]:
-                        if br == u'\n':
+                        if br == '\n':
                             self.write_line_break()
                         else:
                             self.write_line_break(br)
@@ -1149,7 +1149,7 @@ class Emitter(object):
                     self.indention = False
                     start = end
             else:
-                if ch is None or ch in u' \n\x85\u2028\u2029':
+                if ch is None or ch in ' \n\x85\u2028\u2029':
                     data = text[start:end]
                     self.column += len(data)
                     if self.encoding:
@@ -1157,7 +1157,7 @@ class Emitter(object):
                     self.stream.write(data)
                     start = end
             if ch is not None:
-                spaces = (ch == u' ')
-                breaks = (ch in u'\n\x85\u2028\u2029')
+                spaces = (ch == ' ')
+                breaks = (ch in '\n\x85\u2028\u2029')
             end += 1
 
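Note on the emitter changes: almost every hunk above only drops the u''
prefix. Under PEP 414 (Python 3.3+) the prefix is legal but redundant, so
the rewrite preserves behavior exactly. A quick illustrative check, not
part of the commit:

    # u-prefixed and plain literals are the same str type and compare
    # equal, so indicators such as '---', '...', '&' and '*' are emitted
    # unchanged.
    assert u'---' == '---' and u'...' == '...'
    assert type(u'&') is type('&') is str
    # Escape-table entries keep working as dict keys:
    ESCAPES = {'\x85': 'N', '\u2028': 'L', '\u2029': 'P'}
    assert ESCAPES[u'\x85'] == 'N'
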
diff --git a/src/madpack/yaml/error.py b/src/madpack/yaml/error.py
index 577686db..e68d5f45 100644
--- a/src/madpack/yaml/error.py
+++ b/src/madpack/yaml/error.py
@@ -16,7 +16,7 @@ class Mark(object):
             return None
         head = ''
         start = self.pointer
-        while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029':
+        while start > 0 and self.buffer[start-1] not in '\0\r\n\x85\u2028\u2029':
             start -= 1
             if self.pointer-start > max_length/2-1:
                 head = ' ... '
@@ -24,7 +24,7 @@ class Mark(object):
                 break
         tail = ''
         end = self.pointer
-        while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029':
+        while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029':
             end += 1
             if end-self.pointer > max_length/2-1:
                 tail = ' ... '
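
Note on Mark.get_snippet: one Python 3 subtlety survives here. With true
division, max_length/2-1 is now a float, so the truncation threshold
shifts by half a character; harmless, but // would keep the old integer
semantics exactly. Illustrative only, assuming the upstream default of
max_length=75:

    max_length = 75
    assert max_length / 2 - 1 == 36.5    # Python 3 true division
    assert max_length // 2 - 1 == 36     # the Python 2 result
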
diff --git a/src/madpack/yaml/loader.py b/src/madpack/yaml/loader.py
index 293ff467..08c8f01b 100644
--- a/src/madpack/yaml/loader.py
+++ b/src/madpack/yaml/loader.py
@@ -1,12 +1,12 @@
 
 __all__ = ['BaseLoader', 'SafeLoader', 'Loader']
 
-from reader import *
-from scanner import *
-from parser import *
-from composer import *
-from constructor import *
-from resolver import *
+from .reader import *
+from .scanner import *
+from .parser import *
+from .composer import *
+from .constructor import *
+from .resolver import *
 
 class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
 
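Note on the import changes: the dotted forms are PEP 328 explicit
relative imports. Python 3 removed the implicit fallback, so a bare
"from reader import *" inside the package raises ModuleNotFoundError
unless a top-level module named reader happens to be on sys.path. A
hedged illustration, assuming this vendored copy is importable as
"yaml":

    import importlib

    # Explicit relative resolution always stays inside the package:
    reader = importlib.import_module('.reader', package='yaml')
    print(reader.__name__)    # -> 'yaml.reader'
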
diff --git a/src/madpack/yaml/parser.py b/src/madpack/yaml/parser.py
index a46bb9e9..9074710d 100644
--- a/src/madpack/yaml/parser.py
+++ b/src/madpack/yaml/parser.py
@@ -61,10 +61,10 @@
 
 __all__ = ['Parser', 'ParserError']
 
-from error import MarkedYAMLError
-from tokens import *
-from events import *
-from scanner import *
+from .error import MarkedYAMLError
+from .tokens import *
+from .events import *
+from .scanner import *
 
 class ParserError(MarkedYAMLError):
     pass
@@ -76,8 +76,8 @@ class Parser(object):
     # language, you may replace all 'yield'-s with event handler calls.
 
     DEFAULT_TAGS = {
-        u'!':   u'!',
-        u'!!':  u'tag:yaml.org,2002:',
+        '!':   '!',
+        '!!':  'tag:yaml.org,2002:',
     }
 
     def __init__(self):
@@ -216,7 +216,7 @@ class Parser(object):
         self.tag_handles = {}
         while self.check_token(DirectiveToken):
             token = self.get_token()
-            if token.name == u'YAML':
+            if token.name == 'YAML':
                 if self.yaml_version is not None:
                     raise ParserError(None, None,
                             "found duplicate YAML directive", token.start_mark)
@@ -226,7 +226,7 @@ class Parser(object):
                             "found incompatible YAML document (version 1.* is required)",
                             token.start_mark)
                 self.yaml_version = token.value
-            elif token.name == u'TAG':
+            elif token.name == 'TAG':
                 handle, prefix = token.value
                 if handle in self.tag_handles:
                     raise ParserError(None, None,
@@ -312,7 +312,7 @@ class Parser(object):
             if start_mark is None:
                 start_mark = end_mark = self.peek_token().start_mark
             event = None
-            implicit = (tag is None or tag == u'!')
+            implicit = (tag is None or tag == '!')
             if indentless_sequence and self.check_token(BlockEntryToken):
                 end_mark = self.peek_token().end_mark
                 event = SequenceStartEvent(anchor, tag, implicit,
@@ -322,7 +322,7 @@ class Parser(object):
                 if self.check_token(ScalarToken):
                     token = self.get_token()
                     end_mark = token.end_mark
-                    if (token.plain and tag is None) or tag == u'!':
+                    if (token.plain and tag is None) or tag == '!':
                         implicit = (True, False)
                     elif tag is None:
                         implicit = (False, True)
@@ -354,7 +354,7 @@ class Parser(object):
                 elif anchor is not None or tag is not None:
                     # Empty scalars are allowed even if a tag or an anchor is
                     # specified.
-                    event = ScalarEvent(anchor, tag, (implicit, False), u'',
+                    event = ScalarEvent(anchor, tag, (implicit, False), '',
                             start_mark, end_mark)
                     self.state = self.states.pop()
                 else:
@@ -582,5 +582,5 @@ class Parser(object):
         return self.process_empty_scalar(self.peek_token().start_mark)
 
     def process_empty_scalar(self, mark):
-        return ScalarEvent(None, None, (True, False), u'', mark, mark)
+        return ScalarEvent(None, None, (True, False), '', mark, mark)
 
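Note on DEFAULT_TAGS: the mapping drives tag-shorthand expansion during
directive processing, and only the literals change; '!!' still expands
to the yaml.org prefix while '!' stays local. A minimal sketch of the
expansion (illustrative, not the parser's actual code path):

    DEFAULT_TAGS = {'!': '!', '!!': 'tag:yaml.org,2002:'}

    def expand_tag(handle, suffix, tag_handles=DEFAULT_TAGS):
        # e.g. ('!!', 'str') -> 'tag:yaml.org,2002:str'
        return tag_handles[handle] + suffix

    assert expand_tag('!!', 'str') == 'tag:yaml.org,2002:str'
    assert expand_tag('!', 'local') == '!local'
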
diff --git a/src/madpack/yaml/reader.py b/src/madpack/yaml/reader.py
index 1d4667cc..c99fd2cd 100644
--- a/src/madpack/yaml/reader.py
+++ b/src/madpack/yaml/reader.py
@@ -17,7 +17,7 @@
 
 __all__ = ['Reader', 'ReaderError']
 
-from error import YAMLError, Mark
+from .error import YAMLError, Mark
 
 import codecs, re
 
@@ -96,7 +96,7 @@ class Reader(object):
         self.stream = None
         self.stream_pointer = 0
         self.eof = True
-        self.buffer = u''
+        self.buffer = ''
         self.pointer = 0
         self.raw_buffer = None
         self.raw_decode = None
@@ -104,10 +104,10 @@ class Reader(object):
         self.index = 0
         self.line = 0
         self.column = 0
-        if isinstance(stream, unicode):
+        if isinstance(stream, str):
             self.name = "<unicode string>"
             self.check_printable(stream)
-            self.buffer = stream+u'\0'
+            self.buffer = stream+'\0'
-        elif isinstance(stream, str):
+        elif isinstance(stream, bytes):
             self.name = "<string>"
             self.raw_buffer = stream
@@ -138,11 +138,11 @@ class Reader(object):
             ch = self.buffer[self.pointer]
             self.pointer += 1
             self.index += 1
-            if ch in u'\n\x85\u2028\u2029'  \
-                    or (ch == u'\r' and self.buffer[self.pointer] != u'\n'):
+            if ch in '\n\x85\u2028\u2029'  \
+                    or (ch == '\r' and self.buffer[self.pointer] != '\n'):
                 self.line += 1
                 self.column = 0
-            elif ch != u'\uFEFF':
+            elif ch != '\uFEFF':
                 self.column += 1
             length -= 1
 
@@ -157,7 +157,7 @@ class Reader(object):
     def determine_encoding(self):
         while not self.eof and len(self.raw_buffer) < 2:
             self.update_raw()
-        if not isinstance(self.raw_buffer, unicode):
+        if not isinstance(self.raw_buffer, str):
             if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
                 self.raw_decode = utf_16_le_decode
                 self.encoding = 'utf-16-le'
@@ -169,7 +169,7 @@ class Reader(object):
                 self.encoding = 'utf-8'
         self.update(1)
 
-    NON_PRINTABLE = re.compile(u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
+    NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
     def check_printable(self, data):
         match = self.NON_PRINTABLE.search(data)
         if match:
@@ -190,7 +190,7 @@ class Reader(object):
                 try:
                     data, converted = self.raw_decode(self.raw_buffer,
                             'strict', self.eof)
-                except UnicodeDecodeError, exc:
+                except UnicodeDecodeError as exc:
                     character = exc.object[exc.start]
                     if self.stream is not None:
                         position = self.stream_pointer-len(self.raw_buffer)+exc.start
@@ -205,7 +205,7 @@ class Reader(object):
             self.buffer += data
             self.raw_buffer = self.raw_buffer[converted:]
             if self.eof:
-                self.buffer += u'\0'
+                self.buffer += '\0'
                 self.raw_buffer = None
                 break
 
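Note on the reader changes: two of these are hard Python 3 requirements,
not style. "except UnicodeDecodeError, exc" is a syntax error on Python
3 and must become "except ... as exc", and the old unicode/str pair
collapses into a str/bytes split in __init__: text input is used
verbatim, byte input goes through BOM/encoding detection, and anything
else is treated as a file-like stream. A sketch of that dispatch
(illustrative only):

    import io

    def classify(stream):
        if isinstance(stream, str):      # already-decoded text
            return 'unicode string'
        if isinstance(stream, bytes):    # raw bytes: detect encoding
            return 'byte string'
        return 'file-like object'        # read incrementally

    assert classify('a: 1') == 'unicode string'
    assert classify(b'a: 1') == 'byte string'
    assert classify(io.StringIO('a: 1')) == 'file-like object'
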
diff --git a/src/madpack/yaml/representer.py b/src/madpack/yaml/representer.py
index 1f4fe594..31d60e2d 100644
--- a/src/madpack/yaml/representer.py
+++ b/src/madpack/yaml/representer.py
@@ -2,8 +2,8 @@
 __all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
     'RepresenterError']
 
-from error import *
-from nodes import *
+from .error import *
+from .nodes import *
 
 import datetime
 
@@ -12,7 +12,7 @@ try:
 except NameError:
     from sets import Set as set
 
-import sys, copy_reg, types
+import sys, copyreg, types
 
 class RepresenterError(YAMLError):
     pass
@@ -56,7 +56,7 @@ class BaseRepresenter(object):
             #self.represented_objects[alias_key] = None
             self.object_keeper.append(data)
         data_types = type(data).__mro__
-        if type(data) is types.InstanceType:
+        if type(data) is getattr(types, "InstanceType", object):
             data_types = self.get_classobj_bases(data.__class__)+list(data_types)
         if data_types[0] in self.yaml_representers:
             node = self.yaml_representers[data_types[0]](self, data)
@@ -71,7 +71,7 @@ class BaseRepresenter(object):
                 elif None in self.yaml_representers:
                     node = self.yaml_representers[None](self, data)
                 else:
-                    node = ScalarNode(None, unicode(data))
+                    node = ScalarNode(None, str(data))
         #if alias_key is not None:
         #    self.represented_objects[alias_key] = node
         return node
@@ -121,7 +121,7 @@ class BaseRepresenter(object):
             self.represented_objects[self.alias_key] = node
         best_style = True
         if hasattr(mapping, 'items'):
-            mapping = mapping.items()
+            mapping = list(mapping.items())
             mapping.sort()
         for item_key, item_value in mapping:
             node_key = self.represent_data(item_key)
@@ -146,44 +146,44 @@ class SafeRepresenter(BaseRepresenter):
     def ignore_aliases(self, data):
         if data in [None, ()]:
             return True
-        if isinstance(data, (str, unicode, bool, int, float)):
+        if isinstance(data, (str, bool, int, float)):
             return True
 
     def represent_none(self, data):
-        return self.represent_scalar(u'tag:yaml.org,2002:null',
-                u'null')
+        return self.represent_scalar('tag:yaml.org,2002:null',
+                'null')
 
     def represent_str(self, data):
         tag = None
         style = None
         try:
-            data = unicode(data, 'ascii')
-            tag = u'tag:yaml.org,2002:str'
+            data = str(data, 'ascii')
+            tag = 'tag:yaml.org,2002:str'
         except UnicodeDecodeError:
             try:
-                data = unicode(data, 'utf-8')
-                tag = u'tag:yaml.org,2002:str'
+                data = str(data, 'utf-8')
+                tag = 'tag:yaml.org,2002:str'
             except UnicodeDecodeError:
                 data = data.encode('base64')
-                tag = u'tag:yaml.org,2002:binary'
+                tag = 'tag:yaml.org,2002:binary'
                 style = '|'
         return self.represent_scalar(tag, data, style=style)
 
     def represent_unicode(self, data):
-        return self.represent_scalar(u'tag:yaml.org,2002:str', data)
+        return self.represent_scalar('tag:yaml.org,2002:str', data)
 
     def represent_bool(self, data):
         if data:
-            value = u'true'
+            value = 'true'
         else:
-            value = u'false'
-        return self.represent_scalar(u'tag:yaml.org,2002:bool', value)
+            value = 'false'
+        return self.represent_scalar('tag:yaml.org,2002:bool', value)
 
     def represent_int(self, data):
-        return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
+        return self.represent_scalar('tag:yaml.org,2002:int', str(data))
 
     def represent_long(self, data):
-        return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
+        return self.represent_scalar('tag:yaml.org,2002:int', str(data))
 
     inf_value = 1e300
     while repr(inf_value) != repr(inf_value*inf_value):
@@ -191,13 +191,13 @@ class SafeRepresenter(BaseRepresenter):
 
     def represent_float(self, data):
         if data != data or (data == 0.0 and data == 1.0):
-            value = u'.nan'
+            value = '.nan'
         elif data == self.inf_value:
-            value = u'.inf'
+            value = '.inf'
         elif data == -self.inf_value:
-            value = u'-.inf'
+            value = '-.inf'
         else:
-            value = unicode(repr(data)).lower()
+            value = str(repr(data)).lower()
             # Note that in some cases `repr(data)` represents a float number
             # without the decimal parts.  For instance:
             #   >>> repr(1e17)
@@ -205,9 +205,9 @@ class SafeRepresenter(BaseRepresenter):
             # Unfortunately, this is not a valid float representation according
             # to the definition of the `!!float` tag.  We fix this by adding
             # '.0' before the 'e' symbol.
-            if u'.' not in value and u'e' in value:
-                value = value.replace(u'e', u'.0e', 1)
-        return self.represent_scalar(u'tag:yaml.org,2002:float', value)
+            if '.' not in value and 'e' in value:
+                value = value.replace('e', '.0e', 1)
+        return self.represent_scalar('tag:yaml.org,2002:float', value)
 
     def represent_list(self, data):
         #pairs = (len(data) > 0 and isinstance(data, list))
@@ -217,7 +217,7 @@ class SafeRepresenter(BaseRepresenter):
         #            pairs = False
         #            break
         #if not pairs:
-            return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
+            return self.represent_sequence('tag:yaml.org,2002:seq', data)
         #value = []
         #for item_key, item_value in data:
         #    value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
@@ -225,21 +225,21 @@ class SafeRepresenter(BaseRepresenter):
         #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
 
     def represent_dict(self, data):
-        return self.represent_mapping(u'tag:yaml.org,2002:map', data)
+        return self.represent_mapping('tag:yaml.org,2002:map', data)
 
     def represent_set(self, data):
         value = {}
         for key in data:
             value[key] = None
-        return self.represent_mapping(u'tag:yaml.org,2002:set', value)
+        return self.represent_mapping('tag:yaml.org,2002:set', value)
 
     def represent_date(self, data):
-        value = unicode(data.isoformat())
-        return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
+        value = str(data.isoformat())
+        return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
 
     def represent_datetime(self, data):
-        value = unicode(data.isoformat(' '))
-        return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
+        value = str(data.isoformat(' '))
+        return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
 
     def represent_yaml_object(self, tag, data, cls, flow_style=None):
         if hasattr(data, '__getstate__'):
@@ -257,7 +257,7 @@ SafeRepresenter.add_representer(type(None),
 SafeRepresenter.add_representer(str,
         SafeRepresenter.represent_str)
 
-SafeRepresenter.add_representer(unicode,
+SafeRepresenter.add_representer(str,
         SafeRepresenter.represent_unicode)
 
 SafeRepresenter.add_representer(bool,
@@ -266,7 +266,7 @@ SafeRepresenter.add_representer(bool,
 SafeRepresenter.add_representer(int,
         SafeRepresenter.represent_int)
 
-SafeRepresenter.add_representer(long,
+SafeRepresenter.add_representer(int,
         SafeRepresenter.represent_long)
 
 SafeRepresenter.add_representer(float,
@@ -298,15 +298,15 @@ class Representer(SafeRepresenter):
         tag = None
         style = None
         try:
-            data = unicode(data, 'ascii')
-            tag = u'tag:yaml.org,2002:str'
+            data = str(data, 'ascii')
+            tag = 'tag:yaml.org,2002:str'
         except UnicodeDecodeError:
             try:
-                data = unicode(data, 'utf-8')
-                tag = u'tag:yaml.org,2002:python/str'
+                data = str(data, 'utf-8')
+                tag = 'tag:yaml.org,2002:python/str'
             except UnicodeDecodeError:
                 data = data.encode('base64')
-                tag = u'tag:yaml.org,2002:binary'
+                tag = 'tag:yaml.org,2002:binary'
                 style = '|'
         return self.represent_scalar(tag, data, style=style)
 
@@ -314,38 +314,38 @@ class Representer(SafeRepresenter):
         tag = None
         try:
             data.encode('ascii')
-            tag = u'tag:yaml.org,2002:python/unicode'
+            tag = 'tag:yaml.org,2002:python/unicode'
         except UnicodeEncodeError:
-            tag = u'tag:yaml.org,2002:str'
+            tag = 'tag:yaml.org,2002:str'
         return self.represent_scalar(tag, data)
 
     def represent_long(self, data):
-        tag = u'tag:yaml.org,2002:int'
+        tag = 'tag:yaml.org,2002:int'
         if int(data) is not data:
-            tag = u'tag:yaml.org,2002:python/long'
-        return self.represent_scalar(tag, unicode(data))
+            tag = 'tag:yaml.org,2002:python/long'
+        return self.represent_scalar(tag, str(data))
 
     def represent_complex(self, data):
         if data.imag == 0.0:
-            data = u'%r' % data.real
+            data = '%r' % data.real
         elif data.real == 0.0:
-            data = u'%rj' % data.imag
+            data = '%rj' % data.imag
         elif data.imag > 0:
-            data = u'%r+%rj' % (data.real, data.imag)
+            data = '%r+%rj' % (data.real, data.imag)
         else:
-            data = u'%r%rj' % (data.real, data.imag)
-        return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)
+            data = '%r%rj' % (data.real, data.imag)
+        return self.represent_scalar('tag:yaml.org,2002:python/complex', data)
 
     def represent_tuple(self, data):
-        return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)
+        return self.represent_sequence('tag:yaml.org,2002:python/tuple', data)
 
     def represent_name(self, data):
-        name = u'%s.%s' % (data.__module__, data.__name__)
-        return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'')
+        name = '%s.%s' % (data.__module__, data.__name__)
+        return self.represent_scalar('tag:yaml.org,2002:python/name:'+name, '')
 
     def represent_module(self, data):
         return self.represent_scalar(
-                u'tag:yaml.org,2002:python/module:'+data.__name__, u'')
+                'tag:yaml.org,2002:python/module:'+data.__name__, '')
 
     def represent_instance(self, data):
         # For instances of classic classes, we use __getinitargs__ and
@@ -366,7 +366,7 @@ class Representer(SafeRepresenter):
         # !!python/object/new node.
 
         cls = data.__class__
-        class_name = u'%s.%s' % (cls.__module__, cls.__name__)
+        class_name = '%s.%s' % (cls.__module__, cls.__name__)
         args = None
         state = None
         if hasattr(data, '__getinitargs__'):
@@ -377,16 +377,16 @@ class Representer(SafeRepresenter):
             state = data.__dict__
         if args is None and isinstance(state, dict):
             return self.represent_mapping(
-                    u'tag:yaml.org,2002:python/object:'+class_name, state)
+                    'tag:yaml.org,2002:python/object:'+class_name, state)
         if isinstance(state, dict) and not state:
             return self.represent_sequence(
-                    u'tag:yaml.org,2002:python/object/new:'+class_name, args)
+                    'tag:yaml.org,2002:python/object/new:'+class_name, args)
         value = {}
         if args:
             value['args'] = args
         value['state'] = state
         return self.represent_mapping(
-                u'tag:yaml.org,2002:python/object/new:'+class_name, value)
+                'tag:yaml.org,2002:python/object/new:'+class_name, value)
 
     def represent_object(self, data):
         # We use __reduce__ API to save the data. data.__reduce__ returns
@@ -406,8 +406,8 @@ class Representer(SafeRepresenter):
         # !!python/object/apply node.
 
         cls = type(data)
-        if cls in copy_reg.dispatch_table:
-            reduce = copy_reg.dispatch_table[cls](data)
+        if cls in copyreg.dispatch_table:
+            reduce = copyreg.dispatch_table[cls](data)
         elif hasattr(data, '__reduce_ex__'):
             reduce = data.__reduce_ex__(2)
         elif hasattr(data, '__reduce__'):
@@ -426,16 +426,16 @@ class Representer(SafeRepresenter):
         if function.__name__ == '__newobj__':
             function = args[0]
             args = args[1:]
-            tag = u'tag:yaml.org,2002:python/object/new:'
+            tag = 'tag:yaml.org,2002:python/object/new:'
             newobj = True
         else:
-            tag = u'tag:yaml.org,2002:python/object/apply:'
+            tag = 'tag:yaml.org,2002:python/object/apply:'
             newobj = False
-        function_name = u'%s.%s' % (function.__module__, function.__name__)
+        function_name = '%s.%s' % (function.__module__, function.__name__)
         if not args and not listitems and not dictitems \
                 and isinstance(state, dict) and newobj:
             return self.represent_mapping(
-                    u'tag:yaml.org,2002:python/object:'+function_name, state)
+                    'tag:yaml.org,2002:python/object:'+function_name, state)
         if not listitems and not dictitems  \
                 and isinstance(state, dict) and not state:
             return self.represent_sequence(tag+function_name, args)
@@ -453,10 +453,10 @@ class Representer(SafeRepresenter):
 Representer.add_representer(str,
         Representer.represent_str)
 
-Representer.add_representer(unicode,
+Representer.add_representer(str,
         Representer.represent_unicode)
 
-Representer.add_representer(long,
+Representer.add_representer(int,
         Representer.represent_long)
 
 Representer.add_representer(complex,
@@ -468,7 +468,7 @@ Representer.add_representer(tuple,
 Representer.add_representer(type,
         Representer.represent_name)
 
-Representer.add_representer(types.ClassType,
+Representer.add_representer(type,
         Representer.represent_name)
 
 Representer.add_representer(types.FunctionType,
@@ -480,7 +480,7 @@ Representer.add_representer(types.BuiltinFunctionType,
 Representer.add_representer(types.ModuleType,
         Representer.represent_module)
 
-Representer.add_multi_representer(types.InstanceType,
+Representer.add_multi_representer(getattr(types, "InstanceType", object),
         Representer.represent_instance)
 
 Representer.add_multi_representer(object,
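
Note on the representer changes: besides the copy_reg -> copyreg rename,
the old unicode/long registrations now target str/int, which already
have representers. add_representer keeps one callable per type, so the
later registration wins: represent_unicode handles all str values, and
the decoding path in represent_str is never reached on Python 3 (where
str(data, 'ascii') on a str raises TypeError and 'base64' is no longer a
text encoding). Illustrative sketch of the last-wins behavior:

    representers = {}

    def add_representer(data_type, fn):
        representers[data_type] = fn

    add_representer(str, 'represent_str')
    add_representer(str, 'represent_unicode')  # replaces the entry above
    assert representers[str] == 'represent_unicode'
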
diff --git a/src/madpack/yaml/resolver.py b/src/madpack/yaml/resolver.py
index 5cbf6b3d..818f35d7 100644
--- a/src/madpack/yaml/resolver.py
+++ b/src/madpack/yaml/resolver.py
@@ -1,8 +1,8 @@
 
 __all__ = ['BaseResolver', 'Resolver']
 
-from error import *
-from nodes import *
+from .error import *
+from .nodes import *
 
 import re
 
@@ -11,9 +11,9 @@ class ResolverError(YAMLError):
 
 class BaseResolver(object):
 
-    DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str'
-    DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq'
-    DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
+    DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str'
+    DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq'
+    DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map'
 
     yaml_implicit_resolvers = {}
     yaml_path_resolvers = {}
@@ -66,10 +66,10 @@ class BaseResolver(object):
             elif node_check is dict:
                 node_check = MappingNode
             elif node_check not in [ScalarNode, SequenceNode, MappingNode]  \
-                    and not isinstance(node_check, basestring)  \
+                    and not isinstance(node_check, str)  \
                     and node_check is not None:
                 raise ResolverError("Invalid node checker: %s" % node_check)
-            if not isinstance(index_check, (basestring, int))   \
+            if not isinstance(index_check, (str, int))   \
                     and index_check is not None:
                 raise ResolverError("Invalid index checker: %s" % index_check)
             new_path.append((node_check, index_check))
@@ -117,7 +117,7 @@ class BaseResolver(object):
     def check_resolver_prefix(self, depth, path, kind,
             current_node, current_index):
         node_check, index_check = path[depth-1]
-        if isinstance(node_check, basestring):
+        if isinstance(node_check, str):
             if current_node.tag != node_check:
                 return
         elif node_check is not None:
@@ -128,7 +128,7 @@ class BaseResolver(object):
         if (index_check is False or index_check is None)    \
                 and current_index is None:
             return
-        if isinstance(index_check, basestring):
+        if isinstance(index_check, str):
             if not (isinstance(current_index, ScalarNode)
                     and index_check == current_index.value):
                 return
@@ -139,8 +139,8 @@ class BaseResolver(object):
 
     def resolve(self, kind, value, implicit):
         if kind is ScalarNode and implicit[0]:
-            if value == u'':
-                resolvers = self.yaml_implicit_resolvers.get(u'', [])
+            if value == '':
+                resolvers = self.yaml_implicit_resolvers.get('', [])
             else:
                 resolvers = self.yaml_implicit_resolvers.get(value[0], [])
             resolvers += self.yaml_implicit_resolvers.get(None, [])
@@ -165,59 +165,59 @@ class Resolver(BaseResolver):
     pass
 
 Resolver.add_implicit_resolver(
-        u'tag:yaml.org,2002:bool',
-        re.compile(ur'''^(?:yes|Yes|YES|no|No|NO
+        'tag:yaml.org,2002:bool',
+        re.compile(r'''^(?:yes|Yes|YES|no|No|NO
                     |true|True|TRUE|false|False|FALSE
                     |on|On|ON|off|Off|OFF)$''', re.X),
-        list(u'yYnNtTfFoO'))
+        list('yYnNtTfFoO'))
 
 Resolver.add_implicit_resolver(
-        u'tag:yaml.org,2002:float',
-        re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)?\.[0-9_]*(?:[eE][-+][0-9]+)?
+        'tag:yaml.org,2002:float',
+        re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)?\.[0-9_]*(?:[eE][-+][0-9]+)?
                     |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
                     |[-+]?\.(?:inf|Inf|INF)
                     |\.(?:nan|NaN|NAN))$''', re.X),
-        list(u'-+0123456789.'))
+        list('-+0123456789.'))
 
 Resolver.add_implicit_resolver(
-        u'tag:yaml.org,2002:int',
-        re.compile(ur'''^(?:[-+]?0b[0-1_]+
+        'tag:yaml.org,2002:int',
+        re.compile(r'''^(?:[-+]?0b[0-1_]+
                     |[-+]?0[0-7_]+
                     |[-+]?(?:0|[1-9][0-9_]*)
                     |[-+]?0x[0-9a-fA-F_]+
                     |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
-        list(u'-+0123456789'))
+        list('-+0123456789'))
 
 Resolver.add_implicit_resolver(
-        u'tag:yaml.org,2002:merge',
-        re.compile(ur'^(?:<<)$'),
+        'tag:yaml.org,2002:merge',
+        re.compile(r'^(?:<<)$'),
         ['<'])
 
 Resolver.add_implicit_resolver(
-        u'tag:yaml.org,2002:null',
-        re.compile(ur'''^(?: ~
+        'tag:yaml.org,2002:null',
+        re.compile(r'''^(?: ~
                     |null|Null|NULL
                     | )$''', re.X),
-        [u'~', u'n', u'N', u''])
+        ['~', 'n', 'N', ''])
 
 Resolver.add_implicit_resolver(
-        u'tag:yaml.org,2002:timestamp',
-        re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
+        'tag:yaml.org,2002:timestamp',
+        re.compile(r'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
                     |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
                      (?:[Tt]|[ \t]+)[0-9][0-9]?
                      :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
                      (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
-        list(u'0123456789'))
+        list('0123456789'))
 
 Resolver.add_implicit_resolver(
-        u'tag:yaml.org,2002:value',
-        re.compile(ur'^(?:=)$'),
+        'tag:yaml.org,2002:value',
+        re.compile(r'^(?:=)$'),
         ['='])
 
 # The following resolver is only for documentation purposes. It cannot work
 # because plain scalars cannot start with '!', '&', or '*'.
 Resolver.add_implicit_resolver(
-        u'tag:yaml.org,2002:yaml',
-        re.compile(ur'^(?:!|&|\*)$'),
-        list(u'!&*'))
+        'tag:yaml.org,2002:yaml',
+        re.compile(r'^(?:!|&|\*)$'),
+        list('!&*'))
 
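The rest of this file is purely syntactic churn: Python 3 treats every str literal as Unicode, so the u'' prefixes are redundant, and it rejects the ur'' spelling outright, so the raw regex literals must become plain r''. A minimal sketch of the pattern style, using only the standard library:

    import re
    # r'...' on py3 is already a Unicode raw string; ur'...' is a SyntaxError.
    BOOL_RE = re.compile(r'''^(?:yes|Yes|YES|no|No|NO
                |true|True|TRUE|false|False|FALSE
                |on|On|ON|off|Off|OFF)$''', re.X)
    assert BOOL_RE.match('True') and BOOL_RE.match('off')
    assert not BOOL_RE.match('maybe')
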
diff --git a/src/madpack/yaml/scanner.py b/src/madpack/yaml/scanner.py
index a3ecdd01..27f55154 100644
--- a/src/madpack/yaml/scanner.py
+++ b/src/madpack/yaml/scanner.py
@@ -26,8 +26,8 @@
 
 __all__ = ['Scanner', 'ScannerError']
 
-from error import MarkedYAMLError
-from tokens import *
+from .error import MarkedYAMLError
+from .tokens import *
 
 class ScannerError(MarkedYAMLError):
     pass
@@ -166,19 +166,19 @@ class Scanner(object):
         ch = self.peek()
 
         # Is it the end of stream?
-        if ch == u'\0':
+        if ch == '\0':
             return self.fetch_stream_end()
 
         # Is it a directive?
-        if ch == u'%' and self.check_directive():
+        if ch == '%' and self.check_directive():
             return self.fetch_directive()
 
         # Is it the document start?
-        if ch == u'-' and self.check_document_start():
+        if ch == '-' and self.check_document_start():
             return self.fetch_document_start()
 
         # Is it the document end?
-        if ch == u'.' and self.check_document_end():
+        if ch == '.' and self.check_document_end():
             return self.fetch_document_end()
 
         # TODO: support for BOM within a stream.
@@ -188,63 +188,63 @@ class Scanner(object):
         # Note: the order of the following checks is NOT significant.
 
         # Is it the flow sequence start indicator?
-        if ch == u'[':
+        if ch == '[':
             return self.fetch_flow_sequence_start()
 
         # Is it the flow mapping start indicator?
-        if ch == u'{':
+        if ch == '{':
             return self.fetch_flow_mapping_start()
 
         # Is it the flow sequence end indicator?
-        if ch == u']':
+        if ch == ']':
             return self.fetch_flow_sequence_end()
 
         # Is it the flow mapping end indicator?
-        if ch == u'}':
+        if ch == '}':
             return self.fetch_flow_mapping_end()
 
         # Is it the flow entry indicator?
-        if ch == u',':
+        if ch == ',':
             return self.fetch_flow_entry()
 
         # Is it the block entry indicator?
-        if ch == u'-' and self.check_block_entry():
+        if ch == '-' and self.check_block_entry():
             return self.fetch_block_entry()
 
         # Is it the key indicator?
-        if ch == u'?' and self.check_key():
+        if ch == '?' and self.check_key():
             return self.fetch_key()
 
         # Is it the value indicator?
-        if ch == u':' and self.check_value():
+        if ch == ':' and self.check_value():
             return self.fetch_value()
 
         # Is it an alias?
-        if ch == u'*':
+        if ch == '*':
             return self.fetch_alias()
 
         # Is it an anchor?
-        if ch == u'&':
+        if ch == '&':
             return self.fetch_anchor()
 
         # Is it a tag?
-        if ch == u'!':
+        if ch == '!':
             return self.fetch_tag()
 
         # Is it a literal scalar?
-        if ch == u'|' and not self.flow_level:
+        if ch == '|' and not self.flow_level:
             return self.fetch_literal()
 
         # Is it a folded scalar?
-        if ch == u'>' and not self.flow_level:
+        if ch == '>' and not self.flow_level:
             return self.fetch_folded()
 
         # Is it a single quoted scalar?
-        if ch == u'\'':
+        if ch == '\'':
             return self.fetch_single()
 
         # Is it a double quoted scalar?
-        if ch == u'\"':
+        if ch == '\"':
             return self.fetch_double()
 
         # It must be a plain scalar then.
@@ -280,7 +280,7 @@ class Scanner(object):
         # - should be no longer than 1024 characters.
         # Disabling this procedure will allow simple keys of any length and
         # height (may cause problems if indentation is broken though).
-        for level in self.possible_simple_keys.keys():
+        for level in list(self.possible_simple_keys.keys()):
             key = self.possible_simple_keys[level]
             if key.line != self.line  \
                     or self.index-key.index > 1024:
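
The list(...) wrapper is load-bearing rather than cosmetic: the full method (not shown in this hunk) deletes stale entries from possible_simple_keys inside the loop, and Python 3 dict views raise if the dict resizes mid-iteration. A minimal illustration with toy data:

    stale = {0: 'key-a', 1: 'key-b'}
    for level in list(stale.keys()):   # snapshot the keys before mutating
        del stale[level]
    assert stale == {}
    # Looping over stale.keys() directly while deleting raises
    # 'RuntimeError: dictionary changed size during iteration' on Python 3.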
@@ -691,22 +691,22 @@ class Scanner(object):
 
         # DOCUMENT-START:   ^ '---' (' '|'\n')
         if self.column == 0:
-            if self.prefix(3) == u'---'  \
-                    and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+            if self.prefix(3) == '---'  \
+                    and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
                 return True
 
     def check_document_end(self):
 
         # DOCUMENT-END:     ^ '...' (' '|'\n')
         if self.column == 0:
-            if self.prefix(3) == u'...'  \
-                    and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+            if self.prefix(3) == '...'  \
+                    and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
                 return True
 
     def check_block_entry(self):
 
         # BLOCK-ENTRY:      '-' (' '|'\n')
-        return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+        return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
 
     def check_key(self):
 
@@ -716,7 +716,7 @@ class Scanner(object):
 
         # KEY(block context):   '?' (' '|'\n')
         else:
-            return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+            return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
 
     def check_value(self):
 
@@ -726,7 +726,7 @@ class Scanner(object):
 
         # VALUE(block context): ':' (' '|'\n')
         else:
-            return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+            return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
 
     def check_plain(self):
 
@@ -743,9 +743,9 @@ class Scanner(object):
         # '-' character) because we want the flow context to be space
         # independent.
         ch = self.peek()
-        return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`'  \
-                or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029'
-                        and (ch == u'-' or (not self.flow_level and ch in u'?:')))
+        return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`'  \
+                or (self.peek(1) not in '\0 \t\r\n\x85\u2028\u2029'
+                        and (ch == '-' or (not self.flow_level and ch in '?:')))
 
     # Scanners.
 
@@ -769,14 +769,14 @@ class Scanner(object):
         # `unwind_indent` before issuing BLOCK-END.
         # Scanners for block, flow, and plain scalars need to be modified.
 
-        if self.index == 0 and self.peek() == u'\uFEFF':
+        if self.index == 0 and self.peek() == '\uFEFF':
             self.forward()
         found = False
         while not found:
-            while self.peek() == u' ':
+            while self.peek() == ' ':
                 self.forward()
-            if self.peek() == u'#':
-                while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+            if self.peek() == '#':
+                while self.peek() not in '\0\r\n\x85\u2028\u2029':
                     self.forward()
             if self.scan_line_break():
                 if not self.flow_level:
@@ -790,15 +790,15 @@ class Scanner(object):
         self.forward()
         name = self.scan_directive_name(start_mark)
         value = None
-        if name == u'YAML':
+        if name == 'YAML':
             value = self.scan_yaml_directive_value(start_mark)
             end_mark = self.get_mark()
-        elif name == u'TAG':
+        elif name == 'TAG':
             value = self.scan_tag_directive_value(start_mark)
             end_mark = self.get_mark()
         else:
             end_mark = self.get_mark()
-            while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+            while self.peek() not in '\0\r\n\x85\u2028\u2029':
                 self.forward()
         self.scan_directive_ignored_line(start_mark)
         return DirectiveToken(name, value, start_mark, end_mark)
@@ -807,8 +807,8 @@ class Scanner(object):
         # See the specification for details.
         length = 0
         ch = self.peek(length)
-        while u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z'  \
-                or ch in u'-_':
+        while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'  \
+                or ch in '-_':
             length += 1
             ch = self.peek(length)
         if not length:
@@ -818,7 +818,7 @@ class Scanner(object):
         value = self.prefix(length)
         self.forward(length)
         ch = self.peek()
-        if ch not in u'\0 \r\n\x85\u2028\u2029':
+        if ch not in '\0 \r\n\x85\u2028\u2029':
             raise ScannerError("while scanning a directive", start_mark,
                     "expected alphabetic or numeric character, but found %r"
                     % ch.encode('utf-8'), self.get_mark())
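
One thing these hunks leave alone is the % ch.encode('utf-8') inside the error messages. That idiom made sense on Python 2, but on Python 3 encode() yields bytes, so %r renders the offending character with a b'' wrapper; upstream PyYAML's py3 port simply formats ch itself. A quick demonstration of the difference (an observation about the patch, not part of it):

    ch = 'é'
    print("but found %r" % ch.encode('utf-8'))   # but found b'\xc3\xa9'
    print("but found %r" % ch)                   # but found 'é'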
@@ -826,7 +826,7 @@ class Scanner(object):
 
     def scan_yaml_directive_value(self, start_mark):
         # See the specification for details.
-        while self.peek() == u' ':
+        while self.peek() == ' ':
             self.forward()
         major = self.scan_yaml_directive_number(start_mark)
         if self.peek() != '.':
@@ -836,7 +836,7 @@ class Scanner(object):
                     self.get_mark())
         self.forward()
         minor = self.scan_yaml_directive_number(start_mark)
-        if self.peek() not in u'\0 \r\n\x85\u2028\u2029':
+        if self.peek() not in '\0 \r\n\x85\u2028\u2029':
             raise ScannerError("while scanning a directive", start_mark,
                     "expected a digit or ' ', but found %r"
                     % self.peek().encode('utf-8'),
@@ -846,12 +846,12 @@ class Scanner(object):
     def scan_yaml_directive_number(self, start_mark):
         # See the specification for details.
         ch = self.peek()
-        if not (u'0' <= ch <= '9'):
+        if not ('0' <= ch <= '9'):
             raise ScannerError("while scanning a directive", start_mark,
                     "expected a digit, but found %r" % ch.encode('utf-8'),
                     self.get_mark())
         length = 0
-        while u'0' <= self.peek(length) <= u'9':
+        while '0' <= self.peek(length) <= '9':
             length += 1
         value = int(self.prefix(length))
         self.forward(length)
@@ -859,10 +859,10 @@ class Scanner(object):
 
     def scan_tag_directive_value(self, start_mark):
         # See the specification for details.
-        while self.peek() == u' ':
+        while self.peek() == ' ':
             self.forward()
         handle = self.scan_tag_directive_handle(start_mark)
-        while self.peek() == u' ':
+        while self.peek() == ' ':
             self.forward()
         prefix = self.scan_tag_directive_prefix(start_mark)
         return (handle, prefix)
@@ -871,7 +871,7 @@ class Scanner(object):
         # See the specification for details.
         value = self.scan_tag_handle('directive', start_mark)
         ch = self.peek()
-        if ch != u' ':
+        if ch != ' ':
             raise ScannerError("while scanning a directive", start_mark,
                     "expected ' ', but found %r" % ch.encode('utf-8'),
                     self.get_mark())
@@ -881,7 +881,7 @@ class Scanner(object):
         # See the specification for details.
         value = self.scan_tag_uri('directive', start_mark)
         ch = self.peek()
-        if ch not in u'\0 \r\n\x85\u2028\u2029':
+        if ch not in '\0 \r\n\x85\u2028\u2029':
             raise ScannerError("while scanning a directive", start_mark,
                     "expected ' ', but found %r" % ch.encode('utf-8'),
                     self.get_mark())
@@ -889,13 +889,13 @@ class Scanner(object):
 
     def scan_directive_ignored_line(self, start_mark):
         # See the specification for details.
-        while self.peek() == u' ':
+        while self.peek() == ' ':
             self.forward()
-        if self.peek() == u'#':
-            while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+        if self.peek() == '#':
+            while self.peek() not in '\0\r\n\x85\u2028\u2029':
                 self.forward()
         ch = self.peek()
-        if ch not in u'\0\r\n\x85\u2028\u2029':
+        if ch not in '\0\r\n\x85\u2028\u2029':
             raise ScannerError("while scanning a directive", start_mark,
                     "expected a comment or a line break, but found %r"
                         % ch.encode('utf-8'), self.get_mark())
@@ -919,8 +919,8 @@ class Scanner(object):
         self.forward()
         length = 0
         ch = self.peek(length)
-        while u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z'  \
-                or ch in u'-_':
+        while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'  \
+                or ch in '-_':
             length += 1
             ch = self.peek(length)
         if not length:
@@ -930,7 +930,7 @@ class Scanner(object):
         value = self.prefix(length)
         self.forward(length)
         ch = self.peek()
-        if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
+        if ch not in '\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
             raise ScannerError("while scanning an %s" % name, start_mark,
                     "expected alphabetic or numeric character, but found %r"
                     % ch.encode('utf-8'), self.get_mark())
@@ -941,37 +941,37 @@ class Scanner(object):
         # See the specification for details.
         start_mark = self.get_mark()
         ch = self.peek(1)
-        if ch == u'<':
+        if ch == '<':
             handle = None
             self.forward(2)
             suffix = self.scan_tag_uri('tag', start_mark)
-            if self.peek() != u'>':
+            if self.peek() != '>':
                 raise ScannerError("while parsing a tag", start_mark,
                         "expected '>', but found %r" % self.peek().encode('utf-8'),
                         self.get_mark())
             self.forward()
-        elif ch in u'\0 \t\r\n\x85\u2028\u2029':
+        elif ch in '\0 \t\r\n\x85\u2028\u2029':
             handle = None
-            suffix = u'!'
+            suffix = '!'
             self.forward()
         else:
             length = 1
             use_handle = False
-            while ch not in u'\0 \r\n\x85\u2028\u2029':
-                if ch == u'!':
+            while ch not in '\0 \r\n\x85\u2028\u2029':
+                if ch == '!':
                     use_handle = True
                     break
                 length += 1
                 ch = self.peek(length)
-            handle = u'!'
+            handle = '!'
             if use_handle:
                 handle = self.scan_tag_handle('tag', start_mark)
             else:
-                handle = u'!'
+                handle = '!'
                 self.forward()
             suffix = self.scan_tag_uri('tag', start_mark)
         ch = self.peek()
-        if ch not in u'\0 \r\n\x85\u2028\u2029':
+        if ch not in '\0 \r\n\x85\u2028\u2029':
             raise ScannerError("while scanning a tag", start_mark,
                     "expected ' ', but found %r" % ch.encode('utf-8'),
                     self.get_mark())
@@ -1005,29 +1005,29 @@ class Scanner(object):
         else:
             indent = min_indent+increment-1
             breaks, end_mark = self.scan_block_scalar_breaks(indent)
-        line_break = u''
+        line_break = ''
 
         # Scan the inner part of the block scalar.
-        while self.column == indent and self.peek() != u'\0':
+        while self.column == indent and self.peek() != '\0':
             chunks.extend(breaks)
-            leading_non_space = self.peek() not in u' \t'
+            leading_non_space = self.peek() not in ' \t'
             length = 0
-            while self.peek(length) not in u'\0\r\n\x85\u2028\u2029':
+            while self.peek(length) not in '\0\r\n\x85\u2028\u2029':
                 length += 1
             chunks.append(self.prefix(length))
             self.forward(length)
             line_break = self.scan_line_break()
             breaks, end_mark = self.scan_block_scalar_breaks(indent)
-            if self.column == indent and self.peek() != u'\0':
+            if self.column == indent and self.peek() != '\0':
 
                 # Unfortunately, folding rules are ambiguous.
                 #
                 # This is the folding according to the specification:
                 
-                if folded and line_break == u'\n'   \
-                        and leading_non_space and self.peek() not in u' \t':
+                if folded and line_break == '\n'   \
+                        and leading_non_space and self.peek() not in ' \t':
                     if not breaks:
-                        chunks.append(u' ')
+                        chunks.append(' ')
                 else:
                     chunks.append(line_break)
                 
@@ -1052,7 +1052,7 @@ class Scanner(object):
             chunks.extend(breaks)
 
         # We are done.
-        return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
+        return ScalarToken(''.join(chunks), False, start_mark, end_mark,
                 style)
 
     def scan_block_scalar_indicators(self, start_mark):
@@ -1060,21 +1060,21 @@ class Scanner(object):
         chomping = None
         increment = None
         ch = self.peek()
-        if ch in u'+-':
+        if ch in '+-':
             if ch == '+':
                 chomping = True
             else:
                 chomping = False
             self.forward()
             ch = self.peek()
-            if ch in u'0123456789':
+            if ch in '0123456789':
                 increment = int(ch)
                 if increment == 0:
                     raise ScannerError("while scanning a block scalar", start_mark,
                             "expected indentation indicator in the range 1-9, but found 0",
                             self.get_mark())
                 self.forward()
-        elif ch in u'0123456789':
+        elif ch in '0123456789':
             increment = int(ch)
             if increment == 0:
                 raise ScannerError("while scanning a block scalar", start_mark,
@@ -1082,14 +1082,14 @@ class Scanner(object):
                         self.get_mark())
             self.forward()
             ch = self.peek()
-            if ch in u'+-':
+            if ch in '+-':
                 if ch == '+':
                     chomping = True
                 else:
                     chomping = False
                 self.forward()
         ch = self.peek()
-        if ch not in u'\0 \r\n\x85\u2028\u2029':
+        if ch not in '\0 \r\n\x85\u2028\u2029':
             raise ScannerError("while scanning a block scalar", start_mark,
                     "expected chomping or indentation indicators, but found %r"
                         % ch.encode('utf-8'), self.get_mark())
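
For readers less steeped in the YAML spec, the chomping ('+'/'-') and indentation indicators parsed above decide what happens to a block scalar's trailing line breaks. A small usage sketch, assuming a standard PyYAML-style yaml module on the path:

    import yaml
    assert yaml.safe_load('s: |-\n  a\n\n') == {'s': 'a'}      # '-' strips
    assert yaml.safe_load('s: |\n  a\n\n') == {'s': 'a\n'}     # default clips
    assert yaml.safe_load('s: |+\n  a\n\n') == {'s': 'a\n\n'}  # '+' keeps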
@@ -1097,13 +1097,13 @@ class Scanner(object):
 
     def scan_block_scalar_ignored_line(self, start_mark):
         # See the specification for details.
-        while self.peek() == u' ':
+        while self.peek() == ' ':
             self.forward()
-        if self.peek() == u'#':
-            while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+        if self.peek() == '#':
+            while self.peek() not in '\0\r\n\x85\u2028\u2029':
                 self.forward()
         ch = self.peek()
-        if ch not in u'\0\r\n\x85\u2028\u2029':
+        if ch not in '\0\r\n\x85\u2028\u2029':
             raise ScannerError("while scanning a block scalar", start_mark,
                     "expected a comment or a line break, but found %r"
                         % ch.encode('utf-8'), self.get_mark())
@@ -1114,8 +1114,8 @@ class Scanner(object):
         chunks = []
         max_indent = 0
         end_mark = self.get_mark()
-        while self.peek() in u' \r\n\x85\u2028\u2029':
-            if self.peek() != u' ':
+        while self.peek() in ' \r\n\x85\u2028\u2029':
+            if self.peek() != ' ':
                 chunks.append(self.scan_line_break())
                 end_mark = self.get_mark()
             else:
@@ -1128,12 +1128,12 @@ class Scanner(object):
         # See the specification for details.
         chunks = []
         end_mark = self.get_mark()
-        while self.column < indent and self.peek() == u' ':
+        while self.column < indent and self.peek() == ' ':
             self.forward()
-        while self.peek() in u'\r\n\x85\u2028\u2029':
+        while self.peek() in '\r\n\x85\u2028\u2029':
             chunks.append(self.scan_line_break())
             end_mark = self.get_mark()
-            while self.column < indent and self.peek() == u' ':
+            while self.column < indent and self.peek() == ' ':
                 self.forward()
         return chunks, end_mark
 
@@ -1158,33 +1158,33 @@ class Scanner(object):
             chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
         self.forward()
         end_mark = self.get_mark()
-        return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
+        return ScalarToken(''.join(chunks), False, start_mark, end_mark,
                 style)
 
     ESCAPE_REPLACEMENTS = {
-        u'0':   u'\0',
-        u'a':   u'\x07',
-        u'b':   u'\x08',
-        u't':   u'\x09',
-        u'\t':  u'\x09',
-        u'n':   u'\x0A',
-        u'v':   u'\x0B',
-        u'f':   u'\x0C',
-        u'r':   u'\x0D',
-        u'e':   u'\x1B',
-        u' ':   u'\x20',
-        u'\"':  u'\"',
-        u'\\':  u'\\',
-        u'N':   u'\x85',
-        u'_':   u'\xA0',
-        u'L':   u'\u2028',
-        u'P':   u'\u2029',
+        '0':   '\0',
+        'a':   '\x07',
+        'b':   '\x08',
+        't':   '\x09',
+        '\t':  '\x09',
+        'n':   '\x0A',
+        'v':   '\x0B',
+        'f':   '\x0C',
+        'r':   '\x0D',
+        'e':   '\x1B',
+        ' ':   '\x20',
+        '\"':  '\"',
+        '\\':  '\\',
+        'N':   '\x85',
+        '_':   '\xA0',
+        'L':   '\u2028',
+        'P':   '\u2029',
     }
 
     ESCAPE_CODES = {
-        u'x':   2,
-        u'u':   4,
-        u'U':   8,
+        'x':   2,
+        'u':   4,
+        'U':   8,
     }
 
     def scan_flow_scalar_non_spaces(self, double, start_mark):
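
The two tables above drive double-quoted escape handling: ESCAPE_REPLACEMENTS maps single-character escapes directly, while ESCAPE_CODES gives the hex-digit count for \x, \u and \U. The chr(code) call further down (formerly unichr) works because Python 3's chr() spans the full Unicode range. A hypothetical mini-decoder in the same spirit (decode_escape is illustrative, not MADlib code):

    ESCAPE_CODES = {'x': 2, 'u': 4, 'U': 8}

    def decode_escape(body):
        # body is the escape minus its backslash, e.g. 'x41' or 'u2028'
        width = ESCAPE_CODES[body[0]]
        return chr(int(body[1:1 + width], 16))   # py3 chr == py2 unichr

    assert decode_escape('x41') == 'A'
    assert decode_escape('u2028') == '\u2028'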
@@ -1192,19 +1192,19 @@ class Scanner(object):
         chunks = []
         while True:
             length = 0
-            while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029':
+            while self.peek(length) not in '\'\"\\\0 \t\r\n\x85\u2028\u2029':
                 length += 1
             if length:
                 chunks.append(self.prefix(length))
                 self.forward(length)
             ch = self.peek()
-            if not double and ch == u'\'' and self.peek(1) == u'\'':
-                chunks.append(u'\'')
+            if not double and ch == '\'' and self.peek(1) == '\'':
+                chunks.append('\'')
                 self.forward(2)
-            elif (double and ch == u'\'') or (not double and ch in u'\"\\'):
+            elif (double and ch == '\'') or (not double and ch in '\"\\'):
                 chunks.append(ch)
                 self.forward()
-            elif double and ch == u'\\':
+            elif double and ch == '\\':
                 self.forward()
                 ch = self.peek()
                 if ch in self.ESCAPE_REPLACEMENTS:
@@ -1214,14 +1214,14 @@ class Scanner(object):
                     length = self.ESCAPE_CODES[ch]
                     self.forward()
                     for k in range(length):
-                        if self.peek(k) not in u'0123456789ABCDEFabcdef':
+                        if self.peek(k) not in '0123456789ABCDEFabcdef':
                             raise ScannerError("while scanning a double-quoted scalar", start_mark,
                                     "expected escape sequence of %d hexdecimal numbers, but found %r" %
                                         (length, self.peek(k).encode('utf-8')), self.get_mark())
                     code = int(self.prefix(length), 16)
-                    chunks.append(unichr(code))
+                    chunks.append(chr(code))
                     self.forward(length)
-                elif ch in u'\r\n\x85\u2028\u2029':
+                elif ch in '\r\n\x85\u2028\u2029':
                     self.scan_line_break()
                     chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
                 else:
@@ -1234,21 +1234,21 @@ class Scanner(object):
         # See the specification for details.
         chunks = []
         length = 0
-        while self.peek(length) in u' \t':
+        while self.peek(length) in ' \t':
             length += 1
         whitespaces = self.prefix(length)
         self.forward(length)
         ch = self.peek()
-        if ch == u'\0':
+        if ch == '\0':
             raise ScannerError("while scanning a quoted scalar", start_mark,
                     "found unexpected end of stream", self.get_mark())
-        elif ch in u'\r\n\x85\u2028\u2029':
+        elif ch in '\r\n\x85\u2028\u2029':
             line_break = self.scan_line_break()
             breaks = self.scan_flow_scalar_breaks(double, start_mark)
-            if line_break != u'\n':
+            if line_break != '\n':
                 chunks.append(line_break)
             elif not breaks:
-                chunks.append(u' ')
+                chunks.append(' ')
             chunks.extend(breaks)
         else:
             chunks.append(whitespaces)
@@ -1261,13 +1261,13 @@ class Scanner(object):
             # Instead of checking indentation, we check for document
             # separators.
             prefix = self.prefix(3)
-            if (prefix == u'---' or prefix == u'...')   \
-                    and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+            if (prefix == '---' or prefix == '...')   \
+                    and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
                 raise ScannerError("while scanning a quoted scalar", start_mark,
                         "found unexpected document separator", self.get_mark())
-            while self.peek() in u' \t':
+            while self.peek() in ' \t':
                 self.forward()
-            if self.peek() in u'\r\n\x85\u2028\u2029':
+            if self.peek() in '\r\n\x85\u2028\u2029':
                 chunks.append(self.scan_line_break())
             else:
                 return chunks
@@ -1289,19 +1289,19 @@ class Scanner(object):
         spaces = []
         while True:
             length = 0
-            if self.peek() == u'#':
+            if self.peek() == '#':
                 break
             while True:
                 ch = self.peek(length)
-                if ch in u'\0 \t\r\n\x85\u2028\u2029'   \
-                        or (not self.flow_level and ch == u':' and
-                                self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \
-                        or (self.flow_level and ch in u',:?[]{}'):
+                if ch in '\0 \t\r\n\x85\u2028\u2029'   \
+                        or (not self.flow_level and ch == ':' and
+                                self.peek(length+1) in '\0 \t\r\n\x85\u2028\u2029') \
+                        or (self.flow_level and ch in ',:?[]{}'):
                     break
                 length += 1
             # It's not clear what we should do with ':' in the flow context.
-            if (self.flow_level and ch == u':'
-                    and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'):
+            if (self.flow_level and ch == ':'
+                    and self.peek(length+1) not in '\0 \t\r\n\x85\u2028\u2029,[]{}'):
                 self.forward(length)
                 raise ScannerError("while scanning a plain scalar", start_mark,
                     "found unexpected ':'", self.get_mark(),
@@ -1314,10 +1314,10 @@ class Scanner(object):
             self.forward(length)
             end_mark = self.get_mark()
             spaces = self.scan_plain_spaces(indent, start_mark)
-            if not spaces or self.peek() == u'#' \
+            if not spaces or self.peek() == '#' \
                     or (not self.flow_level and self.column < indent):
                 break
-        return ScalarToken(u''.join(chunks), True, start_mark, end_mark)
+        return ScalarToken(''.join(chunks), True, start_mark, end_mark)
 
     def scan_plain_spaces(self, indent, start_mark):
         # See the specification for details.
@@ -1325,32 +1325,32 @@ class Scanner(object):
         # We just forbid them completely. Do not use tabs in YAML!
         chunks = []
         length = 0
-        while self.peek(length) in u' ':
+        while self.peek(length) in ' ':
             length += 1
         whitespaces = self.prefix(length)
         self.forward(length)
         ch = self.peek()
-        if ch in u'\r\n\x85\u2028\u2029':
+        if ch in '\r\n\x85\u2028\u2029':
             line_break = self.scan_line_break()
             self.allow_simple_key = True
             prefix = self.prefix(3)
-            if (prefix == u'---' or prefix == u'...')   \
-                    and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+            if (prefix == '---' or prefix == '...')   \
+                    and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
                 return
             breaks = []
-            while self.peek() in u' \r\n\x85\u2028\u2029':
+            while self.peek() in ' \r\n\x85\u2028\u2029':
                 if self.peek() == ' ':
                     self.forward()
                 else:
                     breaks.append(self.scan_line_break())
                     prefix = self.prefix(3)
-                    if (prefix == u'---' or prefix == u'...')   \
-                            and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+                    if (prefix == '---' or prefix == '...')   \
+                            and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
                         return
-            if line_break != u'\n':
+            if line_break != '\n':
                 chunks.append(line_break)
             elif not breaks:
-                chunks.append(u' ')
+                chunks.append(' ')
             chunks.extend(breaks)
         elif whitespaces:
             chunks.append(whitespaces)
@@ -1361,18 +1361,18 @@ class Scanner(object):
         # For some strange reasons, the specification does not allow '_' in
         # tag handles. I have allowed it anyway.
         ch = self.peek()
-        if ch != u'!':
+        if ch != '!':
             raise ScannerError("while scanning a %s" % name, start_mark,
                     "expected '!', but found %r" % ch.encode('utf-8'),
                     self.get_mark())
         length = 1
         ch = self.peek(length)
-        if ch != u' ':
-            while u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z'  \
-                    or ch in u'-_':
+        if ch != ' ':
+            while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'  \
+                    or ch in '-_':
                 length += 1
                 ch = self.peek(length)
-            if ch != u'!':
+            if ch != '!':
                 self.forward(length)
                 raise ScannerError("while scanning a %s" % name, start_mark,
                         "expected '!', but found %r" % ch.encode('utf-8'),
@@ -1388,9 +1388,9 @@ class Scanner(object):
         chunks = []
         length = 0
         ch = self.peek(length)
-        while u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z'  \
-                or ch in u'-;/?:@&=+$,_.!~*\'()[]%':
-            if ch == u'%':
+        while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'  \
+                or ch in '-;/?:@&=+$,_.!~*\'()[]%':
+            if ch == '%':
                 chunks.append(self.prefix(length))
                 self.forward(length)
                 length = 0
@@ -1406,24 +1406,24 @@ class Scanner(object):
             raise ScannerError("while parsing a %s" % name, start_mark,
                     "expected URI, but found %r" % ch.encode('utf-8'),
                     self.get_mark())
-        return u''.join(chunks)
+        return ''.join(chunks)
 
     def scan_uri_escapes(self, name, start_mark):
         # See the specification for details.
         bytes = []
         mark = self.get_mark()
-        while self.peek() == u'%':
+        while self.peek() == '%':
             self.forward()
             for k in range(2):
-                if self.peek(k) not in u'0123456789ABCDEFabcdef':
+                if self.peek(k) not in '0123456789ABCDEFabcdef':
                     raise ScannerError("while scanning a %s" % name, start_mark,
                             "expected URI escape sequence of 2 hexdecimal numbers, but found %r" %
                                 (self.peek(k).encode('utf-8')), self.get_mark())
             bytes.append(chr(int(self.prefix(2), 16)))
             self.forward(2)
         try:
-            value = unicode(''.join(bytes), 'utf-8')
-        except UnicodeDecodeError, exc:
+            value = str(''.join(bytes), 'utf-8')
+        except UnicodeDecodeError as exc:
             raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
         return value
 
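This is the one hunk in scanner.py where the mechanical substitution looks insufficient: on Python 3, ''.join(bytes) already produces a str, and str(some_str, 'utf-8') raises TypeError ("decoding str is not supported"), so any multi-byte %-escaped URI would still fail at runtime. Upstream PyYAML's Python 3 port avoids this by accumulating integer byte values instead of chr() strings; a hedged sketch of that approach (an assumption about the intended fix, not what this patch does):

    # e.g. the two URI escapes %C3%A9 decode to 'é'
    hex_pairs = ['C3', 'A9']                      # text after each '%'
    codes = [int(pair, 16) for pair in hex_pairs]
    value = bytes(codes).decode('utf-8')
    assert value == 'é'
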
@@ -1437,16 +1437,16 @@ class Scanner(object):
         #   '\u2029     :   '\u2029'
         #   default     :   ''
         ch = self.peek()
-        if ch in u'\r\n\x85':
-            if self.prefix(2) == u'\r\n':
+        if ch in '\r\n\x85':
+            if self.prefix(2) == '\r\n':
                 self.forward(2)
             else:
                 self.forward()
-            return u'\n'
-        elif ch in u'\u2028\u2029':
+            return '\n'
+        elif ch in '\u2028\u2029':
             self.forward()
             return ch
-        return u''
+        return ''
 
 #try:
 #    import psyco
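
scan_line_break above implements the normalization table quoted in its comment: CRLF, CR, LF and NEL all collapse to '\n', while the Unicode line and paragraph separators pass through unchanged. A standalone restatement (hypothetical helper mirroring the method's contract, not MADlib code):

    def normalize_line_break(text):
        # returns (normalized_break, chars_consumed) for the front of text
        if text.startswith('\r\n'):
            return '\n', 2
        if text and text[0] in '\r\n\x85':
            return '\n', 1
        if text and text[0] in '\u2028\u2029':
            return text[0], 1
        return '', 0

    assert normalize_line_break('\r\nrest') == ('\n', 2)
    assert normalize_line_break('\u2029rest') == ('\u2029', 1)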
diff --git a/src/madpack/yaml/serializer.py b/src/madpack/yaml/serializer.py
index 2101f950..6200124c 100644
--- a/src/madpack/yaml/serializer.py
+++ b/src/madpack/yaml/serializer.py
@@ -1,16 +1,16 @@
 
 __all__ = ['Serializer', 'SerializerError']
 
-from error import YAMLError
-from events import *
-from nodes import *
+from .error import YAMLError
+from .events import *
+from .nodes import *
 
 class SerializerError(YAMLError):
     pass
 
 class Serializer(object):
 
-    ANCHOR_TEMPLATE = u'id%03d'
+    ANCHOR_TEMPLATE = 'id%03d'
 
     def __init__(self, encoding=None,
             explicit_start=None, explicit_end=None, version=None, tags=None):
diff --git a/src/ports/postgres/modules/dbscan/test/unit_tests/plpy_mock.py_in b/src/ports/postgres/13/CMakeLists.txt
similarity index 65%
copy from src/ports/postgres/modules/dbscan/test/unit_tests/plpy_mock.py_in
copy to src/ports/postgres/13/CMakeLists.txt
index dd186490..2d44a6f3 100644
--- a/src/ports/postgres/modules/dbscan/test/unit_tests/plpy_mock.py_in
+++ b/src/ports/postgres/13/CMakeLists.txt
@@ -1,5 +1,4 @@
-# coding=utf-8
-#
+# ------------------------------------------------------------------------------
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -16,28 +15,8 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
+# ------------------------------------------------------------------------------
 
-m4_changequote(`<!', `!>')
-def __init__(self):
-    pass
-
-def error(message):
-    raise PLPYException(message)
-
-def execute(query):
-    pass
-
-def warning(query):
-    pass
-
-def info(query):
-    print query
-
-
-class PLPYException(Exception):
-    def __init__(self, message):
-        super(PLPYException, self).__init__()
-        self.message = message
+add_current_postgresql_version()
+add_extension_support()
 
-    def __str__(self):
-        return repr(self.message)
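
Incidentally, the mock this file was copied from remains Python 2 (note the removed print query statement). A hedged sketch of what a Python 3 rendering of that mock might look like (illustrative only, not part of this commit):

    class PLPYException(Exception):
        pass

    def error(message):
        raise PLPYException(message)

    def execute(query):
        pass               # the real plpy returns rows; the mock is a no-op

    def info(query):
        print(query)       # py3: print is a function, not a statement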
diff --git a/src/ports/postgres/madpack/SQLCommon.m4_in b/src/ports/postgres/13/madpack/SQLCommon.m4
similarity index 70%
copy from src/ports/postgres/madpack/SQLCommon.m4_in
copy to src/ports/postgres/13/madpack/SQLCommon.m4
index 4a7c0420..2e282951 100644
--- a/src/ports/postgres/madpack/SQLCommon.m4_in
+++ b/src/ports/postgres/13/madpack/SQLCommon.m4
@@ -5,7 +5,26 @@
 /*
  * During build time, macro definitions will be inserted here.
  */
-@M4_DEFINES_CODE@
+m4_define(`__POSTGRESQL__')
+m4_define(`__PORT__', `POSTGRESQL')
+m4_define(`__DBMS__', `postgresql_13')
+m4_define(`__DBMS_VERSION__', `13.3.0')
+m4_define(`__DBMS_VERSION_MAJOR__', `13')
+m4_define(`__DBMS_VERSION_MINOR__', `3')
+m4_define(`__DBMS_VERSION_PATCH__', `0')
+m4_define(`__DBMS_ARCHITECTURE__', `x86_64')
+m4_define(`__MADLIB_VERSION__', `1.19.0-dev')
+m4_define(`__MADLIB_VERSION_MAJOR__', `1')
+m4_define(`__MADLIB_VERSION_MINOR__', `19')
+m4_define(`__MADLIB_VERSION_PATCH__', `0')
+m4_define(`__MADLIB_GIT_REVISION__', `rel/v1.18.0-9-g798de717')
+m4_define(`__MADLIB_BUILD_TIME__', `Sat Aug 21 13:35:02 UTC 2021')
+m4_define(`__MADLIB_BUILD_TYPE__', `Release')
+m4_define(`__MADLIB_BUILD_SYSTEM__', `Linux-5.4.0-77-generic')
+m4_define(`__MADLIB_C_COMPILER__', `gcc 9')
+m4_define(`__MADLIB_CXX_COMPILER__', `g++ 9')
+m4_define(`__HAS_ORDERED_AGGREGATES__')
+m4_define(`__HAS_BOOL_TO_TEXT_CAST__')
 
 /*
  * There is no way in m4 to escape the quote characters, so we change it
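
Worth noting: this checked-in SQLCommon.m4 freezes the values of one particular build (PostgreSQL 13.3.0, MADlib 1.19.0-dev, a build timestamp of Sat Aug 21 2021), where the _in template had them injected through @M4_DEFINES_CODE@ at configure time. That matches the configure_file() call being commented out under a "TODO py3" marker in src/ports/postgres/CMakeLists.txt below, so this snapshot is presumably a temporary bootstrapping measure.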
@@ -13,47 +32,6 @@
  */
 m4_changequote(<!,!>)
 
-/*
- * WithTracebackForwarding
- *
- * @param $1 python statement which might raise an exception
- *
- * Use this macro in the sql definition of a plpythonu function
- *   that runs on the segments.  If the function raises an exception,
- *   traceback information will be attached to the exception message
- *   which gets forwarded back to the coordinator.
- *
- * On the coordinator side, to attach the message to the DETAIL of the
- *   exception before displaying, you must call the segment UDF
- *   or UDA like this:
- *
- *   DEBUG.plpy_execute(sql, ..., segment_traceback_reporting=True)
- */
-m4_define(<!WithTracebackForwarding!>, <!
-    import traceback
-    from sys import exc_info
-    import plpy
-    try:
-        $1
-    except Exception as e:
-        global SD
-        global GD
-
-        for k in SD.keys():
-            del SD[k]
-        del SD
-        for k in GD.keys():
-            del GD[k]
-        del GD
-
-        etype, _, tb = exc_info()
-        detail = ''.join(traceback.format_exception(etype, e, tb))
-        message = e.message + 'SegmentTraceback' + detail
-        e.message = message
-        e.args = (message,)
-        raise e
-!>)
-
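
The removed WithTracebackForwarding macro is another Python 3 casualty: it rewrites e.message, an attribute Python 2 exceptions carried but Python 3 exceptions do not (only args survives). A hedged sketch of a py3-compatible equivalent, with do_work standing in for the macro's $1 statement (an assumption, not code from this commit):

    import traceback

    def with_traceback_forwarding(do_work):
        try:
            do_work()
        except Exception as e:
            detail = ''.join(traceback.format_exception(
                type(e), e, e.__traceback__))
            # py3 exceptions have no .message; rebuild args instead
            e.args = (str(e) + 'SegmentTraceback' + detail,)
            raise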
 /*
  * PythonFunction
  *
diff --git a/src/ports/postgres/CMakeLists.txt b/src/ports/postgres/CMakeLists.txt
index 3bec9543..22941b4c 100644
--- a/src/ports/postgres/CMakeLists.txt
+++ b/src/ports/postgres/CMakeLists.txt
@@ -2,6 +2,9 @@
 # PostgreSQL Port
 # ------------------------------------------------------------------------------
 
+message(STATUS "!!TODO Platform run in src/ports/postgres/CMakeLists.txt")
+message(STATUS "!!TODO Platform 's CMAKE_CURRENT_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}")
+
 set(PORT "PostgreSQL")
 string(TOUPPER ${PORT} PORT_UC)
 string(TOLOWER ${PORT} PORT_LC)
@@ -9,6 +12,8 @@ set(PORT_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}")
 get_filename_component(PORT_DIR_NAME "${PORT_SOURCE_DIR}" NAME)
 set(PORT_DEPLOY_SCRIPT "${CMAKE_BINARY_DIR}/deploy/Component_${PORT}.cmake")
 
+message(STATUS "!!TODO Platform 's PORT_DEPLOY_SCRIPT ${PORT_DEPLOY_SCRIPT}")
+
 list(APPEND CMAKE_MODULE_PATH
     ${CMAKE_CURRENT_SOURCE_DIR}/cmake)
 include(PostgreSQLUtils)
@@ -171,6 +176,7 @@ function(add_${PORT_LC}_library IN_PORT_VERSION)
     define_postgresql_features(${IN_PORT_VERSION} DBMS_FEATURES)
     define_m4_macros(M4_DEFINES_CMD_LINE M4_DEFINES_CODE ${DBMS_FEATURES})
 
+    message(STATUS "!!TODO run m4 ${DBMS_FEATURES}")
     add_python_files(
         PYTHON_TARGET_FILES
         "${PORT_SOURCE_DIR}/modules"
@@ -201,10 +207,11 @@ function(add_${PORT_LC}_library IN_PORT_VERSION)
 
     # END Legacy Code
 
-    configure_file("${PORT_SOURCE_DIR}/madpack/SQLCommon.m4_in"
-        "${CMAKE_CURRENT_BINARY_DIR}/madpack/SQLCommon.m4"
-        @ONLY
-    )
+    # TODO py3
+    #configure_file("${PORT_SOURCE_DIR}/madpack/SQLCommon.m4_in"
+    #    "${CMAKE_CURRENT_BINARY_DIR}/madpack/SQLCommon.m4"
+    #    @ONLY
+    #)
 
     add_custom_target(pythonFiles_${DBMS} ALL
         DEPENDS ${PYTHON_TARGET_FILES})
@@ -242,6 +249,9 @@ macro(add_current_${PORT_LC}_version)
     get_filename_component(_VERSION "${CMAKE_CURRENT_SOURCE_DIR}" NAME)
     string(REPLACE "." "_" _VERSION_UNDERSCORES ${_VERSION})
 
+    # TODO
+    # set(PostgreSQL_ADDITIONAL_VERSIONS _VERSION_UNDERSCORES)
+    # find_package(${PORT})
     find_package(${PORT}_${_VERSION_UNDERSCORES})
     if(${PORT_UC}_${_VERSION_UNDERSCORES}_FOUND)
         add_postgresql_library(${_VERSION})
diff --git a/src/ports/postgres/dbconnector/SystemInformation_impl.hpp b/src/ports/postgres/dbconnector/SystemInformation_impl.hpp
index f7190b45..3d88e33a 100644
--- a/src/ports/postgres/dbconnector/SystemInformation_impl.hpp
+++ b/src/ports/postgres/dbconnector/SystemInformation_impl.hpp
@@ -4,6 +4,11 @@
  *
  *//* ----------------------------------------------------------------------- */
 
+extern "C"{
+    #include <common/hashfn.h>
+    extern uint32 uint32_hash(const void *key, Size keysize);
+}
+
 #ifndef MADLIB_POSTGRES_SYSTEMINFORMATION_IMPL_HPP
 #define MADLIB_POSTGRES_SYSTEMINFORMATION_IMPL_HPP
 
@@ -27,7 +32,7 @@ initializeOidHashTable(HTAB*& ioHashTable, MemoryContext inCacheContext,
         HASHCTL ctl;
         ctl.keysize = sizeof(Oid);
         ctl.entrysize = inEntrySize;
-        ctl.hash = oid_hash;
+        ctl.hash = uint32_hash;
         ctl.hcxt = inCacheContext;
         ioHashTable = madlib_hash_create(
             /* tabname -- a name for the table (for debugging purposes) */
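
PostgreSQL 13 moved the generic hash helpers into common/hashfn.h, which appears to be what forces both the new extern "C" include above and the swap from the retired oid_hash symbol to uint32_hash; with keysize = sizeof(Oid) (4 bytes), uint32_hash is the drop-in replacement for hashing Oid keys.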
diff --git a/src/ports/postgres/extension/madlib.control_in b/src/ports/postgres/extension/madlib.control_in
index ba0027fe..e894e568 100644
--- a/src/ports/postgres/extension/madlib.control_in
+++ b/src/ports/postgres/extension/madlib.control_in
@@ -1,5 +1,5 @@
 default_version = '@MADLIB_VERSION_MAJOR@.@MADLIB_VERSION_MINOR@.@MADLIB_VERSION_PATCH@'
 comment = 'A scalable in-database analytics library'
 relocatable = false
-module_pathname = 'libmadlib.so'
-requires = 'plpythonu'
+module_pathname = '$libdir/libmadlib.so'
+requires = 'plpython3u'
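This control-file change is the extension-level counterpart of the LANGUAGE plpythonu → plpython3u rewrites that dominate the rest of the commit: CREATE EXTENSION madlib now depends on the Python 3 procedural language, and module_pathname gains an explicit $libdir prefix so the shared library is resolved from the server's library directory.
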
diff --git a/src/ports/postgres/madpack/SQLCommon.m4_in b/src/ports/postgres/madpack/SQLCommon.m4_in
index 4a7c0420..d46254bc 100644
--- a/src/ports/postgres/madpack/SQLCommon.m4_in
+++ b/src/ports/postgres/madpack/SQLCommon.m4_in
@@ -68,7 +68,7 @@ m4_define(<!WithTracebackForwarding!>, <!
  *     "indepColumn" VARCHAR)
  * RETURNS DOUBLE PRECISION[]
  * AS $$PythonFunction(regress, logistic, compute_logregr_coef)$$
- * LANGUAGE plpythonu VOLATILE;
+ * LANGUAGE plpython3u VOLATILE;
  */
 m4_define(<!PythonFunction!>, <!
     import sys
diff --git a/src/ports/postgres/modules/assoc_rules/assoc_rules.sql_in b/src/ports/postgres/modules/assoc_rules/assoc_rules.sql_in
index 73329728..c3cbceff 100644
--- a/src/ports/postgres/modules/assoc_rules/assoc_rules.sql_in
+++ b/src/ports/postgres/modules/assoc_rules/assoc_rules.sql_in
@@ -592,7 +592,7 @@ AS $$
                                        max_rhs_size
                                        );
 
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.assoc_rules
@@ -626,7 +626,7 @@ AS $$
                                        None
                                        );
 
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 
@@ -660,7 +660,7 @@ AS $$
                                        None
                                        );
 
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 
@@ -697,7 +697,7 @@ AS $$
                                        None,
                                        None);
 
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 
@@ -728,7 +728,7 @@ AS $$
                                        10,
                                        None,
                                        None);
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 --------------------------------------------------------------------------
@@ -736,7 +736,7 @@ m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.assoc_rules(message TEXT)
 RETURNS text AS $$
 PythonFunction(assoc_rules, assoc_rules, assoc_rules_help_message)
-$$ language plpythonu
+$$ language plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.assoc_rules()
diff --git a/src/ports/postgres/modules/bayes/bayes.py_in b/src/ports/postgres/modules/bayes/bayes.py_in
index b97929f9..a6aa8837 100644
--- a/src/ports/postgres/modules/bayes/bayes.py_in
+++ b/src/ports/postgres/modules/bayes/bayes.py_in
@@ -272,7 +272,7 @@ def __get_feature_probs_sql(**kwargs):
         ) p
         """
 
-    if kwargs.has_key("numericAttrsColumnIndices") \
+    if "numericAttrsColumnIndices" in kwargs \
             and kwargs['numericAttrsColumnIndices'].lower() != 'array[]':
         sql += """
         WHERE NOT EXISTS (
@@ -668,13 +668,13 @@ def create_prepared_data(**kwargs):
     @param numericFeatureStatsDestName Name of the statistics table for numeric attributes
 
     """
-    if not kwargs.has_key('numAttrs'):
+    if 'numAttrs' not in kwargs:
         plpy.error("'numAttrs' must be provided")
 
-    if not kwargs.has_key('trainingSource'):
+    if 'trainingSource' not in kwargs:
         plpy.error("'trainingSource' must be provided")
 
-    if not kwargs.has_key('trainingAttrColumn'):
+    if 'trainingAttrColumn' not in kwargs:
         plpy.error("'trainingAttrColumn' must be provided")
 
     __verify_attr_num(
@@ -683,12 +683,12 @@ def create_prepared_data(**kwargs):
         kwargs["numAttrs"])
 
     # if only one of these is present, error out
-    if kwargs.has_key('numericAttrsColumnIndices') ^ kwargs.has_key('numericFeatureStatsDestName'):
+    if ('numericAttrsColumnIndices' in kwargs) ^ ('numericFeatureStatsDestName' in kwargs):
         plpy.error("Both 'numericAttrsColumnIndices' and 'numericFeatureStatsDestName' must be provided");
 
     # verify if the attributes specified in the numericAttrsColumnIndices are
     # really numeric in nature. TODO
-    if kwargs.has_key('numericAttrsColumnIndices') and kwargs.has_key('numericFeatureStatsDestName'):
+    if 'numericAttrsColumnIndices' in kwargs and 'numericFeatureStatsDestName' in kwargs:
         __verify_numeric_attr_type(
             kwargs["trainingSource"],
             kwargs["trainingAttrColumn"],
@@ -752,7 +752,7 @@ def create_prepared_data(**kwargs):
     # Create the attr-mean-var-class table (for later use in computing probs).
     # This can be directly computed using the sql query. no need for intermediate table
     # Also update here the pointer to this globally created table
-    if kwargs.has_key('numericAttrsColumnIndices') and kwargs.has_key('numericFeatureStatsDestName'):
+    if 'numericAttrsColumnIndices' in kwargs and 'numericFeatureStatsDestName' in kwargs:
         plpy.execute("""
             CREATE {whatToCreate} {numericFeatureStatsDestName}
             AS
@@ -791,7 +791,7 @@ def create_prepared_data(**kwargs):
             DROP TABLE {attrValuesSource};
             """.format(**kwargs))
 
-    if kwargs.has_key('numericAttrsColumnIndices') and kwargs.has_key('numericFeatureStatsDestName'):
+    if 'numericAttrsColumnIndices' in kwargs and 'numericFeatureStatsDestName' in kwargs:
         if kwargs['whatToCreate'] == 'TABLE':
             plpy.execute("""
                 ALTER TABLE {numericFeatureStatsDestName} ADD PRIMARY KEY (class, attr);
@@ -862,10 +862,10 @@ def create_classification(**kwargs):
     """
     __init_prepared_data(kwargs)
 
-    if kwargs.has_key('trainingSource') <> kwargs.has_key('trainingAttrColumn'):
+    if ('trainingSource' in kwargs) != ('trainingAttrColumn' in kwargs):
         plpy.error("'trainingSource' and 'trainingAttrColumn' must be provided together")
 
-    if not kwargs.has_key('numAttrs'):
+    if 'numAttrs' not in kwargs:
         plpy.error("'numAttrs' must be provided")
 
     if 'trainingSource' in kwargs:
@@ -874,10 +874,10 @@ def create_classification(**kwargs):
             kwargs["trainingAttrColumn"],
             kwargs["numAttrs"])
 
-    if not kwargs.has_key('classifySource'):
+    if 'classifySource' not in kwargs:
         plpy.error("'classifySource' must be provided")
 
-    if not kwargs.has_key('classifyAttrColumn'):
+    if 'classifyAttrColumn' not in kwargs:
         plpy.error("'classifyAttrColumn' must be provided")
 
     __verify_attr_num(
@@ -891,7 +891,7 @@ def create_classification(**kwargs):
         )
 
     # if there are any numeric attributes in the test data, compute their probabilities as well.
-    if kwargs.has_key('numericFeatureStatsSource'):
+    if 'numericFeatureStatsSource' in kwargs:
         kwargs.update(dict(
             numeric_attrs_keys_and_prob_values = "(" + __get_keys_and_probs_values_numeric_attrs_sql(**kwargs) + ")"
             ))
@@ -1026,10 +1026,10 @@ where P(A = a | C = c) = ||  P(A_i = a_i | C = c).
 
     __init_prepared_data(kwargs)
 
-    if kwargs.has_key('trainingSource') <> kwargs.has_key('trainingAttrColumn'):
+    if ('trainingSource' in kwargs) != ('trainingAttrColumn' in kwargs):
         plpy.error("'trainingSource' and 'trainingAttrColumn' must be provided together")
 
-    if not kwargs.has_key('numAttrs'):
+    if 'numAttrs' not in kwargs:
         plpy.error("'numAttrs' must be provided")
 
     if 'trainingSource' in kwargs:
@@ -1038,10 +1038,10 @@ where P(A = a | C = c) = ||  P(A_i = a_i | C = c).
             kwargs["trainingAttrColumn"],
             kwargs["numAttrs"])
 
-    if not kwargs.has_key('classifySource'):
+    if 'classifySource' not in kwargs:
         plpy.error("'classifySource' must be provided")
 
-    if not kwargs.has_key('classifyAttrColumn'):
+    if 'classifyAttrColumn' not in kwargs:
         plpy.error("'classifyAttrColumn' must be provided")
 
     __verify_attr_num(
@@ -1055,7 +1055,7 @@ where P(A = a | C = c) = ||  P(A_i = a_i | C = c).
         ))
 
     # if there are any numeric attributes in the test data, compute their probabilities as well.
-    if kwargs.has_key('numericFeatureStatsSource'):
+    if 'numericFeatureStatsSource' in kwargs:
         kwargs.update(dict(
             numeric_attrs_keys_and_prob_values = "(" + __get_keys_and_probs_values_numeric_attrs_sql(**kwargs) + ")"
             ))
@@ -1143,7 +1143,7 @@ def __init_prepared_data(kwargs):
         kwargs.update(dict(
                 classPriorsSource = "(" + __get_class_priors_sql(**kwargs) + ")"
             ))
-    if kwargs.has_key('numericAttrsColumnIndices') and 'numericFeatureStatsSource' not in kwargs:
+    if 'numericAttrsColumnIndices' in kwargs and 'numericFeatureStatsSource' not in kwargs:
         # sanity check the indices before executing any sql query
         __verify_numeric_attr_type(
             kwargs["trainingSource"],
@@ -1179,7 +1179,7 @@ def __verify_attr_num(sourceTable, attrColumn, numAttr):
         )
 
     dsize = result[0]['size']
-    if (dsize <> 0):
+    if (dsize != 0):
         plpy.error('found %d records in "%s" where "%s" was not of expected length (%d)'\
             % (dsize, sourceTable, attrColumn, numAttr))
 
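bayes.py_in needed more than string fixes: dict.has_key() and the <> operator were both removed in Python 3. The in test reads the same either way, and ^ still acts as boolean XOR over the two membership checks. A compact restatement with toy kwargs (not MADlib's):

    kwargs = {'trainingSource': 't', 'trainingAttrColumn': 'a'}
    assert 'trainingSource' in kwargs      # was: kwargs.has_key('trainingSource')
    assert not (('trainingSource' in kwargs) ^ ('trainingAttrColumn' in kwargs))
    assert 1 != 2                          # was: 1 <> 2, a SyntaxError on py3
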
diff --git a/src/ports/postgres/modules/bayes/bayes.sql_in b/src/ports/postgres/modules/bayes/bayes.sql_in
index 9121cc10..f4591353 100644
--- a/src/ports/postgres/modules/bayes/bayes.sql_in
+++ b/src/ports/postgres/modules/bayes/bayes.sql_in
@@ -708,7 +708,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.create_nb_prepared_data_tables(
     "classPriorsDestName" VARCHAR)
 RETURNS VOID
 AS $$PythonFunction(bayes, bayes, create_prepared_data_table)$$
-LANGUAGE plpythonu VOLATILE
+LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.create_nb_prepared_data_tables(
@@ -722,7 +722,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.create_nb_prepared_data_tables(
     "classPriorsDestName" VARCHAR)
 RETURNS VOID
 AS $$PythonFunction(bayes, bayes, create_prepared_data_table)$$
-LANGUAGE plpythonu VOLATILE
+LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 
@@ -776,7 +776,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.create_nb_classify_view(
     "destName" VARCHAR)
 RETURNS VOID
 AS $$PythonFunction(bayes, bayes, create_classification_view)$$
-LANGUAGE plpythonu VOLATILE
+LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 /* API with numeric attributes and pre-trained data */
@@ -791,7 +791,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.create_nb_classify_view(
     "destName" VARCHAR)
 RETURNS VOID
 AS $$PythonFunction(bayes, bayes, create_classification_view)$$
-LANGUAGE plpythonu VOLATILE
+LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 /* API without numeric attrs but ad-hoc computation */
@@ -806,7 +806,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.create_nb_classify_view(
     "destName" VARCHAR)
 RETURNS VOID
 AS $$PythonFunction(bayes, bayes, create_classification_view)$$
-LANGUAGE plpythonu VOLATILE
+LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 /* API with numeric attrs but ad-hoc computation */
@@ -822,7 +822,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.create_nb_classify_view(
     "destName" VARCHAR)
 RETURNS VOID
 AS $$PythonFunction(bayes, bayes, create_classification_view)$$
-LANGUAGE plpythonu VOLATILE
+LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 /**
@@ -878,7 +878,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.create_nb_probs_view(
     "destName" VARCHAR)
 RETURNS VOID
 AS $$PythonFunction(bayes, bayes, create_bayes_probabilities_view)$$
-LANGUAGE plpythonu VOLATILE
+LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 /* API with support for numeric attributes and with support for pre-computed data */
@@ -893,7 +893,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.create_nb_probs_view(
     "destName" VARCHAR)
 RETURNS VOID
 AS $$PythonFunction(bayes, bayes, create_bayes_probabilities_view)$$
-LANGUAGE plpythonu VOLATILE
+LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 /* API without support for numeric attributes and without support for pre-computed data */
@@ -908,7 +908,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.create_nb_probs_view(
     "destName" VARCHAR)
 RETURNS VOID
 AS $$PythonFunction(bayes, bayes, create_bayes_probabilities_view)$$
-LANGUAGE plpythonu VOLATILE
+LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 /* API with support for numeric attributes but without support for pre-computed data */
@@ -924,5 +924,5 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.create_nb_probs_view(
     "destName" VARCHAR)
 RETURNS VOID
 AS $$PythonFunction(bayes, bayes, create_bayes_probabilities_view)$$
-LANGUAGE plpythonu VOLATILE
+LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
diff --git a/src/ports/postgres/modules/convex/lmf.sql_in b/src/ports/postgres/modules/convex/lmf.sql_in
index e608d0a1..5f506ecc 100644
--- a/src/ports/postgres/modules/convex/lmf.sql_in
+++ b/src/ports/postgres/modules/convex/lmf.sql_in
@@ -322,7 +322,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.internal_compute_lmf_igd(
     col_value       VARCHAR)
 RETURNS INTEGER
 AS $$PythonFunction(convex, lmf_igd, compute_lmf_igd)$$
-LANGUAGE plpythonu VOLATILE
+LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 /**
diff --git a/src/ports/postgres/modules/convex/mlp.sql_in b/src/ports/postgres/modules/convex/mlp.sql_in
index bac7d6ce..ee61e5d2 100644
--- a/src/ports/postgres/modules/convex/mlp.sql_in
+++ b/src/ports/postgres/modules/convex/mlp.sql_in
@@ -1646,7 +1646,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.mlp_classification(
                     warm_start,
                     verbose,
                     grouping_col)
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.mlp_regression(
@@ -1677,7 +1677,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.mlp_regression(
                     warm_start,
                     verbose,
                     grouping_col )
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.mlp_classification(
@@ -1871,7 +1871,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.mlp_predict(
                             id_col_name,
                             output_table,
                             pred_type)
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE FUNCTION MADLIB_SCHEMA.internal_predict_mlp(
@@ -1895,7 +1895,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.mlp_classification(
 ) RETURNS TEXT AS $$
     PythonFunctionBodyOnly(`convex', `mlp_igd')
     return mlp_igd.mlp_help(schema_madlib,message,True)
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.mlp_classification()
@@ -1909,7 +1909,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.mlp_regression(
 ) RETURNS TEXT AS $$
     PythonFunctionBodyOnly(`convex', `mlp_igd')
     return mlp_igd.mlp_help(schema_madlib,message,False)
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.mlp_regression()
@@ -1923,7 +1923,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.mlp_predict(
 ) RETURNS TEXT AS $$
     PythonFunctionBodyOnly(`convex', `mlp_igd')
     return mlp_igd.mlp_predict_help(schema_madlib,message)
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.mlp_predict()
diff --git a/src/ports/postgres/modules/convex/mlp_igd.py_in b/src/ports/postgres/modules/convex/mlp_igd.py_in
index 7e83727b..13bbc878 100644
--- a/src/ports/postgres/modules/convex/mlp_igd.py_in
+++ b/src/ports/postgres/modules/convex/mlp_igd.py_in
@@ -398,7 +398,7 @@ def mlp(schema_madlib, source_table, output_table, independent_varname,
                     # used, it will be an empty list if there was no grouping.
                     groups = [t[col_grp_key] for t in res if t[col_grp_key]]
                     losses = [t['loss'] for t in res]
-                    loss = zip(groups, losses) if groups else losses
+                    loss = list(zip(groups, losses)) if groups else losses
                     plpy.info("Iteration: {0}, Loss: <{1}>".
                               format(it.iteration, ', '.join(map(str, loss))))
             it.final()
@@ -701,8 +701,8 @@ def _get_optimizer_params(param_str):
         "beta2": ("NULL", float),
         "eps": ("NULL", float)
     }
-    param_defaults = dict([(k, v[0]) for k, v in params_defaults.items()])
-    param_types = dict([(k, v[1]) for k, v in params_defaults.items()])
+    param_defaults = dict([(k, v[0]) for k, v in list(params_defaults.items())])
+    param_types = dict([(k, v[1]) for k, v in list(params_defaults.items())])
 
     if not param_str:
         return param_defaults
@@ -748,8 +748,7 @@ def _validate_warm_start(output_table, summary_table, standardization_table,
                       param)
     output = plpy.execute("SELECT * FROM {0}".format(output_table))
     num_coeffs = sum(
-        map(lambda i: (layer_sizes[i] + 1) * (layer_sizes[i + 1]),
-            range(len(layer_sizes) - 1)))
+        [(layer_sizes[i] + 1) * (layer_sizes[i + 1]) for i in range(len(layer_sizes) - 1)])
     for row in output:
         coeff = row['coeff']
         _assert_equal(num_coeffs,
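
[The list(zip(...)) wrapper above matters because zip returns a one-shot
iterator in Python 3; any code that walks the pairs twice would find them
empty on the second pass. A small sketch of the hazard, generic Python rather
than the patched function:

    groups, losses = ['g1', 'g2'], [0.5, 0.25]
    pairs = zip(groups, losses)          # lazy iterator in Python 3
    print(', '.join(map(str, pairs)))    # first pass consumes it
    assert list(pairs) == []             # second pass sees nothing
]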
diff --git a/src/ports/postgres/modules/convex/test/unit_tests/plpy_mock.py_in b/src/ports/postgres/modules/convex/test/unit_tests/plpy_mock.py_in
index dd186490..9b3c128d 100644
--- a/src/ports/postgres/modules/convex/test/unit_tests/plpy_mock.py_in
+++ b/src/ports/postgres/modules/convex/test/unit_tests/plpy_mock.py_in
@@ -31,7 +31,7 @@ def warning(query):
     pass
 
 def info(query):
-    print query
+    print(query)
 
 
 class PLPYException(Exception):
diff --git a/src/ports/postgres/modules/crf/crf.sql_in b/src/ports/postgres/modules/crf/crf.sql_in
index 9f12f48f..acb1612b 100644
--- a/src/ports/postgres/modules/crf/crf.sql_in
+++ b/src/ports/postgres/modules/crf/crf.sql_in
@@ -739,7 +739,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.compute_lincrf(
     "maxNumIterations" INTEGER)
 RETURNS INTEGER
 AS $$PythonFunction(crf, crf, compute_lincrf)$$
-LANGUAGE plpythonu VOLATILE
+LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 /**
@@ -780,7 +780,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.lincrf_train(
      max_iterations         INTEGER /* DEFAULT 20 */
 ) RETURNS TEXT AS $$
 PythonFunction(crf, crf, lincrf_train)
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.lincrf_train(
diff --git a/src/ports/postgres/modules/crf/crf_data_loader.sql_in b/src/ports/postgres/modules/crf/crf_data_loader.sql_in
index cd9b4a4d..f1e15923 100644
--- a/src/ports/postgres/modules/crf/crf_data_loader.sql_in
+++ b/src/ports/postgres/modules/crf/crf_data_loader.sql_in
@@ -167,7 +167,7 @@ $$
                 "CREATE TABLE MADLIB_SCHEMA.crf_feature_dic(f_index integer, f_name text, feature integer[]);"
         plpy.execute(query);
 
-$$ LANGUAGE plpythonu STRICT
+$$ LANGUAGE plpython3u STRICT
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 
@@ -224,9 +224,9 @@ $$
 	        "CREATE TABLE MADLIB_SCHEMA.viterbi_mtbl (score integer[])";
 	plpy.execute(query);
 
-        # import tokenized document to the segment table
-        query = "COPY MADLIB_SCHEMA.test_segmenttbl (start_pos,doc_id,seg_text,max_pos) FROM '" + datapath + "/crf_testdata.tab'";
-        plpy.execute(query);
+	# import tokenized document to the segment table
+	query = "COPY MADLIB_SCHEMA.test_segmenttbl (start_pos,doc_id,seg_text,max_pos) FROM '" + datapath + "/crf_testdata.tab'";
+	plpy.execute(query);
 
-$$ language plpythonu STRICT
+$$ language plpython3u STRICT
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
diff --git a/src/ports/postgres/modules/crf/crf_feature_gen.sql_in b/src/ports/postgres/modules/crf/crf_feature_gen.sql_in
index c65786f5..e554158d 100644
--- a/src/ports/postgres/modules/crf/crf_feature_gen.sql_in
+++ b/src/ports/postgres/modules/crf/crf_feature_gen.sql_in
@@ -31,7 +31,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.crf_train_fgen(
         train_featureset_tbl text
 ) RETURNS void AS $$
 PythonFunction(crf, crf_feature_gen, generate_train_features)
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 /**
@@ -94,5 +94,5 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.crf_test_fgen(
         viterbi_rtbl text
 ) RETURNS VOID AS $$
 PythonFunction(crf, crf_feature_gen, generate_test_features)
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
diff --git a/src/ports/postgres/modules/crf/viterbi.sql_in b/src/ports/postgres/modules/crf/viterbi.sql_in
index d3c20cd6..02dfb6ef 100644
--- a/src/ports/postgres/modules/crf/viterbi.sql_in
+++ b/src/ports/postgres/modules/crf/viterbi.sql_in
@@ -24,7 +24,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.vcrf_top1_table(
     result_tbl      TEXT
 ) returns TEXT AS $$
 PythonFunction(crf, viterbi, vcrf_top1_table)
-$$ LANGUAGE plpythonu strict
+$$ LANGUAGE plpython3u strict
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 /**
@@ -57,5 +57,5 @@ MADLIB_SCHEMA.vcrf_label(
     result_tbl TEXT
 )   RETURNS TEXT AS $$
 PythonFunction(crf, viterbi, vcrf_label)
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
diff --git a/src/ports/postgres/modules/dbscan/dbscan.sql_in b/src/ports/postgres/modules/dbscan/dbscan.sql_in
index 0cfac3d8..a0bd4e16 100644
--- a/src/ports/postgres/modules/dbscan/dbscan.sql_in
+++ b/src/ports/postgres/modules/dbscan/dbscan.sql_in
@@ -413,8 +413,8 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.dbscan(
     max_segmentation_depth      INTEGER
 ) RETURNS VOID AS $$
     PythonFunction(dbscan, dbscan, dbscan)
-$$ LANGUAGE plpythonu VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `MODIFIES SQL DATA', `');
+$$ LANGUAGE plpython3u VOLATILE
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.dbscan(
     source_table                VARCHAR,
@@ -428,7 +428,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.dbscan(
 ) RETURNS VOID AS $$
     SELECT MADLIB_SCHEMA.dbscan($1, $2, $3, $4, $5, $6, $7, $8, NULL);
 $$ LANGUAGE sql VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `MODIFIES SQL DATA', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.dbscan(
     source_table                VARCHAR,
@@ -453,7 +453,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.dbscan(
 ) RETURNS VOID AS $$
     SELECT MADLIB_SCHEMA.dbscan($1, $2, $3, $4, $5, $6, NULL, NULL, NULL);
 $$ LANGUAGE sql VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `MODIFIES SQL DATA', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.dbscan(
     source_table                VARCHAR,
@@ -464,7 +464,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.dbscan(
 ) RETURNS VOID AS $$
     SELECT MADLIB_SCHEMA.dbscan($1, $2, $3, $4, $5, NULL, NULL, NULL, NULL);
 $$ LANGUAGE sql VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `MODIFIES SQL DATA', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.dbscan_predict(
     dbscan_table                VARCHAR,
@@ -474,34 +474,34 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.dbscan_predict(
     output_table                VARCHAR
 ) RETURNS VOID AS $$
     PythonFunction(dbscan, dbscan, dbscan_predict)
-$$ LANGUAGE plpythonu VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `MODIFIES SQL DATA', `');
+$$ LANGUAGE plpython3u VOLATILE
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.dbscan(
     message                VARCHAR
 ) RETURNS VARCHAR AS $$
     PythonFunction(dbscan, dbscan, dbscan_help)
-$$ LANGUAGE plpythonu VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `MODIFIES SQL DATA', `');
+$$ LANGUAGE plpython3u VOLATILE
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.dbscan(
 ) RETURNS VARCHAR AS $$
     PythonFunction(dbscan, dbscan, dbscan_help)
-$$ LANGUAGE plpythonu VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `MODIFIES SQL DATA', `');
+$$ LANGUAGE plpython3u VOLATILE
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.dbscan_predict(
     message                VARCHAR
 ) RETURNS VARCHAR AS $$
     PythonFunction(dbscan, dbscan, dbscan_predict_help)
-$$ LANGUAGE plpythonu VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `MODIFIES SQL DATA', `');
+$$ LANGUAGE plpython3u VOLATILE
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.dbscan_predict(
 ) RETURNS VARCHAR AS $$
     PythonFunction(dbscan, dbscan, dbscan_predict_help)
-$$ LANGUAGE plpythonu VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `MODIFIES SQL DATA', `');
+$$ LANGUAGE plpython3u VOLATILE
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.sf_merge(
@@ -717,7 +717,7 @@ $$
                         $6,
                         left_eps_bins,
                         all_eps_bins
-                    ), 
+                    ),
                     $7,
                     $8
                 )
diff --git a/src/ports/postgres/modules/dbscan/test/unit_tests/plpy_mock.py_in b/src/ports/postgres/modules/dbscan/test/unit_tests/plpy_mock.py_in
index dd186490..9b3c128d 100644
--- a/src/ports/postgres/modules/dbscan/test/unit_tests/plpy_mock.py_in
+++ b/src/ports/postgres/modules/dbscan/test/unit_tests/plpy_mock.py_in
@@ -31,7 +31,7 @@ def warning(query):
     pass
 
 def info(query):
-    print query
+    print(query)
 
 
 class PLPYException(Exception):
diff --git a/src/ports/postgres/modules/deep_learning/gpu_info_from_tf.py_in b/src/ports/postgres/modules/deep_learning/gpu_info_from_tf.py_in
index 6456128f..7a8c2794 100644
--- a/src/ports/postgres/modules/deep_learning/gpu_info_from_tf.py_in
+++ b/src/ports/postgres/modules/deep_learning/gpu_info_from_tf.py_in
@@ -26,7 +26,7 @@ for more details.
 
 import tensorflow as tf
 from tensorflow.python.client import device_lib
-from tensorflow.keras import backend as K
+from keras import backend as K
 
 config = tf.ConfigProto()
 config.gpu_options.allow_growth = True
@@ -37,4 +37,4 @@ sess.close()
 if local_device_protos:
     for x in local_device_protos:
         if x.device_type == 'GPU':
-            print x.physical_device_desc
+            print(x.physical_device_desc)
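
[Switching from tensorflow.keras to the standalone keras package assumes that
package is installed alongside TensorFlow. A hedged sketch of a tolerant
import, shown only for illustration and not part of this patch:

    try:
        from keras import backend as K              # standalone Keras package
    except ImportError:
        from tensorflow.keras import backend as K   # TF-bundled fallback
]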
diff --git a/src/ports/postgres/modules/deep_learning/input_data_preprocessor.py_in b/src/ports/postgres/modules/deep_learning/input_data_preprocessor.py_in
index 21838a3b..c2aaaba4 100644
--- a/src/ports/postgres/modules/deep_learning/input_data_preprocessor.py_in
+++ b/src/ports/postgres/modules/deep_learning/input_data_preprocessor.py_in
@@ -24,6 +24,7 @@
 """
 from math import ceil
 import plpy
+import time
 
 from internal.db_utils import get_distinct_col_levels
 from internal.db_utils import quote_literal
@@ -47,14 +48,16 @@ from utilities.utilities import get_seg_number
 from utilities.validate_args import input_tbl_valid
 from utilities.validate_args import get_expr_type
 
-from madlib_keras_helper import *
-import time
+from deep_learning.madlib_keras_helper import *
 
 NUM_CLASSES_COLNAME = "num_classes"
+
+
 class DistributionRulesOptions:
     ALL_SEGMENTS = 'all_segments'
     GPU_SEGMENTS = 'gpu_segments'
 
+
 class InputDataPreprocessorDL(object):
     def __init__(self, schema_madlib, source_table, output_table,
                  dependent_varname, independent_varname, buffer_size,
@@ -217,7 +220,6 @@ class InputDataPreprocessorDL(object):
                     shape = shape + self._get_var_shape(dep)
         return shape
 
-
     def input_preprocessor_dl(self, order_by_random=True):
         """
             Creates the output and summary table that does the following
@@ -266,7 +268,6 @@ class InputDataPreprocessorDL(object):
                 rescale_independent_var.append("{i}::{float32}[] AS {i}_norm".format(**locals()))
         rescale_independent_var = ', '.join(rescale_independent_var)
 
-
         # It's important that we shuffle all rows before batching for fit(), but
         #  we can skip that for predict()
         order_by_clause = " ORDER BY RANDOM()" if order_by_random else ""
@@ -286,7 +287,7 @@ class InputDataPreprocessorDL(object):
                 {self.schema_madlib}.array_to_bytea({i}) AS {i}
                 """.format(**locals()))
 
-        for i,j in zip(self.dependent_varname, dep_shape):
+        for i, j in zip(self.dependent_varname, dep_shape):
             concat_sql.append("""
                 {self.schema_madlib}.agg_array_concat(ARRAY[{i}]) AS {i}
                 """.format(**locals()))
@@ -386,7 +387,7 @@ class InputDataPreprocessorDL(object):
             all_segments = False
 
         if self.distribution_rules == DistributionRulesOptions.GPU_SEGMENTS:
-            #TODO can we reuse the function `get_accessible_gpus_for_seg` from
+            # TODO can we reuse the function `get_accessible_gpus_for_seg` from
             # madlib_keras_helper
             gpu_info_table = unique_string(desp='gpu_info')
             plpy.execute("""
diff --git a/src/ports/postgres/modules/deep_learning/input_data_preprocessor.sql_in b/src/ports/postgres/modules/deep_learning/input_data_preprocessor.sql_in
index 447ee215..e5431084 100644
--- a/src/ports/postgres/modules/deep_learning/input_data_preprocessor.sql_in
+++ b/src/ports/postgres/modules/deep_learning/input_data_preprocessor.sql_in
@@ -891,7 +891,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.validation_preprocessor_dl(
         with MinWarning('error'):
             validation_preprocessor_obj = input_data_preprocessor.ValidationDataPreprocessorDL(**globals())
             validation_preprocessor_obj.validation_preprocessor_dl()
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.validation_preprocessor_dl(
@@ -899,7 +899,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.validation_preprocessor_dl(
 ) RETURNS VARCHAR AS $$
     PythonFunctionBodyOnly(deep_learning, input_data_preprocessor)
     return input_data_preprocessor.InputDataPreprocessorDocumentation.validation_preprocessor_dl_help(schema_madlib, message)
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 -------------------------------------------------------------------------------
@@ -921,7 +921,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.training_preprocessor_dl(
         with MinWarning('error'):
             training_preprocessor_obj = input_data_preprocessor.TrainingDataPreprocessorDL(**globals())
             training_preprocessor_obj.training_preprocessor_dl()
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.training_preprocessor_dl(
@@ -929,7 +929,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.training_preprocessor_dl(
 ) RETURNS VARCHAR AS $$
     PythonFunctionBodyOnly(deep_learning, input_data_preprocessor)
     return input_data_preprocessor.InputDataPreprocessorDocumentation.training_preprocessor_dl_help(schema_madlib, message)
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 -- aggregation for independent var
@@ -955,7 +955,7 @@ $$
 import numpy as np
 
 return np.array(var, dtype=np.float32).tobytes()
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 CREATE FUNCTION MADLIB_SCHEMA.convert_array_to_bytea(var SMALLINT[])
 RETURNS BYTEA
@@ -964,7 +964,7 @@ $$
 import numpy as np
 
 return np.array(var, dtype=np.int16).tobytes()
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 
 CREATE FUNCTION MADLIB_SCHEMA.convert_bytea_to_real_array(var BYTEA)
@@ -974,7 +974,7 @@ $$
 import numpy as np
 
 return np.frombuffer(var, dtype=np.float32)
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 
 CREATE FUNCTION MADLIB_SCHEMA.convert_bytea_to_smallint_array(var BYTEA)
@@ -984,5 +984,5 @@ $$
 import numpy as np
 
 return np.frombuffer(var, dtype=np.int16)
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
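
[The bytea helpers above are thin wrappers over numpy's raw-byte round trip;
a self-contained sketch of the same conversion:

    import numpy as np

    a = np.array([1.5, 2.5], dtype=np.float32)
    raw = a.tobytes()                         # what convert_array_to_bytea returns
    b = np.frombuffer(raw, dtype=np.float32)  # what convert_bytea_to_real_array does
    assert (a == b).all()
]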
 
diff --git a/src/ports/postgres/modules/deep_learning/keras_model_arch_table.py_in b/src/ports/postgres/modules/deep_learning/keras_model_arch_table.py_in
index 0d6fc7cf..1e755a83 100644
--- a/src/ports/postgres/modules/deep_learning/keras_model_arch_table.py_in
+++ b/src/ports/postgres/modules/deep_learning/keras_model_arch_table.py_in
@@ -34,6 +34,7 @@ from utilities.validate_args import input_tbl_valid
 from utilities.validate_args import quote_ident
 from utilities.validate_args import table_exists
 
+
 class ModelArchSchema:
     """Expected format of keras_model_arch_table.
        Example uses:
@@ -59,6 +60,7 @@ class ModelArchSchema:
     (MODEL_ID, MODEL_ARCH, MODEL_WEIGHTS, NAME, DESCRIPTION,
      __INTERNAL_MADLIB_ID__) = col_names
 
+
 @MinWarning("error")
 def load_keras_model(keras_model_arch_table, model_arch, model_weights,
                      name, description, **kwargs):
@@ -100,6 +102,7 @@ def load_keras_model(keras_model_arch_table, model_arch, model_weights,
     plpy.info("Keras Model Arch: Added model id {0} to {1} table".
               format(select_res[0][ModelArchSchema.MODEL_ID], model_arch_table))
 
+
 @MinWarning("error")
 def delete_keras_model(keras_model_arch_table, model_id, **kwargs):
     model_arch_table = quote_ident(keras_model_arch_table)
@@ -130,6 +133,7 @@ def delete_keras_model(keras_model_arch_table, model_id, **kwargs):
         sql = "DROP TABLE {0}".format(model_arch_table)
         plpy.execute(sql, 0)
 
+
 class KerasModelArchDocumentation:
     @staticmethod
     def _returnHelpMsg(schema_madlib, message, summary, usage, method):
diff --git a/src/ports/postgres/modules/deep_learning/keras_model_arch_table.sql_in b/src/ports/postgres/modules/deep_learning/keras_model_arch_table.sql_in
index 0c099e0b..416ec9f7 100644
--- a/src/ports/postgres/modules/deep_learning/keras_model_arch_table.sql_in
+++ b/src/ports/postgres/modules/deep_learning/keras_model_arch_table.sql_in
@@ -162,9 +162,9 @@ delete_keras_model(
 -# Define model architecture.  Use tensorflow.keras to define
 the model architecture:
 <pre class="example">
-from tensorflow import keras
-from tensorflow.keras.models import Sequential
-from tensorflow.keras.layers import Dense
+import keras
+from keras.models import Sequential
+from keras.layers import Dense
 model_simple = Sequential()
 model_simple.add(Dense(10, activation='relu', input_shape=(4,)))
 model_simple.add(Dense(10, activation='relu'))
@@ -254,8 +254,9 @@ passed to Keras functions.
 <pre class="example">
 CREATE OR REPLACE FUNCTION load_weights() RETURNS VOID AS
 $$
-from tensorflow.keras.layers import *
-from tensorflow.keras import Sequential
+import keras
+from keras.layers import *
+from keras import Sequential
 import numpy as np
 import plpy
 \#
@@ -277,7 +278,7 @@ load_query = plpy.prepare("""SELECT madlib.load_keras_model(
                         $1, $2)
                     """, ['json','bytea'])
 plpy.execute(load_query, [model.to_json(), weights_bytea])
-$$ language plpythonu;
+$$ language plpython3u;
 -- Call load function
 SELECT load_weights();
 SELECT model_id, name, description, (model_weights IS NOT NULL) AS has_model_weights FROM model_arch_library ORDER BY model_id;
@@ -298,8 +299,8 @@ import psycopg2
 import psycopg2 as p2
 conn = p2.connect('postgresql://gpadmin@35.239.240.26:5432/madlib')
 cur = conn.cursor()
-from tensorflow.keras.layers import *
-from tensorflow.keras import Sequential
+from keras.layers import *
+from keras import Sequential
 import numpy as np
 \#
 \# create model
@@ -362,7 +363,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.load_keras_model(
     from utilities.control import AOControl
     with AOControl(False):
         keras_model_arch_table.load_keras_model(**globals())
-$$ LANGUAGE plpythonu VOLATILE;
+$$ LANGUAGE plpython3u VOLATILE;
 
 -- Function to add a keras model to arch table
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.load_keras_model(
@@ -406,14 +407,14 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.load_keras_model(
 ) RETURNS VARCHAR AS $$
     PythonFunctionBodyOnly(deep_learning, keras_model_arch_table)
     return keras_model_arch_table.KerasModelArchDocumentation.load_keras_model_help(schema_madlib, message)
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.load_keras_model()
 RETURNS VARCHAR AS $$
     PythonFunctionBodyOnly(deep_learning, keras_model_arch_table)
     return keras_model_arch_table.KerasModelArchDocumentation.load_keras_model_help(schema_madlib, '')
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 -- Function to delete a keras model from arch table
@@ -426,7 +427,7 @@ RETURNS VOID AS $$
     from utilities.control import AOControl
     with AOControl(False):
         keras_model_arch_table.delete_keras_model(**globals())
-$$ LANGUAGE plpythonu VOLATILE;
+$$ LANGUAGE plpython3u VOLATILE;
 
 -- Functions for online help
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.delete_keras_model(
@@ -434,12 +435,12 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.delete_keras_model(
 ) RETURNS VARCHAR AS $$
     PythonFunctionBodyOnly(deep_learning, keras_model_arch_table)
     return keras_model_arch_table.KerasModelArchDocumentation.delete_keras_model_help(schema_madlib, message)
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.delete_keras_model()
 RETURNS VARCHAR AS $$
     PythonFunctionBodyOnly(deep_learning, keras_model_arch_table)
     return keras_model_arch_table.KerasModelArchDocumentation.delete_keras_model_help(schema_madlib, '')
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
diff --git a/src/ports/postgres/modules/deep_learning/madlib_keras.py_in b/src/ports/postgres/modules/deep_learning/madlib_keras.py_in
index 796f7434..6acf0365 100644
--- a/src/ports/postgres/modules/deep_learning/madlib_keras.py_in
+++ b/src/ports/postgres/modules/deep_learning/madlib_keras.py_in
@@ -23,13 +23,11 @@ import plpy
 import sys
 import time
 
-from madlib_keras_helper import *
-from madlib_keras_validator import *
-from madlib_keras_wrapper import *
-from model_arch_info import *
-import tensorflow as tf
-
-from madlib_keras_model_selection import ModelSelectionSchema
+from deep_learning.madlib_keras_helper import *
+from deep_learning.madlib_keras_validator import *
+from deep_learning.madlib_keras_wrapper import *
+from deep_learning.model_arch_info import *
+from deep_learning.madlib_keras_model_selection import ModelSelectionSchema
 
 from internal.db_utils import quote_literal
 from utilities.utilities import _assert
@@ -43,17 +41,17 @@ from utilities.validate_args import quote_ident
 from utilities.validate_args import input_tbl_valid
 from utilities.control import MinWarning
 
-import tensorflow as tf
 import utilities.debug as DEBUG
 
+from keras import backend as K
+from keras.layers import *
+from keras.models import *
+from keras.optimizers import *
+from keras.regularizers import *
+
 DEBUG.timings_enabled = False
 DEBUG.plpy_info_enabled = False
 
-from tensorflow.keras import backend as K
-from tensorflow.keras.layers import *
-from tensorflow.keras.models import *
-from tensorflow.keras.optimizers import *
-from tensorflow.keras.regularizers import *
 
 class GD_STORE:
     SESS = 'sess'
@@ -72,6 +70,7 @@ class GD_STORE:
         if GD_STORE.AGG_IMAGE_COUNT in GD:
             del GD[GD_STORE.AGG_IMAGE_COUNT]
 
+
 def get_init_model_and_sess(GD, device_name, gpu_count, segments_per_host,
                                model_architecture, compile_params, custom_function_map):
     # If a live session is present, re-use it. Otherwise, recreate it.
@@ -90,6 +89,7 @@ def get_init_model_and_sess(GD, device_name, gpu_count, segments_per_host,
         GD_STORE.init(GD, sess, segment_model)
     return segment_model, sess
 
+
 @MinWarning("warning")
 def fit(schema_madlib, source_table, model, model_arch_table,
         model_id, compile_params, fit_params, num_iterations,
@@ -485,6 +485,7 @@ def get_initial_weights(model_table, model_arch, serialized_weights, warm_start,
                 model.get_weights())
     return serialized_weights
 
+
 def get_source_summary_table_dict(source_summary_table):
     source_summary = plpy.execute("""
             SELECT *
@@ -493,6 +494,7 @@ def get_source_summary_table_dict(source_summary_table):
 
     return source_summary
 
+
 def compute_loss_and_metrics(schema_madlib, table, dependent_varname,
                              independent_varname, compile_params,
                              model_arch, serialized_weights, use_gpus,
@@ -526,6 +528,7 @@ def compute_loss_and_metrics(schema_madlib, table, dependent_varname,
     loss_list.append(loss)
     return end_val - start_val, metric, loss
 
+
 def should_compute_metrics_this_iter(curr_iter, metrics_compute_frequency,
                                      num_iterations):
     """
@@ -543,6 +546,7 @@ def should_compute_metrics_this_iter(curr_iter, metrics_compute_frequency,
     return (curr_iter)%metrics_compute_frequency == 0 or \
            curr_iter == num_iterations
 
+
 def init_model(model_architecture, compile_params, custom_function_map):
     """
         Should only be called at the first row of first iteration.
@@ -551,6 +555,7 @@ def init_model(model_architecture, compile_params, custom_function_map):
     compile_model(segment_model, compile_params, custom_function_map)
     return segment_model
 
+
 def fit_transition_wide(state, dependent_var1, dependent_var2, dependent_var3,
                    dependent_var4, dependent_var5, independent_var1,
                    independent_var2, independent_var3, independent_var4,
@@ -578,6 +583,7 @@ def fit_transition_wide(state, dependent_var1, dependent_var2, dependent_var3,
                    accessible_gpus_for_seg, prev_serialized_weights,
                    is_multiple_model, custom_function_map, **kwargs)
 
+
 def fit_transition(state, dependent_var, independent_var, dependent_var_shape,
                    independent_var_shape, model_architecture,
                    compile_params, fit_params, dist_key, dist_key_mapping,
@@ -659,6 +665,7 @@ def fit_transition(state, dependent_var, independent_var, dependent_var_shape,
 
     return return_state
 
+
 def fit_multiple_transition_caching(dependent_var, independent_var, dependent_var_shape,
                              independent_var_shape, model_architecture,
                              compile_params, fit_params, dist_key, dist_key_mapping,
@@ -757,6 +764,7 @@ def fit_multiple_transition_caching(dependent_var, independent_var, dependent_va
 
     return return_state
 
+
 def get_state_to_return(segment_model, is_last_row, is_multiple_model, agg_image_count,
                         total_images=None):
     """
@@ -793,6 +801,7 @@ def get_state_to_return(segment_model, is_last_row, is_multiple_model, agg_image
 
     return new_state
 
+
 def fit_merge(state1, state2, **kwargs):
 
     # Return if called early
@@ -813,6 +822,7 @@ def fit_merge(state1, state2, **kwargs):
     return madlib_keras_serializer.serialize_state_with_1d_weights(
         image_count, total_weights)
 
+
 def fit_final(state, **kwargs):
     # Return if called early
     if not state:
@@ -825,6 +835,7 @@ def fit_final(state, **kwargs):
     weights /= image_count
     return madlib_keras_serializer.serialize_nd_weights(weights)
 
+
 def evaluate(schema_madlib, model_table, test_table, output_table,
              use_gpus, mst_key, **kwargs):
 
@@ -910,6 +921,7 @@ def evaluate(schema_madlib, model_table, test_table, output_table,
     if is_mult_model:
         plpy.execute("DROP VIEW IF EXISTS {0}".format(model_summary_table))
 
+
 def validate_evaluate(module_name, model_table, model_summary_table, test_table, test_summary_table, output_table, is_mult_model):
     def _validate_test_summary_tbl():
         input_tbl_valid(test_summary_table, module_name,
@@ -935,6 +947,7 @@ def validate_evaluate(module_name, model_table, model_summary_table, test_table,
     for i in dependent_varname:
         validate_bytea_var_for_minibatch(test_table, i)
 
+
 def get_loss_metric_from_keras_eval(schema_madlib, table, dependent_varname,
                                     independent_varname, compile_params,
                                     model_arch, serialized_weights, use_gpus,
@@ -1005,6 +1018,7 @@ def get_loss_metric_from_keras_eval(schema_madlib, table, dependent_varname,
         loss_metric = res[0]['loss_metric']
     return loss_metric
 
+
 def internal_keras_eval_transition(state, dependent_var, independent_var,
                                    dependent_var_shape, independent_var_shape,
                                    model_architecture, serialized_weights, compile_params,
@@ -1118,6 +1132,7 @@ def internal_keras_eval_transition(state, dependent_var, independent_var,
 
     return state
 
+
 def internal_keras_eval_merge(state1, state2, **kwargs):
     # If either state is None, return the other one
     if not state1 or not state2:
@@ -1129,6 +1144,7 @@ def internal_keras_eval_merge(state1, state2, **kwargs):
 
     return merged_state
 
+
 def internal_keras_eval_final(state, **kwargs):
     image_count = state[-1]
 
@@ -1140,6 +1156,7 @@ def internal_keras_eval_final(state, **kwargs):
 
     return state
 
+
 def fit_help(schema_madlib, message, **kwargs):
     """
     Help function for keras fit
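
[get_init_model_and_sess above relies on PL/Python's GD dictionary, which
persists for the lifetime of a database session on each segment, so the model
and TensorFlow session are built once and then reused. A simplified sketch of
that cache-or-create pattern, with names invented for illustration:

    def get_or_create(GD, key, factory):
        # GD survives across calls within one session, so expensive
        # objects are constructed only on the first call
        if key not in GD:
            GD[key] = factory()
        return GD[key]
]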
diff --git a/src/ports/postgres/modules/deep_learning/madlib_keras.sql_in b/src/ports/postgres/modules/deep_learning/madlib_keras.sql_in
index b75d0fc5..092b09b0 100644
--- a/src/ports/postgres/modules/deep_learning/madlib_keras.sql_in
+++ b/src/ports/postgres/modules/deep_learning/madlib_keras.sql_in
@@ -1064,9 +1064,9 @@ __internal_gpu_config__ | all_segments
 -# Define and load model architecture.  Use Keras to define
 the model architecture:
 <pre class="example">
-from tensorflow import keras
-from tensorflow.keras.models import Sequential
-from tensorflow.keras.layers import Dense
+import keras
+from keras.models import Sequential
+from keras.layers import Dense
 model_simple = Sequential()
 model_simple.add(Dense(10, activation='relu', input_shape=(4,)))
 model_simple.add(Dense(10, activation='relu'))
@@ -1847,7 +1847,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.madlib_keras_fit(
     with AOControl(False):
         with SetGUC("plan_cache_mode", "force_generic_plan"):
             madlib_keras.fit(**globals())
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.fit_transition(
@@ -1880,7 +1880,7 @@ PythonFunctionBodyOnlyNoSchema(`deep_learning', `madlib_keras')
-        message = e.message + 'TransAggDetail' + detail
+        message = str(e) + 'TransAggDetail' + detail
         e.args = (message,)
         raise e
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
 
 
@@ -1922,7 +1922,7 @@ PythonFunctionBodyOnlyNoSchema(`deep_learning', `madlib_keras')
-        message = e.message + 'TransAggDetail' + detail
+        message = str(e) + 'TransAggDetail' + detail
         e.args = (message,)
         raise e
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.fit_merge(
@@ -1942,7 +1942,7 @@ PythonFunctionBodyOnlyNoSchema(`deep_learning', `madlib_keras')
-        message = e.message + 'MergeAggDetail' + detail
+        message = str(e) + 'MergeAggDetail' + detail
         e.args = (message,)
         raise e
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.fit_final(
@@ -1961,7 +1961,7 @@ PythonFunctionBodyOnlyNoSchema(`deep_learning', `madlib_keras')
         e.args = (message,)
         raise e
 
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
 
 DROP AGGREGATE IF EXISTS MADLIB_SCHEMA.fit_step(
@@ -2080,7 +2080,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.madlib_keras_predict(
                    pred_type,
                    use_gpus,
                    mst_key)
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.madlib_keras_predict(
@@ -2106,7 +2106,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.madlib_keras_predict(
                    pred_type,
                    use_gpus,
                    mst_key)
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.madlib_keras_predict(
@@ -2132,7 +2132,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.madlib_keras_predict(
                    pred_type,
                    use_gpus,
                    mst_key)
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.internal_keras_predict(
@@ -2152,7 +2152,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.internal_keras_predict(
 ) RETURNS DOUBLE PRECISION[] AS $$
     PythonFunctionBodyOnlyNoSchema(`deep_learning', `madlib_keras_predict')
     return madlib_keras_predict.internal_keras_predict_wide(**globals())
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 -------------------------------------------------------------------------------
@@ -2174,7 +2174,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.madlib_keras_predict_byom(
     with AOControl(False):
         with SetGUC("plan_cache_mode", "force_generic_plan"):
             madlib_keras_predict.PredictBYOM(**globals())
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.madlib_keras_predict_byom(
@@ -2194,7 +2194,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.madlib_keras_predict_byom(
     with AOControl(False):
         with SetGUC("plan_cache_mode", "force_generic_plan"):
             madlib_keras_predict.PredictBYOM(**globals())
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.madlib_keras_predict_byom(
@@ -2214,7 +2214,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.madlib_keras_predict_byom(
     with AOControl(False):
         with SetGUC("plan_cache_mode", "force_generic_plan"):
             madlib_keras_predict.PredictBYOM(**globals())
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 -------------------------------------------------------------------------------
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.madlib_keras_evaluate(
@@ -2229,7 +2229,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.madlib_keras_evaluate(
     with AOControl(False):
         with SetGUC("plan_cache_mode", "force_generic_plan"):
             madlib_keras.evaluate(**globals())
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.madlib_keras_evaluate(
@@ -2271,7 +2271,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.internal_keras_eval_transition(
 ) RETURNS REAL[] AS $$
 PythonFunctionBodyOnlyNoSchema(`deep_learning', `madlib_keras')
     return madlib_keras.internal_keras_eval_transition(**globals())
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.internal_keras_eval_merge(
@@ -2280,7 +2280,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.internal_keras_eval_merge(
 ) RETURNS REAL[] AS $$
 PythonFunctionBodyOnlyNoSchema(`deep_learning', `madlib_keras')
     return madlib_keras.internal_keras_eval_merge(**globals())
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.internal_keras_eval_final(
@@ -2288,7 +2288,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.internal_keras_eval_final(
 ) RETURNS REAL[2] AS $$
 PythonFunctionBodyOnlyNoSchema(`deep_learning', `madlib_keras')
     return madlib_keras.internal_keras_eval_final(**globals())
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
 
 DROP AGGREGATE IF EXISTS MADLIB_SCHEMA.internal_keras_evaluate(
@@ -2339,14 +2339,14 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.madlib_keras_fit(
     PythonFunctionBodyOnly(`deep_learning', `madlib_keras')
     with AOControl(False):
         return madlib_keras.fit_help(**globals())
-$$ LANGUAGE plpythonu IMMUTABLE
+$$ LANGUAGE plpython3u IMMUTABLE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.madlib_keras_fit()
 RETURNS VARCHAR AS $$
     SELECT MADLIB_SCHEMA.madlib_keras_fit('');
 $$ LANGUAGE sql IMMUTABLE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.madlib_keras_evaluate(
      message VARCHAR
@@ -2354,14 +2354,14 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.madlib_keras_evaluate(
     PythonFunctionBodyOnly(`deep_learning', `madlib_keras')
     with AOControl(False):
         return madlib_keras.evaluate_help(**globals())
-$$ LANGUAGE plpythonu IMMUTABLE
+$$ LANGUAGE plpython3u IMMUTABLE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.madlib_keras_evaluate()
 RETURNS VARCHAR AS $$
     SELECT MADLIB_SCHEMA.madlib_keras_evaluate('');
 $$ LANGUAGE sql IMMUTABLE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.madlib_keras_predict(
      message VARCHAR
@@ -2369,14 +2369,14 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.madlib_keras_predict(
     PythonFunctionBodyOnly(`deep_learning', `madlib_keras_predict')
     with AOControl(False):
         return madlib_keras_predict.predict_help(**globals())
-$$ LANGUAGE plpythonu IMMUTABLE
+$$ LANGUAGE plpython3u IMMUTABLE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.madlib_keras_predict()
 RETURNS VARCHAR AS $$
     SELECT MADLIB_SCHEMA.madlib_keras_predict('');
 $$ LANGUAGE sql IMMUTABLE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.madlib_keras_predict_byom(
      message VARCHAR
@@ -2384,11 +2384,11 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.madlib_keras_predict_byom(
     PythonFunctionBodyOnly(`deep_learning', `madlib_keras_predict')
     with AOControl(False):
         return madlib_keras_predict.predict_byom_help(**globals())
-$$ LANGUAGE plpythonu IMMUTABLE
+$$ LANGUAGE plpython3u IMMUTABLE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.madlib_keras_predict_byom()
 RETURNS VARCHAR AS $$
     SELECT MADLIB_SCHEMA.madlib_keras_predict_byom('');
 $$ LANGUAGE sql IMMUTABLE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
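
[The transition and merge bodies above re-raise errors with extra detail
appended; Python 3 removed BaseException.message, so str(e) is the portable
spelling. A sketch of the re-raise pattern:

    try:
        raise RuntimeError("original failure")
    except RuntimeError as e:
        e.args = (str(e) + 'TransAggDetail' + 'some detail',)
        raise    # re-raises with the augmented message
]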
diff --git a/src/ports/postgres/modules/deep_learning/madlib_keras_automl.py_in b/src/ports/postgres/modules/deep_learning/madlib_keras_automl.py_in
index 578c5d78..7c21ff54 100644
--- a/src/ports/postgres/modules/deep_learning/madlib_keras_automl.py_in
+++ b/src/ports/postgres/modules/deep_learning/madlib_keras_automl.py_in
@@ -19,15 +19,16 @@
 
 import plpy
 
-from madlib_keras_validator import MstLoaderInputValidator
+from deep_learning.madlib_keras_validator import MstLoaderInputValidator
 from utilities.utilities import get_current_timestamp, get_seg_number, get_segments_per_host, \
     unique_string, add_postfix, extract_keyvalue_params, _assert, _assert_equal, rename_table, \
     is_platform_pg
-from madlib_keras_model_selection import ModelSelectionSchema
-from keras_model_arch_table import ModelArchSchema
+from deep_learning.madlib_keras_model_selection import ModelSelectionSchema
+from deep_learning.keras_model_arch_table import ModelArchSchema
 from utilities.validate_args import table_exists, drop_tables, input_tbl_valid
 from utilities.validate_args import quote_ident
-from madlib_keras_helper import DISTRIBUTION_KEY_COLNAME
+from deep_learning.madlib_keras_helper import DISTRIBUTION_KEY_COLNAME
+
 
 class AutoMLConstants:
     BRACKET = 's'
@@ -55,6 +56,7 @@ class AutoMLConstants:
     INT_MAX = 2 ** 31 - 1
     TARGET_SCHEMA = 'public'
 
+
 class KerasAutoML(object):
     """
     The core AutoML class for running AutoML algorithms such as Hyperband and Hyperopt.
diff --git a/src/ports/postgres/modules/deep_learning/madlib_keras_automl.sql_in b/src/ports/postgres/modules/deep_learning/madlib_keras_automl.sql_in
index 1990491d..e6ff0c5c 100644
--- a/src/ports/postgres/modules/deep_learning/madlib_keras_automl.sql_in
+++ b/src/ports/postgres/modules/deep_learning/madlib_keras_automl.sql_in
@@ -923,9 +923,9 @@ num_classes         | 3
 -# Define and load model architecture.  Use Keras to define
 the model architecture with 1 hidden layer:
 <pre class="example">
-from tensorflow import keras
-from tensorflow.keras.models import Sequential
-from tensorflow.keras.layers import Dense
+import keras
+from keras.models import Sequential
+from keras.layers import Dense
 model1 = Sequential()
 model1.add(Dense(10, activation='relu', input_shape=(4,)))
 model1.add(Dense(10, activation='relu'))
@@ -1385,7 +1385,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.hyperband_schedule(
         with MinWarning('warning'):
             schedule_loader = madlib_keras_automl_hyperband.HyperbandSchedule(schedule_table, r, eta, skip_last)
             schedule_loader.load()
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
               m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.madlib_keras_automl(
@@ -1407,17 +1407,18 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.madlib_keras_automl(
     description                    VARCHAR DEFAULT NULL,
     use_caching                    BOOLEAN DEFAULT FALSE
 ) RETURNS VOID AS $$
+global schema_madlib
 if automl_method is None or automl_method.lower() == 'hyperband':
-    PythonFunctionBodyOnly(`deep_learning', `madlib_keras_automl_hyperband')
+    PythonFunctionBodyOnlyNoGlobalSchema(`deep_learning', `madlib_keras_automl_hyperband')
     with AOControl(False):
         with MinWarning('warning'):
             schedule_loader = madlib_keras_automl_hyperband.AutoMLHyperband(**globals())
 elif automl_method.lower() == 'hyperopt':
-    PythonFunctionBodyOnly(`deep_learning', `madlib_keras_automl_hyperopt')
+    PythonFunctionBodyOnlyNoGlobalSchema(`deep_learning', `madlib_keras_automl_hyperopt')
     with AOControl(False):
         with MinWarning('warning'):
             schedule_loader = madlib_keras_automl_hyperopt.AutoMLHyperopt(**globals())
 else:
     plpy.error("madlib_keras_automl: The chosen automl method must be 'hyperband' or 'hyperopt'")
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
     m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
diff --git a/src/ports/postgres/modules/deep_learning/madlib_keras_automl_hyperband.py_in b/src/ports/postgres/modules/deep_learning/madlib_keras_automl_hyperband.py_in
index a3437d84..f1b7e76f 100644
--- a/src/ports/postgres/modules/deep_learning/madlib_keras_automl_hyperband.py_in
+++ b/src/ports/postgres/modules/deep_learning/madlib_keras_automl_hyperband.py_in
@@ -20,15 +20,16 @@
 import math
 import plpy
 import time
-from madlib_keras_automl import KerasAutoML, AutoMLConstants
+from deep_learning.madlib_keras_automl import KerasAutoML, AutoMLConstants
 from utilities.utilities import get_current_timestamp, get_seg_number, get_segments_per_host, \
     unique_string, add_postfix, extract_keyvalue_params, _assert, _assert_equal, rename_table, \
     is_platform_pg
 from utilities.control import SetGUC
-from madlib_keras_fit_multiple_model import FitMultipleModel
-from madlib_keras_model_selection import MstSearch, ModelSelectionSchema
+from deep_learning.madlib_keras_fit_multiple_model import FitMultipleModel
+from deep_learning.madlib_keras_model_selection import MstSearch, ModelSelectionSchema
 from utilities.validate_args import table_exists, drop_tables, input_tbl_valid
 
+
 class HyperbandSchedule():
     """The utility class for loading a hyperband schedule table with algorithm inputs.
 
@@ -80,7 +81,7 @@ class HyperbandSchedule():
         Calculates the hyperband schedule (number of configs and allocated resources)
         in each round of each bracket and skips the number of last rounds specified in 'skip_last'
         """
-        for s in reversed(range(self.s_max+1)):
+        for s in reversed(list(range(self.s_max+1))):
             n = int(math.ceil(int((self.s_max+1)/(s+1))*math.pow(self.eta, s))) # initial number of configurations
             r = self.R * math.pow(self.eta, -s)
 
@@ -139,6 +140,7 @@ class HyperbandSchedule():
                                       **locals())
             plpy.execute(insert_query)
 
+
 class AutoMLHyperband(KerasAutoML):
     """
     This class implements Hyperband, an infinite-arm bandit based algorithm that speeds up random search
@@ -198,7 +200,7 @@ class AutoMLHyperband(KerasAutoML):
         initial_vals = {}
 
         # get hyper parameter configs for each s
-        for s in reversed(range(self.s_max+1)):
+        for s in reversed(list(range(self.s_max+1))):
             n = int(math.ceil(int((self.s_max+1)/(s+1))*math.pow(self.eta, s))) # initial number of configurations
             r = self.R * math.pow(self.eta, -s) # initial number of iterations to run configurations for
             initial_vals[s] = (n, int(round(r)))
@@ -222,7 +224,7 @@ class AutoMLHyperband(KerasAutoML):
         ranges_dict = self.mst_key_ranges_dict(initial_vals)
         # to store the bracket and round numbers
         s_dict, i_dict = {}, {}
-        for key, val in ranges_dict.items():
+        for key, val in list(ranges_dict.items()):
             for mst_key in range(val[0], val[1]+1):
                 s_dict[mst_key] = key
                 i_dict[mst_key] = -1
@@ -281,7 +283,7 @@ class AutoMLHyperband(KerasAutoML):
         executing a particular SHA bracket.
         """
         d = {}
-        for s_val in sorted(initial_vals.keys(), reverse=True): # going from s_max to 0
+        for s_val in sorted(list(initial_vals.keys()), reverse=True): # going from s_max to 0
             if s_val == self.s_max:
                 d[s_val] = (1, initial_vals[s_val][0])
             else:
@@ -387,7 +389,7 @@ class AutoMLHyperband(KerasAutoML):
         schedule.
         """
         # normalizing factor for metrics_iters due to warm start
-        epochs_factor = sum([n[1] for n in initial_vals.values()][::-1][:i]) # i & initial_vals args needed
+        epochs_factor = sum([n[1] for n in list(initial_vals.values())][::-1][:i]) # i & initial_vals args needed
         iters = plpy.execute("SELECT {AutoMLSchema.METRICS_ITERS} " \
                              "FROM {AutoMLSchema.MODEL_SUMMARY_TABLE}".format(AutoMLSchema=AutoMLConstants))
         metrics_iters_val = [epochs_factor+mi for mi in iters[0]['metrics_iters']] # global iteration counter
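
[The list(...) around the dict views above follows the Python 3 rule that
keys(), values() and items() are live views rather than lists; snapshotting is
the safe idiom whenever the dict might change mid-loop. A generic sketch, not
MADlib code:

    d = {'a': 1, 'b': 2}
    for k in list(d):        # snapshot of the keys
        d[k * 2] = d[k]      # fine; iterating d directly while inserting
                             # would raise RuntimeError in Python 3
]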
diff --git a/src/ports/postgres/modules/deep_learning/madlib_keras_automl_hyperopt.py_in b/src/ports/postgres/modules/deep_learning/madlib_keras_automl_hyperopt.py_in
index c0bbe57d..44dfc7d4 100644
--- a/src/ports/postgres/modules/deep_learning/madlib_keras_automl_hyperopt.py_in
+++ b/src/ports/postgres/modules/deep_learning/madlib_keras_automl_hyperopt.py_in
@@ -24,17 +24,18 @@ import numpy as np
 import plpy
 import time
 
-from madlib_keras_automl import KerasAutoML, AutoMLConstants
-from input_data_preprocessor import DistributionRulesOptions
-from madlib_keras_fit_multiple_model import FitMultipleModel
-from madlib_keras_helper import generate_row_string
-from madlib_keras_helper import DISTRIBUTION_RULES_COLNAME
-from madlib_keras_model_selection import ModelSelectionSchema
+from deep_learning.madlib_keras_automl import KerasAutoML, AutoMLConstants
+from deep_learning.input_data_preprocessor import DistributionRulesOptions
+from deep_learning.madlib_keras_fit_multiple_model import FitMultipleModel
+from deep_learning.madlib_keras_helper import generate_row_string
+from deep_learning.madlib_keras_helper import DISTRIBUTION_RULES_COLNAME
+from deep_learning.madlib_keras_model_selection import ModelSelectionSchema
 from utilities.control import SetGUC
 from utilities.utilities import get_current_timestamp, get_seg_number, get_segments_per_host, \
     unique_string, add_postfix, extract_keyvalue_params, _assert, _assert_equal, rename_table
 from utilities.validate_args import table_exists, drop_tables, input_tbl_valid
 
+
 class AutoMLHyperopt(KerasAutoML):
     """
     This class implements Hyperopt, another automl method that explores awkward search spaces using
@@ -243,7 +244,7 @@ class AutoMLHyperopt(KerasAutoML):
                 name = o_param + '_' + str(counter)
                 hyperopt_search_dict[name] = self.get_hyperopt_exps(name, optimizer_dict[o_param])
             # appending deep copy
-            hyperopt_search_space_lst.append({k:v for k, v in hyperopt_search_dict.items()})
+            hyperopt_search_space_lst.append({k:v for k, v in list(hyperopt_search_dict.items())})
             for o_param in optimizer_dict:
                 name = o_param + '_' + str(counter)
                 del hyperopt_search_dict[name]
@@ -462,8 +463,7 @@ class AutoMLHyperopt(KerasAutoML):
             INSERT INTO {self.model_output_table}
             SELECT * FROM {AutoMLConstants.MODEL_OUTPUT_TABLE}
         """.format(self=self,
-                   AutoMLConstants=AutoMLConstants
-                  )
+                   AutoMLConstants=AutoMLConstants)
         )
 
         plpy.execute("""
diff --git a/src/ports/postgres/modules/deep_learning/madlib_keras_custom_function.py_in b/src/ports/postgres/modules/deep_learning/madlib_keras_custom_function.py_in
index f5565564..f6de44f8 100644
--- a/src/ports/postgres/modules/deep_learning/madlib_keras_custom_function.py_in
+++ b/src/ports/postgres/modules/deep_learning/madlib_keras_custom_function.py_in
@@ -32,6 +32,8 @@ from utilities.validate_args import unquote_ident
 from utilities.validate_args import table_exists
 
 module_name = 'Keras Custom Function'
+
+
 class CustomFunctionSchema:
     """Expected format of custom function table.
        Example uses:
@@ -58,6 +60,7 @@ class CustomFunctionSchema:
     col_names = (FN_ID, FN_NAME, FN_DESC, FN_OBJ)
     col_types = ('SERIAL', 'TEXT', 'TEXT', 'BYTEA')
 
+
 def _validate_object(object, **kwargs):
     _assert(object is not None, "{0}: function object cannot be NULL!".format(module_name))
     try:
@@ -65,6 +68,7 @@ def _validate_object(object, **kwargs):
     except Exception as e:
         plpy.error("{0}: Invalid function object".format(module_name, e))
 
+
 @MinWarning("error")
 def load_custom_function(schema_madlib, object_table, object, name, description=None, **kwargs):
 
@@ -128,6 +132,7 @@ def load_custom_function(schema_madlib, object_table, object, name, description=
     plpy.notice("{0}: Added function {1} to {2} table".
               format(module_name, name, object_table))
 
+
 @MinWarning("error")
 def delete_custom_function(schema_madlib, object_table, id=None, name=None, **kwargs):
 
@@ -172,8 +177,10 @@ def delete_custom_function(schema_madlib, object_table, id=None, name=None, **kw
         sql = "DROP TABLE {0}".format(object_table)
         plpy.execute(sql, 0)
 
+
 dangerous_builtins = set(('serialize', 'deserialize', 'get'))
 
+
 def update_builtin_metrics(builtin_metrics):
     builtin_metrics.append('accuracy')
     builtin_metrics.append('acc')
@@ -186,12 +193,14 @@ def update_builtin_metrics(builtin_metrics):
 
     return builtin_metrics
 
+
 def update_builtin_losses(builtin_losses):
     builtin_losses = [ b for b in builtin_losses \
                         if not b.startswith('_') and \
                          b not in dangerous_builtins ]
     return builtin_losses
 
+
 @MinWarning("error")
 def load_top_k_accuracy_function(schema_madlib, object_table, k, **kwargs):
 
@@ -214,6 +223,7 @@ def load_top_k_accuracy_function(schema_madlib, object_table, k, **kwargs):
                 format(module_name, fn_name, object_table))
     return
 
+
 class KerasCustomFunctionDocumentation:
     @staticmethod
     def _returnHelpMsg(schema_madlib, message, summary, usage, method):
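
For context on the table this module manages: each custom function is stored dill-serialized in the BYTEA column FN_OBJ and deserialized at fit/evaluate time. A minimal round trip of that pattern outside the database (assumes only the dill package; the loss body is a toy stand-in):

    import dill

    def squared_error(y_true, y_pred):
        # toy stand-in for a Keras-backend loss
        return (y_pred - y_true) ** 2

    # Serialize the function object, as load_custom_function() stores it...
    payload = dill.dumps(squared_error)

    # ...and restore it later, as the training path would.
    restored = dill.loads(payload)
    print(restored(1.0, 3.0))  # 4.0
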
diff --git a/src/ports/postgres/modules/deep_learning/madlib_keras_custom_function.sql_in b/src/ports/postgres/modules/deep_learning/madlib_keras_custom_function.sql_in
index e984b0b5..147986ca 100644
--- a/src/ports/postgres/modules/deep_learning/madlib_keras_custom_function.sql_in
+++ b/src/ports/postgres/modules/deep_learning/madlib_keras_custom_function.sql_in
@@ -247,12 +247,12 @@ cur = conn.cursor()
 import dill
 \# custom loss
 def squared_error(y_true, y_pred):
-    import tensorflow.keras.backend as K
+    import keras.backend as K
     return K.square(y_pred - y_true)
 pb_squared_error=dill.dumps(squared_error)
 \# custom metric
 def rmse(y_true, y_pred):
-    import tensorflow.keras.backend as K
+    import keras.backend as K
     return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
 pb_rmse=dill.dumps(rmse)
 \# call load function
@@ -278,21 +278,21 @@ RETURNS BYTEA AS
 $$
 import dill
 def squared_error(y_true, y_pred):
-    import tensorflow.keras.backend as K
+    import keras.backend as K
     return K.square(y_pred - y_true)
 pb_squared_error=dill.dumps(squared_error)
 return pb_squared_error
-$$ language plpythonu;
+$$ language plpython3u;
 CREATE OR REPLACE FUNCTION custom_function_rmse()
 RETURNS BYTEA AS
 $$
 import dill
 def rmse(y_true, y_pred):
-    import tensorflow.keras.backend as K
+    import keras.backend as K
     return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
 pb_rmse=dill.dumps(rmse)
 return pb_rmse
-$$ language plpythonu;
+$$ language plpython3u;
 </pre>
 Now call loader:
 <pre class="result">
@@ -363,7 +363,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.load_custom_function(
 ) RETURNS VOID AS $$
     PythonFunctionBodyOnly(`deep_learning', `madlib_keras_custom_function')
     madlib_keras_custom_function.load_custom_function(**globals())
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.load_custom_function(
@@ -381,14 +381,14 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.load_custom_function(
 ) RETURNS VARCHAR AS $$
     PythonFunctionBodyOnly(deep_learning, madlib_keras_custom_function)
     return madlib_keras_custom_function.KerasCustomFunctionDocumentation.load_custom_function_help(schema_madlib, message)
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.load_custom_function()
 RETURNS VARCHAR AS $$
     PythonFunctionBodyOnly(deep_learning, madlib_keras_custom_function)
     return madlib_keras_custom_function.KerasCustomFunctionDocumentation.load_custom_function_help(schema_madlib, '')
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 -- Function to delete a keras custom function from object table
@@ -400,7 +400,7 @@ RETURNS VOID AS $$
     PythonFunctionBodyOnly(`deep_learning',`madlib_keras_custom_function')
     with AOControl(False):
         madlib_keras_custom_function.delete_custom_function(schema_madlib, object_table, id=id)
-$$ LANGUAGE plpythonu VOLATILE;
+$$ LANGUAGE plpython3u VOLATILE;
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.delete_custom_function(
     object_table VARCHAR,
@@ -410,7 +410,7 @@ RETURNS VOID AS $$
     PythonFunctionBodyOnly(`deep_learning',`madlib_keras_custom_function')
     with AOControl(False):
         madlib_keras_custom_function.delete_custom_function(schema_madlib, object_table, name=name)
-$$ LANGUAGE plpythonu VOLATILE;
+$$ LANGUAGE plpython3u VOLATILE;
 
 -- Functions for online help
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.delete_custom_function(
@@ -418,14 +418,14 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.delete_custom_function(
 ) RETURNS VARCHAR AS $$
     PythonFunctionBodyOnly(deep_learning, madlib_keras_custom_function)
     return madlib_keras_custom_function.KerasCustomFunctionDocumentation.delete_custom_function_help(schema_madlib, message)
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.delete_custom_function()
 RETURNS VARCHAR AS $$
     PythonFunctionBodyOnly(deep_learning, madlib_keras_custom_function)
     return madlib_keras_custom_function.KerasCustomFunctionDocumentation.delete_custom_function_help(schema_madlib, '')
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 -- Top n accuracy function
@@ -435,7 +435,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.load_top_k_accuracy_function(
 ) RETURNS VOID AS $$
     PythonFunctionBodyOnly(`deep_learning', `madlib_keras_custom_function')
     madlib_keras_custom_function.load_top_k_accuracy_function(**globals())
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.load_top_k_accuracy_function(
@@ -443,14 +443,14 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.load_top_k_accuracy_function(
 ) RETURNS VARCHAR AS $$
     PythonFunctionBodyOnly(deep_learning, madlib_keras_custom_function)
     return madlib_keras_custom_function.KerasCustomFunctionDocumentation.load_top_k_accuracy_function_help(schema_madlib, message)
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.load_top_k_accuracy_function()
 RETURNS VARCHAR AS $$
     PythonFunctionBodyOnly(deep_learning, madlib_keras_custom_function)
     return madlib_keras_custom_function.KerasCustomFunctionDocumentation.load_top_k_accuracy_function_help(schema_madlib, '')
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.top_k_categorical_acc_pickled(
@@ -458,7 +458,7 @@ n INTEGER,
 fn_name VARCHAR
 ) RETURNS BYTEA AS $$
     import dill
-    from tensorflow.keras.metrics import top_k_categorical_accuracy
+    from keras.metrics import top_k_categorical_accuracy
 
     def fn(Y_true, Y_pred):
         return top_k_categorical_accuracy(Y_true,
@@ -467,5 +467,5 @@ fn_name VARCHAR
     fn.__name__= fn_name
     pb=dill.dumps(fn)
     return pb
-$$ language plpythonu
+$$ language plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
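
Every LANGUAGE plpythonu clause above becomes plpython3u, PostgreSQL's Python 3 procedural language. One practical consequence for these helpers: under Python 3, dill.dumps() returns bytes, which plpython3u maps to the BYTEA return type (Python 2's str filled that role under plpythonu). A quick check, runnable outside the database:

    import dill

    def rmse_stub(y_true, y_pred):
        return 0.0  # placeholder body; only the serialization matters here

    pb = dill.dumps(rmse_stub)
    print(type(pb))  # <class 'bytes'> -- what BYTEA expects from plpython3u
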
diff --git a/src/ports/postgres/modules/deep_learning/madlib_keras_fit_multiple_model.py_in b/src/ports/postgres/modules/deep_learning/madlib_keras_fit_multiple_model.py_in
index aa88fbee..830a909c 100644
--- a/src/ports/postgres/modules/deep_learning/madlib_keras_fit_multiple_model.py_in
+++ b/src/ports/postgres/modules/deep_learning/madlib_keras_fit_multiple_model.py_in
@@ -24,17 +24,17 @@ import json
 import random
 import datetime
 from collections import defaultdict
-# from tensorflow.keras.models import *
-
-from madlib_keras import compute_loss_and_metrics
-from madlib_keras import get_model_arch
-from madlib_keras import get_source_summary_table_dict
-from madlib_keras import should_compute_metrics_this_iter
-from madlib_keras import get_initial_weights
-from madlib_keras_helper import *
-from madlib_keras_model_selection import ModelSelectionSchema
-from madlib_keras_validator import *
-from madlib_keras_wrapper import *
+from keras.models import *
+
+from deep_learning.madlib_keras import compute_loss_and_metrics
+from deep_learning.madlib_keras import get_model_arch
+from deep_learning.madlib_keras import get_source_summary_table_dict
+from deep_learning.madlib_keras import should_compute_metrics_this_iter
+from deep_learning.madlib_keras import get_initial_weights
+from deep_learning.madlib_keras_helper import *
+from deep_learning.madlib_keras_model_selection import ModelSelectionSchema
+from deep_learning.madlib_keras_validator import *
+from deep_learning.madlib_keras_wrapper import *
 
 from internal.db_utils import quote_literal
 from utilities.control import OptimizerControl
@@ -78,6 +78,7 @@ segment.
 Note that this function is disabled for Postgres.
 """
 
+
 class FitMultipleModel(object):
     def __init__(self, schema_madlib, source_table, model_output_table,
                  model_selection_table, num_iterations,
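
The import rewrite at the top of this file (from madlib_keras import ... becoming from deep_learning.madlib_keras import ...) is forced by PEP 328: Python 3 dropped implicit relative imports, so sibling modules must be addressed through their package. A small sanity check of the rule, assuming nothing beyond the standard library:

    import importlib.util

    # Inside the deep_learning/ package, Python 2 resolved
    #     from madlib_keras import get_model_arch
    # against the sibling file implicitly.  Python 3 requires the
    # package-qualified form used in this patch:
    #     from deep_learning.madlib_keras import get_model_arch
    # (The explicit relative form, from .madlib_keras import ...,
    # works on both interpreters.)

    # Outside the package, the bare name is simply not importable:
    print(importlib.util.find_spec('madlib_keras'))  # None
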
diff --git a/src/ports/postgres/modules/deep_learning/madlib_keras_fit_multiple_model.sql_in b/src/ports/postgres/modules/deep_learning/madlib_keras_fit_multiple_model.sql_in
index d1b52797..0c02079f 100644
--- a/src/ports/postgres/modules/deep_learning/madlib_keras_fit_multiple_model.sql_in
+++ b/src/ports/postgres/modules/deep_learning/madlib_keras_fit_multiple_model.sql_in
@@ -914,9 +914,9 @@ num_classes         | 3
 -# Define and load model architecture.  Use Keras to define
 the model architecture with 1 hidden layer:
 <pre class="example">
-from tensorflow import keras
-from tensorflow.keras.models import Sequential
-from tensorflow.keras.layers import Dense
+import keras
+from keras.models import Sequential
+from keras.layers import Dense
 model1 = Sequential()
 model1.add(Dense(10, activation='relu', input_shape=(4,)))
 model1.add(Dense(10, activation='relu'))
@@ -1543,7 +1543,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.madlib_keras_fit_multiple_model(
             with MinWarning("warning"):
                 fit_obj = madlib_keras_fit_multiple_model.FitMultipleModel(**globals())
                 fit_obj.fit_multiple_model()
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.madlib_keras_fit_multiple_model(
@@ -1646,5 +1646,5 @@ PythonFunctionBodyOnlyNoSchema(`deep_learning', `madlib_keras')
         message = str(e) + 'UDFDetail' + detail
         e.args = (message,)
         raise e
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
diff --git a/src/ports/postgres/modules/deep_learning/madlib_keras_gpu_info.py_in b/src/ports/postgres/modules/deep_learning/madlib_keras_gpu_info.py_in
index 4eec8a7f..1dbb1370 100644
--- a/src/ports/postgres/modules/deep_learning/madlib_keras_gpu_info.py_in
+++ b/src/ports/postgres/modules/deep_learning/madlib_keras_gpu_info.py_in
@@ -32,6 +32,7 @@ from utilities.utilities import is_platform_pg
 from utilities.utilities import unique_string
 from utilities.validate_args import output_tbl_valid
 
+
 class OutputInfoSchema:
     TEMP_INFO_TABLE = unique_string(desp='gpu_info')
     SEG_ID_COL = 'gp_seg_id'
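
For reference, the per-segment GPU probe this module wraps boils down to asking TensorFlow for its visible local devices. A sketch in that spirit, assuming the TensorFlow 1.x API the rest of this patch still targets (tf.ConfigProto and friends):

    from tensorflow.python.client import device_lib

    # Enumerate devices visible to this process and keep the GPUs --
    # roughly what each segment host has to report back.
    local_devices = device_lib.list_local_devices()
    gpus = [d.name for d in local_devices if d.device_type == 'GPU']
    print(gpus if gpus else 'no GPUs visible')
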
diff --git a/src/ports/postgres/modules/deep_learning/madlib_keras_gpu_info.sql_in b/src/ports/postgres/modules/deep_learning/madlib_keras_gpu_info.sql_in
index a35e73b9..a406bfd9 100644
--- a/src/ports/postgres/modules/deep_learning/madlib_keras_gpu_info.sql_in
+++ b/src/ports/postgres/modules/deep_learning/madlib_keras_gpu_info.sql_in
@@ -240,14 +240,14 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.gpu_info_tensorflow() RETURNS TEXT[] as
 $$
     PythonFunctionBodyOnlyNoSchema(`deep_learning', `madlib_keras_gpu_info')
     return madlib_keras_gpu_info.GPUInfoFunctions.get_gpu_info_from_tensorflow()
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.gpu_info_nvidia() RETURNS TEXT[] as
 $$
     PythonFunctionBodyOnlyNoSchema(`deep_learning', `madlib_keras_gpu_info')
     return madlib_keras_gpu_info.GPUInfoFunctions.get_gpu_info_from_nvidia()
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.gpu_configuration(output_table text, source text)
@@ -258,7 +258,7 @@ $$
         with MinWarning("error"):
             madlib_keras_gpu_info.gpu_configuration(schema_madlib, output_table, source)
 $$
-LANGUAGE plpythonu
+LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.gpu_configuration(output_table text)
@@ -273,5 +273,5 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.gpu_configuration()
     RETURNS VARCHAR AS $$
     PythonFunctionBodyOnly(`deep_learning', `madlib_keras_gpu_info')
     return madlib_keras_gpu_info.gpu_configuration_help(schema_madlib)
-$$ LANGUAGE plpythonu IMMUTABLE
+$$ LANGUAGE plpython3u IMMUTABLE
     m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
diff --git a/src/ports/postgres/modules/deep_learning/madlib_keras_helper.py_in b/src/ports/postgres/modules/deep_learning/madlib_keras_helper.py_in
index 66641b1e..9b1112a1 100644
--- a/src/ports/postgres/modules/deep_learning/madlib_keras_helper.py_in
+++ b/src/ports/postgres/modules/deep_learning/madlib_keras_helper.py_in
@@ -18,13 +18,13 @@
 # under the License.
 
 import numpy as np
-from model_arch_info import ModelArchSchema
+from deep_learning.model_arch_info import ModelArchSchema
 from utilities.utilities import add_postfix
 from utilities.utilities import get_seg_number
 from utilities.utilities import is_platform_pg
 from utilities.utilities import unique_string
 from utilities.validate_args import table_exists
-from madlib_keras_gpu_info import GPUInfoFunctions
+from deep_learning.madlib_keras_gpu_info import GPUInfoFunctions
 import plpy
 from math import isnan
 
@@ -57,22 +57,26 @@ INTERNAL_GPU_CONFIG = '__internal_gpu_config__'
 DISTRIBUTION_RULES_COLNAME = "distribution_rules"
 #####################################################################
 
+
 # Prepend a dimension to np arrays using expand_dims.
 def expand_input_dims(input_data):
     input_data = np.array(input_data, dtype=np.float32)
     input_data = np.expand_dims(input_data, axis=0)
     return input_data
 
+
 def np_array_float32(var, var_shape):
     arr = np.frombuffer(var, dtype=np.float32)
     arr.shape = var_shape
     return arr
 
+
 def np_array_int16(var, var_shape):
     arr = np.frombuffer(var, dtype=np.int16)
     arr.shape = var_shape
     return arr
 
+
 def strip_trailing_nulls_from_class_values(class_values):
     """
         class_values is a list of unique class levels in training data. This
@@ -108,6 +112,7 @@ def strip_trailing_nulls_from_class_values(class_values):
         class_values = class_values[:num_of_valid_class_values]
     return class_values
 
+
 def get_image_count_per_seg_from_array(current_seg_id, images_per_seg):
     """
     Get the image count from the array containing all the images
@@ -116,6 +121,7 @@ def get_image_count_per_seg_from_array(current_seg_id, images_per_seg):
     """
     return images_per_seg[current_seg_id]
 
+
 def get_image_count_per_seg_for_minibatched_data_from_db(table_name, shape_col):
     """
     Query the given minibatch formatted table and return the total rows per segment.
@@ -163,6 +169,7 @@ def get_image_count_per_seg_for_minibatched_data_from_db(table_name, shape_col):
 
     return dist_keys, images_per_seg
 
+
 def get_image_count_per_seg_for_non_minibatched_data_from_db(table_name):
     """
     Query the given non minibatch formatted table and return the total rows per segment.
@@ -195,6 +202,7 @@ def get_image_count_per_seg_for_non_minibatched_data_from_db(table_name):
     images_per_seg = [int(image["images_per_seg"]) for image in images_per_seg]
     return gp_segment_id_col, seg_ids, images_per_seg
 
+
 def parse_shape(shape):
     # Parse the shape format given by the sql into an int array
     # [1:10][1:32][1:3] -> [10, 32, 3]
@@ -221,6 +229,7 @@ def query_model_configs(model_selection_table, model_selection_summary_table,
     object_table = summary_res[0][object_table_col]
     return msts, model_arch_table, object_table
 
+
 def query_dist_keys(source_table, dist_key_col):
     """ Read distinct keys from the source table """
     dist_key_query = """
@@ -232,6 +241,7 @@ def query_dist_keys(source_table, dist_key_col):
     res = [x[dist_key_col] for x in res]
     return res
 
+
 def query_weights(model_output_table, model_weights_col, mst_key_col, mst_key):
     mlp_weights_query = """
                         SELECT {model_weights_col}, {mst_key_col}
@@ -243,6 +253,7 @@ def query_weights(model_output_table, model_weights_col, mst_key_col, mst_key):
         plpy.error("query_weights:  No weights in model output table for mst={}".format(mst_key))
     return res[0][model_weights_col]
 
+
 def create_summary_view(module_name, model_table, mst_key):
     tmp_view_summary = unique_string('tmp_view_summary')
     model_summary_table = add_postfix(model_table, "_summary")
@@ -302,7 +313,7 @@ def get_accessible_gpus_for_seg(schema_madlib, segments_per_host, module_name):
         accessible_gpus_for_seg = [0] * len(seg_query_result)
         warning_flag = True
         for i in seg_query_result:
-            if i['hostname'] in host_dict.keys():
+            if i['hostname'] in list(host_dict.keys()):
                 accessible_gpus_for_seg[i['segment_id']] = host_dict[i['hostname']]
             if 0 < accessible_gpus_for_seg[i['segment_id']] < segments_per_host[i['segment_id']] and warning_flag:
                 plpy.warning(
@@ -313,10 +324,12 @@ def get_accessible_gpus_for_seg(schema_madlib, segments_per_host, module_name):
                 warning_flag = False
         return accessible_gpus_for_seg
 
+
 class sqlnull:
     def __repr__(self):
         return 'NULL'
 
+
 class sqlfloat(float):
     """
         Same as a python float, but with a SQL-friendly
@@ -330,6 +343,7 @@ class sqlfloat(float):
     def __str__(self):
         return self.__repr__()
 
+
 def py_to_sql(x):
     """
         Converts a float, list of floats, or multi-dimensional
@@ -339,12 +353,13 @@ def py_to_sql(x):
     if type(x) == float:
         return sqlfloat(x)
     elif type(x) == list:
-        return map(py_to_sql, x)
+        return list(map(py_to_sql, x))
     elif x is None:
         return sqlnull()
     else:
         return x
 
+
 def get_metrics_sql_string(metrics_list, is_metrics_specified=True):
     """
         Return the SQL string to use for creating metrics SQL values.
@@ -412,6 +427,7 @@ def generate_row_string(configs_dict):
         return result_row_string[1:]
     return result_row_string
 
+
 def get_data_distribution_per_segment(table_name):
     """
     Returns a list with count of segments on each host that the input
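
The py_to_sql change above is the classic Python 3 trap in this file: map() now returns a one-shot iterator, so the recursive conversion of nested lists must materialize each level before it is stored or printed. A simplified standalone re-implementation of the same idea (SQLNull stands in for the sqlnull helper; floats are rendered via repr for brevity):

    class SQLNull:
        # renders as SQL NULL instead of Python's None
        def __repr__(self):
            return 'NULL'

    def py_to_sql(x):
        if isinstance(x, float):
            return repr(x)
        elif isinstance(x, list):
            # list() is required: map() is lazy in Python 3
            return list(map(py_to_sql, x))
        elif x is None:
            return SQLNull()
        else:
            return x

    print(py_to_sql([0.25, None, [1.5, 2.0]]))
    # ['0.25', NULL, ['1.5', '2.0']]
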
diff --git a/src/ports/postgres/modules/deep_learning/madlib_keras_model_selection.py_in b/src/ports/postgres/modules/deep_learning/madlib_keras_model_selection.py_in
index 4434e751..8d195282 100644
--- a/src/ports/postgres/modules/deep_learning/madlib_keras_model_selection.py_in
+++ b/src/ports/postgres/modules/deep_learning/madlib_keras_model_selection.py_in
@@ -19,24 +19,25 @@
 from ast import literal_eval
 from collections import OrderedDict
 from itertools import product as itertools_product
-from keras_model_arch_table import ModelArchSchema
+from deep_learning.keras_model_arch_table import ModelArchSchema
 import numpy as np
 import plpy
 from copy import deepcopy
 
-from madlib_keras_custom_function import CustomFunctionSchema
-from madlib_keras_helper import generate_row_string
-from madlib_keras_validator import MstLoaderInputValidator
-from madlib_keras_wrapper import convert_string_of_args_to_dict
-from madlib_keras_wrapper import parse_and_validate_fit_params
-from madlib_keras_wrapper import parse_and_validate_compile_params
+from deep_learning.madlib_keras_custom_function import CustomFunctionSchema
+from deep_learning.madlib_keras_helper import generate_row_string
+from deep_learning.madlib_keras_validator import MstLoaderInputValidator
+from deep_learning.madlib_keras_wrapper import convert_string_of_args_to_dict
+from deep_learning.madlib_keras_wrapper import parse_and_validate_fit_params
+from deep_learning.madlib_keras_wrapper import parse_and_validate_compile_params
 from utilities.control import MinWarning
 from utilities.utilities import add_postfix, _assert, _assert_equal, extract_keyvalue_params
 from utilities.utilities import quote_ident, get_schema, is_platform_pg
 from utilities.validate_args import table_exists, drop_tables
 
-from tensorflow.keras import losses as losses
-from tensorflow.keras import metrics as metrics
+from keras import losses as losses
+from keras import metrics as metrics
+
 
 class ModelSelectionSchema:
     MST_KEY = 'mst_key'
@@ -50,6 +51,7 @@ class ModelSelectionSchema:
     RANDOM_SEARCH='random'
     OPTIMIZER_PARAMS_LIST = 'optimizer_params_list'
 
+
 @MinWarning("warning")
 class MstLoader():
     """The utility class for loading a model selection table with model parameters.
@@ -130,7 +132,7 @@ class MstLoader():
                                 .format(x, d[x]) for x in sorted(d.keys()))
             dict_dedup[hash_tuple] = string
 
-        return dict_dedup.values()
+        return list(dict_dedup.values())
 
     def find_combinations(self):
         """Backtracking helper for generating the combinations.
@@ -142,7 +144,7 @@ class MstLoader():
         ])
 
         def find_combinations_helper(msts, p, i):
-            param_names = param_grid.keys()
+            param_names = list(param_grid.keys())
             if i < len(param_names):
                 for x in param_grid[param_names[i]]:
                     p[param_names[i]] = x
@@ -230,6 +232,7 @@ class MstLoader():
                                   **locals())
         plpy.execute(insert_summary_query)
 
+
 @MinWarning("warning")
 class MstSearch():
     """
@@ -403,7 +406,7 @@ class MstSearch():
         if not compile_params_lst:
             plpy.error( "compile_params_list cannot be NULL")
         custom_fn_name = []
-        ## Initialize builtin loss/metrics functions
+        # Initialize builtin loss/metrics functions
         builtin_losses = dir(losses)
         builtin_metrics = dir(metrics)
         # Default metrics, since it is not part of the builtin metrics list
@@ -446,8 +449,8 @@ class MstSearch():
         # assuming optimizer_params_list is present
         if ModelSelectionSchema.OPTIMIZER_PARAMS_LIST in self.compile_params_dict:
             for opt_params_dict in self.compile_params_dict[ModelSelectionSchema.OPTIMIZER_PARAMS_LIST]:
-                keys, values = zip(*opt_params_dict.items())
-                opt_configs_params = [dict(zip(keys, v)) for v in itertools_product(*values)]
+                keys, values = list(zip(*list(opt_params_dict.items())))
+                opt_configs_params = [dict(list(zip(keys, v))) for v in itertools_product(*values)]
                 copied_compile_dict = deepcopy(self.compile_params_dict)
                 copied_compile_dict[ModelSelectionSchema.OPTIMIZER_PARAMS_LIST] = opt_configs_params
                 self.grid_combinations_helper(copied_compile_dict, self.fit_params_dict)
@@ -457,8 +460,8 @@ class MstSearch():
     def grid_combinations_helper(self, compile_dict, fit_dict):
         combined_dict = dict(compile_dict, **fit_dict)
         combined_dict[ModelSelectionSchema.MODEL_ID] = self.model_id_list
-        keys, values = zip(*combined_dict.items())
-        all_configs_params = [dict(zip(keys, v)) for v in itertools_product(*values)]
+        keys, values = list(zip(*list(combined_dict.items())))
+        all_configs_params = [dict(list(zip(keys, v))) for v in itertools_product(*values)]
 
         # to separate the compile and fit configs
         for config in all_configs_params:
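
On the grid-search helpers above: the list() wrappers around zip() and items() are 2to3's conservative output; functionally, dict(zip(keys, combo)) already works on Python 3 because unpacking and dict() consume the iterators directly. The expansion itself, standalone:

    from itertools import product

    grid = {'model_id': [1, 2], 'batch_size': [32, 64], 'lr': [0.001, 0.01]}

    # One dict per configuration -- the cartesian product MstSearch builds.
    keys, values = zip(*grid.items())
    configs = [dict(zip(keys, combo)) for combo in product(*values)]

    print(len(configs))  # 8
    print(configs[0])    # {'model_id': 1, 'batch_size': 32, 'lr': 0.001}
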
diff --git a/src/ports/postgres/modules/deep_learning/madlib_keras_model_selection.sql_in b/src/ports/postgres/modules/deep_learning/madlib_keras_model_selection.sql_in
index b72e1110..5809b3ee 100644
--- a/src/ports/postgres/modules/deep_learning/madlib_keras_model_selection.sql_in
+++ b/src/ports/postgres/modules/deep_learning/madlib_keras_model_selection.sql_in
@@ -302,9 +302,8 @@ load_model_selection_table(
 so we first create a model architecture table with two different models.  Use Keras to define
 a model architecture with 1 hidden layer:
 <pre class="example">
-from tensorflow import keras
-from tensorflow.keras.models import Sequential
-from tensorflow.keras.layers import Dense
+from keras.models import Sequential
+from keras.layers import Dense
 model1 = Sequential()
 model1.add(Dense(10, activation='relu', input_shape=(4,)))
 model1.add(Dense(10, activation='relu'))
@@ -852,7 +851,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.load_model_selection_table(
     PythonFunctionBodyOnly(`deep_learning', `madlib_keras_model_selection')
     mst_loader = madlib_keras_model_selection.MstLoader(**globals())
     mst_loader.load()
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.load_model_selection_table(
@@ -884,7 +883,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.generate_model_configs(
 
     mst_loader = madlib_keras_model_selection.MstSearch(**globals())
     mst_loader.load()
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 /*
@@ -904,5 +903,5 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.generate_model_configs(
     PythonFunctionBodyOnly(`deep_learning', `madlib_keras_model_selection')
     mst_loader = madlib_keras_model_selection.MstSearch(**globals())
     mst_loader.load()
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
diff --git a/src/ports/postgres/modules/deep_learning/madlib_keras_predict.py_in b/src/ports/postgres/modules/deep_learning/madlib_keras_predict.py_in
index 1b8f2260..d8d74ee6 100644
--- a/src/ports/postgres/modules/deep_learning/madlib_keras_predict.py_in
+++ b/src/ports/postgres/modules/deep_learning/madlib_keras_predict.py_in
@@ -19,10 +19,10 @@
 
 import plpy
 
-from model_arch_info import *
-from madlib_keras_helper import *
-from madlib_keras_validator import *
-from predict_input_params import PredictParamsProcessor
+from deep_learning.model_arch_info import *
+from deep_learning.madlib_keras_helper import *
+from deep_learning.madlib_keras_validator import *
+from deep_learning.predict_input_params import PredictParamsProcessor
 from utilities.control import MinWarning
 from utilities.utilities import _assert
 from utilities.utilities import add_postfix
@@ -33,14 +33,13 @@ from utilities.validate_args import get_expr_type
 from utilities.validate_args import input_tbl_valid
 from utilities.validate_args import quote_ident
 
-from madlib_keras_wrapper import *
+from deep_learning.madlib_keras_wrapper import *
+
+import keras
+from keras.layers import *
+from keras.models import *
+from keras.optimizers import *
 
-import tensorflow as tf
-from tensorflow import keras
-from tensorflow.keras import backend as K
-from tensorflow.keras.layers import *
-from tensorflow.keras.models import *
-from tensorflow.keras.optimizers import *
 
 class BasePredict():
     def __init__(self, schema_madlib, table_to_validate, test_table, id_col,
@@ -208,7 +207,7 @@ class BasePredict():
         num_classes = get_num_classes(self.model_arch, dependent_var_count)
         for counter, i in enumerate(in_class_values):
             if (i is None) or (i==[None]):
-                self.class_values.append(range(0, num_classes[counter]))
+                self.class_values.append(list(range(0, num_classes[counter])))
             else:
                 self.class_values.append(i)
 
@@ -271,6 +270,7 @@ class Predict(BasePredict):
         InputValidator.validate_input_shape(
             self.test_table, self.independent_varname, input_shape, 1)
 
+
 @MinWarning("warning")
 class PredictBYOM(BasePredict):
     def __init__(self, schema_madlib, model_arch_table, model_id,
@@ -343,6 +343,7 @@ class PredictBYOM(BasePredict):
             self.test_table, self.independent_varname,
             get_input_shape(self.model_arch), 1)
 
+
 def internal_keras_predict_wide(independent_var, independent_var2,
                                 independent_var3, independent_var4, independent_var5,
                                 model_architecture, model_weights,
@@ -355,6 +356,7 @@ def internal_keras_predict_wide(independent_var, independent_var2,
         seg_ids, images_per_seg, gpus_per_host, segments_per_host,
         **kwargs)
 
+
 def internal_keras_predict(independent_var, model_architecture, model_weights,
                            normalizing_const, current_seg_id, seg_ids,
                            images_per_seg, gpus_per_host, segments_per_host,
@@ -407,6 +409,7 @@ def internal_keras_predict(independent_var, model_architecture, model_weights,
         clear_keras_session()
         plpy.error(ex)
 
+
 def predict_help(schema_madlib, message, **kwargs):
     """
     Help function for keras predict
@@ -465,6 +468,7 @@ rank:                   The rank of the estimation.
 
     return help_string.format(schema_madlib=schema_madlib)
 
+
 def predict_byom_help(schema_madlib, message, **kwargs):
     """
     Help function for keras predict
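
The class-values fallback earlier in this file (list(range(0, num_classes[counter]))) is another Python 3 change worth spelling out: range() now returns a lazy range object rather than a list, so it must be materialized before being appended to the list of class levels. Quick illustration:

    num_classes = 3

    fallback = range(0, num_classes)
    print(fallback)  # range(0, 3) -- a lazy object, not the levels

    class_values = list(range(0, num_classes))
    print(class_values)  # [0, 1, 2] -- what the summary table should carry
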
diff --git a/src/ports/postgres/modules/deep_learning/madlib_keras_serializer.py_in b/src/ports/postgres/modules/deep_learning/madlib_keras_serializer.py_in
index 7d968872..1af36bac 100644
--- a/src/ports/postgres/modules/deep_learning/madlib_keras_serializer.py_in
+++ b/src/ports/postgres/modules/deep_learning/madlib_keras_serializer.py_in
@@ -19,6 +19,7 @@
 import numpy as np
 import plpy
 from utilities.utilities import _assert
+from functools import reduce
 
 # TODO
 # 1. Current serializing logic
@@ -49,6 +50,8 @@ and the 1d state. same for fit final
 6. Return the final state from fit final to fit which will then be deserialized
 as 1d weights to be passed on to the evaluate function
 """
+
+
 def get_serialized_1d_weights_from_state(state):
     """
     Output of this function is used to deserialize the output of each iteration
@@ -61,6 +64,7 @@ def get_serialized_1d_weights_from_state(state):
     _ , weights = deserialize_as_image_1d_weights(state)
     return weights.tostring()
 
+
 def serialize_state_with_nd_weights(image_count, model_weights):
     """
     This function is called when the output of keras.get_weights() (list of nd
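
The new "from functools import reduce" import is needed because Python 3 removed reduce() from the builtins. Its typical job in serialization code like this is collapsing a weight shape into a scalar count; a small sketch of that use (illustrative, not the exact MADlib code):

    from functools import reduce
    from operator import mul

    # Number of scalar weights in a layer of shape (10, 32, 3):
    shape = (10, 32, 3)
    n_weights = reduce(mul, shape, 1)
    print(n_weights)  # 960
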
diff --git a/src/ports/postgres/modules/deep_learning/madlib_keras_validator.py_in b/src/ports/postgres/modules/deep_learning/madlib_keras_validator.py_in
index b8094779..befa3c23 100644
--- a/src/ports/postgres/modules/deep_learning/madlib_keras_validator.py_in
+++ b/src/ports/postgres/modules/deep_learning/madlib_keras_validator.py_in
@@ -18,22 +18,22 @@
 # under the License.
 
 import plpy
-from input_data_preprocessor import DistributionRulesOptions
-from keras_model_arch_table import ModelArchSchema
-from model_arch_info import get_num_classes
-from madlib_keras_custom_function import CustomFunctionSchema
-from madlib_keras_helper import CLASS_VALUES_COLNAME
-from madlib_keras_helper import COMPILE_PARAMS_COLNAME
-from madlib_keras_helper import DEPENDENT_VARNAME_COLNAME
-from madlib_keras_helper import DEPENDENT_VARTYPE_COLNAME
-from madlib_keras_helper import MODEL_ID_COLNAME
-from madlib_keras_helper import MODEL_ARCH_TABLE_COLNAME
-from madlib_keras_helper import MODEL_WEIGHTS_COLNAME
-from madlib_keras_helper import NORMALIZING_CONST_COLNAME
-from madlib_keras_helper import DISTRIBUTION_KEY_COLNAME
-from madlib_keras_helper import METRIC_TYPE_COLNAME
-from madlib_keras_helper import INTERNAL_GPU_CONFIG
-from madlib_keras_helper import query_model_configs
+from deep_learning.input_data_preprocessor import DistributionRulesOptions
+from deep_learning.keras_model_arch_table import ModelArchSchema
+from deep_learning.model_arch_info import get_num_classes
+from deep_learning.madlib_keras_custom_function import CustomFunctionSchema
+from deep_learning.madlib_keras_helper import CLASS_VALUES_COLNAME
+from deep_learning.madlib_keras_helper import COMPILE_PARAMS_COLNAME
+from deep_learning.madlib_keras_helper import DEPENDENT_VARNAME_COLNAME
+from deep_learning.madlib_keras_helper import DEPENDENT_VARTYPE_COLNAME
+from deep_learning.madlib_keras_helper import MODEL_ID_COLNAME
+from deep_learning.madlib_keras_helper import MODEL_ARCH_TABLE_COLNAME
+from deep_learning.madlib_keras_helper import MODEL_WEIGHTS_COLNAME
+from deep_learning.madlib_keras_helper import NORMALIZING_CONST_COLNAME
+from deep_learning.madlib_keras_helper import DISTRIBUTION_KEY_COLNAME
+from deep_learning.madlib_keras_helper import METRIC_TYPE_COLNAME
+from deep_learning.madlib_keras_helper import INTERNAL_GPU_CONFIG
+from deep_learning.madlib_keras_helper import query_model_configs
 
 from utilities.minibatch_validation import validate_bytea_var_for_minibatch
 from utilities.utilities import _assert
@@ -47,12 +47,13 @@ from utilities.validate_args import columns_exist_in_table
 from utilities.validate_args import get_expr_type
 from utilities.validate_args import input_tbl_valid
 from utilities.validate_args import output_tbl_valid
-from madlib_keras_wrapper import parse_and_validate_fit_params
-from madlib_keras_wrapper import parse_and_validate_compile_params
-from madlib_keras_custom_function import update_builtin_metrics
-from madlib_keras_custom_function import update_builtin_losses
-import tensorflow.keras.losses as losses
-import tensorflow.keras.metrics as metrics
+from deep_learning.madlib_keras_wrapper import parse_and_validate_fit_params
+from deep_learning.madlib_keras_wrapper import parse_and_validate_compile_params
+from deep_learning.madlib_keras_custom_function import update_builtin_metrics
+from deep_learning.madlib_keras_custom_function import update_builtin_losses
+import keras.losses as losses
+import keras.metrics as metrics
+
 
 class InputValidator:
     @staticmethod
@@ -88,7 +89,6 @@ class InputValidator:
 
         output_tbl_valid(output_table, module_name)
 
-
     @staticmethod
     def validate_pred_type(module_name, pred_type, class_values):
 
@@ -262,6 +262,7 @@ class InputValidator:
                 _assert(accessible_gpus_for_seg[i] != 0,
                     "{0} error: Segment {1} does not have gpu".format(module_name, i))
 
+
 class FitCommonValidator(object):
     def __init__(self, source_table, validation_table, output_model_table,
                  num_iterations, metrics_compute_frequency, warm_start,
@@ -348,7 +349,6 @@ class FitCommonValidator(object):
             output_tbl_valid(self.output_model_table, self.module_name)
             output_tbl_valid(self.output_summary_model_table, self.module_name)
 
-
     def _validate_tables_schema(self):
         # Source table and validation tables must have the same schema
         additional_cols = []
@@ -452,6 +452,7 @@ class FitInputValidator(FitCommonValidator):
         InputValidator.validate_model_arch_table(self.module_name, model_arch_table,
             model_id)
 
+
 class FitMultipleInputValidator(FitCommonValidator):
     def __init__(self, source_table, validation_table, output_model_table,
                  model_selection_table, num_iterations, mst_key_col,
@@ -492,6 +493,7 @@ class FitMultipleInputValidator(FitCommonValidator):
         else:
             output_tbl_valid(self.output_model_info_table, self.module_name)
 
+
 class MstLoaderInputValidator():
     def __init__(self,
                  schema_madlib,
@@ -538,6 +540,7 @@ class MstLoaderInputValidator():
                 self.model_arch_table
             )
         )
+
     def _validate_compile_and_fit_params(self):
         if not self.fit_params_list:
             plpy.error("fit_params_list cannot be NULL")
diff --git a/src/ports/postgres/modules/deep_learning/madlib_keras_wrapper.py_in b/src/ports/postgres/modules/deep_learning/madlib_keras_wrapper.py_in
index 980bf694..6912d0f1 100644
--- a/src/ports/postgres/modules/deep_learning/madlib_keras_wrapper.py_in
+++ b/src/ports/postgres/modules/deep_learning/madlib_keras_wrapper.py_in
@@ -24,11 +24,9 @@ import plpy
 from collections import defaultdict
 from math import ceil
 
-import madlib_keras_serializer
-import madlib_keras_gpu_info
-from madlib_keras_custom_function import CustomFunctionSchema
-from madlib_keras_custom_function import update_builtin_metrics
-from madlib_keras_custom_function import update_builtin_losses
+from deep_learning.madlib_keras_custom_function import CustomFunctionSchema
+from deep_learning.madlib_keras_custom_function import update_builtin_metrics
+from deep_learning.madlib_keras_custom_function import update_builtin_losses
 
 from utilities.utilities import _assert
 from utilities.utilities import is_platform_pg
@@ -36,20 +34,20 @@ from utilities.utilities import current_user
 from utilities.utilities import is_superuser
 
 import tensorflow as tf
-from tensorflow.keras import backend as K
-from tensorflow.keras import utils as keras_utils
-from tensorflow.keras.optimizers import *
-from tensorflow.keras.callbacks import TensorBoard
+from keras import backend as K
+from keras.optimizers import *
+from keras.callbacks import TensorBoard
 
-import tensorflow.keras.optimizers as opt
-import tensorflow.keras.losses as losses
-import tensorflow.keras.metrics as metrics
+import keras.optimizers as opt
+import keras.losses as losses
+import keras.metrics as metrics
 
 CUDA_VISIBLE_DEVICES_KEY = 'CUDA_VISIBLE_DEVICES'
 #######################################################################
 ########### Keras specific functions #####
 #######################################################################
 
+
 def set_cuda_env(value):
     """
     :param value: -1 to disable gpu
@@ -57,6 +55,7 @@ def set_cuda_env(value):
     """
     os.environ[CUDA_VISIBLE_DEVICES_KEY] = value
 
+
 def reset_cuda_env(value):
     """
     This function will reset the cuda env variable. This should only be called
@@ -69,6 +68,7 @@ def reset_cuda_env(value):
         if CUDA_VISIBLE_DEVICES_KEY in os.environ:
             del os.environ[CUDA_VISIBLE_DEVICES_KEY]
 
+
 def enable_xla():
     os.environ['TF_XLA_FLAGS'] = '--tf_xla_auto_jit=2 --tf_xla_cpu_global_jit'
     try:
@@ -76,6 +76,7 @@ def enable_xla():
     except:
         plpy.warning("This version of tensorflow does not fully support XLA auto-cluster JIT optimization.  HINT:  upgrading to tensorflow 1.14.0 may improve performance.")
 
+
 def get_device_name_and_set_cuda_env(gpu_count, seg):
     if gpu_count > 0:
         device_name = '/gpu:0'
@@ -89,10 +90,12 @@ def get_device_name_and_set_cuda_env(gpu_count, seg):
         set_cuda_env('-1')
     return device_name
 
+
 def set_keras_session(device_name, gpu_count, segments_per_host):
     session = get_keras_session(device_name, gpu_count, segments_per_host)
     K.set_session(session)
 
+
 def get_keras_session(device_name, gpu_count, segments_per_host):
     config = tf.ConfigProto()
     if gpu_count > 0:
@@ -103,6 +106,7 @@ def get_keras_session(device_name, gpu_count, segments_per_host):
     enable_xla()
     return session
 
+
 def clear_keras_session(sess = None):
     if sess is None:
         sess = K.get_session()
@@ -120,12 +124,14 @@ def get_gpu_memory_fraction(gpu_count, segments_per_host):
     """
     return 0.9 / ceil(1.0 * segments_per_host / gpu_count)
 
+
 def get_model_shapes(model):
     model_shapes = []
     for a in model.get_weights():
         model_shapes.append(a.shape)
     return model_shapes
 
+
 def compile_and_set_weights(segment_model, compile_params, device_name,
                             serialized_weights):
     model_shapes = get_model_shapes(segment_model)
@@ -148,6 +154,7 @@ Used to convert compile_params and fit_params to actual argument dictionaries
 If strip_quotes is True, each value in the dictionary will be stripped of quotes
 """
 
+
 def convert_string_of_args_to_dict(str_of_args, strip_quotes=True):
     """Uses parenthases matching algorithm to intelligently convert
     a string with valid python code into an argument dictionary"""
@@ -161,10 +168,10 @@ def convert_string_of_args_to_dict(str_of_args, strip_quotes=True):
     key_str = ""
     compile_dict = {}
     for char in str_of_args:
-        if char in dual.keys():
+        if char in list(dual.keys()):
             stack.append(char)
             result_str += char
-        elif char in dual.values() and stack:
+        elif char in list(dual.values()) and stack:
             if dual[stack[-1]] == char:
                 stack.pop(-1)
             result_str += char
@@ -188,6 +195,7 @@ def convert_string_of_args_to_dict(str_of_args, strip_quotes=True):
     compile_dict[key_str]=value_str
     return compile_dict
 
+
 def get_metrics_from_compile_param(str_of_args):
     compile_dict = convert_string_of_args_to_dict(str_of_args)
     metrics = None
@@ -200,6 +208,7 @@ def get_metrics_from_compile_param(str_of_args):
                         "please refer to the documentation"))
     return metrics
 
+
 def get_loss_from_compile_param(str_of_args):
     compile_dict = convert_string_of_args_to_dict(str_of_args)
     loss = None
@@ -210,6 +219,7 @@ def get_loss_from_compile_param(str_of_args):
                     "please refer to the documentation"))
     return loss
 
+
 # Parse the compile parameters and the optimizer.
 def parse_and_validate_compile_params(str_of_args, additional_params=[]):
     """
@@ -243,8 +253,9 @@ def parse_and_validate_compile_params(str_of_args, additional_params=[]):
     _validate_metrics(compile_dict)
     return (opt_name, opt_args, compile_dict)
 
+
 def _validate_metrics(compile_dict):
-    _assert('metrics' not in compile_dict.keys() or
+    _assert('metrics' not in list(compile_dict.keys()) or
             compile_dict['metrics'] is None or
             type(compile_dict['metrics']) is list,
             "wrong input type for compile parameter metrics: multi-output model"
@@ -261,6 +272,7 @@ def _validate_metrics(compile_dict):
         _assert(compile_dict['metrics'][0] not in unsupported_metrics_list,
                 "Metric {0} is not supported.".format(compile_dict['metrics'][0]))
 
+
 # Parse the optimizer name and params.
 def parse_optimizer(compile_dict):
     """
@@ -273,7 +285,7 @@ def parse_optimizer(compile_dict):
     opt_split = compile_dict['optimizer'].split('(')
     opt_name = opt_split[0]
     optimizers = get_optimizers()
-    _assert(opt_name.lower() in [o.lower() for o in optimizers.keys()],
+    _assert(opt_name.lower() in [o.lower() for o in list(optimizers.keys())],
             "model_keras error: invalid optimizer name: {0}".format(opt_name))
 
     # If we use only the optimizer name
@@ -286,11 +298,11 @@ def parse_optimizer(compile_dict):
     else:
         opt_params = opt_split[1][:-1]
         opt_params_array = opt_params.split(',')
-        opt_params_clean = map(split_and_strip, opt_params_array)
+        opt_params_clean = list(map(split_and_strip, opt_params_array))
         key_value_params = { x[0] : x[1] for x in opt_params_clean}
 
         final_args = {}
-        for key,value in key_value_params.iteritems():
+        for key,value in key_value_params.items():
             if value == 'None':
                 final_args[key] = None
             elif value == 'True' or value == 'False':
@@ -320,6 +332,7 @@ def parse_and_validate_fit_params(fit_param_str, current_seg_id=-1):
     else:
         return {}
 
+
 # Parse the callback fit params and create the TensorBoard object in the dictionary
 def parse_callbacks(callbacks, current_seg_id=-1):
     callbacks = callbacks.strip("'")
@@ -345,10 +358,11 @@ def parse_callbacks(callbacks, current_seg_id=-1):
 
     return [TensorBoard(**tb_params_dict)]
 
+
 # Validate the keys of the given dictionary and run literal_eval on the
 # user-defined subset
 def validate_and_literal_eval_keys(keys_dict, literal_eval_list, accepted_list):
-    for ckey in keys_dict.keys():
+    for ckey in list(keys_dict.keys()):
         _assert(ckey in accepted_list,
                 "{0} is not currently accepted as a parameter. ".format(ckey))
         if ckey in literal_eval_list:
@@ -359,11 +373,13 @@ def validate_and_literal_eval_keys(keys_dict, literal_eval_list, accepted_list):
                             "please refer to the documentation").format(ckey, keys_dict[ckey]))
     return keys_dict
 
+
 # Split and strip the whitespace of key=value formatted strings
 def split_and_strip(x):
     y = x.split('=')
     return (y[0].strip(),y[1].strip())
 
+
 # Return the list of keras optimizers
 def get_optimizers():
     optimizers = dict()
@@ -376,6 +392,7 @@ def get_optimizers():
             optimizers[n] = optimizer
     return optimizers
 
+
 # Run the keras.compile with the given parameters
 def compile_model(model, compile_params, custom_function_map=None):
     optimizers = get_optimizers()
@@ -397,25 +414,27 @@ def compile_model(model, compile_params, custom_function_map=None):
     compile_dict['optimizer'] = optimizers[opt_name](**final_args) if final_args else opt_name
     model.compile(**compile_dict)
 
+
 def validate_compile_param_types(compile_dict):
-    _assert('loss_weights' not in compile_dict.keys() or
+    _assert('loss_weights' not in list(compile_dict.keys()) or
             compile_dict['loss_weights'] is None or
             type(compile_dict['loss_weights']) is list or
             type(compile_dict['loss_weights']) is dict,
             "wrong input type for compile parameter loss_weights: only list "
             "and dictionary are supported.")
 
-    _assert('weighted_metrics' not in compile_dict.keys() or
+    _assert('weighted_metrics' not in list(compile_dict.keys()) or
             compile_dict['weighted_metrics'] is None or
             type(compile_dict['weighted_metrics']) is list,
             "wrong input type for compile parameter weighted_metrics: only list "
             "is supported.")
 
-    _assert('sample_weight_mode' not in compile_dict.keys() or
+    _assert('sample_weight_mode' not in list(compile_dict.keys()) or
             compile_dict['sample_weight_mode'] is None or
             compile_dict['sample_weight_mode'] == "temporal",
             """compile parameter sample_weight_mode can only be "temporal" or None""")
 
+
 # Returns a mapping from each custom function name to its corresponding object
 def query_custom_functions_map(object_table, custom_fn_names):
     """
@@ -460,6 +479,7 @@ def query_custom_functions_map(object_table, custom_fn_names):
     custom_fn_map_obj = dill.dumps(custom_fn_map)
     return custom_fn_map_obj
 
+
 def get_custom_functions_list(compile_params):
     """
     Args:
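
Among this file's changes, the iteritems() fix in parse_optimizer is the one Python 2 idiom with no runtime equivalent left: dict.iteritems() is gone in Python 3, and items() now returns the lazy view that iteritems() used to provide. Minimal before/after:

    params = {'lr': '0.01', 'epsilon': 'None'}

    # Python 2 only -- AttributeError on Python 3:
    #     for key, value in params.iteritems(): ...

    # Python 3 (also valid on Python 2, where items() copies):
    final_args = {}
    for key, value in params.items():
        final_args[key] = None if value == 'None' else value
    print(final_args)  # {'lr': '0.01', 'epsilon': None}
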
diff --git a/src/ports/postgres/modules/deep_learning/model_arch_info.py_in b/src/ports/postgres/modules/deep_learning/model_arch_info.py_in
index 0081e58e..02bc5bdd 100644
--- a/src/ports/postgres/modules/deep_learning/model_arch_info.py_in
+++ b/src/ports/postgres/modules/deep_learning/model_arch_info.py_in
@@ -22,7 +22,7 @@ m4_changequote(`<!', `!>')
 import sys
 import json
 import plpy
-from keras_model_arch_table import ModelArchSchema
+from deep_learning.keras_model_arch_table import ModelArchSchema
 
 def _get_layers(model_arch):
     d = json.loads(model_arch)
diff --git a/src/ports/postgres/modules/deep_learning/predict_input_params.py_in b/src/ports/postgres/modules/deep_learning/predict_input_params.py_in
index d9ba0913..b07a03a3 100644
--- a/src/ports/postgres/modules/deep_learning/predict_input_params.py_in
+++ b/src/ports/postgres/modules/deep_learning/predict_input_params.py_in
@@ -18,16 +18,17 @@
 # under the License.
 
 import plpy
-from keras_model_arch_table import ModelArchSchema
+from deep_learning.keras_model_arch_table import ModelArchSchema
 from utilities.utilities import add_postfix
 from utilities.validate_args import input_tbl_valid
 
-from madlib_keras_helper import CLASS_VALUES_COLNAME
-from madlib_keras_helper import DEPENDENT_VARNAME_COLNAME
-from madlib_keras_helper import DEPENDENT_VARTYPE_COLNAME
-from madlib_keras_helper import MODEL_WEIGHTS_COLNAME
-from madlib_keras_helper import NORMALIZING_CONST_COLNAME
-from madlib_keras_helper import create_summary_view
+from deep_learning.madlib_keras_helper import CLASS_VALUES_COLNAME
+from deep_learning.madlib_keras_helper import DEPENDENT_VARNAME_COLNAME
+from deep_learning.madlib_keras_helper import DEPENDENT_VARTYPE_COLNAME
+from deep_learning.madlib_keras_helper import MODEL_WEIGHTS_COLNAME
+from deep_learning.madlib_keras_helper import NORMALIZING_CONST_COLNAME
+from deep_learning.madlib_keras_helper import create_summary_view
+
 
 class PredictParamsProcessor:
     def __init__(self, model_table, module_name, mst_key):
diff --git a/src/ports/postgres/modules/deep_learning/test/keras_model_arch_table.sql_in b/src/ports/postgres/modules/deep_learning/test/keras_model_arch_table.sql_in
index 90eb3358..0d0bab56 100644
--- a/src/ports/postgres/modules/deep_learning/test/keras_model_arch_table.sql_in
+++ b/src/ports/postgres/modules/deep_learning/test/keras_model_arch_table.sql_in
@@ -145,7 +145,7 @@ load_query = plpy.prepare("""SELECT load_keras_model(
                         $1, $2)
                     """, ['json','bytea'])
 plpy.execute(load_query, [model.to_json(), weights_bytea])
-$$ LANGUAGE plpythonu VOLATILE;
+$$ LANGUAGE plpython3u VOLATILE;
 
 DROP TABLE IF EXISTS test_keras_model_arch_table;
 SELECT create_model_arch_transfer_learning();
diff --git a/src/ports/postgres/modules/deep_learning/test/madlib_keras_custom_function.setup.sql_in b/src/ports/postgres/modules/deep_learning/test/madlib_keras_custom_function.setup.sql_in
index 13893266..9439c654 100644
--- a/src/ports/postgres/modules/deep_learning/test/madlib_keras_custom_function.setup.sql_in
+++ b/src/ports/postgres/modules/deep_learning/test/madlib_keras_custom_function.setup.sql_in
@@ -29,7 +29,7 @@ def test_sum_fn(a, b):
 
 pb=dill.dumps(test_sum_fn)
 return pb
-$$ language plpythonu;
+$$ language plpython3u;
 
 CREATE OR REPLACE FUNCTION read_custom_function(pb bytea, arg1 int, arg2 int)
 RETURNS INTEGER AS
@@ -38,7 +38,7 @@ import dill
 obj=dill.loads(pb)
 res=obj(arg1, arg2)
 return res
-$$ language plpythonu;
+$$ language plpython3u;
 
 -- Custom loss function returns 0 as the loss
 CREATE OR REPLACE FUNCTION custom_function_zero_object()
@@ -51,7 +51,7 @@ def test_custom_fn(a, b):
 
 pb=dill.dumps(test_custom_fn)
 return pb
-$$ language plpythonu;
+$$ language plpython3u;
 
 CREATE OR REPLACE FUNCTION custom_function_one_object()
 RETURNS BYTEA AS
@@ -63,4 +63,4 @@ def test_custom_fn1(a, b):
 
 pb=dill.dumps(test_custom_fn1)
 return pb
-$$ language plpythonu;
+$$ language plpython3u;
diff --git a/src/ports/postgres/modules/deep_learning/test/madlib_keras_fit.sql_in b/src/ports/postgres/modules/deep_learning/test/madlib_keras_fit.sql_in
index ed52ce7d..3e212a26 100644
--- a/src/ports/postgres/modules/deep_learning/test/madlib_keras_fit.sql_in
+++ b/src/ports/postgres/modules/deep_learning/test/madlib_keras_fit.sql_in
@@ -484,7 +484,7 @@ CREATE OR REPLACE FUNCTION get_gd_keys_len()
 RETURNS INTEGER AS
 $$
 return len(GD.keys())
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 
 -- Test GD is cleared after a successful run
 -- This test calls fit with different models which will run in the same segment slice(process).
diff --git a/src/ports/postgres/modules/deep_learning/test/madlib_keras_fit_multiple.sql_in b/src/ports/postgres/modules/deep_learning/test/madlib_keras_fit_multiple.sql_in
index 07c4325d..a3e27eeb 100644
--- a/src/ports/postgres/modules/deep_learning/test/madlib_keras_fit_multiple.sql_in
+++ b/src/ports/postgres/modules/deep_learning/test/madlib_keras_fit_multiple.sql_in
@@ -72,7 +72,7 @@ $$
         model_selection_table,
         1
     )
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(<!__HAS_FUNCTION_PROPERTIES__!>, MODIFIES SQL DATA);
 
 CREATE OR REPLACE FUNCTION test_init_schedule(
@@ -89,7 +89,7 @@ $$
         err_msg = 'FitMultiple.init_schedule_tbl() returned False'
 
     return err_msg
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__',MODIFIES SQL DATA);
 
 CREATE OR REPLACE FUNCTION test_rotate_schedule(
@@ -103,7 +103,7 @@ $$
 
     fit_mult.rotate_schedule_tbl()
 
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__',MODIFIES SQL DATA);
 
 -- Mock fit_transition function, for testing
@@ -162,7 +162,7 @@ $$
     else:
         GD['transition_function_params'][dist_key]['reset'] = True
         return serialized_weights
-$$ LANGUAGE plpythonu VOLATILE;
+$$ LANGUAGE plpython3u VOLATILE;
 
 CREATE OR REPLACE FUNCTION validate_transition_function_params(
     current_seg_id                       INTEGER,
@@ -208,7 +208,7 @@ $$
             )
 
     return 'PASS'  # actual params match expected params
-$$ LANGUAGE plpythonu VOLATILE;
+$$ LANGUAGE plpython3u VOLATILE;
 
 -- Helper to rotate an array of ints
 CREATE OR REPLACE FUNCTION rotate_keys(
@@ -216,14 +216,14 @@ CREATE OR REPLACE FUNCTION rotate_keys(
 ) RETURNS INTEGER[]
 AS $$
    return keys[-1:] + keys[:-1]
-$$ LANGUAGE plpythonu IMMUTABLE;
+$$ LANGUAGE plpython3u IMMUTABLE;
 
 CREATE OR REPLACE FUNCTION reverse_rotate_keys(
     keys    INTEGER[]
 ) RETURNS INTEGER[]
 AS $$
    return keys[1:] + keys[:1]
-$$ LANGUAGE plpythonu IMMUTABLE;
+$$ LANGUAGE plpython3u IMMUTABLE;
 
 CREATE OR REPLACE FUNCTION setup_model_tables(
     input_table TEXT,
@@ -251,7 +251,7 @@ $$
         WHERE mst_key IS NOT NULL;
     """.format(model_out=fit_mult.model_output_tbl)
     plpy.execute(q)
-$$ LANGUAGE plpythonu VOLATILE;
+$$ LANGUAGE plpython3u VOLATILE;
 
 -- Updates dist keys in src table and internal fit_mult class variables
 --    num_data_segs can be larger than actual number of segments, since this
@@ -328,7 +328,7 @@ $$
             exp_table=expected_distkey_mappings_tbl
         )
     plpy.execute(create_distkey_map_tbl_cmd)
-$$ LANGUAGE plpythonu VOLATILE;
+$$ LANGUAGE plpython3u VOLATILE;
 
 CREATE OR REPLACE FUNCTION test_run_training(
     source_table TEXT,
@@ -355,7 +355,7 @@ $$
     fit_mult.use_caching = use_caching
 
     fit_mult.run_training(hop=hop, is_very_first_hop=is_very_first_hop)
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__',MODIFIES SQL DATA);
 
 CREATE OR REPLACE FUNCTION validate_mst_key_order(output_tbl TEXT, expected_tbl TEXT)
diff --git a/src/ports/postgres/modules/deep_learning/test/madlib_keras_transfer_learning.sql_in b/src/ports/postgres/modules/deep_learning/test/madlib_keras_transfer_learning.sql_in
index 125588c1..ecf9715f 100644
--- a/src/ports/postgres/modules/deep_learning/test/madlib_keras_transfer_learning.sql_in
+++ b/src/ports/postgres/modules/deep_learning/test/madlib_keras_transfer_learning.sql_in
@@ -280,7 +280,7 @@ def test_custom_fn(a, b):
 
 pb=dill.dumps(test_custom_fn)
 return pb
-$$ language plpythonu;
+$$ language plpython3u;
 
 
 DROP TABLE IF EXISTS test_custom_function_table;
diff --git a/src/ports/postgres/modules/elastic_net/elastic_net.py_in b/src/ports/postgres/modules/elastic_net/elastic_net.py_in
index 4364aa89..fd7423fc 100644
--- a/src/ports/postgres/modules/elastic_net/elastic_net.py_in
+++ b/src/ports/postgres/modules/elastic_net/elastic_net.py_in
@@ -1,12 +1,12 @@
 
 import plpy
-from elastic_net_models import _elastic_net_gaussian_igd_train
-from elastic_net_models import _elastic_net_gaussian_fista_train
-from elastic_net_models import _elastic_net_binomial_fista_train
-from elastic_net_models import _elastic_net_binomial_igd_train
-from elastic_net_utils import _generate_warmup_lambda_sequence
+from elastic_net.elastic_net_models import _elastic_net_gaussian_igd_train
+from elastic_net.elastic_net_models import _elastic_net_gaussian_fista_train
+from elastic_net.elastic_net_models import _elastic_net_binomial_fista_train
+from elastic_net.elastic_net_models import _elastic_net_binomial_igd_train
+from elastic_net.elastic_net_utils import _generate_warmup_lambda_sequence
 
-from elastic_net_utils import BINOMIAL_FAMILIES, GAUSSIAN_FAMILIES, OPTIMIZERS
+from elastic_net.elastic_net_utils import BINOMIAL_FAMILIES, GAUSSIAN_FAMILIES, OPTIMIZERS
 
 from utilities.control import OptimizerControl
 from utilities.validate_args import is_col_array
@@ -416,8 +416,8 @@ def _get_cv_optimizer_params(param_str, alpha, smallest_lambda):
         "n_lambdas": (15, int),
         "validation_result": (None, str)
     }
-    param_defaults = dict([(k, v[0]) for k, v in cv_params_defaults.items()])
-    param_types = dict([(k, v[1]) for k, v in cv_params_defaults.items()])
+    param_defaults = dict([(k, v[0]) for k, v in list(cv_params_defaults.items())])
+    param_types = dict([(k, v[1]) for k, v in list(cv_params_defaults.items())])
 
     if not param_str:
         return param_defaults, param_str
@@ -434,10 +434,10 @@ def _get_cv_optimizer_params(param_str, alpha, smallest_lambda):
             else:
                 name_value['lambda_value'] = [float(smallest_lambda)]
         else:
-            name_value['lambda_value'] = map(float, name_value['lambda_value'])
+            name_value['lambda_value'] = list(map(float, name_value['lambda_value']))
             # no warmup when cross validating on lambda
             param_str += ', warmup=False'
-        name_value['alpha'] = map(float, name_value['alpha'])
+        name_value['alpha'] = list(map(float, name_value['alpha']))
     return name_value, param_str
 # ------------------------------------------------------------------------
 
@@ -511,7 +511,7 @@ def analyze_input_str(schema_madlib, tbl_source,
     outstr_array = []
     if col_ind_var == "*":
         col_types_dict = dict(get_cols_and_types(tbl_source))
-        cols = col_types_dict.keys()
+        cols = list(col_types_dict.keys())
 
         s = _string_to_array(excluded) if excluded is not None else []
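
The list() wrappers in this hunk account for two Python 3 semantic changes:
dict.items() now returns a view, and map() returns a one-shot iterator. A
minimal standalone sketch (not MADlib code) of both behaviors:

    # items() yields a view; a dict comprehension expresses the
    # defaults/types split directly, with no list() wrapper needed.
    cv_params_defaults = {"n_folds": (0, int), "n_lambdas": (15, int)}
    param_defaults = {k: v[0] for k, v in cv_params_defaults.items()}
    param_types = {k: v[1] for k, v in cv_params_defaults.items()}
    assert param_defaults == {"n_folds": 0, "n_lambdas": 15}

    # map() is lazy and single-pass, so results that are indexed or
    # iterated twice must be materialized first.
    lambdas = map(float, ["0.1", "0.5"])
    assert list(lambdas) == [0.1, 0.5]
    assert list(lambdas) == []                  # already exhausted
    lambdas = list(map(float, ["0.1", "0.5"]))  # reusable and indexable
    assert lambdas[0] == 0.1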
 
diff --git a/src/ports/postgres/modules/elastic_net/elastic_net.sql_in b/src/ports/postgres/modules/elastic_net/elastic_net.sql_in
index 011b24e3..2c538250 100644
--- a/src/ports/postgres/modules/elastic_net/elastic_net.sql_in
+++ b/src/ports/postgres/modules/elastic_net/elastic_net.sql_in
@@ -939,7 +939,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.elastic_net_train (
             col_ind_var, regress_family, alpha, lambda_value,
             standardize, grouping_col, optimizer, optimizer_params,
             excluded, max_iter, tolerance)
-        $$ LANGUAGE plpythonu
+        $$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 ------------------------------------------------------------------------
@@ -1084,7 +1084,7 @@ m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.elastic_net_train ()
 RETURNS TEXT AS $$
 PythonFunction(elastic_net, elastic_net, elastic_net_help)
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
 
 ------------------------------------------------------------------------
@@ -1100,7 +1100,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.elastic_net_train (
     family_or_optimizer  TEXT
 ) RETURNS TEXT AS $$
 PythonFunction(elastic_net, elastic_net, elastic_net_help)
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
 
 ------------------------------------------------------------------------
@@ -1122,7 +1122,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.elastic_net_predict (
     tbl_predict     TEXT
 ) RETURNS VOID AS $$
 PythonFunction(elastic_net, elastic_net, elastic_net_predict_all)
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 ------------------------------------------------------------------------
diff --git a/src/ports/postgres/modules/elastic_net/elastic_net_generate_result.py_in b/src/ports/postgres/modules/elastic_net/elastic_net_generate_result.py_in
index 15881b45..107fc12e 100644
--- a/src/ports/postgres/modules/elastic_net/elastic_net_generate_result.py_in
+++ b/src/ports/postgres/modules/elastic_net/elastic_net_generate_result.py_in
@@ -1,6 +1,6 @@
 import plpy
-from elastic_net_utils import _process_results
-from elastic_net_utils import _compute_log_likelihood
+from elastic_net.elastic_net_utils import _process_results
+from elastic_net.elastic_net_utils import _compute_log_likelihood
 from utilities.validate_args import get_cols_and_types
 from utilities.validate_args import quote_ident
 from utilities.utilities import split_quoted_delimited_str
@@ -186,7 +186,7 @@ def build_output_table(res, grouping_column, grouping_col_list,
         if grouping_column:
             grouping_info = ",".join([quote_literal(str(res[grp_col.strip()]))
                                       for grp_col in grouping_col_list
-                                      if grp_col.strip() in res.keys()])
+                                      if grp_col.strip() in list(res.keys())])
         else:
             grouping_info = ""
         if grouping_info:
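
One note on the hunk above: wrapping keys() in list() for a membership test
is harmless on Python 3 but not required, since `x in d` is the idiomatic
constant-time form. A small sketch (not MADlib code):

    res = {"grp_a": 1, "grp_b": 2}
    # All three spellings agree; the last one avoids building a list.
    assert (("grp_a" in list(res.keys()))
            == ("grp_a" in res.keys())
            == ("grp_a" in res))
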
diff --git a/src/ports/postgres/modules/elastic_net/elastic_net_models.py_in b/src/ports/postgres/modules/elastic_net/elastic_net_models.py_in
index b7ea0160..1e9675e8 100644
--- a/src/ports/postgres/modules/elastic_net/elastic_net_models.py_in
+++ b/src/ports/postgres/modules/elastic_net/elastic_net_models.py_in
@@ -1,5 +1,5 @@
-from elastic_net_optimizer_fista import _elastic_net_fista_train
-from elastic_net_optimizer_igd import _elastic_net_igd_train
+from elastic_net.elastic_net_optimizer_fista import _elastic_net_fista_train
+from elastic_net.elastic_net_optimizer_igd import _elastic_net_igd_train
 import plpy
 
 # ========================================================================
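
The package-qualified imports above, like the analogous ones throughout
this commit, are needed because Python 3 removed implicit relative imports.
A comment-only sketch of the rule, assuming the modules are installed as a
package named elastic_net as the paths suggest:

    # Package layout assumed by the patch:
    #     elastic_net/
    #         elastic_net_models.py
    #         elastic_net_optimizer_fista.py
    #         elastic_net_optimizer_igd.py
    #
    # Python 2 let a sibling module be imported implicitly:
    #     from elastic_net_optimizer_igd import _elastic_net_igd_train
    # Python 3 requires the absolute form used in this patch,
    #     from elastic_net.elastic_net_optimizer_igd import _elastic_net_igd_train
    # or an explicit relative import from inside the package:
    #     from .elastic_net_optimizer_igd import _elastic_net_igd_train
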
diff --git a/src/ports/postgres/modules/elastic_net/elastic_net_optimizer_fista.py_in b/src/ports/postgres/modules/elastic_net/elastic_net_optimizer_fista.py_in
index 59c00030..f45395f0 100644
--- a/src/ports/postgres/modules/elastic_net/elastic_net_optimizer_fista.py_in
+++ b/src/ports/postgres/modules/elastic_net/elastic_net_optimizer_fista.py_in
@@ -1,10 +1,10 @@
 import plpy
-from elastic_net_generate_result import _elastic_net_generate_result
-from elastic_net_utils import _normalize_data
-from elastic_net_utils import _elastic_net_validate_args
-from elastic_net_utils import _compute_average_sq
-from elastic_net_utils import _generate_warmup_lambda_sequence
-from elastic_net_utils import _process_warmup_lambdas
+from elastic_net.elastic_net_generate_result import _elastic_net_generate_result
+from elastic_net.elastic_net_utils import _normalize_data
+from elastic_net.elastic_net_utils import _elastic_net_validate_args
+from elastic_net.elastic_net_utils import _compute_average_sq
+from elastic_net.elastic_net_utils import _generate_warmup_lambda_sequence
+from elastic_net.elastic_net_utils import _process_warmup_lambdas
 from utilities.control import MinWarning
 from utilities.in_mem_group_control import GroupIterationController
 from utilities.utilities import unique_string
@@ -31,8 +31,8 @@ def _fista_params_parser(optimizer_params, lambda_value, tolerance, schema_madli
         "activeset_tolerance": (tolerance, float),
         "warmup_tolerance": (tolerance, float)
     }
-    param_defaults = dict([(k, v[0]) for k, v in defaults_and_types.items()])
-    param_types = dict([(k, v[1]) for k, v in defaults_and_types.items()])
+    param_defaults = dict([(k, v[0]) for k, v in list(defaults_and_types.items())])
+    param_types = dict([(k, v[1]) for k, v in list(defaults_and_types.items())])
 
     if not optimizer_params:
         return param_defaults
@@ -328,7 +328,7 @@ def _compute_fista(schema_madlib, func_step_aggregate, func_state_diff,
     """
     args = locals()
 
-    for k, v in kwargs.iteritems():
+    for k, v in kwargs.items():
         if k not in args:
             args.update({k: v})
     iterationCtrl = GroupIterationController(args)
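
The kwargs loop above shows the other recurring rename in this commit:
dict.iteritems() no longer exists in Python 3, and items() plays its role.
A minimal standalone sketch (not MADlib code):

    kwargs = {"max_iter": 100, "tolerance": 1e-6}
    args = {"tolerance": 1e-4}
    for k, v in kwargs.items():        # Python 2 spelling: iteritems()
        if k not in args:
            args[k] = v                # mirrors args.update({k: v})
    assert args == {"tolerance": 1e-4, "max_iter": 100}
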
diff --git a/src/ports/postgres/modules/elastic_net/elastic_net_optimizer_igd.py_in b/src/ports/postgres/modules/elastic_net/elastic_net_optimizer_igd.py_in
index 6bc51d2d..3814316d 100644
--- a/src/ports/postgres/modules/elastic_net/elastic_net_optimizer_igd.py_in
+++ b/src/ports/postgres/modules/elastic_net/elastic_net_optimizer_igd.py_in
@@ -2,15 +2,15 @@
 import plpy
 from utilities.utilities import unique_string
 from utilities.in_mem_group_control import GroupIterationController
-from elastic_net_utils import _compute_means
-from elastic_net_utils import _normalize_data
-from elastic_net_utils import _compute_scales
-from elastic_net_utils import _elastic_net_validate_args
+from elastic_net.elastic_net_utils import _compute_means
+from elastic_net.elastic_net_utils import _normalize_data
+from elastic_net.elastic_net_utils import _compute_scales
+from elastic_net.elastic_net_utils import _elastic_net_validate_args
 from utilities.utilities import _array_to_string
-from elastic_net_utils import _compute_average_sq
-from elastic_net_utils import _generate_warmup_lambda_sequence
-from elastic_net_utils import _process_warmup_lambdas
-from elastic_net_generate_result import _elastic_net_generate_result
+from elastic_net.elastic_net_utils import _compute_average_sq
+from elastic_net.elastic_net_utils import _generate_warmup_lambda_sequence
+from elastic_net.elastic_net_utils import _process_warmup_lambdas
+from elastic_net.elastic_net_generate_result import _elastic_net_generate_result
 from utilities.utilities import extract_keyvalue_params
 from utilities.control import MinWarning
 
@@ -34,8 +34,8 @@ def _igd_params_parser(optimizer_params, lambda_value, tolerance, schema_madlib)
         "step_decay": (0.0, float),
         "warmup_tolerance": (tolerance, float)
     }
-    param_defaults = dict([(k, v[0]) for k, v in defaults_and_types.items()])
-    param_types = dict([(k, v[1]) for k, v in defaults_and_types.items()])
+    param_defaults = dict([(k, v[0]) for k, v in list(defaults_and_types.items())])
+    param_types = dict([(k, v[1]) for k, v in list(defaults_and_types.items())])
 
     if not optimizer_params:
         return param_defaults
@@ -330,7 +330,7 @@ def _compute_igd(schema_madlib, func_step_aggregate, func_state_diff,
     """
     args = locals()
 
-    for k, v in kwargs.iteritems():
+    for k, v in kwargs.items():
         if k not in args:
             args.update({k: v})
     iterationCtrl = GroupIterationController(args)
diff --git a/src/ports/postgres/modules/glm/glm.py_in b/src/ports/postgres/modules/glm/glm.py_in
index c5661472..0f333a61 100644
--- a/src/ports/postgres/modules/glm/glm.py_in
+++ b/src/ports/postgres/modules/glm/glm.py_in
@@ -135,15 +135,15 @@ def __extract_family_params(schema_madlib, family_params):
         gamma=["inverse", "log", "identity"],
         inverse_gaussian=["sqr_inverse", "identity", "log", "inverse"],
         binomial=["logit", "probit"])
-    for k, v in family_params_dict.iteritems():
+    for k, v in family_params_dict.items():
         if k == "family":
-            if v not in family_link.keys():
+            if v not in list(family_link.keys()):
                 plpy.error("GLM error: {param_value} is not a valid "
                            "family!".format(param_value=v))
 
-    if "family" not in family_params_dict.keys():
+    if "family" not in list(family_params_dict.keys()):
         plpy.error("GLM error: Required parameter family is missing!")
-    if "link" in family_params_dict.keys():
+    if "link" in list(family_params_dict.keys()):
         if family_params_dict["link"] not in family_link[family_params_dict["family"]]:
             plpy.error("GLM error: Invalid link function {link_func} for "
                        "family {family}!".format(link_func=family_params_dict["link"],
diff --git a/src/ports/postgres/modules/glm/glm.sql_in b/src/ports/postgres/modules/glm/glm.sql_in
index 3e4aa442..67d54f2b 100644
--- a/src/ports/postgres/modules/glm/glm.sql_in
+++ b/src/ports/postgres/modules/glm/glm.sql_in
@@ -1136,7 +1136,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.glm(
     verbose             boolean
 ) RETURNS void AS $$
 PythonFunction(glm, glm, glm)
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 -- entry functions with default values
@@ -1184,7 +1184,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.glm(
     message    TEXT
 ) RETURNS TEXT AS $$
     PythonFunction(glm, glm, glm_help_msg)
-$$ LANGUAGE plpythonu IMMUTABLE
+$$ LANGUAGE plpython3u IMMUTABLE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.glm()
@@ -1274,7 +1274,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.glm_predict(
     message    TEXT
 ) RETURNS TEXT AS $$
     PythonFunction(glm, glm, glm_predict_help_msg)
-$$ LANGUAGE plpythonu IMMUTABLE
+$$ LANGUAGE plpython3u IMMUTABLE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.glm_predict()
@@ -1288,13 +1288,13 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.glm_predict_poisson(
     message    TEXT
 ) RETURNS TEXT AS $$
     PythonFunction(glm, glm, glm_predict_poisson_help_msg)
-$$ LANGUAGE plpythonu IMMUTABLE
+$$ LANGUAGE plpython3u IMMUTABLE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.glm_predict_binomial(
     message    TEXT
 ) RETURNS TEXT AS $$
     PythonFunction(glm, glm, glm_predict_binomial_help_msg)
-$$ LANGUAGE plpythonu IMMUTABLE
+$$ LANGUAGE plpython3u IMMUTABLE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 -------------------------------------------------------------
diff --git a/src/ports/postgres/modules/glm/multinom.py_in b/src/ports/postgres/modules/glm/multinom.py_in
index 7adfcb02..7cf36a6b 100644
--- a/src/ports/postgres/modules/glm/multinom.py_in
+++ b/src/ports/postgres/modules/glm/multinom.py_in
@@ -22,8 +22,8 @@ from utilities.validate_args import output_tbl_valid
 from utilities.validate_args import cols_in_tbl_valid
 from utilities.validate_args import columns_exist_in_table
 
-from glm import __glm_validate_args
-from glm import __extract_optim_params
+from glm.glm import __glm_validate_args
+from glm.glm import __extract_optim_params
 
 # ========================================================================
 
@@ -121,7 +121,7 @@ def __multinom_validate_args(
 
     if not (isinstance(category_list[0], int) or
             isinstance(category_list[0], float) or
-            isinstance(category_list[0], long) or
+            # NOTE: int subsumes Python 2's long, so no extra check is needed
             isinstance(category_list[0], str)):
         plpy.error("Multinom error: Given category type is not supported!\n"
                    "Only numeric, character, binary data and enumerated types "
diff --git a/src/ports/postgres/modules/glm/multiresponseglm.sql_in b/src/ports/postgres/modules/glm/multiresponseglm.sql_in
index 5fa02251..02aa41bc 100644
--- a/src/ports/postgres/modules/glm/multiresponseglm.sql_in
+++ b/src/ports/postgres/modules/glm/multiresponseglm.sql_in
@@ -630,7 +630,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.multinom(
     verbose             boolean
 ) RETURNS void AS $$
 PythonFunction(glm, multinom, multinom)
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 -- entry functions with default values
@@ -699,7 +699,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.multinom(
     message    TEXT
 ) RETURNS TEXT AS $$
     PythonFunction(glm, multinom, multinom_help_msg)
-$$ LANGUAGE plpythonu IMMUTABLE
+$$ LANGUAGE plpython3u IMMUTABLE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.multinom()
@@ -722,7 +722,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.multinom_predict(
     id_column           varchar
 ) RETURNS void AS $$
 PythonFunction(glm, multinom, multinom_predict)
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.multinom_predict(
@@ -761,7 +761,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.multinom_predict(
     message    TEXT
 ) RETURNS TEXT AS $$
     PythonFunction(glm, multinom, multinom_predict_help_msg)
-$$ LANGUAGE plpythonu IMMUTABLE
+$$ LANGUAGE plpython3u IMMUTABLE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.multinom_predict()
diff --git a/src/ports/postgres/modules/glm/ordinal.py_in b/src/ports/postgres/modules/glm/ordinal.py_in
index 4cf4cfd5..1543899c 100644
--- a/src/ports/postgres/modules/glm/ordinal.py_in
+++ b/src/ports/postgres/modules/glm/ordinal.py_in
@@ -21,8 +21,8 @@ from utilities.validate_args import input_tbl_valid
 from utilities.validate_args import output_tbl_valid
 from utilities.validate_args import cols_in_tbl_valid
 
-from glm import __glm_validate_args
-from glm import __extract_optim_params
+from glm.glm import __glm_validate_args
+from glm.glm import __extract_optim_params
 
 # ========================================================================
 
@@ -118,7 +118,7 @@ def __ordinal_validate_args(
 
     if not (isinstance(category_list[0], int) or
             isinstance(category_list[0], float) or
-            isinstance(category_list[0], long) or
+            # NOTE: int subsumes Python 2's long, so no extra check is needed
             isinstance(category_list[0], str)):
         plpy.error("Ordinal error: Given category type is not supported!\n"
                    "Only numeric, character, binary data and enumerated types "
diff --git a/src/ports/postgres/modules/glm/ordinal.sql_in b/src/ports/postgres/modules/glm/ordinal.sql_in
index 7d1d7d5c..16778217 100644
--- a/src/ports/postgres/modules/glm/ordinal.sql_in
+++ b/src/ports/postgres/modules/glm/ordinal.sql_in
@@ -596,7 +596,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.ordinal(
     verbose             boolean
 ) RETURNS void AS $$
 PythonFunction(glm, ordinal, ordinal)
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 -- entry functions with default values
@@ -665,7 +665,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.ordinal(
     message    TEXT
 ) RETURNS TEXT AS $$
     PythonFunction(glm, ordinal, ordinal_help_msg)
-$$ LANGUAGE plpythonu IMMUTABLE
+$$ LANGUAGE plpython3u IMMUTABLE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.ordinal()
@@ -686,7 +686,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.ordinal_predict(
     verbose             boolean
 ) RETURNS void AS $$
 PythonFunction(glm, ordinal, ordinal_predict)
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.ordinal_predict(
@@ -714,7 +714,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.ordinal_predict(
     message    TEXT
 ) RETURNS TEXT AS $$
     PythonFunction(glm, ordinal, ordinal_predict_help_msg)
-$$ LANGUAGE plpythonu IMMUTABLE
+$$ LANGUAGE plpython3u IMMUTABLE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.ordinal_predict()
diff --git a/src/ports/postgres/modules/graph/apsp.py_in b/src/ports/postgres/modules/graph/apsp.py_in
index 9e0d4929..518d1c0f 100644
--- a/src/ports/postgres/modules/graph/apsp.py_in
+++ b/src/ports/postgres/modules/graph/apsp.py_in
@@ -29,9 +29,9 @@
 
 
 import plpy
-from graph_utils import validate_graph_coding
-from graph_utils import get_graph_usage
-from graph_utils import get_edge_params
+from graph.graph_utils import validate_graph_coding
+from graph.graph_utils import get_graph_usage
+from graph.graph_utils import get_edge_params
 from utilities.control import MinWarning
 from utilities.utilities import _assert
 from utilities.utilities import _check_groups
diff --git a/src/ports/postgres/modules/graph/apsp.sql_in b/src/ports/postgres/modules/graph/apsp.sql_in
index daf382d3..e8c9e2b0 100644
--- a/src/ports/postgres/modules/graph/apsp.sql_in
+++ b/src/ports/postgres/modules/graph/apsp.sql_in
@@ -393,8 +393,8 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_apsp(
 
 ) RETURNS VOID AS $$
     PythonFunction(graph, apsp, graph_apsp)
-$$ LANGUAGE plpythonu VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `MODIFIES SQL DATA', `');
+$$ LANGUAGE plpython3u VOLATILE
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 -------------------------------------------------------------------------------
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_apsp(
@@ -407,7 +407,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_apsp(
 ) RETURNS VOID AS $$
      SELECT MADLIB_SCHEMA.graph_apsp($1, $2, $3, $4, $5, NULL);
 $$ LANGUAGE sql VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `MODIFIES SQL DATA', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 -------------------------------------------------------------------------------
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_apsp_get_path(
@@ -418,7 +418,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_apsp_get_path(
 
 ) RETURNS VOID AS $$
     PythonFunction(graph, apsp, graph_apsp_get_path)
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_apsp_get_path(
@@ -429,7 +429,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_apsp_get_path(
 
 ) RETURNS VOID AS $$
     PythonFunction(graph, apsp, graph_apsp_get_path)
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `MODIFIES SQL DATA', `');
 -------------------------------------------------------------------------------
 
@@ -438,8 +438,8 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_apsp(
     message VARCHAR
 ) RETURNS VARCHAR AS $$
     PythonFunction(graph, apsp, graph_apsp_help)
-$$ LANGUAGE plpythonu IMMUTABLE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+$$ LANGUAGE plpython3u IMMUTABLE
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 --------------------------------------------------------------------------------
 
@@ -447,5 +447,5 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_apsp()
 RETURNS VARCHAR AS $$
     SELECT MADLIB_SCHEMA.graph_apsp('');
 $$ LANGUAGE sql IMMUTABLE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 --------------------------------------------------------------------------------
diff --git a/src/ports/postgres/modules/graph/bfs.py_in b/src/ports/postgres/modules/graph/bfs.py_in
index 70bff3f5..c7a9e2fd 100644
--- a/src/ports/postgres/modules/graph/bfs.py_in
+++ b/src/ports/postgres/modules/graph/bfs.py_in
@@ -28,8 +28,8 @@
 """
 
 import plpy
-from graph_utils import validate_graph_coding
-from graph_utils import get_graph_usage
+from graph.graph_utils import validate_graph_coding
+from graph.graph_utils import get_graph_usage
 from utilities.control import MinWarning
 from utilities.utilities import _assert
 from utilities.utilities import _check_groups
diff --git a/src/ports/postgres/modules/graph/bfs.sql_in b/src/ports/postgres/modules/graph/bfs.sql_in
index d2474f0a..957592ba 100644
--- a/src/ports/postgres/modules/graph/bfs.sql_in
+++ b/src/ports/postgres/modules/graph/bfs.sql_in
@@ -408,8 +408,8 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_bfs(
     grouping_cols           TEXT
 ) RETURNS VOID AS $$
     PythonFunction(graph, bfs, graph_bfs)
-$$ LANGUAGE plpythonu VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `MODIFIES SQL DATA', `');
+$$ LANGUAGE plpython3u VOLATILE
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 -------------------------------------------------------------------------
 
@@ -425,7 +425,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_bfs(
 ) RETURNS VOID AS $$
      SELECT MADLIB_SCHEMA.graph_bfs($1, $2, $3, $4, $5, $6, $7, $8, NULL);
 $$ LANGUAGE sql VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `MODIFIES SQL DATA', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 -------------------------------------------------------------------------
 
@@ -440,7 +440,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_bfs(
 ) RETURNS VOID AS $$
      SELECT MADLIB_SCHEMA.graph_bfs($1, $2, $3, $4, $5, $6, $7, NULL, NULL);
 $$ LANGUAGE sql VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `MODIFIES SQL DATA', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 -------------------------------------------------------------------------
 
@@ -454,7 +454,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_bfs(
 ) RETURNS VOID AS $$
      SELECT MADLIB_SCHEMA.graph_bfs($1, $2, $3, $4, $5, $6, NULL, NULL, NULL);
 $$ LANGUAGE sql VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `MODIFIES SQL DATA', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 -------------------------------------------------------------------------
 
@@ -463,8 +463,8 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_bfs(
     message VARCHAR
 ) RETURNS VARCHAR AS $$
     PythonFunction(graph, bfs, graph_bfs_help)
-$$ LANGUAGE plpythonu IMMUTABLE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+$$ LANGUAGE plpython3u IMMUTABLE
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 --------------------------------------------------------------------------------
 
@@ -472,7 +472,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_bfs()
 RETURNS VARCHAR AS $$
     SELECT MADLIB_SCHEMA.graph_bfs('');
 $$ LANGUAGE sql IMMUTABLE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 --------------------------------------------------------------------------------
 
diff --git a/src/ports/postgres/modules/graph/graph_utils.py_in b/src/ports/postgres/modules/graph/graph_utils.py_in
index 8b0cf063..6d2757c5 100644
--- a/src/ports/postgres/modules/graph/graph_utils.py_in
+++ b/src/ports/postgres/modules/graph/graph_utils.py_in
@@ -37,6 +37,7 @@ from utilities.validate_args import columns_exist_in_table
 from utilities.validate_args import table_is_empty
 from utilities.validate_args import get_cols_and_types
 
+
 def validate_output_and_summary_tables(model_out_table, module_name,
                                        out_table=None):
     """
@@ -119,6 +120,7 @@ def validate_graph_coding(vertex_table, vertex_id, edge_table, edge_params,
         """Graph {func_name}: Not all columns from \"{dest}\" are present in edge table ({edge_table})""".
         format(**locals()))
 
+
 def validate_params_for_link_analysis(schema_madlib, func_name,
                                             threshold, max_iter,
                                             edge_table=None,
@@ -136,6 +138,7 @@ def validate_params_for_link_analysis(schema_madlib, func_name,
                 "{0} error: One or more grouping columns specified do not exist!".
                 format(func_name))
 
+
 def update_output_grouping_tables_for_link_analysis(temp_summary_table,
                                                     iter_num,
                                                     summary_table,
@@ -186,10 +189,10 @@ def update_output_grouping_tables_for_link_analysis(temp_summary_table,
         INNER JOIN {temp_summary_table}
         ON {join_condition}
         """.format(join_condition=' AND '.join(
-        ["{res_table}.{col}={temp_summary_table}.{col}".format(
+        [f"{res_table}.{col}={temp_summary_table}.{col}".format(
             **locals())
          for col in grouping_cols_list]), **locals()))
 
+
 def get_default_threshold_for_link_analysis(n_vertices):
     """
         A fixed threshold value, of say 1e-5, might not work well when the
@@ -201,6 +205,7 @@ def get_default_threshold_for_link_analysis(n_vertices):
     _assert(n_vertices > 0, """Number of vertices must be greater than 0""")
     return 1.0 / (n_vertices * 1000)
 
+
 def get_graph_usage(schema_madlib, func_name, other_text):
 
     usage = """
diff --git a/src/ports/postgres/modules/graph/hits.py_in b/src/ports/postgres/modules/graph/hits.py_in
index ad8e748d..ec74b858 100644
--- a/src/ports/postgres/modules/graph/hits.py_in
+++ b/src/ports/postgres/modules/graph/hits.py_in
@@ -30,11 +30,11 @@
 import math
 import plpy
 import sys
-from graph_utils import get_graph_usage
-from graph_utils import get_default_threshold_for_link_analysis
-from graph_utils import update_output_grouping_tables_for_link_analysis
-from graph_utils import validate_graph_coding
-from graph_utils import validate_params_for_link_analysis
+from graph.graph_utils import get_graph_usage
+from graph.graph_utils import get_default_threshold_for_link_analysis
+from graph.graph_utils import update_output_grouping_tables_for_link_analysis
+from graph.graph_utils import validate_graph_coding
+from graph.graph_utils import validate_params_for_link_analysis
 
 from utilities.control import MinWarning
 from utilities.utilities import _assert
diff --git a/src/ports/postgres/modules/graph/hits.sql_in b/src/ports/postgres/modules/graph/hits.sql_in
index 6f140c82..5cb59b1b 100644
--- a/src/ports/postgres/modules/graph/hits.sql_in
+++ b/src/ports/postgres/modules/graph/hits.sql_in
@@ -388,7 +388,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.hits(
     grouping_cols   VARCHAR
 ) RETURNS VOID AS $$
     PythonFunction(graph, hits, hits)
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 -------------------------------------------------------------------------
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.hits(
@@ -433,8 +433,8 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.hits(
     message VARCHAR
 ) RETURNS VARCHAR AS $$
     PythonFunction(graph, hits, hits_help)
-$$ LANGUAGE plpythonu IMMUTABLE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+$$ LANGUAGE plpython3u IMMUTABLE
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 -------------------------------------------------------------------------
 
@@ -442,5 +442,5 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.hits()
 RETURNS VARCHAR AS $$
     SELECT MADLIB_SCHEMA.hits('');
 $$ LANGUAGE sql IMMUTABLE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 -------------------------------------------------------------------------
diff --git a/src/ports/postgres/modules/graph/measures.py_in b/src/ports/postgres/modules/graph/measures.py_in
index 902c0d2a..9e45aa6f 100644
--- a/src/ports/postgres/modules/graph/measures.py_in
+++ b/src/ports/postgres/modules/graph/measures.py_in
@@ -38,7 +38,7 @@ from utilities.validate_args import table_exists
 from utilities.validate_args import table_is_empty
 from utilities.validate_args import columns_exist_in_table
 
-from graph_utils import get_graph_usage
+from graph.graph_utils import get_graph_usage
 
 from collections import namedtuple
 from functools import partial
diff --git a/src/ports/postgres/modules/graph/measures.sql_in b/src/ports/postgres/modules/graph/measures.sql_in
index 50afaf35..be0223ba 100644
--- a/src/ports/postgres/modules/graph/measures.sql_in
+++ b/src/ports/postgres/modules/graph/measures.sql_in
@@ -229,7 +229,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_closeness(
     vertex_filter_expr TEXT
 ) RETURNS VOID AS $$
     PythonFunction(graph, measures, graph_closeness)
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 -------------------------------------------------------------------------------
 
@@ -247,8 +247,8 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_closeness(
     message VARCHAR
 ) RETURNS VARCHAR AS $$
     PythonFunction(graph, measures, graph_closeness_help)
-$$ LANGUAGE plpythonu IMMUTABLE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+$$ LANGUAGE plpython3u IMMUTABLE
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 --------------------------------------------------------------------------------
 
@@ -256,7 +256,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_closeness()
 RETURNS VARCHAR AS $$
     SELECT MADLIB_SCHEMA.graph_closeness('');
 $$ LANGUAGE sql IMMUTABLE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 /**
 @addtogroup grp_graph_diameter
@@ -410,7 +410,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_diameter(
     out_table TEXT
 ) RETURNS VOID AS $$
     PythonFunction(graph, measures, graph_diameter)
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 -------------------------------------------------------------------------------
 
@@ -419,8 +419,8 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_diameter(
     message VARCHAR
 ) RETURNS VARCHAR AS $$
     PythonFunction(graph, measures, graph_diameter_help)
-$$ LANGUAGE plpythonu IMMUTABLE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+$$ LANGUAGE plpython3u IMMUTABLE
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 --------------------------------------------------------------------------------
 
@@ -428,7 +428,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_diameter()
 RETURNS VARCHAR AS $$
     SELECT MADLIB_SCHEMA.graph_diameter('');
 $$ LANGUAGE sql IMMUTABLE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 /**
 @addtogroup grp_graph_avg_path_length
@@ -586,7 +586,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_avg_path_length(
     out_table TEXT
 ) RETURNS VOID AS $$
     PythonFunction(graph, measures, graph_avg_path_length)
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 -------------------------------------------------------------------------------
 
@@ -595,8 +595,8 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_avg_path_length(
     message VARCHAR
 ) RETURNS VARCHAR AS $$
     PythonFunction(graph, measures, graph_avg_path_length_help)
-$$ LANGUAGE plpythonu IMMUTABLE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+$$ LANGUAGE plpython3u IMMUTABLE
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 --------------------------------------------------------------------------------
 
@@ -604,7 +604,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_avg_path_length()
 RETURNS VARCHAR AS $$
     SELECT MADLIB_SCHEMA.graph_avg_path_length('');
 $$ LANGUAGE sql IMMUTABLE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 /**
 @addtogroup grp_graph_vertex_degrees
@@ -786,7 +786,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_vertex_degrees(
     grouping_cols           TEXT
 ) RETURNS VOID AS $$
     PythonFunction(graph, measures, graph_vertex_degrees)
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 -------------------------------------------------------------------------------
 
@@ -807,8 +807,8 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_vertex_degrees(
     message VARCHAR
 ) RETURNS VARCHAR AS $$
     PythonFunction(graph, measures, graph_vertex_degrees_help)
-$$ LANGUAGE plpythonu IMMUTABLE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+$$ LANGUAGE plpython3u IMMUTABLE
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 --------------------------------------------------------------------------------
 
@@ -816,4 +816,4 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_vertex_degrees()
 RETURNS VARCHAR AS $$
     SELECT MADLIB_SCHEMA.graph_vertex_degrees('');
 $$ LANGUAGE sql IMMUTABLE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
diff --git a/src/ports/postgres/modules/graph/pagerank.py_in b/src/ports/postgres/modules/graph/pagerank.py_in
index 3a6b7b3b..17c4324e 100644
--- a/src/ports/postgres/modules/graph/pagerank.py_in
+++ b/src/ports/postgres/modules/graph/pagerank.py_in
@@ -28,12 +28,12 @@
 """
 
 import plpy
-from graph_utils import get_graph_usage
-from graph_utils import get_default_threshold_for_link_analysis
-from graph_utils import update_output_grouping_tables_for_link_analysis
-from graph_utils import validate_graph_coding
-from graph_utils import validate_output_and_summary_tables
-from graph_utils import validate_params_for_link_analysis
+from graph.graph_utils import get_graph_usage
+from graph.graph_utils import get_default_threshold_for_link_analysis
+from graph.graph_utils import update_output_grouping_tables_for_link_analysis
+from graph.graph_utils import validate_graph_coding
+from graph.graph_utils import validate_output_and_summary_tables
+from graph.graph_utils import validate_params_for_link_analysis
 
 from utilities.control import MinWarning
 from utilities.control import OptimizerControl
@@ -49,7 +49,6 @@ from utilities.utilities import py_list_to_sql_string
 
 from utilities.validate_args import columns_exist_in_table, get_cols_and_types
 from utilities.validate_args import table_exists
-from utilities.validate_args import unquote_ident
 from utilities.utilities import rename_table
 
 
diff --git a/src/ports/postgres/modules/graph/pagerank.sql_in b/src/ports/postgres/modules/graph/pagerank.sql_in
index 6bd87e76..6e63908c 100644
--- a/src/ports/postgres/modules/graph/pagerank.sql_in
+++ b/src/ports/postgres/modules/graph/pagerank.sql_in
@@ -456,8 +456,8 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.pagerank(
     personalization_vertices BIGINT[]
 ) RETURNS VOID AS $$
     PythonFunction(graph, pagerank, pagerank)
-$$ LANGUAGE plpythonu VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `MODIFIES SQL DATA', `');
+$$ LANGUAGE plpython3u VOLATILE
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 -------------------------------------------------------------------------
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.pagerank(
     vertex_table    TEXT,
@@ -530,8 +530,8 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.pagerank(
     message VARCHAR
 ) RETURNS VARCHAR AS $$
     PythonFunction(graph, pagerank, pagerank_help)
-$$ LANGUAGE plpythonu IMMUTABLE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+$$ LANGUAGE plpython3u IMMUTABLE
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 --------------------------------------------------------------------------------
 
@@ -539,5 +539,5 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.pagerank()
 RETURNS VARCHAR AS $$
     SELECT MADLIB_SCHEMA.pagerank('');
 $$ LANGUAGE sql IMMUTABLE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 --------------------------------------------------------------------------------
diff --git a/src/ports/postgres/modules/graph/sssp.py_in b/src/ports/postgres/modules/graph/sssp.py_in
index a1e640fa..292e1f27 100644
--- a/src/ports/postgres/modules/graph/sssp.py_in
+++ b/src/ports/postgres/modules/graph/sssp.py_in
@@ -28,9 +28,9 @@
 """
 
 import plpy
-from graph_utils import validate_graph_coding
-from graph_utils import get_graph_usage
-from graph_utils import get_edge_params
+from graph.graph_utils import validate_graph_coding
+from graph.graph_utils import get_graph_usage
+from graph.graph_utils import get_edge_params
 from utilities.control import MinWarning
 from utilities.control import OptimizerControl
 
diff --git a/src/ports/postgres/modules/graph/sssp.sql_in b/src/ports/postgres/modules/graph/sssp.sql_in
index 86f21d9f..dcc67fb7 100644
--- a/src/ports/postgres/modules/graph/sssp.sql_in
+++ b/src/ports/postgres/modules/graph/sssp.sql_in
@@ -338,7 +338,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_sssp(
 
 ) RETURNS VOID AS $$
     PythonFunction(graph, sssp, graph_sssp)
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_sssp(
@@ -352,7 +352,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_sssp(
 
 ) RETURNS VOID AS $$
     PythonFunction(graph, sssp, graph_sssp)
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `MODIFIES SQL DATA', `');
 -------------------------------------------------------------------------
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_sssp(
@@ -387,7 +387,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_sssp_get_path(
 
 ) RETURNS VOID AS $$
     PythonFunction(graph, sssp, graph_sssp_get_path)
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `MODIFIES SQL DATA', `');
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_sssp_get_path(
     sssp_table             TEXT,
@@ -396,7 +396,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_sssp_get_path(
 
 ) RETURNS VOID AS $$
     PythonFunction(graph, sssp, graph_sssp_get_path)
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `MODIFIES SQL DATA', `');
 -------------------------------------------------------------------------
 
@@ -405,8 +405,8 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_sssp(
     message VARCHAR
 ) RETURNS VARCHAR AS $$
     PythonFunction(graph, sssp, graph_sssp_help)
-$$ LANGUAGE plpythonu IMMUTABLE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+$$ LANGUAGE plpython3u IMMUTABLE
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 -------------------------------------------------------------------------------
 
@@ -414,5 +414,5 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_sssp()
 RETURNS VARCHAR AS $$
     SELECT MADLIB_SCHEMA.graph_sssp('');
 $$ LANGUAGE sql IMMUTABLE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 -------------------------------------------------------------------------------
diff --git a/src/ports/postgres/modules/graph/wcc.py_in b/src/ports/postgres/modules/graph/wcc.py_in
index d4f90222..17f3ab4a 100644
--- a/src/ports/postgres/modules/graph/wcc.py_in
+++ b/src/ports/postgres/modules/graph/wcc.py_in
@@ -42,8 +42,8 @@ from utilities.utilities import add_postfix
 from utilities.validate_args import table_exists
 from utilities.utilities import rename_table
 from utilities.control import MinWarning
-from graph_utils import validate_graph_coding, get_graph_usage
-from graph_utils import validate_output_and_summary_tables
+from graph.graph_utils import validate_graph_coding, get_graph_usage
+from graph.graph_utils import validate_output_and_summary_tables
 
 def validate_wcc_args(schema_madlib, vertex_table, vertex_table_in, vertex_id,
                       vertex_id_in, edge_table, edge_params, edge_args,
@@ -83,16 +83,18 @@ def validate_wcc_args(schema_madlib, vertex_table, vertex_table_in, vertex_id,
                 "Weakly Connected Components error: "
                 "One or more grouping columns specified do not exist!")
 
+
 def wcc(schema_madlib, vertex_table, vertex_id, edge_table, edge_args,
         out_table, grouping_cols, iteration_limit=0, warm_start=False, **kwargs):
     """
     Function that computes the wcc
 
     Args:
+        @param schema_madlib
         @param vertex_table
         @param vertex_id
         @param edge_table
-        @param dest_vertex
+        @param edge_args
         @param out_table
         @param grouping_cols
     """
diff --git a/src/ports/postgres/modules/graph/wcc.sql_in b/src/ports/postgres/modules/graph/wcc.sql_in
index 594b74a4..812d2634 100644
--- a/src/ports/postgres/modules/graph/wcc.sql_in
+++ b/src/ports/postgres/modules/graph/wcc.sql_in
@@ -645,8 +645,8 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.weakly_connected_components(
 
 ) RETURNS VOID AS $$
     PythonFunction(graph, wcc, wcc)
-$$ LANGUAGE plpythonu VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `MODIFIES SQL DATA', `');
+$$ LANGUAGE plpython3u VOLATILE
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 -------------------------------------------------------------------------
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.weakly_connected_components(
     vertex_table            TEXT,
@@ -658,7 +658,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.weakly_connected_components(
 ) RETURNS VOID AS $$
      SELECT MADLIB_SCHEMA.weakly_connected_components($1, $2, $3, $4, $5, NULL);
 $$ LANGUAGE sql VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `MODIFIES SQL DATA', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 -------------------------------------------------------------------------
 -- HELPER functions
 -------------------------------------------------------------------------
@@ -668,8 +668,8 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_wcc_largest_cpt(
 
 ) RETURNS VOID AS $$
     PythonFunction(graph, wcc, graph_wcc_largest_cpt)
-$$ LANGUAGE plpythonu VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `MODIFIES SQL DATA', `');
+$$ LANGUAGE plpython3u VOLATILE
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 -------------------------------------------------------------------------
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_wcc_histogram(
@@ -678,8 +678,8 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_wcc_histogram(
 
 ) RETURNS VOID AS $$
     PythonFunction(graph, wcc, graph_wcc_histogram)
-$$ LANGUAGE plpythonu VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `MODIFIES SQL DATA', `');
+$$ LANGUAGE plpython3u VOLATILE
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 -------------------------------------------------------------------------
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_wcc_vertex_check(
@@ -689,8 +689,8 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_wcc_vertex_check(
 
 ) RETURNS VOID AS $$
     PythonFunction(graph, wcc, graph_wcc_vertex_check)
-$$ LANGUAGE plpythonu VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `MODIFIES SQL DATA', `');
+$$ LANGUAGE plpython3u VOLATILE
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 -------------------------------------------------------------------------
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_wcc_reachable_vertices(
@@ -710,8 +710,8 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_wcc_reachable_vertices(
 
 ) RETURNS VOID AS $$
     PythonFunction(graph, wcc, graph_wcc_reachable_vertices)
-$$ LANGUAGE plpythonu VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `MODIFIES SQL DATA', `');
+$$ LANGUAGE plpython3u VOLATILE
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 -------------------------------------------------------------------------
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_wcc_num_cpts(
@@ -720,8 +720,8 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.graph_wcc_num_cpts(
 
 ) RETURNS VOID AS $$
     PythonFunction(graph, wcc, graph_wcc_num_cpts)
-$$ LANGUAGE plpythonu VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `MODIFIES SQL DATA', `');
+$$ LANGUAGE plpython3u VOLATILE
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 -------------------------------------------------------------------------
 
 -------------------------------------------------------------------------
@@ -731,8 +731,8 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.weakly_connected_components(
     message VARCHAR
 ) RETURNS VARCHAR AS $$
     PythonFunction(graph, wcc, wcc_help)
-$$ LANGUAGE plpythonu IMMUTABLE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+$$ LANGUAGE plpython3u IMMUTABLE
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 -------------------------------------------------------------------------------
 
@@ -740,6 +740,6 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.weakly_connected_components()
 RETURNS VARCHAR AS $$
     SELECT MADLIB_SCHEMA.weakly_connected_components('');
 $$ LANGUAGE sql IMMUTABLE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 -------------------------------------------------------------------------------
 
diff --git a/src/ports/postgres/modules/kmeans/kmeans.sql_in b/src/ports/postgres/modules/kmeans/kmeans.sql_in
index 5a45d059..f76bafef 100644
--- a/src/ports/postgres/modules/kmeans/kmeans.sql_in
+++ b/src/ports/postgres/modules/kmeans/kmeans.sql_in
@@ -1009,7 +1009,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.internal_compute_kmeans(
     agg_centroid VARCHAR)
 RETURNS INTEGER
 VOLATILE
-LANGUAGE plpythonu
+LANGUAGE plpython3u
 AS $$PythonFunction(kmeans, kmeans, compute_kmeans)$$
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
@@ -1018,7 +1018,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.__kmeans_validate_src(
     rel_source      VARCHAR
 ) RETURNS VOID AS $$
     PythonFunction(kmeans, kmeans, kmeans_validate_src)
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `READS SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.__kmeans_validate_expr(
@@ -1026,7 +1026,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.__kmeans_validate_expr(
     expr_point      VARCHAR
 ) RETURNS BOOLEAN AS $$
     PythonFunction(kmeans, kmeans, kmeans_validate_expr)
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `READS SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.__seeding_validate_args(
@@ -1366,7 +1366,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.internal_compute_kmeanspp_seeding(
     expr_point VARCHAR)
 RETURNS INTEGER
 AS $$PythonFunction(kmeans, kmeans, compute_kmeanspp_seeding)$$
-LANGUAGE plpythonu VOLATILE
+LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 /**
@@ -1697,7 +1697,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.internal_compute_kmeans_random_seeding(
     expr_point VARCHAR)
 RETURNS INTEGER
 AS $$PythonFunction(kmeans, kmeans, compute_kmeans_random_seeding)$$
-LANGUAGE plpythonu VOLATILE
+LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 /**
@@ -2188,7 +2188,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.kmeanspp_auto(
     k_selection_algorithm VARCHAR /*+ DEFAULT 'silhouette' */
 ) RETURNS VOID AS $$
     PythonFunction(`kmeans', `kmeans_auto', `kmeanspp_auto')
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.kmeanspp_auto(
@@ -2204,7 +2204,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.kmeanspp_auto(
 ) RETURNS VOID AS $$
     SELECT MADLIB_SCHEMA.kmeanspp_auto($1, $2, $3, $4, $5, $6, $7, $8, $9, NULL)
 $$ LANGUAGE sql VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.kmeanspp_auto(
     rel_source VARCHAR,
@@ -2218,7 +2218,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.kmeanspp_auto(
 ) RETURNS VOID AS $$
     SELECT MADLIB_SCHEMA.kmeanspp_auto($1, $2, $3, $4, $5, $6, $7, $8, NULL, NULL)
 $$ LANGUAGE sql VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.kmeanspp_auto(
     rel_source VARCHAR,
@@ -2231,7 +2231,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.kmeanspp_auto(
 ) RETURNS VOID AS $$
     SELECT MADLIB_SCHEMA.kmeanspp_auto($1, $2, $3, $4, $5, $6, $7, NULL, NULL, NULL)
 $$ LANGUAGE sql VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.kmeanspp_auto(
     rel_source VARCHAR,
@@ -2243,7 +2243,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.kmeanspp_auto(
 ) RETURNS VOID AS $$
     SELECT MADLIB_SCHEMA.kmeanspp_auto($1, $2, $3, $4, $5, $6, NULL, NULL, NULL)
 $$ LANGUAGE sql VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.kmeanspp_auto(
     rel_source VARCHAR,
@@ -2254,7 +2254,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.kmeanspp_auto(
 ) RETURNS VOID AS $$
     SELECT MADLIB_SCHEMA.kmeanspp_auto($1, $2, $3, $4, $5, NULL, NULL, NULL, NULL, NULL)
 $$ LANGUAGE sql VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.kmeanspp_auto(
     rel_source VARCHAR,
@@ -2264,7 +2264,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.kmeanspp_auto(
 ) RETURNS VOID AS $$
     SELECT MADLIB_SCHEMA.kmeanspp_auto($1, $2, $3, $4, NULL, NULL, NULL, NULL, NULL, NULL)
 $$ LANGUAGE sql VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.kmeans_random_auto(
     rel_source VARCHAR,
@@ -2278,7 +2278,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.kmeans_random_auto(
     k_selection_algorithm VARCHAR /*+ DEFAULT 'silhouette' */
 ) RETURNS VOID AS $$
     PythonFunction(`kmeans', `kmeans_auto', `kmeans_random_auto')
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.kmeans_random_auto(
@@ -2293,7 +2293,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.kmeans_random_auto(
 ) RETURNS VOID AS $$
     SELECT MADLIB_SCHEMA.kmeans_random_auto($1, $2, $3, $4, $5, $6, $7, $8, NULL)
 $$ LANGUAGE sql VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.kmeans_random_auto(
     rel_source VARCHAR,
@@ -2306,7 +2306,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.kmeans_random_auto(
 ) RETURNS VOID AS $$
     SELECT MADLIB_SCHEMA.kmeans_random_auto($1, $2, $3, $4, $5, $6, $7, NULL, NULL)
 $$ LANGUAGE sql VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.kmeans_random_auto(
     rel_source VARCHAR,
@@ -2318,7 +2318,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.kmeans_random_auto(
 ) RETURNS VOID AS $$
     SELECT MADLIB_SCHEMA.kmeans_random_auto($1, $2, $3, $4, $5, $6, NULL, NULL)
 $$ LANGUAGE sql VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.kmeans_random_auto(
     rel_source VARCHAR,
@@ -2329,7 +2329,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.kmeans_random_auto(
 ) RETURNS VOID AS $$
     SELECT MADLIB_SCHEMA.kmeans_random_auto($1, $2, $3, $4, $5, NULL, NULL, NULL, NULL)
 $$ LANGUAGE sql VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.kmeans_random_auto(
     rel_source VARCHAR,
@@ -2339,7 +2339,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.kmeans_random_auto(
 ) RETURNS VOID AS $$
     SELECT MADLIB_SCHEMA.kmeans_random_auto($1, $2, $3, $4, NULL, NULL, NULL, NULL, NULL)
 $$ LANGUAGE sql VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `CONTAINS SQL', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.simple_silhouette_points(
     rel_source VARCHAR,
@@ -2351,8 +2351,8 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.simple_silhouette_points(
     fn_dist VARCHAR /*+ DEFAULT 'squared_dist_norm2' */
 ) RETURNS VOID AS $$
     PythonFunction(kmeans, kmeans, simple_silhouette_points_str_wrapper)
-$$ LANGUAGE plpythonu VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `MODIFIES SQL DATA', `');
+$$ LANGUAGE plpython3u VOLATILE
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.simple_silhouette_points(
     rel_source VARCHAR,
@@ -2366,7 +2366,7 @@ AS $$
     SELECT MADLIB_SCHEMA.simple_silhouette_points($1, $2, $3, $4, $5, $6,
         'MADLIB_SCHEMA.squared_dist_norm2')
 $$ LANGUAGE sql VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `MODIFIES SQL DATA', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.simple_silhouette_points(
@@ -2378,8 +2378,8 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.simple_silhouette_points(
     fn_dist VARCHAR /*+ DEFAULT 'squared_dist_norm2' */
 ) RETURNS VOID AS $$
     PythonFunction(kmeans, kmeans, simple_silhouette_points_dbl_wrapper)
-$$ LANGUAGE plpythonu VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `MODIFIES SQL DATA', `');
+$$ LANGUAGE plpython3u VOLATILE
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.simple_silhouette_points(
     rel_source VARCHAR,
@@ -2392,4 +2392,4 @@ AS $$
     SELECT MADLIB_SCHEMA.simple_silhouette_points($1, $2, $3, $4, $5,
         'MADLIB_SCHEMA.squared_dist_norm2')
 $$ LANGUAGE sql VOLATILE
-m4_ifdef(`\_\_HAS_FUNCTION_PROPERTIES\_\_', `MODIFIES SQL DATA', `');
+m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
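
The recurring fix in the kmeans hunks above is mechanical but easy to miss:
the escaped spelling \_\_HAS_FUNCTION_PROPERTIES\_\_ names a symbol that m4
never defines, so m4_ifdef would take the empty branch and the CONTAINS SQL /
MODIFIES SQL DATA clauses were silently dropped from the emitted DDL. A
minimal Python stand-in for that lookup (illustrative only, not MADlib's
actual build machinery) shows the failure mode:

    # Toy model of m4_ifdef: emit the first branch only when the symbol
    # is defined. DEFINED is an assumption made up for this demo.
    DEFINED = {"__HAS_FUNCTION_PROPERTIES__"}

    def m4_ifdef(symbol, if_defined, if_undefined=""):
        return if_defined if symbol in DEFINED else if_undefined

    # The escaped name never matches, so the clause vanished:
    print(m4_ifdef(r"\_\_HAS_FUNCTION_PROPERTIES\_\_", "CONTAINS SQL"))  # ""
    print(m4_ifdef("__HAS_FUNCTION_PROPERTIES__", "CONTAINS SQL"))  # "CONTAINS SQL"
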
diff --git a/src/ports/postgres/modules/knn/knn.sql_in b/src/ports/postgres/modules/knn/knn.sql_in
index 22822ed5..ef6deb59 100644
--- a/src/ports/postgres/modules/knn/knn.sql_in
+++ b/src/ports/postgres/modules/knn/knn.sql_in
@@ -527,7 +527,7 @@ CREATE OR REPLACE FUNCTION chebychev_distance (x double precision[], y double pr
 AS $$
     from scipy.spatial import distance
     return distance.chebyshev(x, y)
-$$ LANGUAGE plpythonu;
+$$ LANGUAGE plpython3u;
 </pre>
 Then pass the function as an argument:
 <pre class="example">
@@ -644,7 +644,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.__knn_validate_src(
     fn_dist VARCHAR
 ) RETURNS INTEGER AS $$
     PythonFunctionBody(`knn', `knn', `knn_validate_src')
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.knn(
@@ -664,7 +664,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.knn(
     algorithm_params VARCHAR
 ) RETURNS VARCHAR AS $$
     PythonFunction(`knn', `knn', `knn')
-$$ LANGUAGE plpythonu VOLATILE
+$$ LANGUAGE plpython3u VOLATILE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.knn(
@@ -810,7 +810,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.knn(
     message VARCHAR
 ) RETURNS VARCHAR AS $$
     PythonFunction(knn, knn, knn_help)
-$$ LANGUAGE plpythonu IMMUTABLE
+$$ LANGUAGE plpython3u IMMUTABLE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.knn()
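
The knn documentation above defines a custom distance function whose body is
ordinary Python; moving from plpythonu to plpython3u changes only the language
declaration, not the logic. The same body can be sanity-checked outside the
database; a small sketch, assuming scipy is installed:

    # Standalone twin of the chebychev_distance body shown above.
    from scipy.spatial import distance

    def chebyshev_distance(x, y):
        # Chebyshev distance: the largest per-coordinate absolute difference.
        return distance.chebyshev(x, y)

    assert chebyshev_distance([0.0, 0.0], [3.0, 4.0]) == 4.0
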
diff --git a/src/ports/postgres/modules/lda/lda.py_in b/src/ports/postgres/modules/lda/lda.py_in
index 29c9d855..d5fd4351 100644
--- a/src/ports/postgres/modules/lda/lda.py_in
+++ b/src/ports/postgres/modules/lda/lda.py_in
@@ -507,7 +507,7 @@ def lda_train(schema_madlib, train_table, model_table, output_data_table, voc_si
     """
     @brief This function provides the entry for the LDA training process.
     @param schema_madlib        MADlib schema
-    @param data_table           Training data table
+    @param train_table          Training data table
     @param voc_size             Size of vocabulary
     @param topic_num            Number of topics
     @param iter_num             Number of iterations
@@ -587,6 +587,7 @@ def lda_predict(schema_madlib, test_table, model_table, output_data_table,
                 iter_num=20):
     """
     @brief This function provides the entry for the LDA prediction process.
+    @param schema_madlib    MADlib schema
     @param test_table       name of the testing dataset table
     @param model_table      name of the model table
     @param iter_num         number of iterations
@@ -622,6 +623,7 @@ def get_topic_desc(schema_madlib, model_table, vocab_table, desc_table,
                    top_k=15):
     """
     @brief Get the per-topic description by top-k words
+    @param schema_madlib MADlib schema
     @param model_table  The model table generated by the training process
     @param vocab_table  The vocabulary table
     @param top_k        The top k words for topic description
@@ -725,6 +727,7 @@ def get_topic_desc(schema_madlib, model_table, vocab_table, desc_table,
 def get_topic_word_count(schema_madlib, model_table, output_table):
     """
     @brief Get the per-topic word counts from the model table
+    @param schema_madlib   MADlib schema
     @param model_table     The model table generated by the training process
     @param output_table    The output table for storing the per-topic word counts
     """
@@ -762,6 +765,7 @@ def get_topic_word_count(schema_madlib, model_table, output_table):
 def get_word_topic_count(schema_madlib, model_table, output_table):
     """
     @brief Get the per-word topic counts from the model table
+    @param schema_madlib   MADlib schema
     @param model_table     The model table generated by the training process
     @param output_table    The output table for storing the per-word topic counts
     """
@@ -789,9 +793,11 @@ def get_word_topic_count(schema_madlib, model_table, output_table):
         """.format(output_table=output_table, schema_madlib=schema_madlib,
                    model_table=model_table))
 
+
 def get_word_topic_mapping(schema_madlib, lda_output_table, mapping_table):
     """
     @brief Get the wordid - topicid mapping from the lda training output table
+    @param schema_madlib    MADlib schema
     @param lda_output_table The output table from lda training or predicting
     @param mapping_table    The result table that saves the mapping info
     """
@@ -837,10 +843,12 @@ def get_word_topic_mapping(schema_madlib, lda_output_table, mapping_table):
         """.format(lda_output_table=lda_output_table,
                    schema_madlib=schema_madlib, mapping_table=mapping_table))
 
+
 def get_perplexity(schema_madlib, model_table, output_data_table):
     """
     @brief Get the perplexity given the prediction and model.
-    @param model_table     The model table generated by lda_train
+    @param schema_madlib        MADlib schema
+    @param model_table          The model table generated by lda_train
     @param output_data_table    The output data table generated by lda_predict
     """
     _assert(model_table != '' and output_data_table != '',
@@ -1020,9 +1028,9 @@ def index_sort(arr, **kwargs):
     # process arrays for GPDB < 4.1 and PG < 9.0
     arr = string_to_array(arr, False)
     dim = len(arr)
-    idx = range(dim)
+    idx = list(range(dim))
     idx.sort(key=lambda r: arr[r])
-    return array_to_string(map(lambda r: r + 1, idx))
+    return array_to_string([r + 1 for r in idx])
 
 
 def _convert_data_table(schema_madlib, data_table):
@@ -1246,7 +1254,7 @@ def _validate_model_table(model_table):
     """
     # plpy.notice('checking the model table ...')
     try:
-        rv = plpy.execute("""
+        rv = plpy.execute(f"""
             SELECT count(*) cnt
             FROM pg_attribute
             WHERE
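
The index_sort change in this file is the canonical Python 3 migration:
range() and map() now return lazy objects rather than lists, so the in-place
sort needs an explicit list and the 1-based result is built with a
comprehension. A self-contained sketch of the fixed logic:

    def index_sort(arr):
        idx = list(range(len(arr)))     # py2 range() gave a list; py3's is lazy
        idx.sort(key=lambda r: arr[r])  # order positions by the values they index
        return [r + 1 for r in idx]     # 1-based indices, replacing py2's map()

    assert index_sort([30, 10, 20]) == [2, 3, 1]
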
diff --git a/src/ports/postgres/modules/lda/lda.sql_in b/src/ports/postgres/modules/lda/lda.sql_in
index 0d3279f9..b43121f9 100644
--- a/src/ports/postgres/modules/lda/lda.sql_in
+++ b/src/ports/postgres/modules/lda/lda.sql_in
@@ -1073,7 +1073,7 @@ RETURNS SETOF MADLIB_SCHEMA.lda_result AS $$
                         voc_size, topic_num, iter_num, alpha, beta, None, None)
     return [[model_table, 'model table'],
         [output_data_table, 'output data table']]
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 
@@ -1098,7 +1098,7 @@ RETURNS SETOF MADLIB_SCHEMA.lda_result AS $$
                   voc_size, topic_num, iter_num, alpha, beta, evaluate_every, perplexity_tol)
     return [[model_table, 'model table'],
         [output_data_table, 'output data table']]
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION
@@ -1121,7 +1121,7 @@ RETURNS SETOF MADLIB_SCHEMA.lda_result AS $$
                   voc_size, topic_num, iter_num, alpha, beta, evaluate_every, None)
     return [[model_table, 'model table'],
         [output_data_table, 'output data table']]
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 
@@ -1152,7 +1152,7 @@ RETURNS SETOF MADLIB_SCHEMA.lda_result AS $$
     return [[
         output_table,
         'per-doc topic distribution and per-word topic assignments']]
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 /**
@@ -1173,7 +1173,7 @@ RETURNS SETOF MADLIB_SCHEMA.lda_result AS $$
     return [[
         output_table,
         'per-doc topic distribution and per-word topic assignments']]
-$$ LANGUAGE PLPYTHONU STRICT
+$$ LANGUAGE PLPYTHON3U STRICT
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 /**
@@ -1192,7 +1192,7 @@ RETURNS SETOF MADLIB_SCHEMA.lda_result AS $$
     with AOControl(False):
         lda.get_topic_word_count(schema_madlib, model_table, output_table)
     return [[output_table, 'per-topic word counts']]
-$$ LANGUAGE plpythonu STRICT
+$$ LANGUAGE plpython3u STRICT
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 /**
@@ -1213,7 +1213,7 @@ RETURNS SETOF MADLIB_SCHEMA.lda_result AS $$
         with MinWarning("error"):
             lda.get_word_topic_count(schema_madlib, model_table, output_table)
     return [[output_table, 'per-word topic counts']]
-$$ LANGUAGE plpythonu STRICT
+$$ LANGUAGE plpython3u STRICT
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 /**
@@ -1241,7 +1241,7 @@ RETURNS SETOF MADLIB_SCHEMA.lda_result AS $$
         desc_table,
         """topic description, use "ORDER BY topicid, prob DESC" to check the
         results"""]]
-$$ LANGUAGE plpythonu STRICT
+$$ LANGUAGE plpython3u STRICT
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 /**
@@ -1262,7 +1262,7 @@ RETURNS SETOF MADLIB_SCHEMA.lda_result AS $$
         with MinWarning("error"):
             lda.get_word_topic_mapping(schema_madlib, lda_output_table, mapping_table)
     return [[mapping_table, 'wordid - topicid mapping']]
-$$ LANGUAGE plpythonu STRICT
+$$ LANGUAGE plpython3u STRICT
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 /**
@@ -1420,7 +1420,7 @@ RETURNS FLOAT8 AS $$
     PythonFunctionBodyOnly(`lda', `lda')
     with AOControl(False):
         return lda.get_perplexity(schema_madlib, model_table, output_data_table)
-$$ LANGUAGE plpythonu STRICT
+$$ LANGUAGE plpython3u STRICT
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `READS SQL DATA', `');
 
 /**
@@ -1617,7 +1617,7 @@ MADLIB_SCHEMA.__lda_util_index_sort
 )
 RETURNS INT4[] AS $$
     PythonFunction(`lda', `lda', `index_sort')
-$$ LANGUAGE plpythonu STRICT
+$$ LANGUAGE plpython3u STRICT
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `NO SQL', `');
 
 /**
@@ -1638,7 +1638,7 @@ RETURNS SETOF MADLIB_SCHEMA.lda_result AS $$
     with AOControl(False):
         lda.norm_vocab(vocab_table, output_vocab_table)
     return [[output_vocab_table,'normalized vocabulary table']]
-$$ LANGUAGE plpythonu STRICT
+$$ LANGUAGE plpython3u STRICT
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 /**
@@ -1660,7 +1660,7 @@ RETURNS SETOF MADLIB_SCHEMA.lda_result AS $$
     with AOControl(False):
         lda.norm_dataset(data_table, norm_vocab_table, output_data_table)
     return [[output_data_table,'normalized data table']]
-$$ LANGUAGE plpythonu STRICT
+$$ LANGUAGE plpython3u STRICT
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 /**
@@ -1687,7 +1687,7 @@ RETURNS SETOF MADLIB_SCHEMA.lda_result AS $$
         lda.conorm_data(data_table, vocab_table, output_data_table, output_vocab_table)
     return [[output_data_table,'normalized data table'],
         [output_vocab_table,'normalized vocab table']]
-$$ LANGUAGE plpythonu STRICT
+$$ LANGUAGE plpython3u STRICT
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 -- UDT contains model matrix (voc_size x topic_num) and total topic counts array
diff --git a/src/ports/postgres/modules/linalg/matrix_help_message.py_in b/src/ports/postgres/modules/linalg/matrix_help_message.py_in
index 62b2535a..c613d032 100644
--- a/src/ports/postgres/modules/linalg/matrix_help_message.py_in
+++ b/src/ports/postgres/modules/linalg/matrix_help_message.py_in
@@ -48,7 +48,7 @@ The column names in {} are set using the options provided in 'out_args'.
 
 def _get_help_message(schema_madlib, message, function_name, functionality_str,
                       usage_str, **kwargs):
-    format_dict = dict(locals().items() + globals().items())
+    format_dict = dict(list(locals().items()) + list(globals().items()))
     if not message:
         help_string = """
 ------------------------------------------------------------
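
The single change in this file exists because dict.items() returns a view in
Python 3, and views do not support concatenation with +. A neutral
illustration with placeholder dicts:

    a, b = {"x": 1}, {"y": 2}
    # merged = dict(a.items() + b.items())   # TypeError under Python 3
    merged = dict(list(a.items()) + list(b.items()))
    assert merged == {"x": 1, "y": 2}

Wrapping both sides in list() keeps the diff minimal; {**a, **b} would be the
more idiomatic Python 3 spelling of the same merge.
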
diff --git a/src/ports/postgres/modules/linalg/matrix_ops.py_in b/src/ports/postgres/modules/linalg/matrix_ops.py_in
index bdbb5029..1964196d 100644
--- a/src/ports/postgres/modules/linalg/matrix_ops.py_in
+++ b/src/ports/postgres/modules/linalg/matrix_ops.py_in
@@ -526,7 +526,7 @@ def matrix_densify(schema_madlib, matrix_in, in_args, matrix_out, out_args):
                                  in_default_args={'row': in_args['row'],
                                                   'val': in_args['val']})
     (row_dim, col_dim) = get_dims(matrix_in, in_args)
-    _assert(col_dim < sys.maxint,
+    _assert(col_dim < sys.maxsize,
             "Matrix error: Matrix {0} has too many rows. This cannot be "
             "transposed in a dense format due to "
             "restrictions on maximum array size.".format(matrix_in))
@@ -909,7 +909,7 @@ def matrix_block_trans(schema_madlib, matrix_in, in_args, matrix_out, out_args):
 def matrix_blockize(schema_madlib, matrix_in, in_args,
                     row_dim, col_dim, matrix_out, out_args):
     _assert(row_dim > 0 and col_dim > 0, 'Matrix error: invalid block dimension')
-    _assert(row_dim * col_dim < sys.maxint,
+    _assert(row_dim * col_dim < sys.maxsize,
             "Matrix error: Block size requested ({0}) is "
             "too large".format(row_dim * col_dim))
     _validate_input_table(matrix_in)
@@ -1599,7 +1599,7 @@ def _matrix_trans_sparse(schema_madlib, matrix_in, in_args,
 def _matrix_trans_dense(schema_madlib, matrix_in, in_args,
                         matrix_out, out_args):
     (row_dim, col_dim) = get_dims(matrix_in, in_args)
-    _assert(row_dim < sys.maxint,
+    _assert(row_dim < sys.maxsize,
             "Matrix error: Matrix {0} has too many rows. "
             "This cannot be transposed in a dense format due to "
             "restrictions on maximum array size.".format(matrix_in))
@@ -2766,7 +2766,7 @@ def matrix_lu(schema_madlib, matrix_in, in_args,
 
     matrix_output_names = dict([(i, add_postfix(matrix_out_prefix, "_" + i))
                                 for i in ("p", "l", "u", "q")])
-    for each_output in matrix_output_names.values():
+    for each_output in list(matrix_output_names.values()):
         _validate_output_table(each_output)
 
     in_args = parse_matrix_args(in_args)
@@ -2822,7 +2822,7 @@ def matrix_lu(schema_madlib, matrix_in, in_args,
 
     matrix_temp_names = dict([
         (k, 'pg_temp.' + unique_string() if is_output_sparse else v)
-        for k, v in matrix_output_names.items()])
+        for k, v in list(matrix_output_names.items())])
     plpy.execute("""
         CREATE TABLE {matrix_temp_names[p]} AS
         SELECT row_id AS {out_args[row]},
@@ -2849,8 +2849,8 @@ def matrix_lu(schema_madlib, matrix_in, in_args,
         """.format(**locals()))
 
     if is_output_sparse:
-        for temp, output in zip(matrix_temp_names.values(),
-                                matrix_output_names.values()):
+        for temp, output in zip(list(matrix_temp_names.values()),
+                                list(matrix_output_names.values())):
             matrix_sparsify(schema_madlib, temp, out_args, output, out_args)
             plpy.execute('DROP TABLE IF EXISTS %s' % temp)
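
Two Python 3 patterns repeat throughout matrix_ops.py_in: sys.maxint is gone
(py3 integers are unbounded), with sys.maxsize as the nearest stand-in for the
maximum-array-size guards, and .values()/.items() now return views, so they
are materialized with list() before use. A short sketch of both, using
made-up dimensions and table names:

    import sys

    row_dim, col_dim = 1000, 1000           # illustrative block dimensions
    assert row_dim * col_dim < sys.maxsize  # guard in the style of matrix_blockize

    names = {"p": "out_p", "l": "out_l", "u": "out_u", "q": "out_q"}
    for key, table in list(names.items()):  # safe even if names is mutated
        print(key, table)
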
 
diff --git a/src/ports/postgres/modules/linalg/matrix_ops.sql_in b/src/ports/postgres/modules/linalg/matrix_ops.sql_in
index 6e33f46d..7ff448af 100644
--- a/src/ports/postgres/modules/linalg/matrix_ops.sql_in
+++ b/src/ports/postgres/modules/linalg/matrix_ops.sql_in
@@ -1002,7 +1002,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.matrix_info(
     message     TEXT
 ) RETURNS TEXT AS $$
 PythonFunction(linalg, matrix_help_message, matrix_info_help_message)
-$$ LANGUAGE plpythonu IMMUTABLE
+$$ LANGUAGE plpython3u IMMUTABLE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 
@@ -1063,7 +1063,7 @@ RETURNS MADLIB_SCHEMA.matrix_result AS $$
                                matrix_b, b_args,
                                matrix_out, out_args)
     return [matrix_out]
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION
@@ -1086,7 +1086,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.matrix_mult(
     message     TEXT
 ) RETURNS TEXT AS $$
 PythonFunction(linalg, matrix_help_message, matrix_mult_help_message)
-$$ LANGUAGE plpythonu IMMUTABLE
+$$ LANGUAGE plpython3u IMMUTABLE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.matrix_mult()
@@ -1120,7 +1120,7 @@ RETURNS INTEGER[] AS $$
     with AOControl(False):
         return matrix_ops.matrix_ndims(schema_madlib,
                                        matrix_in, in_args, is_block)
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION
@@ -1139,7 +1139,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.matrix_ndims(
     message     TEXT
 ) RETURNS TEXT AS $$
 PythonFunction(linalg, matrix_help_message, matrix_ndims_help_message)
-$$ LANGUAGE plpythonu IMMUTABLE
+$$ LANGUAGE plpython3u IMMUTABLE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 
@@ -1188,7 +1188,7 @@ RETURNS MADLIB_SCHEMA.matrix_result AS $$
                               matrix_b, b_args,
                               matrix_out, out_args)
     return [matrix_out]
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION
@@ -1210,7 +1210,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.matrix_add(
     message     TEXT
 ) RETURNS TEXT AS $$
 PythonFunction(linalg, matrix_help_message, matrix_add_help_message)
-$$ LANGUAGE plpythonu IMMUTABLE
+$$ LANGUAGE plpython3u IMMUTABLE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.matrix_add()
@@ -1258,7 +1258,7 @@ RETURNS MADLIB_SCHEMA.matrix_result AS $$
                               matrix_b, b_args,
                               matrix_out, out_args)
     return [matrix_out]
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION
@@ -1280,7 +1280,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.matrix_sub(
     message     TEXT
 ) RETURNS TEXT AS $$
 PythonFunction(linalg, matrix_help_message, matrix_sub_help_message)
-$$ LANGUAGE plpythonu IMMUTABLE
+$$ LANGUAGE plpython3u IMMUTABLE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION
@@ -1334,14 +1334,14 @@ RETURNS FLOAT8[] AS $$
     PythonFunctionBodyOnly(`linalg', `matrix_ops')
     with AOControl(False):
         return matrix_ops.matrix_extract(schema_madlib, matrix_in, in_args, 1, index)
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.matrix_extract_row(
     message     TEXT
 ) RETURNS TEXT AS $$
 PythonFunction(linalg, matrix_help_message, matrix_extract_row_help_message)
-$$ LANGUAGE plpythonu IMMUTABLE
+$$ LANGUAGE plpython3u IMMUTABLE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.matrix_extract_row()
@@ -1379,14 +1379,14 @@ RETURNS FLOAT8[] AS $$
     PythonFunctionBodyOnly(`linalg', `matrix_ops')
     with AOControl(False):
         return matrix_ops.matrix_extract(schema_madlib, matrix_in, in_args, 2, index)
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.matrix_extract_col(
     message     TEXT
 ) RETURNS TEXT AS $$
 PythonFunction(linalg, matrix_help_message, matrix_extract_col_help_message)
-$$ LANGUAGE plpythonu IMMUTABLE
+$$ LANGUAGE plpython3u IMMUTABLE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.matrix_extract_col()
@@ -1420,7 +1420,7 @@ RETURNS MADLIB_SCHEMA.matrix_result AS $$
                                 row_dim, col_dim,
                                 matrix_out, out_args)
     return [matrix_out]
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION
@@ -1439,7 +1439,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.matrix_zeros(
     message     TEXT
 ) RETURNS TEXT AS $$
 PythonFunction(linalg, matrix_help_message, matrix_zeros_help_message)
-$$ LANGUAGE plpythonu IMMUTABLE
+$$ LANGUAGE plpython3u IMMUTABLE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.matrix_zeros()
@@ -1483,7 +1483,7 @@ RETURNS MADLIB_SCHEMA.matrix_result AS $$
         matrix_ops.matrix_max(schema_madlib,
                               matrix_in, in_args, dim, matrix_out, fetch_index)
     return [matrix_out]
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
 
 CREATE OR REPLACE FUNCTION
@@ -1504,7 +1504,7 @@ CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.matrix_max(
     message     TEXT
 ) RETURNS TEXT AS $$
 PythonFunction(linalg, matrix_help_message, matrix_max_help_message)
-$$ LANGUAGE plpythonu IMMUTABLE
+$$ LANGUAGE plpython3u IMMUTABLE
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `CONTAINS SQL', `');
 
 CREATE OR REPLACE FUNCTION MADLIB_SCHEMA.matrix_max()
@@ -1549,7 +1549,7 @@ RETURNS MADLIB_SCHEMA.matrix_result AS $$
         matrix_ops.matrix_min(schema_madlib,
                               matrix_in, in_args, dim, matrix_out, fetch_index)
     return [matrix_out]
-$$ LANGUAGE plpythonu
+$$ LANGUAGE plpython3u
 m4_ifdef(`__HAS_FUNCTION_PROPERTIES__', `MODIFIES SQL DATA', `');
... 27415 lines suppressed ...