Posted to commits@systemml.apache.org by mb...@apache.org on 2020/04/23 20:06:42 UTC

[systemml] branch master updated: [SYSTEMDS-315] Python Federated Matrices (test, docs, scripts)

This is an automated email from the ASF dual-hosted git repository.

mboehm7 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/systemml.git


The following commit(s) were added to refs/heads/master by this push:
     new 0fe05a9  [SYSTEMDS-315] Python Federated Matrices (test, docs, scripts)
0fe05a9 is described below

commit 0fe05a97840238c0130e58c1e4ec19b9195bb1a9
Author: Sebastian <ba...@tugraz.at>
AuthorDate: Thu Apr 23 21:59:44 2020 +0200

    [SYSTEMDS-315] Python Federated Matrices (test, docs, scripts)
    
    - Easy start of federated worker in /bin/systemds.sh
    - Setup of tests for federated matrices in the Python language bindings
      - Tests of the basic federated operations
      - Commented out "advanced" functionality that is intended for later.
    - Initial tutorial on Python federated matrices
    - Minor bug fix in the federated matrix, which previously did not allow multiple sources
    - GitHub workflow action for automated federated tests
    
    Closes #871.
---
 .github/workflows/federatedPython.yml              |  85 +++++++
 README.md                                          |   8 +-
 bin/README.md                                      |  81 ++++--
 bin/systemds.sh                                    |  40 ++-
 docker/build.sh                                    |   3 +
 docker/{build.sh => pythonsysds.Dockerfile}        |  14 +-
 docker/sysds.Dockerfile                            |  13 +-
 docker/testsysds.Dockerfile                        |  10 +-
 docs/README.md                                     |  25 +-
 docs/Tasks.txt                                     |   1 +
 src/assembly/bin/README.md                         |   6 +-
 src/main/python/docs/source/federated.rst          | 126 +++++++++
 src/main/python/docs/source/index.rst              |   7 +
 src/main/python/systemds/matrix/matrix.py          |   3 +-
 src/main/python/tests/federated/runFedTest.sh      |  67 +++++
 .../tests/federated/test_federated_aggregations.py | 236 +++++++++++++++++
 .../python/tests/federated/test_federated_basic.py | 281 +++++++++++++++++++++
 17 files changed, 940 insertions(+), 66 deletions(-)
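
For orientation, the federated.rst tutorial and the Python tests added in this commit exercise the new API roughly as in the following sketch (a minimal example, assuming a worker was started with `systemds.sh WORKER 8001` and that it can read a 3x3 `temp/test.csv` with a matching `test.csv.mtd`, as described in the tutorial):

    # Minimal federated usage, based on the added docs/tests; port and path are examples.
    from systemds.context import SystemDSContext
    from systemds.matrix import federated

    address = "localhost:8001/temp/test.csv"  # worker address + file path on the worker
    dims = ([0, 0], [3, 3])                   # begin and end coordinates of the data

    with SystemDSContext() as sds:
        fed_a = federated(sds, [address], [dims])  # create a federated matrix
        # Aggregate on the worker and fetch the result;
        # for the tutorial's 3x3 matrix of 1..9 this prints 45.0.
        print(fed_a.sum().compute())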

diff --git a/.github/workflows/federatedPython.yml b/.github/workflows/federatedPython.yml
new file mode 100644
index 0000000..9ec7b20
--- /dev/null
+++ b/.github/workflows/federatedPython.yml
@@ -0,0 +1,85 @@
+#-------------------------------------------------------------
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#-------------------------------------------------------------
+
+name: Federated Python Test
+
+on: [push, pull_request]
+
+jobs:
+  applicationsTests:
+    runs-on: ${{ matrix.os }}
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: [3.6]
+        os: [ubuntu-latest]
+        java: [ 1.8 ]
+    name:  Python Test
+    steps:
+    - name: Checkout Repository
+      uses: actions/checkout@v2
+
+    - name: Setup Java
+      uses: actions/setup-java@v1
+      with:
+        java-version: ${{ matrix.java }}
+
+    - name: Cache Maven Dependencies
+      uses: actions/cache@v1
+      with:
+        path: ~/.m2/repository
+        key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
+        restore-keys: |
+          ${{ runner.os }}-maven-
+  
+    - name: Maven clean & package
+      run: mvn clean package
+
+    - name: Setup Python
+      uses: actions/setup-python@v1
+      with:
+        python-version: ${{ matrix.python-version }}
+        architecture: 'x64'
+
+    - name: Cache Pip Dependencies
+      uses: actions/cache@v1
+      with:
+        path: ~/.cache/pip
+        key: ${{ runner.os }}-pip-${{ matrix.python-version }}-${{ hashFiles('src/main/python/setup.py') }}
+        restore-keys: |
+          ${{ runner.os }}-pip-${{ matrix.python-version }}-
+  
+    - name: Install pip Dependencies
+      run: pip install numpy py4j wheel
+
+    - name: Build Python Package
+      run: |
+        cd src/main/python
+        python create_python_dist.py
+
+    - name: Run Federated Python Tests
+      run: |
+        export SYSTEMDS_ROOT=$(pwd)
+        export PATH=$SYSTEMDS_ROOT/bin:$PATH
+        cd src/main/python
+        ./tests/federated/runFedTest.sh tests/federated/test_federated_aggregations.py
+        sleep 3
+        ./tests/federated/runFedTest.sh tests/federated/test_federated_basic.py
diff --git a/README.md b/README.md
index a9cb743..ce1d574 100644
--- a/README.md
+++ b/README.md
@@ -21,11 +21,13 @@ limitations under the License.
 
 **Overview:** SystemDS is a versatile system for the end-to-end data science lifecycle from data integration, cleaning, and feature engineering, over efficient, local and distributed ML model training, to deployment and serving. To this end, we aim to provide a stack of declarative languages with R-like syntax for (1) the different tasks of the data-science lifecycle, and (2) users with different expertise. These high-level scripts are compiled into hybrid execution plans of local, in-me [...]
 
-**Documentation:** [SystemDS Documentation](https://github.com/apache/systemml/tree/master/docs)
+**Quick Start:** [Install, Quick Start and Hello World](/bin/README.md)
 
-**Status and Build:** SystemDS is still in pre-alpha status. The original code base was forked from [**Apache SystemML**](http://systemml.apache.org/) 1.2 in September 2018. We will continue to support linear algebra programs over matrices, while replacing the underlying data model and compiler, as well as substantially extending the supported functionalities. Until the first release, you can build your own snapshot via Apache Maven: `mvn -DskipTests clean package`.
+**Documentation:** [SystemDS Documentation](/docs/README.md)
+
+**Python Documentation:** [Python SystemDS Documentation](https://damslab.github.io/docs/sysdspython/index.html)
 
-## Status
+**Status and Build:** SystemDS is still in pre-alpha status. The original code base was forked from [**Apache SystemML**](http://systemml.apache.org/) 1.2 in September 2018. We will continue to support linear algebra programs over matrices, while replacing the underlying data model and compiler, as well as substantially extending the supported functionalities. Until the first release, you can build your own snapshot via Apache Maven: `mvn -DskipTests clean package`.
 
 [![License](https://img.shields.io/badge/License-Apache%202.0-gre.svg)](https://opensource.org/licenses/Apache-2.0)
 ![Build](https://github.com/apache/systemml/workflows/Build/badge.svg)
diff --git a/bin/README.md b/bin/README.md
index 8c812b8..2ed2c4b 100644
--- a/bin/README.md
+++ b/bin/README.md
@@ -17,46 +17,79 @@ limitations under the License.
 {% end comment %}
 -->
 
-## Scripts to run SystemDS
-This directory contains scripts to launch systemds.   
+# Scripts to run SystemDS
+
+This directory contains scripts to launch systemds.
 
 ## Setting SYSTEMDS_ROOT environment variable
-In order to run SystemDS from your development directory and leave the 
-SystemDS source tree untouched, the following setup could be used (example for bash):
- ```shell script
-$ export SYSTEMDS_ROOT=/home/$USER/systemds
-$ export PATH=$SYSTEMDS_ROOT/bin:$PATH
+
+In order to run SystemDS from your development directory and leave the
+SystemDS files untouched, the following setup can be used (example for bash).
+The settings are the same if you download a release of SystemDS.
+
+The following example works if you open a terminal at the root of the downloaded release,
+or a cloned repository. You can also replace `$(pwd)` with the full path to the folder.
+
+```bash
+export SYSTEMDS_ROOT=$(pwd)
+export PATH=$SYSTEMDS_ROOT/bin:$PATH
 ```
 
-## Running a first example:
-To see SystemDS in action a simple example using the `Univar-stats.dml` 
-script can be executed. This example is taken from the 
-[SystemML documentation](http://apache.github.io/systemml/standalone-guide). 
+It can be beneficial to add these to your `~/.profile` on Linux,
+or to your environment variables on Windows, so that they persist across terminals and restarts.
+
+## Hello World example
+
+To quickly verify that the system is set up correctly,
+you can run a simple hello world using the launch script.
+
+First open a terminal, go to an empty folder, and execute the following.
+
+```bash
+# Create a hello World script
+echo 'print("HelloWorld!")' > hello.dml
+# Execute hello world Script
+systemds.sh hello.dml
+# Remove the hello.dml
+rm hello.dml
+```
+
+## Running a first example
+
+To see SystemDS in action a simple example using the `Univar-stats.dml`
+script can be executed. This example is taken from the
+[SystemML documentation](http://apache.github.io/systemml/standalone-guide).
 The relevant commands to run this example with SystemDS will be listed here.
 See their documentation for further details.  
 
-#### Example preparations
-```shell script
+### Example preparations
+
+```bash
 # download test data
-$ wget -P data/ http://archive.ics.uci.edu/ml/machine-learning-databases/haberman/haberman.data
+wget -P data/ http://archive.ics.uci.edu/ml/machine-learning-databases/haberman/haberman.data
 
 # generate a metadata file for the dataset
-$ echo '{"rows": 306, "cols": 4, "format": "csv"}' > data/haberman.data.mtd
+echo '{"rows": 306, "cols": 4, "format": "csv"}' > data/haberman.data.mtd
 
 # generate type description for the data
-$ echo '1,1,1,2' > data/types.csv
-$ echo '{"rows": 1, "cols": 4, "format": "csv"}' > data/types.csv.mtd
+echo '1,1,1,2' > data/types.csv
+echo '{"rows": 1, "cols": 4, "format": "csv"}' > data/types.csv.mtd
 ```
-#### Executing the DML script
+
+### Executing the DML script
+
 ```shell script
-$ bin/systemds.sh Univar-Stats.dml -nvargs X=data/haberman.data TYPES=data/types.csv STATS=data/univarOut.mtx CONSOLE_OUTPUT=TRUE
+bin/systemds.sh Univar-Stats.dml -nvargs X=data/haberman.data TYPES=data/types.csv STATS=data/univarOut.mtx CONSOLE_OUTPUT=TRUE
 ```
 
-#### Using Intel MKL native instructions
-To use the MKL acceleration download and install the latest MKL library from [1], 
-set the environment variables with the MKL-provided script `$ compilervars.sh intel64` and set 
+## Using Intel MKL native instructions
+
+To use the MKL acceleration download and install the latest MKL library from [1],
+set the environment variables with the MKL-provided script `$ compilervars.sh intel64` and set
 the option `sysds.native.blas` in `SystemDS-config.xml`.
 
-## Further reading 
+[1]: https://software.intel.com/mkl "Intel Math Kernel Library"
+
+## Further reading
 
-More documentation is available in the [docs directory of our github repository](https://github.com/apache/systemml/tree/master/docs) 
+More documentation is available in the [docs directory of our github repository](/docs/README.md)
diff --git a/bin/systemds.sh b/bin/systemds.sh
index ab291cd..80e9721 100755
--- a/bin/systemds.sh
+++ b/bin/systemds.sh
@@ -86,6 +86,10 @@ Usage: $0 [SystemDS.jar] <dml-filename> [arguments] [-help]
                    run script.
     -help        - Print this usage message and exit
 
+Worker Usage: $0 WORKER [SystemDS.jar] <portnumber> [arguments] [-help]
+
+    port         - The port to open for the federated worker.
+
 Set custom launch configuration by setting/editing SYSTEMDS_STANDALONE_OPTS and/or SYSTEMDS_DISTRIBUTED_OPTS
 
 Set the environment variable SYSDS_DISTRIBUTED=1 to run spark-submit instead of local java
@@ -94,7 +98,7 @@ EOF
   exit 1
 }
 
-# print an error if no dml file is supplied
+# print an error if no argument is supplied.
 if [ -z "$1" ] ; then
     echo "Wrong Usage.";
     printUsageExit;
@@ -183,11 +187,28 @@ if  echo "$1" | grep -q "jar"; then
   shift
   SCRIPT_FILE=$1
   shift
+elif echo "$1" | grep -q "WORKER"; then
+  WORKER=1
+  shift
+  if echo "$1" | grep -q "jar"; then
+    SYSTEMDS_JAR_FILE=$1
+    shift
+  fi
+  PORT=$1
+  re='^[0-9]+$'
+  if ! [[ $PORT =~ $re ]] ; then
+    echo "error: Port is not a number"
+    printUsageExit
+  fi
 else
   SCRIPT_FILE=$1
   shift
 fi
 
+if [ -z "$WORKER" ] ; then
+  WORKER=0
+fi
+
 
 if [ -z "$SYSTEMDS_ROOT" ] ; then
   SYSTEMDS_ROOT=.
@@ -268,7 +289,22 @@ print_out "#  CLASSPATH= $CLASSPATH"
 print_out "#  HADOOP_HOME= $HADOOP_HOME"
 
 #build the command to run
-if [ $SYSDS_DISTRIBUTED == 0 ]; then
+if [ $WORKER == 1 ]; then
+  print_out "#"
+  print_out "#  starting Fedederated worker on port $PORT"
+  print_out "###############################################################################"
+
+  print_out "Executing command: $CMD"
+  print_out  ""
+
+  CMD=" \
+  java $SYSTEMDS_STANDALONE_OPTS \
+  -cp $CLASSPATH \
+  -Dlog4j.configuration=file:$LOG4JPROP \
+  org.apache.sysds.api.DMLScript \
+  -w $PORT"
+
+elif [ $SYSDS_DISTRIBUTED == 0 ]; then
   print_out "#"
   print_out "#  Running script $SCRIPT_FILE locally with opts: $*"
   print_out "###############################################################################"
diff --git a/docker/build.sh b/docker/build.sh
index e0799eb..73add3c 100755
--- a/docker/build.sh
+++ b/docker/build.sh
@@ -28,5 +28,8 @@ docker image build -f docker/sysds.Dockerfile -t sebaba/sysds:0.2 .
 # The second build is for testing systemds. This image installs the R dependencies needed to run the tests.
 docker image build -f docker/testsysds.Dockerfile -t sebaba/testingsysds:0.2 .
 
+# The third build is the Python docker image for systemds.
+docker image build -f docker/pythonsysds.Dockerfile -t sebaba/pythonsysds:0.2 .
+
 # You might want to prune the docker system afterwards using
 # docker system prune
\ No newline at end of file
diff --git a/docker/build.sh b/docker/pythonsysds.Dockerfile
old mode 100755
new mode 100644
similarity index 67%
copy from docker/build.sh
copy to docker/pythonsysds.Dockerfile
index e0799eb..1ddb691
--- a/docker/build.sh
+++ b/docker/pythonsysds.Dockerfile
@@ -1,4 +1,3 @@
-#/bin/bash 
 #-------------------------------------------------------------
 #
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -20,13 +19,10 @@
 #
 #-------------------------------------------------------------
 
-# Build the docker containers
+FROM openjdk:8-alpine
 
-# The first build is for running systemds through docker.
-docker image build -f docker/sysds.Dockerfile -t sebaba/sysds:0.2 .
+RUN apk add --no-cache --virtual .build-deps g++ python3-dev libffi-dev openssl-dev && \
+	apk add --no-cache python3 && \
+	pip3 install --upgrade pip setuptools
 
-# The second build is for testing systemds. This image installs the R dependencies needed to run the tests.
-docker image build -f docker/testsysds.Dockerfile -t sebaba/testingsysds:0.2 .
-
-# You might want to prune the docker system afterwards using
-# docker system prune
\ No newline at end of file
+RUN pip3 install systemds
diff --git a/docker/sysds.Dockerfile b/docker/sysds.Dockerfile
index ee08bae..ce8cd4f 100644
--- a/docker/sysds.Dockerfile
+++ b/docker/sysds.Dockerfile
@@ -32,15 +32,12 @@ ENV MAVEN_HOME /usr/lib/mvn
 ENV PATH $MAVEN_HOME/bin:$PATH
 
 RUN wget http://archive.apache.org/dist/maven/maven-3/$MAVEN_VERSION/binaries/apache-maven-$MAVEN_VERSION-bin.tar.gz && \
-  tar -zxvf apache-maven-$MAVEN_VERSION-bin.tar.gz && \
-  rm apache-maven-$MAVEN_VERSION-bin.tar.gz && \
-  mv apache-maven-$MAVEN_VERSION /usr/lib/mvn
+	tar -zxvf apache-maven-$MAVEN_VERSION-bin.tar.gz && \
+	rm apache-maven-$MAVEN_VERSION-bin.tar.gz && \
+	mv apache-maven-$MAVEN_VERSION /usr/lib/mvn
 
 # Install Extras
-RUN apk update && \
-    apk upgrade && \
-    apk add git && \ 
-    apk add bash
+RUN apk add --no-cache git bash
 
 RUN git clone https://github.com/apache/systemml.git
 
@@ -56,7 +53,7 @@ ENV PATH $SYSTEMDS_ROOT/bin:$PATH
 
 # Remove extra files.
 RUN rm -r src/ && \
-    rm -r .git
+	rm -r .git
 
 COPY docker/mountFolder/main.dml /input/main.dml
 
diff --git a/docker/testsysds.Dockerfile b/docker/testsysds.Dockerfile
index acaffcc..1e33ceb 100644
--- a/docker/testsysds.Dockerfile
+++ b/docker/testsysds.Dockerfile
@@ -32,13 +32,13 @@ ENV MAVEN_HOME /usr/lib/mvn
 ENV PATH $MAVEN_HOME/bin:$PATH
 
 RUN wget http://archive.apache.org/dist/maven/maven-3/$MAVEN_VERSION/binaries/apache-maven-$MAVEN_VERSION-bin.tar.gz && \
-  tar -zxvf apache-maven-$MAVEN_VERSION-bin.tar.gz && \
-  rm apache-maven-$MAVEN_VERSION-bin.tar.gz && \
-  mv apache-maven-$MAVEN_VERSION /usr/lib/mvn
+	tar -zxvf apache-maven-$MAVEN_VERSION-bin.tar.gz && \
+	rm apache-maven-$MAVEN_VERSION-bin.tar.gz && \
+	mv apache-maven-$MAVEN_VERSION /usr/lib/mvn
 
 # Install Extras
 RUN apt-get update -qq && \
-    apt-get upgrade -y && \
+	apt-get upgrade -y && \
 	apt-get install openjdk-8-jdk-headless -y
 
 COPY ./src/test/scripts/installDependencies.R installDependencies.R
@@ -48,4 +48,4 @@ RUN Rscript installDependencies.R
 
 COPY ./docker/entrypoint.sh /entrypoint.sh
 
-ENTRYPOINT ["/entrypoint.sh"]
\ No newline at end of file
+ENTRYPOINT ["/entrypoint.sh"]
diff --git a/docs/README.md b/docs/README.md
index 3488ed0..4cc68d8 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -20,17 +20,20 @@ limitations under the License.
 # SystemDS Documentation
 
 Various forms of documentation for SystemDS are available.
-In this directory you'll find 
-* a DML language reference
-* a description of builtin functions (WIP)
-* coding style settings for Eclipse (compatible with various other IDEs)
-* an enumerated list of open and completed tasks
+In this directory you'll find
 
-### Pointers to more documentation
-* A [hello world example](https://github.com/apache/systemml/blob/master/src/assembly/bin/README.md) (shipped with the binary distribution) to get you started on how to run SystemDS
-* An extended introductory [example](https://github.com/apache/systemml/blob/master/bin/README.md)
-* Instructions on how to build the [python bindings documentation](https://github.com/apache/systemml/blob/master/src/main/python/docs/README.md)
-* [Packaging](https://github.com/apache/systemml/blob/master/src/main/python/BUILD_INSTRUCTIONS.md)
- the python bindings yourself 
+* a [DML language reference](./dml-language-reference.md)
+* a description of [builtin functions (WIP)](./builtins-reference.md)
+* [coding style settings](./CodeStyle_eclipse.xml) for Eclipse (compatible with various other IDEs)
+  * More information inside our [CONTRIBUTING.md](/CONTRIBUTING.md)
+* an enumerated list of open and completed [tasks](./Tasks.txt)
+
+## Pointers to more documentation
+
+* A [hello world example](/src/assembly/bin/README.md) (shipped with the binary distribution) to get you started on how to run SystemDS
+* An extended introductory [example](/bin/README.md)
+* Instructions on how to build the [python bindings documentation](/src/main/python/docs/README.md)
+* [Packaging](/src/main/python/BUILD_INSTRUCTIONS.md)
+ the python bindings yourself
 * The generated javadoc output will be available from the [releases page](https://github.com/apache/systemml/releases)
   
\ No newline at end of file
diff --git a/docs/Tasks.txt b/docs/Tasks.txt
index 7a61c05..6e6118c 100644
--- a/docs/Tasks.txt
+++ b/docs/Tasks.txt
@@ -234,6 +234,7 @@ SYSTEMDS-310 Python Bindings
  * 312 Python 3.6 compatibility                                       OK
  * 313 Python Documentation upload via Github Actions                 OK
  * 314 Python SystemDS context manager                                OK
+ * 315 Python Federated Matrices Tests                                OK
 
 SYSTEMDS-320 Merge SystemDS into Apache SystemML                      OK
  * 321 Merge histories of SystemDS and SystemML                       OK
diff --git a/src/assembly/bin/README.md b/src/assembly/bin/README.md
index 3ad5b6c..a0688fe 100644
--- a/src/assembly/bin/README.md
+++ b/src/assembly/bin/README.md
@@ -30,7 +30,7 @@ limitations under the License.
 Requirements for running SystemDS are a bash shell and OpenJDK 8 or a Spark 2 cluster installation (to run distributed jobs). 
 These requirements should be available via standard system packages in all major Linux distributions 
 (make sure to have the right JDK version enabled, if you have multiple versions in your system).
-For Windows, a bash comes with [git for windows](http://git-scm.com) and OpenJDK builds can be optained at http://adoptopenjdk.net
+For Windows, a bash comes with [git for windows](http://git-scm.com) and OpenJDK builds can be obtained at http://adoptopenjdk.net
 (tested version [jdk8u232-b09](https://adoptopenjdk.net/archive.html))  
 
 To start out with an example after having installed the requirements mentioned above, create a text file  
@@ -74,8 +74,8 @@ $ SYSTEMDS_ROOT=../../code/my-systemds/source  ./systemds.sh hello.dml -args 10
 #### Running a script distributed on a Spark cluster 
 For running on a Spark cluster, the env variable SYSDS_DISTRIBUTED needs to be set (to something other than 0).
 Per default, SystemDS will run in hybrid mode, pushing some instructions to the cluster and running others locally.
-To force cluster mode in this little test, we will increase the matrix size to give the woker nodes in the cluster 
-something to do and force SystemDS to only generate Spark instructions by adding -exec spark to the command line 
+To force cluster mode in this little test, we will increase the matrix size to give the worker nodes in the cluster
+something to do and force SystemDS to only generate Spark instructions by adding -exec spark to the command line
 parameters:
 ```shell script
 $ SYSDS_DISTRIBUTED=1 ./systemds.sh hello.dml -args 10000 10000 1.0 -exec spark
diff --git a/src/main/python/docs/source/federated.rst b/src/main/python/docs/source/federated.rst
new file mode 100644
index 0000000..b2de7bb
--- /dev/null
+++ b/src/main/python/docs/source/federated.rst
@@ -0,0 +1,126 @@
+.. -------------------------------------------------------------
+.. 
+.. Licensed to the Apache Software Foundation (ASF) under one
+.. or more contributor license agreements.  See the NOTICE file
+.. distributed with this work for additional information
+.. regarding copyright ownership.  The ASF licenses this file
+.. to you under the Apache License, Version 2.0 (the
+.. "License"); you may not use this file except in compliance
+.. with the License.  You may obtain a copy of the License at
+.. 
+..   http://www.apache.org/licenses/LICENSE-2.0
+.. 
+.. Unless required by applicable law or agreed to in writing,
+.. software distributed under the License is distributed on an
+.. "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+.. KIND, either express or implied.  See the License for the
+.. specific language governing permissions and limitations
+.. under the License.
+.. 
+.. ------------------------------------------------------------
+
+Federated SystemDS
+==================
+
+The SystemDS Python API supports federated execution.
+To enable this, each of the federated environments has to have
+a running federated worker.
+
+Start Federated worker
+----------------------
+
+To start a federated worker, you first have to set up your environment variables.
+A simple guide to do this is in the SystemDS Repository_.
+
+.. _Repository: https://github.com/apache/systemml/tree/master/bin/
+
+If that is set up correctly, simply start a worker using the following command,
+where ``8001`` refers to the port used by the worker::
+
+  systemds.sh WORKER 8001
+
+Simple Aggregation Example
+--------------------------
+
+In this example we use a single federated worker, and aggregate the sum of its data.
+
+First we need to create some data for our federated worker to use.
+In this example we simply use NumPy to create a ``test.csv`` file::
+
+  # Import numpy
+  import numpy as np
+  a = np.asarray([[1,2,3],[4,5,6],[7,8,9]])
+  np.savetxt("temp/test.csv", a, delimiter=",")
+
+Currently we also require a metadata file for the federated worker.
+This file, named ``test.csv.mtd``, should be located next to the ``test.csv`` file.
+To create it, simply execute the following::
+
+  echo '{ "format":"csv", "header":false, "rows":3, "cols":3 }' > temp/test.csv.mtd
+
+After creating our data, the federated worker becomes able to execute federated instructions.
+Computing the aggregated sum using federated instructions in the SystemDS Python API is done as follows::
+
+  # Import numpy and SystemDS federated
+  import numpy as np
+  from systemds.matrix import federated
+  from systemds.context import SystemDSContext
+  # Create a federated matrix
+  ## Indicate the dimensions of the data:
+  ### Here the first list in the tuple is the top left coordinate,
+  ### and the second the bottom right coordinate.
+  ### It is ordered as [row,col].
+  dims = ([0,0],[3,3])
+  ## Specify the address + file path from worker:
+  address = "localhost:8001/temp/test.csv"
+  with SystemDSContext() as sds:
+      fed_a = federated(sds, [address], [dims])
+      # Sum the federated matrix and call compute to execute
+      print(fed_a.sum().compute())
+      # Result should be 45.
+
+Multiple Federated Environments 
+-------------------------------
+
+In this example we multiply matrices that are located in different federated environments.
+
+Using the data created in the last example, we can simulate
+multiple federated workers by starting workers on different ports.
+I recommend starting 3 different terminals and running one federated worker in each.
+
+| systemds.sh WORKER 8001
+| systemds.sh WORKER 8002
+| systemds.sh WORKER 8003
+
+Once all three workers are up and running we can leverage all three in the following example::
+
+  # Import numpy and SystemDS federated
+  import numpy as np
+  from systemds.matrix import federated
+  from systemds.context import SystemDSContext
+  # Create a federated matrix using two federated environments
+  # Note that the two federated matrices are placed side by side (columns 0-2 and 3-5)
+  with SystemDSContext() as sds:
+      fed_a = federated(sds, [
+          "localhost:8001/temp/test.csv",
+          "localhost:8002/temp/test.csv"
+          ], [([0,0],[3,3]), ([0,3],[3,6])])
+      # Create another federated matrix using the first environment again, and the third.
+      fed_b = federated(sds, [
+          "localhost:8001/temp/test.csv",
+          "localhost:8003/temp/test.csv"
+          ], [([0,0],[3,3]), ([0,3],[3,6])])
+      # Multiply, compute and print.
+      res = (fed_a * fed_b).compute()
+
+  print(res)
+
+The print should look like::
+
+  [[ 1.  4.  9.  1.  4.  9.]
+   [16. 25. 36. 16. 25. 36.]
+   [49. 64. 81. 49. 64. 81.]]
+
+
+:Author: Sebastian Baunsgaard
+:Version: 1.0 of 2020/03/26
diff --git a/src/main/python/docs/source/index.rst b/src/main/python/docs/source/index.rst
index 34ae76c..cdcb0a2 100644
--- a/src/main/python/docs/source/index.rst
+++ b/src/main/python/docs/source/index.rst
@@ -49,6 +49,13 @@ tensors (multi-dimensional arrays) whose first dimension may have a heterogeneou
 .. toctree::
    :maxdepth: 1
    :hidden:
+   :caption: Guides
+
+   federated.rst
+
+.. toctree::
+   :maxdepth: 1
+   :hidden:
    :caption: Central Classes
 
    matrix.rst
diff --git a/src/main/python/systemds/matrix/matrix.py b/src/main/python/systemds/matrix/matrix.py
index 1509dbe..d4cb9b1 100644
--- a/src/main/python/systemds/matrix/matrix.py
+++ b/src/main/python/systemds/matrix/matrix.py
@@ -104,7 +104,8 @@ def federated(sds_context: 'SystemDSContext', addresses: Iterable[str],
     addresses_str = 'list(' + ','.join(map(lambda s: f'"{s}"', addresses)) + ')'
     ranges_str = 'list('
     for begin, end in ranges:
-        ranges_str += f'list({",".join(map(str, begin))}), list({",".join(map(str, end))})'
+        ranges_str += f'list({",".join(map(str, begin))}), list({",".join(map(str, end))}),'
+    ranges_str = ranges_str[:-1]
     ranges_str += ')'
     named_params = {'addresses': addresses_str, 'ranges': ranges_str}
     named_params.update(kwargs)
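
The one-line change above is the "multiple sources" bug fix mentioned in the commit message: previously, consecutive (begin, end) pairs were concatenated without a separating comma, producing an invalid DML list whenever more than one range was passed. A standalone sketch of the fixed string building (same logic on example ranges, not the actual systemds code path):

    # Rebuild the ranges string the way the patched federated() now does.
    ranges = [([0, 0], [5, 5]), ([0, 5], [5, 10])]  # two example ranges

    ranges_str = 'list('
    for begin, end in ranges:
        ranges_str += f'list({",".join(map(str, begin))}), list({",".join(map(str, end))}),'
    ranges_str = ranges_str[:-1]  # drop the trailing comma added by the loop
    ranges_str += ')'

    print(ranges_str)
    # Output: list(list(0,0), list(5,5),list(0,5), list(5,10))
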
diff --git a/src/main/python/tests/federated/runFedTest.sh b/src/main/python/tests/federated/runFedTest.sh
new file mode 100755
index 0000000..33f82b5
--- /dev/null
+++ b/src/main/python/tests/federated/runFedTest.sh
@@ -0,0 +1,67 @@
+#!/bin/bash
+#-------------------------------------------------------------
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#-------------------------------------------------------------
+
+if [ "$#" -ne 1 ]; then
+	echo "Usage:   "$0" <federatedTest>"
+	echo "Example: "$0" ./tests/federated/test_federated_basic.py"
+	exit
+fi
+
+# FIELDS
+workerdir="tests/federated/worker/"
+outputdir="tests/federated/output/"
+mkdir $workerdir
+mkdir $outputdir
+w1_Output="$workerdir/w1"
+w2_Output="$workerdir/w2"
+log="$outputdir/out.log"
+
+# Make the workers start quietly and pipe their output to a file to print later
+export SYSDS_QUIET=1
+systemds.sh WORKER 8001 >$w1_Output 2>&1 &
+Fed1=$!
+systemds.sh WORKER 8002 >$w2_Output 2>&1 &
+Fed2=$!
+echo "Starting workers" && sleep 3 && echo "Starting tests"
+
+# Run test
+python $1 >$log 2>&1
+pkill -P $Fed1
+pkill -P $Fed2
+
+# Print output
+echo -e "\n---------------\nWorkers Output:\n---------------"
+echo -e "\nWorker 1:"
+cat $w1_Output
+echo -e "\nWorker 2:"
+cat $w2_Output
+rm -r $workerdir
+echo -e "\n------------\nTest output:\n------------"
+cat $log
+grepvals="$(tail -n 10 $log | grep OK)"
+rm -r $outputdir
+echo -e "------------\n"
+if [[ $grepvals == *"OK"* ]]; then
+	exit 0
+else
+	exit 1
+fi
diff --git a/src/main/python/tests/federated/test_federated_aggregations.py b/src/main/python/tests/federated/test_federated_aggregations.py
new file mode 100644
index 0000000..8d95ebb
--- /dev/null
+++ b/src/main/python/tests/federated/test_federated_aggregations.py
@@ -0,0 +1,236 @@
+# -------------------------------------------------------------
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# -------------------------------------------------------------
+
+# Make the `systemds` package importable
+import os
+import sys
+import warnings
+import unittest
+import json
+import io
+import shutil
+import numpy as np
+
+path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../")
+sys.path.insert(0, path)
+
+from systemds.context import SystemDSContext
+from systemds.matrix import federated
+
+dim = 5
+np.random.seed(132)
+m1 = np.array(np.random.randint(100, size=dim * dim) + 1.01, dtype=np.double)
+m1.shape = (dim, dim)
+m2 = np.array(np.random.randint(5, size=dim * dim) + 1, dtype=np.double)
+m2.shape = (dim, dim)
+
+tempdir = "/tmp/test_federated_aggregations/"
+mtd = {"format": "csv", "header": "false", "rows": dim, "cols": dim}
+
+# Create the testing directory if it does not exist.
+if not os.path.exists(tempdir):
+    os.makedirs(tempdir)
+
+# Save data files for the federated workers.
+np.savetxt(tempdir + "m1.csv", m1, delimiter=",")
+with io.open(tempdir + "m1.csv.mtd", "w", encoding="utf-8") as f:
+    f.write(json.dumps(mtd, ensure_ascii=False))
+
+np.savetxt(tempdir + "m2.csv", m2, delimiter=",")
+with io.open(tempdir + "m2.csv.mtd", "w", encoding="utf-8") as f:
+    f.write(json.dumps(mtd, ensure_ascii=False))
+
+# Federated workers + file locations
+fed1 = "localhost:8001/" + tempdir + "m1.csv"
+fed2 = "localhost:8002/" + tempdir + "m2.csv"
+
+sds = SystemDSContext()
+
+class TestFederatedAggFn(unittest.TestCase):
+    def setUp(self):
+        warnings.filterwarnings(
+            action="ignore", message="unclosed", category=ResourceWarning
+        )
+
+    def tearDown(self):
+        warnings.filterwarnings(
+            action="ignore", message="unclosed", category=ResourceWarning
+        )
+
+    def test_sum3a(self):
+        #   [[m1,m1,m1,m1,m1,m2,m2,m2,m2,m2]
+        #    [m1,m1,m1,m1,m1,m2,m2,m2,m2,m2]
+        #    [m1,m1,m1,m1,m1,m2,m2,m2,m2,m2]
+        #    [m1,m1,m1,m1,m1,m2,m2,m2,m2,m2]
+        #    [m1,m1,m1,m1,m1,m2,m2,m2,m2,m2]]
+        f_m_a = (
+            federated(sds,[fed1, fed2], [([0, 0], [dim, dim]), ([0, dim], [dim, dim * 2])])
+            .sum()
+            .compute()
+        )
+        m1_m2 = m1.sum() + m2.sum()
+        self.assertAlmostEqual(f_m_a, m1_m2)
+
+    def test_sum1(self):
+        f_m1 = federated(sds,[fed1], [([0, 0], [dim, dim])]).sum().compute()
+        m1_r = m1.sum()
+        self.assertAlmostEqual(f_m1, m1_r)
+
+    def test_sum2(self):
+        f_m2 = federated(sds,[fed2], [([0, 0], [dim, dim])]).sum().compute()
+        m2_r = m2.sum()
+        self.assertAlmostEqual(f_m2, m2_r)
+
+    def test_sum3(self):
+        #   [[m1,m1,m1,m1,m1,m2,m2,m2,m2,m2]
+        #    [m1,m1,m1,m1,m1,m2,m2,m2,m2,m2]
+        #    [m1,m1,m1,m1,m1,m2,m2,m2,m2,m2]
+        #    [m1,m1,m1,m1,m1,m2,m2,m2,m2,m2]
+        #    [m1,m1,m1,m1,m1,m2,m2,m2,m2,m2]]
+        f_m1_m2 = (
+            federated(sds,[fed1, fed2], [([0, 0], [dim, dim]), ([0, dim], [dim, dim * 2])])
+            .sum()
+            .compute()
+        )
+
+        m1_m2 = np.concatenate((m1, m2), axis=1).sum()
+
+        self.assertAlmostEqual(f_m1_m2, m1_m2)
+
+    def test_sum4(self):
+        #   [[m1,m1,m1,m1,m1]
+        #    [m1,m1,m1,m1,m1]
+        #    [m1,m1,m1,m1,m1]
+        #    [m1,m1,m1,m1,m1]
+        #    [m1,m1,m1,m1,m1]
+        #    [m2,m2,m2,m2,m2]
+        #    [m2,m2,m2,m2,m2]
+        #    [m2,m2,m2,m2,m2]
+        #    [m2,m2,m2,m2,m2]
+        #    [m2,m2,m2,m2,m2]]
+        f_m1_m2 = (
+            federated(sds,[fed1, fed2], [([0, 0], [dim, dim]), ([dim, 0], [dim * 2, dim])])
+            .sum()
+            .compute()
+        )
+        m1_m2 = np.concatenate((m1, m2)).sum()
+        self.assertAlmostEqual(f_m1_m2, m1_m2)
+
+    # -----------------------------------
+    # The rest of the tests cover extended
+    # functionality that is not working yet.
+    # -----------------------------------
+
+    # def test_sum5(self):
+    #     #   [[m1,m1,m1,m1,m1, 0, 0, 0, 0, 0]
+    #     #    [m1,m1,m1,m1,m1, 0, 0, 0, 0, 0]
+    #     #    [m1,m1,m1,m1,m1,m2,m2,m2,m2,m2]
+    #     #    [m1,m1,m1,m1,m1,m2,m2,m2,m2,m2]
+    #     #    [m1,m1,m1,m1,m1,m2,m2,m2,m2,m2]
+    #     #    [ 0, 0, 0, 0, 0,m2,m2,m2,m2,m2]
+    #     #    [ 0, 0, 0, 0, 0,m2,m2,m2,m2,m2]]
+    #     f_m_a = (
+    #         federated(sds,
+    #             [fed1, fed2], [([0, 0], [dim, dim]), ([2, dim], [dim + 2, dim * 2])]
+    #         )
+    #         .sum()
+    #         .compute()
+    #     )
+    #     m1_m2 = m1.sum() + m2.sum()
+    #     self.assertAlmostEqual(f_m_a, m1_m2)
+
+    # def test_sum6(self):
+    #     # Note it overwrites the value in the field. not sum or anything else.
+    #     #   [[m1,m1,m1,m1,m1, 0, 0, 0]
+    #     #    [m1,m1,m1,m1,m1, 0, 0, 0]
+    #     #    [m1,m1,m1,m2,m2,m2,m2,m2]
+    #     #    [m1,m1,m1,m2,m2,m2,m2,m2]
+    #     #    [m1,m1,m1,m2,m2,m2,m2,m2]
+    #     #    [ 0, 0, 0,m2,m2,m2,m2,m2]
+    #     #    [ 0, 0, 0,m2,m2,m2,m2,m2]]
+    #     f_m_a = (
+    #         federated(sds,
+    #             [fed1, fed2], [([0, 0], [dim, dim]), ([2, 3], [dim + 2, dim + 3])]
+    #         )
+    #         .sum()
+    #         .compute()
+    #     )
+    #     m1_m2 = m1.sum() + m2.sum()
+
+    #     m1_m2 = m1_m2 - m1[3:5, 2:5].sum()
+    #     self.assertAlmostEqual(f_m_a, m1_m2)
+
+    # def test_sum7(self):
+    #     #   [[m1,m1,m1,m1,m1, 0, 0, 0]
+    #     #    [m1,m1,m1,m1,m1, 0, 0, 0]
+    #     #    [m1,m1,m1,m2,m2,m2,m2,m2]
+    #     #    [m1,m1,m1,m2,m2,m2,m2,m2]    +     1
+    #     #    [m1,m1,m1,m2,m2,m2,m2,m2]
+    #     #    [ 0, 0, 0,m2,m2,m2,m2,m2]
+    #     #    [ 0, 0, 0,m2,m2,m2,m2,m2]]
+    #     f_m_a = federated(sds,
+    #         [fed1, fed2], [([0, 0], [dim, dim]), ([2, 3], [dim + 2, dim + 3])]
+    #     )
+    #     f_m_a = (f_m_a + 1).sum().compute()
+
+    #     m1_m2 = m1.sum() + m2.sum()
+    #     m1_m2 = m1_m2 - m1[3:5, 2:5].sum()
+    #     m1_m2 = m1_m2 + (7 * 8)
+
+    #     self.assertAlmostEqual(f_m_a, m1_m2)
+
+    # def test_sum8(self):
+    #     #   [[ 0, 0, 0, 0, 0, 0, 0, 0]
+    #     #    [ 0, 0, 0, 0, 0, 0, 0, 0]
+    #     #    [ 0, 0, 0,m1,m1,m1,m1,m1]
+    #     #    [ 0, 0, 0,m1,m1,m1,m1,m1]
+    #     #    [ 0, 0, 0,m1,m1,m1,m1,m1]
+    #     #    [ 0, 0, 0,m1,m1,m1,m1,m1]
+    #     #    [ 0, 0, 0,m1,m1,m1,m1,m1]]
+    #     f_m_a = federated(sds,[fed1], [([2, 3], [dim + 2, dim + 3])])
+    #     f_m_a = f_m_a.sum().compute()
+
+    #     m = m1.sum()
+
+    #     self.assertAlmostEqual(f_m_a, m)
+
+    # def test_sum9(self):
+    #     #   [[ 0, 0, 0, 0, 0, 0, 0, 0]
+    #     #    [ 0, 0, 0, 0, 0, 0, 0, 0]
+    #     #    [ 0, 0, 0,m1,m1,m1,m1,m1]
+    #     #    [ 0, 0, 0,m1,m1,m1,m1,m1]    +     1
+    #     #    [ 0, 0, 0,m1,m1,m1,m1,m1]
+    #     #    [ 0, 0, 0,m1,m1,m1,m1,m1]
+    #     #    [ 0, 0, 0,m1,m1,m1,m1,m1]]
+    #     f_m_a = federated(sds,[fed1], [([2, 3], [dim + 2, dim + 3])])
+    #     f_m_a = (f_m_a + 1).sum().compute()
+
+    #     m = m1.sum()
+    #     m = m + (7 * 8)
+
+    #     self.assertAlmostEqual(f_m_a, m)
+
+
+if __name__ == "__main__":
+    unittest.main(exit=False)
+    sds.close()
+    shutil.rmtree(tempdir)
diff --git a/src/main/python/tests/federated/test_federated_basic.py b/src/main/python/tests/federated/test_federated_basic.py
new file mode 100644
index 0000000..0418c8a
--- /dev/null
+++ b/src/main/python/tests/federated/test_federated_basic.py
@@ -0,0 +1,281 @@
+# -------------------------------------------------------------
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# -------------------------------------------------------------
+
+# Make the `systemds` package importable
+import os
+import sys
+import warnings
+import unittest
+import json
+import io
+import shutil
+import numpy as np
+
+path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../")
+sys.path.insert(0, path)
+
+from systemds.context import SystemDSContext
+from systemds.matrix import federated
+
+dim = 5
+np.random.seed(132)
+m1 = np.array(np.random.randint(100, size=dim * dim) + 1.01, dtype=np.double)
+m1.shape = (dim, dim)
+m2 = np.array(np.random.randint(5, size=dim * dim) + 1, dtype=np.double)
+m2.shape = (dim, dim)
+
+tempdir = "/tmp/test_federated_basic/"
+mtd = {"format": "csv", "header": "false", "rows": dim, "cols": dim}
+
+# Create the testing directory if it does not exist.
+if not os.path.exists(tempdir):
+    os.makedirs(tempdir)
+
+# Save data files for the federated workers.
+np.savetxt(tempdir + "m1.csv", m1, delimiter=",")
+with io.open(tempdir + "m1.csv.mtd", "w", encoding="utf-8") as f:
+    f.write(json.dumps(mtd, ensure_ascii=False))
+
+np.savetxt(tempdir + "m2.csv", m2, delimiter=",")
+with io.open(tempdir + "m2.csv.mtd", "w", encoding="utf-8") as f:
+    f.write(json.dumps(mtd, ensure_ascii=False))
+
+# Federated workers + file locations
+fed1 = "localhost:8001/" + tempdir + "m1.csv"
+fed2 = "localhost:8002/" + tempdir + "m2.csv"
+
+sds = SystemDSContext()
+
+class TestFederatedAggFn(unittest.TestCase):
+    def setUp(self):
+        warnings.filterwarnings(
+            action="ignore", message="unclosed", category=ResourceWarning
+        )
+
+    def tearDown(self):
+        warnings.filterwarnings(
+            action="ignore", message="unclosed", category=ResourceWarning
+        )
+
+    def test_1(self):
+        f_m1 = federated(sds,[fed1], [([0, 0], [dim, dim])]).compute()
+        res = np.allclose(f_m1, m1)
+        self.assertTrue(res)
+
+    def test_2(self):
+        f_m2 = federated(sds,[fed2], [([0, 0], [dim, dim])]).compute()
+        res = np.allclose(f_m2, m2)
+        self.assertTrue(res)
+
+    def test_3(self):
+        #   [[m1,m1,m1,m1,m1,m2,m2,m2,m2,m2]
+        #    [m1,m1,m1,m1,m1,m2,m2,m2,m2,m2]
+        #    [m1,m1,m1,m1,m1,m2,m2,m2,m2,m2]
+        #    [m1,m1,m1,m1,m1,m2,m2,m2,m2,m2]
+        #    [m1,m1,m1,m1,m1,m2,m2,m2,m2,m2]]
+        f_m1_m2 = federated(sds,
+            [fed1, fed2], [([0, 0], [dim, dim]), ([0, dim], [dim, dim * 2])]
+        ).compute()
+        m1_m2 = np.concatenate((m1, m2), axis=1)
+        res = np.allclose(f_m1_m2, m1_m2)
+        self.assertTrue(res)
+
+    def test_4(self):
+        #   [[m1,m1,m1,m1,m1]
+        #    [m1,m1,m1,m1,m1]
+        #    [m1,m1,m1,m1,m1]
+        #    [m1,m1,m1,m1,m1]
+        #    [m1,m1,m1,m1,m1]
+        #    [m2,m2,m2,m2,m2]
+        #    [m2,m2,m2,m2,m2]
+        #    [m2,m2,m2,m2,m2]
+        #    [m2,m2,m2,m2,m2]
+        #    [m2,m2,m2,m2,m2]]
+        f_m1_m2 = federated(sds,
+            [fed1, fed2], [([0, 0], [dim, dim]), ([dim, 0], [dim * 2, dim])]
+        ).compute()
+        m1_m2 = np.concatenate((m1, m2))
+        res = np.allclose(f_m1_m2, m1_m2)
+        self.assertTrue(res)
+
+    # -----------------------------------
+    # The rest of the tests cover extended
+    # functionality that is not working yet.
+    # -----------------------------------
+
+    # def test_5(self):
+    #     #   [[m1,m1,m1,m1,m1, 0, 0, 0, 0, 0]
+    #     #    [m1,m1,m1,m1,m1, 0, 0, 0, 0, 0]
+    #     #    [m1,m1,m1,m1,m1,m2,m2,m2,m2,m2]
+    #     #    [m1,m1,m1,m1,m1,m2,m2,m2,m2,m2]
+    #     #    [m1,m1,m1,m1,m1,m2,m2,m2,m2,m2]
+    #     #    [ 0, 0, 0, 0, 0,m2,m2,m2,m2,m2]
+    #     #    [ 0, 0, 0, 0, 0,m2,m2,m2,m2,m2]]
+    #     f_m1_m2 = federated(sds,
+    #         [fed1, fed2], [([0, 0], [dim, dim]), ([2, dim], [dim + 2, dim * 2])]
+    #     ).compute()
+
+    #     m1_p = np.concatenate((m1, np.zeros((2, dim))))
+    #     m2_p = np.concatenate((np.zeros((2, dim)), m2))
+    #     m1_m2 = np.concatenate((m1_p, m2_p), axis=1)
+    #     res = np.allclose(f_m1_m2, m1_m2)
+    #     self.assertTrue(res)
+
+    # def test_6(self):
+    #     # Note it overwrites the value in the field. not sum or anything else.
+    #     #   [[m1,m1,m1,m1,m1, 0, 0, 0]
+    #     #    [m1,m1,m1,m1,m1, 0, 0, 0]
+    #     #    [m1,m1,m1,m2,m2,m2,m2,m2]
+    #     #    [m1,m1,m1,m2,m2,m2,m2,m2]
+    #     #    [m1,m1,m1,m2,m2,m2,m2,m2]
+    #     #    [ 0, 0, 0,m2,m2,m2,m2,m2]
+    #     #    [ 0, 0, 0,m2,m2,m2,m2,m2]]
+    #     f_m1_m2 = federated(sds,
+    #         [fed1, fed2], [([0, 0], [dim, dim]), ([2, 3], [dim + 2, dim + 3])]
+    #     ).compute()
+
+    #     m1_m2 = np.zeros((dim + 2, dim + 3))
+    #     m1_m2[0:dim, 0:dim] = m1
+    #     m1_m2[2 : dim + 2, 3 : dim + 3] = m2
+
+    #     res = np.allclose(f_m1_m2, m1_m2)
+    #     self.assertTrue(res)
+
+    # def test_7(self):
+    #     #   [[m1,m1,m1,m1,m1, 0, 0, 0]
+    #     #    [m1,m1,m1,m1,m1, 0, 0, 0]
+    #     #    [m1,m1,m1,m2,m2,m2,m2,m2]
+    #     #    [m1,m1,m1,m2,m2,m2,m2,m2]    +     1
+    #     #    [m1,m1,m1,m2,m2,m2,m2,m2]
+    #     #    [ 0, 0, 0,m2,m2,m2,m2,m2]
+    #     #    [ 0, 0, 0,m2,m2,m2,m2,m2]]
+    #     f_m1_m2 = federated(sds,
+    #         [fed1, fed2], [([0, 0], [dim, dim]), ([2, 3], [dim + 2, dim + 3])]
+    #     )
+    #     f_m1_m2 = (f_m1_m2 + 1).compute()
+    #     m1_m2 = np.zeros((dim + 2, dim + 3))
+    #     m1_m2[0:dim, 0:dim] = m1
+    #     m1_m2[2 : dim + 2, 3 : dim + 3] = m2
+    #     m1_m2 += 1
+    #     res = np.allclose(f_m1_m2, m1_m2)
+    #     if not res:
+    #         print("Federated:")
+    #         print(f_m1_m2)
+    #         print("numpy:")
+    #         print(m1_m2)
+    #     self.assertTrue(res)
+
+    # def test_8(self):
+    #     #   [[ 0, 0, 0, 0, 0, 0, 0, 0]
+    #     #    [ 0, 0, 0, 0, 0, 0, 0, 0]
+    #     #    [ 0, 0, 0,m1,m1,m1,m1,m1]
+    #     #    [ 0, 0, 0,m1,m1,m1,m1,m1]
+    #     #    [ 0, 0, 0,m1,m1,m1,m1,m1]
+    #     #    [ 0, 0, 0,m1,m1,m1,m1,m1]
+    #     #    [ 0, 0, 0,m1,m1,m1,m1,m1]]
+    #     f_m1_m2 = federated(sds,[fed1], [([2, 3], [dim + 2, dim + 3])])
+    #     f_m1_m2 = (f_m1_m2).compute()
+    #     m1_m2 = np.zeros((dim + 2, dim + 3))
+    #     m1_m2[2 : dim + 2, 3 : dim + 3] = m1
+    #     res = np.allclose(f_m1_m2, m1_m2)
+    #     if not res:
+    #         print("Federated:")
+    #         print(f_m1_m2)
+    #         print("numpy:")
+    #         print(m1_m2)
+    #     self.assertTrue(res)
+
+    # def test_9(self):
+    #     #   [[ 0, 0, 0, 0, 0, 0, 0, 0]
+    #     #    [ 0, 0, 0, 0, 0, 0, 0, 0]
+    #     #    [ 0, 0, 0,m1,m1,m1,m1,m1]
+    #     #    [ 0, 0, 0,m1,m1,m1,m1,m1]    +     1
+    #     #    [ 0, 0, 0,m1,m1,m1,m1,m1]
+    #     #    [ 0, 0, 0,m1,m1,m1,m1,m1]
+    #     #    [ 0, 0, 0,m1,m1,m1,m1,m1]]
+    #     f_m1_m2 = federated(sds,[fed1], [([2, 3], [dim + 2, dim + 3])])
+    #     f_m1_m2 = (f_m1_m2 + 1).compute()
+
+    #     m1_m2 = np.zeros((dim + 2, dim + 3))
+    #     m1_m2[2 : dim + 2, 3 : dim + 3] = m1
+
+    #     m1_m2 += 1
+    #     res = np.allclose(f_m1_m2, m1_m2)
+
+    #     if not res:
+    #         print("Federated:")
+    #         print(f_m1_m2)
+    #         print("numpy:")
+    #         print(m1_m2)
+    #     self.assertTrue(res)
+
+    # def test_10(self):
+    #     #   [[m1,m1,m1,m1,m1, 0, 0, 0]
+    #     #    [m1,m1,m1,m1,m1, 0, 0, 0]
+    #     #    [m1,m1,m1,m1,m1, 0, 0, 0]
+    #     #    [m1,m1,m1,m1,m1, 0, 0, 0]
+    #     #    [m1,m1,m1,m1,m1, 0, 0, 0]
+    #     #    [ 0, 0, 0, 0, 0, 0, 0, 0]
+    #     #    [ 0, 0, 0, 0, 0, 0, 0, 0]]
+    #     f_m1_m2 = federated(sds,[fed1], [([0, 0], [dim + 2, dim + 3])])
+    #     f_m1_m2 = (f_m1_m2).compute()
+
+    #     m1_m2 = np.zeros((dim + 2, dim + 3))
+    #     m1_m2[0:dim, 0:dim] = m1
+
+    #     res = np.allclose(f_m1_m2, m1_m2)
+
+    #     if not res:
+    #         print("Federated:")
+    #         print(f_m1_m2)
+    #         print("numpy:")
+    #         print(m1_m2)
+    #     self.assertTrue(res)
+
+    # def test_11(self):
+    #     #   [[ 0, 0, 0, 0, 0, 0, 0, 0]
+    #     #    [ 0,m1,m1,m1,m1,m1, 0, 0]
+    #     #    [ 0,m1,m1,m1,m1,m1, 0, 0]
+    #     #    [ 0,m1,m1,m1,m1,m1, 0, 0]
+    #     #    [ 0,m1,m1,m1,m1,m1, 0, 0]
+    #     #    [ 0,m1,m1,m1,m1,m1, 0, 0]
+    #     #    [ 0, 0, 0, 0, 0, 0, 0, 0]]
+    #     f_m1_m2 = federated(sds,[fed1], [([1, 1], [dim + 2, dim + 3])])
+    #     f_m1_m2 = (f_m1_m2).compute()
+
+    #     m1_m2 = np.zeros((dim + 2, dim + 3))
+    #     m1_m2[1 : dim + 1, 1 : dim + 1] = m1
+
+    #     res = np.allclose(f_m1_m2, m1_m2)
+
+    #     if not res:
+    #         print("Federated:")
+    #         print(f_m1_m2)
+    #         print("numpy:")
+    #         print(m1_m2)
+    #     self.assertTrue(res)
+
+
+if __name__ == "__main__":
+    unittest.main(exit=False)
+    sds.close()
+    shutil.rmtree(tempdir)