You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@nifi.apache.org by sz...@apache.org on 2021/03/24 11:51:46 UTC

[nifi-minifi-cpp] branch main updated: MINIFICPP-1456 Introduce PutAzureBlobStorage processor

This is an automated email from the ASF dual-hosted git repository.

szaszm pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/nifi-minifi-cpp.git


The following commit(s) were added to refs/heads/main by this push:
     new ba6cacb  MINIFICPP-1456 Introduce PutAzureBlobStorage processor
ba6cacb is described below

commit ba6cacb3925ed3d4aa794904e8387b4329740fc4
Author: Gabor Gyimesi <ga...@gmail.com>
AuthorDate: Wed Mar 24 12:49:50 2021 +0100

    MINIFICPP-1456 Introduce PutAzureBlobStorage processor
    
    Closes #979
    
    Signed-off-by: Marton Szasz <sz...@gmail.com>
---
 .github/workflows/ci.yml                           |  12 +-
 CMakeLists.txt                                     |  22 +-
 CONTROLLERS.md                                     |  22 ++
 LICENSE                                            | 136 ++++++----
 NOTICE                                             |   4 +
 PROCESSORS.md                                      |  30 +++
 README.md                                          |   2 +
 Windows.md                                         |   1 +
 bootstrap.sh                                       |   2 +
 bstrp_functions.sh                                 |   4 +-
 cmake/BundledAzureSdkCpp.cmake                     | 129 ++++++++++
 cmake/DockerConfig.cmake                           |   1 +
 .../dummy/FindCURL.cmake => NlohmannJson.cmake}    |  27 +-
 cmake/curl/dummy/FindCURL.cmake                    |  10 +-
 .../dummy/Findnlohmann_json.cmake}                 |  19 +-
 docker/DockerBuild.sh                              |   2 +
 docker/DockerVerify.sh                             |   3 +-
 docker/Dockerfile                                  |   3 +-
 .../integration/MiNiFi_integration_test_driver.py  |  16 +-
 .../integration/features/azure_storage.feature     |  22 ++
 docker/test/integration/features/s3.feature        |   2 +-
 .../integration/minifi/core/DockerTestCluster.py   |  74 +++---
 .../minifi/core/SingleNodeDockerCluster.py         |  16 +-
 .../minifi/processors/PutAzureBlobStorage.py       |  13 +
 docker/test/integration/steps/steps.py             |  20 ++
 extensions/aws/processors/PutS3Object.h            |   3 +-
 extensions/aws/processors/S3Processor.cpp          |   2 +
 extensions/aws/s3/S3Wrapper.cpp                    |   7 +-
 extensions/azure/AzureLoader.cpp                   |  28 +++
 extensions/azure/AzureLoader.h                     |  69 ++++++
 extensions/azure/CMakeLists.txt                    |  40 +++
 .../AzureStorageCredentialsService.cpp             |  70 ++++++
 .../AzureStorageCredentialsService.h               |  85 +++++++
 .../azure/processors/PutAzureBlobStorage.cpp       | 265 ++++++++++++++++++++
 extensions/azure/processors/PutAzureBlobStorage.h  | 136 ++++++++++
 extensions/azure/storage/AzureBlobStorage.cpp      |  87 +++++++
 extensions/azure/storage/AzureBlobStorage.h        |  55 ++++
 extensions/azure/storage/AzureStorageCredentials.h |  73 ++++++
 extensions/azure/storage/BlobStorage.h             |  64 +++++
 extensions/mqtt/processors/PublishMQTT.h           |   3 +-
 libminifi/test/azure-tests/CMakeLists.txt          |  39 +++
 .../test/azure-tests/PutAzureBlobStorageTests.cpp  | 276 +++++++++++++++++++++
 msi/LICENSE.txt                                    | 136 ++++++----
 .../azure-sdk-for-cpp-old-compiler.patch           |  42 ++++
 win_build_vs.bat                                   |   4 +-
 45 files changed, 1910 insertions(+), 166 deletions(-)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 89ab136..845aef0 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -30,7 +30,7 @@ jobs:
         run: |
           export PATH="/usr/local/opt/lua@5.3/lib:/usr/local/opt/lua@5.3/include:/usr/local/opt/lua@5.3/bin:$PATH"
           export PKG_CONFIG_PATH="/usr/local/opt/lua@5.3/lib/pkgconfig"
-          ./bootstrap.sh -e -t && cd build  && cmake -DCMAKE_BUILD_TYPE=Release -DENABLE_LUA_SCRIPTING=1 -DENABLE_AWS=ON -DCMAKE_VERBOSE_MAKEFILE=ON -DCMAKE_RULE_MESSAGES=OFF -DSTRICT_GSL_CHECKS=AUDIT -DFAIL_ON_WARNINGS=ON .. && cmake --build . --parallel 4 && make test ARGS="--timeout 300 -j4 --output-on-failure" && make linter
+          ./bootstrap.sh -e -t && cd build  && cmake -DCMAKE_BUILD_TYPE=Release -DENABLE_LUA_SCRIPTING=1 -DENABLE_AWS=ON -DENABLE_AZURE=ON -DCMAKE_VERBOSE_MAKEFILE=ON -DCMAKE_RULE_MESSAGES=OFF -DSTRICT_GSL_CHECKS=AUDIT -DFAIL_ON_WARNINGS=ON .. && cmake --build . --parallel 4 && make test ARGS="--timeout 300 -j4 --output-on-failure" && make linter
   macos_xcode_12_0:
     name: "macos-xcode12.0"
     runs-on: macos-10.15
@@ -60,11 +60,11 @@ jobs:
         run: |
           export PATH="/usr/local/opt/lua@5.3/lib:/usr/local/opt/lua@5.3/include:/usr/local/opt/lua@5.3/bin:$PATH"
           export PKG_CONFIG_PATH="/usr/local/opt/lua@5.3/lib/pkgconfig"
-          ./bootstrap.sh -e -t && cd build  && cmake -DCMAKE_BUILD_TYPE=Release -DENABLE_LUA_SCRIPTING=1 -DENABLE_AWS=ON -DCMAKE_VERBOSE_MAKEFILE=ON -DCMAKE_RULE_MESSAGES=OFF -DSTRICT_GSL_CHECKS=AUDIT -DFAIL_ON_WARNINGS=ON .. && cmake --build . --parallel 4 && make test ARGS="--timeout 300 -j4 --output-on-failure" && make linter
+          ./bootstrap.sh -e -t && cd build  && cmake -DCMAKE_BUILD_TYPE=Release -DENABLE_LUA_SCRIPTING=1 -DCMAKE_VERBOSE_MAKEFILE=ON -DCMAKE_RULE_MESSAGES=OFF -DSTRICT_GSL_CHECKS=AUDIT -DFAIL_ON_WARNINGS=ON .. && cmake --build . --parallel 4 && make test ARGS="--timeout 300 -j4 --output-on-failure" && make linter
   windows_VS2017:
     name: "windows-vs2017"
     runs-on: windows-2016
-    timeout-minutes: 90
+    timeout-minutes: 120
     steps:
       - id: checkout
         uses: actions/checkout@v2
@@ -74,7 +74,7 @@ jobs:
         run: |
           PATH %PATH%;C:\Program Files (x86)\Windows Kits\10\bin\10.0.17763.0\x86
           PATH %PATH%;C:\Program Files (x86)\Microsoft Visual Studio\2017\Enterprise\MSBuild\15.0\Bin\Roslyn
-          win_build_vs.bat build /CI /S /A
+          win_build_vs.bat build /CI /S /A /Z
         shell: cmd
   windows_VS2019:
     name: "windows-vs2019"
@@ -204,7 +204,7 @@ jobs:
           sudo apt install -y ccache openjdk-8-jdk maven libusb-1.0-0-dev libpng12-dev libgps-dev
           echo "PATH=/usr/lib/ccache:$PATH" >> $GITHUB_ENV
       - id: build
-        run: sudo mount tmpfs -t tmpfs /tmp && ./bootstrap.sh -e -t && cd build  && cmake -DUSE_SHARED_LIBS= -DENABLE_OPENWSMAN=ON -DENABLE_OPENCV=ON -DENABLE_MQTT=ON -DENABLE_GPS=ON -DENABLE_USB_CAMERA=ON -DENABLE_LIBRDKAFKA=ON -DENABLE_OPC=ON -DENABLE_SFTP=ON -DENABLE_MQTT=ON -DENABLE_COAP=ON -DENABLE_PYTHON=ON -DENABLE_SQL=ON -DENABLE_AWS=ON -DSTRICT_GSL_CHECKS=AUDIT -DFAIL_ON_WARNINGS=ON .. &&  cmake --build . --parallel 4  && make test ARGS="--timeout 300 -j8 --output-on-failure"
+        run: sudo mount tmpfs -t tmpfs /tmp && ./bootstrap.sh -e -t && cd build  && cmake -DUSE_SHARED_LIBS= -DENABLE_OPENWSMAN=ON -DENABLE_OPENCV=ON -DENABLE_MQTT=ON -DENABLE_GPS=ON -DENABLE_USB_CAMERA=ON -DENABLE_LIBRDKAFKA=ON -DENABLE_OPC=ON -DENABLE_SFTP=ON -DENABLE_MQTT=ON -DENABLE_COAP=ON -DENABLE_PYTHON=ON -DENABLE_SQL=ON -DENABLE_AWS=ON -DENABLE_AZURE=ON -DSTRICT_GSL_CHECKS=AUDIT -DFAIL_ON_WARNINGS=ON .. &&  cmake --build . --parallel 4  && make test ARGS="--timeout 300 -j8 --output-on-failure"
   debian:
     name: "debian"
     runs-on: ubuntu-18.04
@@ -323,7 +323,7 @@ jobs:
       - id: checkout
         uses: actions/checkout@v2
       - id: build
-        run: ./bootstrap.sh -e -t && cd build  && cmake -DUSE_SHARED_LIBS= -DSTRICT_GSL_CHECKS=AUDIT -DENABLE_JNI=OFF -DDISABLE_JEMALLOC=ON -DENABLE_AWS=ON -DENABLE_LIBRDKAFKA=ON .. && make docker
+        run: ./bootstrap.sh -e -t && cd build  && cmake -DUSE_SHARED_LIBS= -DSTRICT_GSL_CHECKS=AUDIT -DENABLE_JNI=OFF -DDISABLE_JEMALLOC=ON -DENABLE_AWS=ON -DENABLE_LIBRDKAFKA=ON -DENABLE_AZURE=ON .. && make docker
       - id: install_deps
         run: |
           sudo apt update
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 4cb6ae7..7cf84a6 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -477,7 +477,6 @@ if(ENABLE_ALL OR ENABLE_SENSORS)
 	createExtension(SENSOR-EXTENSIONS "SENSOR EXTENSIONS" "Enables the package of sensor extensions." "extensions/sensors" "${TEST_DIR}/sensors-tests")
 endif()
 
-
 ## SQLite extensions
 option(ENABLE_SQLITE "Disables the scripting extensions." OFF)
 if (ENABLE_SQLITE)
@@ -507,7 +506,7 @@ endif()
 
 ## AWS Extentions
 option(ENABLE_AWS "Enables AWS support." OFF)
-if (ENABLE_AWS)
+if (ENABLE_ALL OR ENABLE_AWS)
 	include(BundledAwsSdkCpp)
 	use_bundled_libaws(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
 	createExtension(AWS-EXTENSIONS "AWS EXTENSIONS" "This enables AWS support" "extensions/aws" "${TEST_DIR}/aws-tests")
@@ -519,7 +518,6 @@ if (ENABLE_OPENCV)
 	createExtension(OPENCV-EXTENSIONS "OPENCV EXTENSIONS" "This enabled OpenCV support" "extensions/opencv" "extensions/opencv/tests")
 endif()
 
-
 ## Bustache/template extensions
 option(ENABLE_BUSTACHE "Enables Bustache (ApplyTemplate) support." OFF)
 if (ENABLE_BUSTACHE)
@@ -550,19 +548,33 @@ if ((ENABLE_ALL OR ENABLE_SFTP) AND NOT DISABLE_CURL)
 	createExtension(SFTP "SFTP EXTENSIONS" "This enables SFTP support" "extensions/sftp" "extensions/sftp/tests")
 endif()
 
-## Openwsman Extesions
 option(ENABLE_OPENWSMAN "Enables the Openwsman extensions." OFF)
-if (ENABLE_OPENWSMAN AND NOT DISABLE_CIVET AND NOT DISABLE_CURL)
+option(ENABLE_AZURE "Enables Azure support." OFF)
+
+if ((ENABLE_OPENWSMAN AND NOT DISABLE_CIVET AND NOT DISABLE_CURL) OR ENABLE_ALL OR ENABLE_AZURE)
 	include(BundledLibXml2)
 	use_bundled_libxml2(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
 	list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/libxml2/dummy")
+endif()
 
+## Openwsman Extensions
+if (ENABLE_OPENWSMAN AND NOT DISABLE_CIVET AND NOT DISABLE_CURL)
 	include(BundledOpenWSMAN)
 	use_bundled_openwsman(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
 
 	createExtension(OPENWSMAN-EXTENSIONS "OPENWSMAN EXTENSIONS" "This enables Openwsman support" "extensions/openwsman")
 endif()
 
+## Azure Extensions
+if (ENABLE_ALL OR ENABLE_AZURE)
+	include(NlohmannJson)
+	list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/nlohmann_json/dummy")
+
+	include(BundledAzureSdkCpp)
+	use_bundled_libazure(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
+	createExtension(AZURE-EXTENSIONS "AZURE EXTENSIONS" "This enables Azure support" "extensions/azure" "${TEST_DIR}/azure-tests")
+endif()
+
 ## NOW WE CAN ADD LIBRARIES AND EXTENSIONS TO MAIN
 add_subdirectory(main)
 
diff --git a/CONTROLLERS.md b/CONTROLLERS.md
index 8d48ed3..0896f76 100644
--- a/CONTROLLERS.md
+++ b/CONTROLLERS.md
@@ -17,6 +17,7 @@
 
 ## Table of Contents
 
+- [AzureStorageCredentialsService](#azureStorageCredentialsService)
 - [AWSCredentialsService](#awsCredentialsService)
 
 ## AWSCredentialsService
@@ -39,3 +40,24 @@ default values, and whether a property supports the NiFi Expression Language.
 |Access Key|||Yes|Specifies the AWS Access Key|
 |Secret Key|||Yes|Specifies the AWS Secret Key|
 |Credentials File|||No|Path to a file containing AWS access key and secret key in properties file format. Properties used: accessKey and secretKey|
+
+## AzureStorageCredentialsService
+
+### Description
+
+Manages the credentials for an Azure Storage account. This allows for multiple Azure Storage related processors to reference this single
+controller service so that Azure storage credentials can be managed and controlled in a central location.
+
+### Properties
+
+In the list below, the names of required properties appear in bold. Any other
+properties (not in bold) are considered optional. The table also indicates any
+default values, and whether a property supports the NiFi Expression Language.
+
+| Name | Default Value | Allowable Values | Expression Language Supported? | Description |
+| - | - | - | - | - |
+|Storage Account Name||||The storage account name.|
+|Storage Account Key||||The storage account key. This is an admin-like password providing access to every container in this account. It is recommended one uses Shared Access Signature (SAS) token instead for fine-grained control with policies.|
+|SAS Token||||Shared Access Signature token. Specify either SAS Token (recommended) or Account Key.|
+|Common Storage Account Endpoint Suffix||||Storage accounts in public Azure always use a common FQDN suffix. Override this endpoint suffix with a different suffix in certain circumstances (like Azure Stack or non-public Azure regions).|
+|Connection String||||Connection string used to connect to Azure Storage service. This overrides all other set credential properties.|
diff --git a/LICENSE b/LICENSE
index 0f30eab..e48765f 100644
--- a/LICENSE
+++ b/LICENSE
@@ -214,14 +214,14 @@ notices and license terms. Your use of the source code for the these
 subcomponents is subject to the terms and conditions of the following
 licenses.
 
-This product bundles 'cpplint.py' which is  available under a 3-Clause BSD License. 
+This product bundles 'cpplint.py' which is  available under a 3-Clause BSD License.
 
 	 Copyright (c) 2009 Google Inc. All rights reserved.
-	
+
 	 Redistribution and use in source and binary forms, with or without
 	 modification, are permitted provided that the following conditions are
 	 met:
-	
+
 	    * Redistributions of source code must retain the above copyright
 	 notice, this list of conditions and the following disclaimer.
 	    * Redistributions in binary form must reproduce the above
@@ -231,7 +231,7 @@ This product bundles 'cpplint.py' which is  available under a 3-Clause BSD Licen
 	    * Neither the name of Google Inc. nor the names of its
 	 contributors may be used to endorse or promote products derived from
 	 this software without specific prior written permission.
-	
+
 	 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 	 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 	 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -251,11 +251,11 @@ This product bundles 'spdlog' which is available under an MIT license.
   Copyright (c) 2019 ZVYAGIN.Alexander@.gmail.com
 
   Copyright(c) 2015 Ruslan Baratov.
-  
+
   Copyright (c) 2015-2019 Gabi Melman.
 
   Copyright (c) 2016-2019 spdlog contributors.
-	
+
   Permission is hereby granted, free of charge, to any person obtaining a copy
   of this software and associated documentation files (the "Software"), to deal
   in the Software without restriction, including without limitation the rights
@@ -277,7 +277,7 @@ This product bundles 'spdlog' which is available under an MIT license.
   -- NOTE: Third party dependency used by this software --
   This software depends on the fmt lib (MIT License),
   and users must comply to its license: https://github.com/fmtlib/fmt/blob/master/LICENSE.rst
-		
+
 This product bundles 'fmt' which is available under the following license.
 
   Copyright (c) 2012 - present, Victor Zverovich
@@ -653,7 +653,7 @@ For more information, please refer to <http://unlicense.org>
 This projects includes libarchive bundle (https://www.libarchive.org)
 which is available under a BSD License by Tim Kientzle and others
 
-Copyright (c) 2003-2009 Tim Kientzle and other authors 
+Copyright (c) 2003-2009 Tim Kientzle and other authors
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -685,10 +685,10 @@ above.
 The following source files are in the public domain and have no copyright:
 libarchive/archive_getdate.c
 
-This libarchive includes below files 
+This libarchive includes below files
 libarchive/archive_entry.c
 libarchive/archive_read_support_filter_compress.c
-libarchive/archive_write_add_filter_compress.c 
+libarchive/archive_write_add_filter_compress.c
 which under a 3-clause UC Regents copyright as below
 /*-
  * Copyright (c) 1993
@@ -976,7 +976,7 @@ For sys/queue.h:
  *
  *  @(#)queue.h 8.5 (Berkeley) 8/20/94
  * $FreeBSD$
- 
+
 This product bundles regexp.c and regexp.h within librdkafka offered under a public domain license as below.
 
 LICENSE.regexp
@@ -1056,7 +1056,7 @@ freely, subject to the following restrictions:
     3. This notice may not be removed or altered from any source
     distribution.
 
-This product bundles wingetopt.c and wingetopt.h within librdkafka under the licenses below. 
+This product bundles wingetopt.c and wingetopt.h within librdkafka under the licenses below.
 
 For the files wingetopt.c wingetopt.h downloaded from https://github.com/alex85k/wingetopt
 
@@ -1109,7 +1109,7 @@ LICENSE.wingetopt
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
- 
+
 This product bundles LightPcapNg within PcapPlusPlus under the MIT license below.
 
 Copyright (c) 2016 Radu Velea, radu.velea@gmail.com
@@ -1193,7 +1193,7 @@ Redistribution and use in source and binary forms, with or without modification,
 
     Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
     Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
-    Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
+    Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
 
 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
@@ -1238,7 +1238,7 @@ The OpenSSL toolkit stays under a dual license, i.e. both the conditions of
  * are met:
  *
  * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer. 
+ *    notice, this list of conditions and the following disclaimer.
  *
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in
@@ -1293,21 +1293,21 @@ The OpenSSL toolkit stays under a dual license, i.e. both the conditions of
  * This package is an SSL implementation written
  * by Eric Young (eay@cryptsoft.com).
  * The implementation was written so as to conform with Netscapes SSL.
- * 
+ *
  * This library is free for commercial and non-commercial use as long as
  * the following conditions are aheared to.  The following conditions
  * apply to all code found in this distribution, be it the RC4, RSA,
  * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
  * included with this distribution is covered by the same copyright terms
  * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- * 
+ *
  * Copyright remains Eric Young's, and as such any Copyright notices in
  * the code are not to be removed.
  * If this package is used in a product, Eric Young should be given attribution
  * as the author of the parts of the library used.
  * This can be in the form of a textual message at program startup or
  * in documentation (online or textual) provided with the package.
- * 
+ *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
@@ -1322,10 +1322,10 @@ The OpenSSL toolkit stays under a dual license, i.e. both the conditions of
  *     Eric Young (eay@cryptsoft.com)"
  *    The word 'cryptographic' can be left out if the rouines from the library
  *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from 
+ * 4. If you include any Windows specific code (or a derivative thereof) from
  *    the apps directory (application code) you must include an acknowledgement:
  *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- * 
+ *
  * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@@ -1337,7 +1337,7 @@ The OpenSSL toolkit stays under a dual license, i.e. both the conditions of
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
- * 
+ *
  * The licence and distribution terms for any publically available version or
  * derivative of this code cannot be changed.  i.e. this code cannot simply be
  * copied and put under another distribution licence
@@ -1462,8 +1462,8 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
 
 This product bundles RapidJSON:
-Tencent is pleased to support the open source community by making RapidJSON available. 
- 
+Tencent is pleased to support the open source community by making RapidJSON available.
+
 Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.  All rights reserved.
 
 If you have downloaded a copy of the RapidJSON binary from Tencent, please note that the RapidJSON binary is licensed under the MIT License.
@@ -1474,24 +1474,24 @@ This product bundles RTIMULib2, which is offered under the license, below:
 
 This file is part of RTIMULib
 Copyright (c) 2014-2015, richards-tech, LLC
-Permission is hereby granted, free of charge, 
-to any person obtaining a copy of 
+Permission is hereby granted, free of charge,
+to any person obtaining a copy of
 this software and associated documentation files
 (the "Software"), to deal in the Software without
-restriction, including without limitation the rights 
-to use, copy, modify, merge, publish, distribute, 
-sublicense, and/or sell copies of the Software, and 
-to permit persons to whom the Software is furnished 
+restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute,
+sublicense, and/or sell copies of the Software, and
+to permit persons to whom the Software is furnished
 to do so, subject to the following conditions:
 
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 
-ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 
-TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 
-PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 
-THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 
-IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 DEALINGS IN THE SOFTWARE.
 
 
@@ -1500,13 +1500,13 @@ Other dependencies and licenses:
 Open Source Software Licensed Under the BSD License:
 --------------------------------------------------------------------
 
-The msinttypes r29 
-Copyright (c) 2006-2013 Alexander Chemeris 
+The msinttypes r29
+Copyright (c) 2006-2013 Alexander Chemeris
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
 
-* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 
+* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
 * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
 * Neither the name of  copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
 
@@ -1573,16 +1573,16 @@ This product bundles 'bsdiff' which is available under a "2-clause BSD" license.
 	 Copyright 2003-2005 Colin Percival
 	 Copyright 2012 Matthew Endsley
 	 All rights reserved
-	
+
 	 Redistribution and use in source and binary forms, with or without
-	 modification, are permitted providing that the following conditions 
+	 modification, are permitted providing that the following conditions
 	 are met:
 	 1. Redistributions of source code must retain the above copyright
 	    notice, this list of conditions and the following disclaimer.
 	 2. Redistributions in binary form must reproduce the above copyright
 	    notice, this list of conditions and the following disclaimer in the
 	    documentation and/or other materials provided with the distribution.
-	
+
 	 THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 	 IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 	 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@@ -2958,3 +2958,53 @@ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 --------------------------------------------------------------------------
 
 This product bundles the IANA timezone database which is in the public domain.
+
+--------------------------------------------------------------------------
+
+This project bundles 'azure-sdk-for-cpp', which is available under an MIT License:
+MIT License
+
+Copyright (c) Microsoft Corporation.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE
+
+--------------------------------------------------------------------------
+
+This product bundles 'JSON for Modern C++' which is available under a MIT license:
+MIT License
+
+Copyright (c) 2013-2021 Niels Lohmann
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/NOTICE b/NOTICE
index a0a980a..3f2efc7 100644
--- a/NOTICE
+++ b/NOTICE
@@ -40,6 +40,9 @@ Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 This product includes software developed by
 Amazon Technologies, Inc (http://www.amazon.com/).
 
+This bundles the 'Azure SDK for C++' (MIT licensed) project (https://github.com/Azure/azure-sdk-for-cpp), which includes the following NOTICE:
+Copyright (c) Microsoft Corporation.
+
 **********************
 THIRD PARTY COMPONENTS
 **********************
@@ -55,6 +58,7 @@ This software includes third party software subject to the following copyrights:
 - libsodium - Copyright (c) 2013 - 2018 Frank Denis under the ISC software license
 - IANA timezone database - public domain
 - date (HowardHinnant/date) - notices below
+- JSON for Modern C++ (nlohmann/json) - Copyright (c) 2013-2021 Niels Lohmann
 
 The licenses for these third party components are included in LICENSE.txt
 
diff --git a/PROCESSORS.md b/PROCESSORS.md
index 7034be2..83ac954 100644
--- a/PROCESSORS.md
+++ b/PROCESSORS.md
@@ -38,6 +38,7 @@
 - [MotionDetector](#motiondetector)
 - [PublishKafka](#publishkafka)
 - [PublishMQTT](#publishmqtt)
+- [PutAzureBlobStorage](#putazureblobstorage)
 - [PutFile](#putfile)
 - [PutOPCProcessor](#putopcprocessor)
 - [PutS3Object](#puts3object)
@@ -1022,6 +1023,35 @@ In the list below, the names of required properties appear in bold. Any other pr
 |success|FlowFiles that are sent successfully to the destination are transferred to this relationship|
 
 
+## PutAzureBlobStorage
+
+### Description
+
+Puts content into an Azure Storage Blob
+### Properties
+
+In the list below, the names of required properties appear in bold. Any other properties (not in bold) are considered optional. The table also indicates any default values, and whether a property supports the NiFi Expression Language.
+
+| Name | Default Value | Allowable Values | Description |
+| - | - | - | - |
+|**Container Name**|||Name of the Azure storage container. In case of PutAzureBlobStorage processor, container can be created if it does not exist.<br/>**Supports Expression Language: true**|
|Storage Account Name|||The storage account name.<br/>**Supports Expression Language: true**|
|Storage Account Key|||The storage account key. This is an admin-like password providing access to every container in this account. It is recommended one uses Shared Access Signature (SAS) token instead for fine-grained control with policies.<br/>**Supports Expression Language: true**|
|SAS Token|||Shared Access Signature token. Specify either SAS Token (recommended) or Account Key.<br/>**Supports Expression Language: true**|
|Common Storage Account Endpoint Suffix|||Storage accounts in public Azure always use a common FQDN suffix. Override this endpoint suffix with a different suffix in certain circumstances (like Azure Stack or non-public Azure regions).<br/>**Supports Expression Language: true**|
|Connection String|||Connection string used to connect to Azure Storage service. This overrides all other set credential properties.<br/>**Supports Expression Language: true**|
+|Azure Storage Credentials Service|||Name of the Azure Storage Credentials Service used to retrieve the connection string from.|
+|**Blob**|||The filename of the blob.<br/>**Supports Expression Language: true**|
+|**Create Container**|false||Specifies whether to check if the container exists and to automatically create it if it does not. Permission to list containers is required. If false, this check is not made, but the Put operation will fail if the container does not exist.|
+
+### Relationships
+
+| Name | Description |
+| - | - |
+|failure|Unsuccessful operations will be transferred to the failure relationship|
+|success|All successfully processed FlowFiles are routed to this relationship|
+
+
 ## PutFile
 
 ### Description
diff --git a/README.md b/README.md
index 043fca5..445b708 100644
--- a/README.md
+++ b/README.md
@@ -76,6 +76,7 @@ Through JNI extensions you can run NiFi processors using NARs. The JNI extension
 | ------------- |:-------------| :-----|
 | Archive Extensions    | [ApplyTemplate](PROCESSORS.md#applytemplate)<br/>[CompressContent](PROCESSORS.md#compresscontent)<br/>[ManipulateArchive](PROCESSORS.md#manipulatearchive)<br/>[MergeContent](PROCESSORS.md#mergecontent)<br/>[FocusArchiveEntry](PROCESSORS.md#focusarchiveentry)<br/>[UnfocusArchiveEntry](PROCESSORS.md#unfocusarchiveentry)      |   -DBUILD_LIBARCHIVE=ON |
 | AWS | [AWSCredentialsService](CONTROLLERS.md#awscredentialsservice)<br/>[PutS3Object](PROCESSORS.md#puts3object)<br/>[DeleteS3Object](PROCESSORS.md#deletes3object)<br/>[FetchS3Object](PROCESSORS.md#fetchs3object)<br/>[ListS3](PROCESSORS.md#lists3) | -DENABLE_AWS=ON  |
+| Azure | [AzureStorageCredentialsService](CONTROLLERS.md#azurestoragecredentialsservice)<br/>[PutAzureBlobStorage](PROCESSORS.md#putazureblobstorage) | -DENABLE_AZURE=ON  |
 | CivetWeb | [ListenHTTP](PROCESSORS.md#listenhttp)  | -DDISABLE_CIVET=ON |
 | CURL | [InvokeHTTP](PROCESSORS.md#invokehttp)      |    -DDISABLE_CURL=ON  |
 | GPS | GetGPS      |    -DENABLE_GPS=ON  |
@@ -363,6 +364,7 @@ $ # It is recommended that you install bison from source as HomeBrew now uses an
     U. OPC-UA Support ..............Disabled
     W. SQL Support .................Disabled
     X. Openwsman Support ...........Disabled
+    Y. Azure Support ...............Disabled
     ****************************************
                 Build Options.
     ****************************************
diff --git a/Windows.md b/Windows.md
index 7e5223b..f32a851 100644
--- a/Windows.md
+++ b/Windows.md
@@ -63,6 +63,7 @@ After the build directory it will take optional parameters modifying the CMake c
 | /S | Enables SQL |
 | /C | Enables CoAP |
 | /A | Enables AWS |
+| /Z | Enables Azure |
 | /M | Creates installer with merge modules |
 | /64 | Creates 64-bit build instead of a 32-bit one |
 | /D | Builds RelWithDebInfo build instead of Release |
diff --git a/bootstrap.sh b/bootstrap.sh
index 77d069a..2ece4cc 100755
--- a/bootstrap.sh
+++ b/bootstrap.sh
@@ -322,6 +322,8 @@ add_dependency TENSORFLOW_ENABLED "tensorflow"
 add_disabled_option OPC_ENABLED ${FALSE} "ENABLE_OPC"
 add_dependency OPC_ENABLED "mbedtls"
 
+add_disabled_option AZURE_ENABLED ${FALSE} "ENABLE_AZURE"
+
 USE_SHARED_LIBS=${TRUE}
 TESTS_DISABLED=${FALSE}
 ASAN_ENABLED=${FALSE}
diff --git a/bstrp_functions.sh b/bstrp_functions.sh
index 1eb1629..f4648ed 100755
--- a/bstrp_functions.sh
+++ b/bstrp_functions.sh
@@ -367,6 +367,7 @@ show_supported_features() {
   echo "U. OPC-UA Support...............$(print_feature_status OPC_ENABLED)"
   echo "W. SQL Support..................$(print_feature_status SQL_ENABLED)"
   echo "X. Openwsman Support ...........$(print_feature_status OPENWSMAN_ENABLED)"
+  echo "Y. Azure Support ...............$(print_feature_status AZURE_ENABLED)"
   echo "****************************************"
   echo "            Build Options."
   echo "****************************************"
@@ -418,6 +419,7 @@ read_feature_options(){
     u) ToggleFeature OPC_ENABLED ;;
     w) ToggleFeature SQL_ENABLED ;;
     x) ToggleFeature OPENWSMAN_ENABLED ;;
+    y) ToggleFeature AZURE_ENABLED ;;
     1) ToggleFeature TESTS_DISABLED ;;
     2) EnableAllFeatures ;;
     3) ToggleFeature JNI_ENABLED;;
@@ -436,7 +438,7 @@ read_feature_options(){
       fi
       ;;
     q) exit 0;;
-    *) echo -e "${RED}Please enter an option A-X or 1-6...${NO_COLOR}" && sleep 2
+    *) echo -e "${RED}Please enter an option A-Y or 1-6...${NO_COLOR}" && sleep 2
   esac
 }
 
diff --git a/cmake/BundledAzureSdkCpp.cmake b/cmake/BundledAzureSdkCpp.cmake
new file mode 100644
index 0000000..2f28f3c
--- /dev/null
+++ b/cmake/BundledAzureSdkCpp.cmake
@@ -0,0 +1,129 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+function(use_bundled_libazure SOURCE_DIR BINARY_DIR)
+    set(PC "${Patch_EXECUTABLE}" -p1 -i "${SOURCE_DIR}/thirdparty/azure-sdk-cpp-for-cpp/azure-sdk-for-cpp-old-compiler.patch")
+
+    # Define byproducts
+    if (WIN32)
+        set(SUFFIX "lib")
+        set(PREFIX "")
+        set(AZURE_CORE_LIB "${BINARY_DIR}/thirdparty/azure-sdk-cpp-src/sdk/core/azure-core/${CMAKE_BUILD_TYPE}/${PREFIX}azure-core.${SUFFIX}")
+        set(AZURE_STORAGE_COMMON_LIB "${BINARY_DIR}/thirdparty/azure-sdk-cpp-src/sdk/storage/azure-storage-common/${CMAKE_BUILD_TYPE}/${PREFIX}azure-storage-common.${SUFFIX}")
+        set(AZURE_STORAGE_BLOBS_LIB "${BINARY_DIR}/thirdparty/azure-sdk-cpp-src/sdk/storage/azure-storage-blobs/${CMAKE_BUILD_TYPE}/${PREFIX}azure-storage-blobs.${SUFFIX}")
+        set(AZURE_IDENTITY_LIB "${BINARY_DIR}/thirdparty/azure-sdk-cpp-src/sdk/identity/azure-identity/${CMAKE_BUILD_TYPE}/${PREFIX}azure-identity.${SUFFIX}")
+    else()
+        set(SUFFIX "a")
+        set(PREFIX "lib")
+        set(AZURE_CORE_LIB "${BINARY_DIR}/thirdparty/azure-sdk-cpp-src/sdk/core/azure-core/${PREFIX}azure-core.${SUFFIX}")
+        set(AZURE_STORAGE_COMMON_LIB "${BINARY_DIR}/thirdparty/azure-sdk-cpp-src/sdk/storage/azure-storage-common/${PREFIX}azure-storage-common.${SUFFIX}")
+        set(AZURE_STORAGE_BLOBS_LIB "${BINARY_DIR}/thirdparty/azure-sdk-cpp-src/sdk/storage/azure-storage-blobs/${PREFIX}azure-storage-blobs.${SUFFIX}")
+        set(AZURE_IDENTITY_LIB "${BINARY_DIR}/thirdparty/azure-sdk-cpp-src/sdk/identity/azure-identity/${PREFIX}azure-identity.${SUFFIX}")
+    endif()
+
+    set(AZURESDK_LIBRARIES_LIST
+            "${AZURE_CORE_LIB}"
+            "${AZURE_STORAGE_COMMON_LIB}"
+            "${AZURE_STORAGE_BLOBS_LIB}"
+            "${AZURE_IDENTITY_LIB}")
+
+    set(AZURE_SDK_CMAKE_ARGS ${PASSTHROUGH_CMAKE_ARGS}
+        -DWARNINGS_AS_ERRORS=OFF)
+    append_third_party_passthrough_args(AZURE_SDK_CMAKE_ARGS "${AZURE_SDK_CMAKE_ARGS}")
+
+    # Build project
+    ExternalProject_Add(
+            azure-sdk-cpp-external
+            GIT_REPOSITORY "https://github.com/Azure/azure-sdk-for-cpp.git"
+            GIT_TAG "azure-storage-blobs_12.0.0-beta.7"
+            BUILD_IN_SOURCE true
+            SOURCE_DIR "${BINARY_DIR}/thirdparty/azure-sdk-cpp-src"
+            BUILD_BYPRODUCTS "${AZURESDK_LIBRARIES_LIST}"
+            EXCLUDE_FROM_ALL TRUE
+            STEP_TARGETS build
+            CMAKE_ARGS ${AZURE_SDK_CMAKE_ARGS}
+            LIST_SEPARATOR % # This is needed for passing semicolon-separated lists
+            PATCH_COMMAND ${PC}
+    )
+
+    # Set dependencies
+    add_dependencies(azure-sdk-cpp-external-build CURL::libcurl LibXml2::LibXml2 OpenSSL::Crypto OpenSSL::SSL nlohmann_json::nlohmann_json)
+
+    # Set variables
+    set(LIBAZURE_FOUND "YES" CACHE STRING "" FORCE)
+    set(LIBAZURE_INCLUDE_DIRS
+            "${BINARY_DIR}/thirdparty/azure-sdk-cpp-src/sdk/core/azure-core/inc/"
+            "${BINARY_DIR}/thirdparty/azure-sdk-cpp-src/sdk/storage/azure-storage-blobs/inc/"
+            "${BINARY_DIR}/thirdparty/azure-sdk-cpp-src/sdk/storage/azure-storage-common/inc/"
+            "${BINARY_DIR}/thirdparty/azure-sdk-cpp-src/sdk/identity/azure-identity/inc/"
+            CACHE STRING "" FORCE)
+    set(LIBAZURE_LIBRARIES ${AZURESDK_LIBRARIES_LIST} CACHE STRING "" FORCE)
+
+    # Create imported targets
+    FOREACH(LIBAZURE_INCLUDE_DIR ${LIBAZURE_INCLUDE_DIRS})
+        file(MAKE_DIRECTORY ${LIBAZURE_INCLUDE_DIR})
+    ENDFOREACH(LIBAZURE_INCLUDE_DIR)
+
+    add_library(AZURE::azure-core STATIC IMPORTED)
+    set_target_properties(AZURE::azure-core PROPERTIES IMPORTED_LOCATION "${AZURE_CORE_LIB}")
+    add_dependencies(AZURE::azure-core azure-sdk-cpp-external-build)
+    target_include_directories(AZURE::azure-core INTERFACE ${LIBAZURE_INCLUDE_DIRS})
+    target_link_libraries(AZURE::azure-core INTERFACE LibXml2::LibXml2 CURL::libcurl OpenSSL::Crypto OpenSSL::SSL Threads::Threads nlohmann_json::nlohmann_json)
+    if (APPLE)
+        target_link_libraries(AZURE::azure-core INTERFACE "-framework CoreFoundation")
+    endif()
+    if (WIN32)
+        target_link_libraries(AZURE::azure-core INTERFACE winhttp.lib)
+    endif()
+
+    add_library(AZURE::azure-identity STATIC IMPORTED)
+    set_target_properties(AZURE::azure-identity PROPERTIES IMPORTED_LOCATION "${AZURE_IDENTITY_LIB}")
+    add_dependencies(AZURE::azure-identity azure-sdk-cpp-external-build)
+    target_include_directories(AZURE::azure-identity INTERFACE ${LIBAZURE_INCLUDE_DIRS})
+    target_link_libraries(AZURE::azure-identity INTERFACE LibXml2::LibXml2 CURL::libcurl OpenSSL::Crypto OpenSSL::SSL Threads::Threads nlohmann_json::nlohmann_json)
+    if (APPLE)
+        target_link_libraries(AZURE::azure-identity INTERFACE "-framework CoreFoundation")
+    endif()
+    if (WIN32)
+        target_link_libraries(AZURE::azure-identity INTERFACE winhttp.lib)
+    endif()
+
+    add_library(AZURE::azure-storage-common STATIC IMPORTED)
+    set_target_properties(AZURE::azure-storage-common PROPERTIES IMPORTED_LOCATION "${AZURE_STORAGE_COMMON_LIB}")
+    add_dependencies(AZURE::azure-storage-common azure-sdk-cpp-external-build)
+    target_include_directories(AZURE::azure-storage-common INTERFACE ${LIBAZURE_INCLUDE_DIRS})
+    target_link_libraries(AZURE::azure-storage-common INTERFACE LibXml2::LibXml2 CURL::libcurl OpenSSL::Crypto OpenSSL::SSL Threads::Threads nlohmann_json::nlohmann_json)
+    if (APPLE)
+        target_link_libraries(AZURE::azure-storage-common INTERFACE "-framework CoreFoundation")
+    endif()
+    if (WIN32)
+        target_link_libraries(AZURE::azure-storage-common INTERFACE winhttp.lib)
+    endif()
+
+    add_library(AZURE::azure-storage-blobs STATIC IMPORTED)
+    set_target_properties(AZURE::azure-storage-blobs PROPERTIES IMPORTED_LOCATION "${AZURE_STORAGE_BLOBS_LIB}")
+    add_dependencies(AZURE::azure-storage-blobs azure-sdk-cpp-external-build)
+    target_include_directories(AZURE::azure-storage-blobs INTERFACE ${LIBAZURE_INCLUDE_DIRS})
+    target_link_libraries(AZURE::azure-storage-blobs INTERFACE LibXml2::LibXml2 CURL::libcurl OpenSSL::Crypto OpenSSL::SSL Threads::Threads nlohmann_json::nlohmann_json)
+    if (APPLE)
+        target_link_libraries(AZURE::azure-storage-blobs INTERFACE "-framework CoreFoundation")
+    endif()
+    if (WIN32)
+        target_link_libraries(AZURE::azure-storage-blobs INTERFACE winhttp.lib)
+    endif()
+    add_definitions("-DBUILD_CURL_HTTP_TRANSPORT_ADAPTER")
+endfunction(use_bundled_libazure)
diff --git a/cmake/DockerConfig.cmake b/cmake/DockerConfig.cmake
index 5ae9b3b..308e2be 100644
--- a/cmake/DockerConfig.cmake
+++ b/cmake/DockerConfig.cmake
@@ -44,6 +44,7 @@ add_custom_target(
         -c ENABLE_BUSTACHE=${ENABLE_BUSTACHE}
         -c ENABLE_SFTP=${ENABLE_SFTP}
         -c ENABLE_OPENWSMAN=${ENABLE_OPENWSMAN}
+        -c ENABLE_AZURE=${ENABLE_AZURE}
         -c DISABLE_CURL=${DISABLE_CURL}
         -c DISABLE_JEMALLOC=${DISABLE_JEMALLOC}
         -c DISABLE_CIVET=${DISABLE_CIVET}
diff --git a/cmake/curl/dummy/FindCURL.cmake b/cmake/NlohmannJson.cmake
similarity index 53%
copy from cmake/curl/dummy/FindCURL.cmake
copy to cmake/NlohmannJson.cmake
index 72802bb..3b1f381 100644
--- a/cmake/curl/dummy/FindCURL.cmake
+++ b/cmake/NlohmannJson.cmake
@@ -5,9 +5,9 @@
 # to you under the Apache License, Version 2.0 (the
 # "License"); you may not use this file except in compliance
 # with the License.  You may obtain a copy of the License at
-# 
+#
 #   http://www.apache.org/licenses/LICENSE-2.0
-# 
+#
 # Unless required by applicable law or agreed to in writing,
 # software distributed under the License is distributed on an
 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -15,10 +15,21 @@
 # specific language governing permissions and limitations
 # under the License.
 
-if(NOT CURL_FOUND)
-    set(CURL_FOUND "YES" CACHE STRING "" FORCE)
-    set(CURL_INCLUDE_DIR "${EXPORTED_CURL_INCLUDE_DIR}" CACHE STRING "" FORCE)
-    set(CURL_INCLUDE_DIRS "${CURL_INCLUDE_DIR}" CACHE STRING "" FORCE)
-    set(CURL_LIBRARY "${EXPORTED_CURL_LIBRARY}" CACHE STRING "" FORCE)
-    set(CURL_LIBRARIES "${CURL_LIBRARY}" CACHE STRING "" FORCE)
+include(FetchContent)
+
+FetchContent_Declare(nlohmann_json
+    GIT_REPOSITORY https://github.com/ArthurSonzogni/nlohmann_json_cmake_fetchcontent
+    GIT_TAG "v3.9.1")
+
+FetchContent_MakeAvailable(nlohmann_json)
+
+FetchContent_GetProperties(nlohmann_json)
+if(NOT nlohmann_json_POPULATED)
+    FetchContent_Populate(nlohmann_json)
+    add_subdirectory(${nlohmann_json_SOURCE_DIR} ${nlohmann_json_BINARY_DIR} EXCLUDE_FROM_ALL)
 endif()
+
+set(NLOHMANN_JSON_INCLUDE_DIR "${nlohmann_json_SOURCE_DIR}/include")
+
+# Set exported variables for FindPackage.cmake
+set(PASSTHROUGH_VARIABLES ${PASSTHROUGH_VARIABLES} "-DEXPORTED_NLOHMANN_JSON_INCLUDE_DIR=${NLOHMANN_JSON_INCLUDE_DIR}" CACHE STRING "" FORCE)
diff --git a/cmake/curl/dummy/FindCURL.cmake b/cmake/curl/dummy/FindCURL.cmake
index 72802bb..f9b289b 100644
--- a/cmake/curl/dummy/FindCURL.cmake
+++ b/cmake/curl/dummy/FindCURL.cmake
@@ -5,9 +5,9 @@
 # to you under the Apache License, Version 2.0 (the
 # "License"); you may not use this file except in compliance
 # with the License.  You may obtain a copy of the License at
-# 
+#
 #   http://www.apache.org/licenses/LICENSE-2.0
-# 
+#
 # Unless required by applicable law or agreed to in writing,
 # software distributed under the License is distributed on an
 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -22,3 +22,9 @@ if(NOT CURL_FOUND)
     set(CURL_LIBRARY "${EXPORTED_CURL_LIBRARY}" CACHE STRING "" FORCE)
     set(CURL_LIBRARIES "${CURL_LIBRARY}" CACHE STRING "" FORCE)
 endif()
+
+if(NOT TARGET CURL::libcurl)
+    add_library(CURL::libcurl STATIC IMPORTED)
+    set_target_properties(CURL::libcurl PROPERTIES IMPORTED_LOCATION "${CURL_LIBRARIES}")
+    set_property(TARGET CURL::libcurl APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES "${CURL_INCLUDE_DIRS}")
+endif()
diff --git a/cmake/curl/dummy/FindCURL.cmake b/cmake/nlohmann_json/dummy/Findnlohmann_json.cmake
similarity index 59%
copy from cmake/curl/dummy/FindCURL.cmake
copy to cmake/nlohmann_json/dummy/Findnlohmann_json.cmake
index 72802bb..6a3cb1b 100644
--- a/cmake/curl/dummy/FindCURL.cmake
+++ b/cmake/nlohmann_json/dummy/Findnlohmann_json.cmake
@@ -5,9 +5,9 @@
 # to you under the Apache License, Version 2.0 (the
 # "License"); you may not use this file except in compliance
 # with the License.  You may obtain a copy of the License at
-# 
+#
 #   http://www.apache.org/licenses/LICENSE-2.0
-# 
+#
 # Unless required by applicable law or agreed to in writing,
 # software distributed under the License is distributed on an
 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -15,10 +15,13 @@
 # specific language governing permissions and limitations
 # under the License.
 
-if(NOT CURL_FOUND)
-    set(CURL_FOUND "YES" CACHE STRING "" FORCE)
-    set(CURL_INCLUDE_DIR "${EXPORTED_CURL_INCLUDE_DIR}" CACHE STRING "" FORCE)
-    set(CURL_INCLUDE_DIRS "${CURL_INCLUDE_DIR}" CACHE STRING "" FORCE)
-    set(CURL_LIBRARY "${EXPORTED_CURL_LIBRARY}" CACHE STRING "" FORCE)
-    set(CURL_LIBRARIES "${CURL_LIBRARY}" CACHE STRING "" FORCE)
+if(NOT NLOHMANN_JSON_FOUND)
+    set(NLOHMANN_JSON_FOUND "YES" CACHE STRING "" FORCE)
+    set(NLOHMANN_JSON_INCLUDE_DIR "${EXPORTED_NLOHMANN_JSON_INCLUDE_DIR}" CACHE STRING "" FORCE)
+    set(NLOHMANN_JSON_INCLUDE_DIRS "${EXPORTED_NLOHMANN_JSON_INCLUDE_DIR}" CACHE STRING "" FORCE)
+endif()
+
+if(NOT TARGET nlohmann_json::nlohmann_json)
+    add_library(nlohmann_json::nlohmann_json STATIC IMPORTED)
+    set_property(TARGET nlohmann_json::nlohmann_json APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES "${NLOHMANN_JSON_INCLUDE_DIR}")
 endif()
diff --git a/docker/DockerBuild.sh b/docker/DockerBuild.sh
index 40e4701..a30fca7 100755
--- a/docker/DockerBuild.sh
+++ b/docker/DockerBuild.sh
@@ -49,6 +49,7 @@ ENABLE_AWS=${ENABLE_AWS:-}
 ENABLE_BUSTACHE=${ENABLE_BUSTACHE:-}
 ENABLE_SFTP=${ENABLE_SFTP:-}
 ENABLE_OPENWSMAN=${ENABLE_OPENWSMAN:-}
+ENABLE_AZURE=${ENABLE_AZURE:-}
 DISABLE_CURL=${DISABLE_CURL:-}
 DISABLE_JEMALLOC=${DISABLE_JEMALLOC:-ON}
 DISABLE_CIVET=${DISABLE_CIVET:-}
@@ -186,6 +187,7 @@ BUILD_ARGS="--build-arg UID=${UID_ARG} \
             --build-arg ENABLE_BUSTACHE=${ENABLE_BUSTACHE} \
             --build-arg ENABLE_SFTP=${ENABLE_SFTP} \
             --build-arg ENABLE_OPENWSMAN=${ENABLE_OPENWSMAN} \
+            --build-arg ENABLE_AZURE=${ENABLE_AZURE} \
             --build-arg DISABLE_CURL=${DISABLE_CURL} \
             --build-arg DISABLE_JEMALLOC=${DISABLE_JEMALLOC} \
             --build-arg DISABLE_CIVET=${DISABLE_CIVET} \
diff --git a/docker/DockerVerify.sh b/docker/DockerVerify.sh
index dc59e64..288042f 100755
--- a/docker/DockerVerify.sh
+++ b/docker/DockerVerify.sh
@@ -94,4 +94,5 @@ exec
   behave $BEHAVE_OPTS "features/s3.feature" -n "A MiNiFi instance can download s3 bucket objects directly" &&
   behave $BEHAVE_OPTS "features/s3.feature" -n "A MiNiFi instance can download s3 bucket objects via a http-proxy" &&
   behave $BEHAVE_OPTS "features/s3.feature" -n "A MiNiFi instance can list an S3 bucket directly" &&
-  behave $BEHAVE_OPTS "features/s3.feature" -n "A MiNiFi instance can list an S3 bucket objects via a http-proxy"
+  behave $BEHAVE_OPTS "features/s3.feature" -n "A MiNiFi instance can list an S3 bucket objects via a http-proxy" &&
+  behave $BEHAVE_OPTS "features/azure_storage.feature" -n "A MiNiFi instance can upload data to Azure blob storage"
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 20e8ce8..37541b4 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -106,6 +106,7 @@ ARG ENABLE_AWS
 ARG ENABLE_BUSTACHE
 ARG ENABLE_SFTP
 ARG ENABLE_OPENWSMAN
+ARG ENABLE_AZURE
 ARG DISABLE_CURL
 ARG DISABLE_JEMALLOC
 ARG DISABLE_CIVET
@@ -125,7 +126,7 @@ RUN cd ${MINIFI_BASE_DIR} \
     -DENABLE_WEL=${ENABLE_WEL} -DENABLE_SQL=${ENABLE_SQL} -DENABLE_MQTT=${ENABLE_MQTT} -DENABLE_PCAP=${ENABLE_PCAP} \
     -DENABLE_LIBRDKAFKA=${ENABLE_LIBRDKAFKA} -DENABLE_SENSORS=${ENABLE_SENSORS} -DENABLE_SQLITE=${ENABLE_SQLITE} \
     -DENABLE_USB_CAMERA=${ENABLE_USB_CAMERA} -DENABLE_TENSORFLOW=${ENABLE_TENSORFLOW} -DENABLE_AWS=${ENABLE_AWS} \
-    -DENABLE_BUSTACHE=${ENABLE_BUSTACHE} -DENABLE_SFTP=${ENABLE_SFTP} -DENABLE_OPENWSMAN=${ENABLE_OPENWSMAN} \
+    -DENABLE_BUSTACHE=${ENABLE_BUSTACHE} -DENABLE_SFTP=${ENABLE_SFTP} -DENABLE_OPENWSMAN=${ENABLE_OPENWSMAN} -DENABLE_AZURE=${ENABLE_AZURE} \
     -DDISABLE_CURL=${DISABLE_CURL} -DDISABLE_JEMALLOC=${DISABLE_JEMALLOC} -DDISABLE_CIVET=${DISABLE_CIVET} \
     -DDISABLE_EXPRESSION_LANGUAGE=${DISABLE_EXPRESSION_LANGUAGE} -DDISABLE_ROCKSDB=${DISABLE_ROCKSDB} \
     -DDISABLE_LIBARCHIVE=${DISABLE_LIBARCHIVE} -DDISABLE_LZMA=${DISABLE_LZMA} -DDISABLE_BZIP2=${DISABLE_BZIP2} \
diff --git a/docker/test/integration/MiNiFi_integration_test_driver.py b/docker/test/integration/MiNiFi_integration_test_driver.py
index d8207c1..6bb5151 100644
--- a/docker/test/integration/MiNiFi_integration_test_driver.py
+++ b/docker/test/integration/MiNiFi_integration_test_driver.py
@@ -110,6 +110,12 @@ class MiNiFi_integration_test():
                 startup_success = cluster.wait_for_app_logs("Starting Flow Controller...", 120)
             elif cluster.get_engine() == "kafka-broker":
                 startup_success = cluster.wait_for_app_logs("Startup complete.", 120)
+            elif cluster.get_engine() == "http-proxy":
+                startup_success = cluster.wait_for_app_logs("Accepting HTTP Socket connections at", 120)
+            elif cluster.get_engine() == "s3-server":
+                startup_success = cluster.wait_for_app_logs("Started S3MockApplication", 120)
+            elif cluster.get_engine() == "azure-storage-server":
+                startup_success = cluster.wait_for_app_logs("Azurite Queue service is successfully listening at", 120)
             if not startup_success:
                 cluster.log_nifi_output()
             assert startup_success
@@ -200,15 +206,19 @@ class MiNiFi_integration_test():
 
     def check_s3_server_object_data(self, cluster_name, object_data):
         cluster = self.acquire_cluster(cluster_name)
-        cluster.check_s3_server_object_data(object_data)
+        assert cluster.check_s3_server_object_data(object_data)
 
     def check_s3_server_object_metadata(self, cluster_name, content_type):
         cluster = self.acquire_cluster(cluster_name)
-        cluster.check_s3_server_object_metadata(content_type)
+        assert cluster.check_s3_server_object_metadata(content_type)
 
     def check_empty_s3_bucket(self, cluster_name):
         cluster = self.acquire_cluster(cluster_name)
         assert cluster.is_s3_bucket_empty()
 
     def check_http_proxy_access(self, cluster_name, url):
-        self.clusters[cluster_name].check_http_proxy_access(url)
+        assert self.clusters[cluster_name].check_http_proxy_access(url)
+
+    def check_azure_storage_server_data(self, cluster_name, object_data):
+        cluster = self.acquire_cluster(cluster_name)
+        assert cluster.check_azure_storage_server_data(object_data)
diff --git a/docker/test/integration/features/azure_storage.feature b/docker/test/integration/features/azure_storage.feature
new file mode 100644
index 0000000..7f198da
--- /dev/null
+++ b/docker/test/integration/features/azure_storage.feature
@@ -0,0 +1,22 @@
+Feature: Sending data from MiNiFi-C++ to an Azure storage server
+  In order to transfer data to Azure storage servers
+  As a user of MiNiFi
+  I need to have a PutAzureBlobStorage processor
+
+  Background:
+    Given the content of "/tmp/output" is monitored
+
+  Scenario: A MiNiFi instance can upload data to Azure blob storage
+    Given a GetFile processor with the "Input Directory" property set to "/tmp/input"
+    And a file with the content "#test_data$123$#" is present in "/tmp/input"
+    And a PutAzureBlobStorage processor set up to communicate with an Azure blob storage
+    And a PutFile processor with the "Directory" property set to "/tmp/output"
+    And the "success" relationship of the GetFile processor is connected to the PutAzureBlobStorage
+    And the "success" relationship of the PutAzureBlobStorage processor is connected to the PutFile
+
+    And an Azure storage server "azure-storage" is set up in correspondence with the PutAzureBlobStorage
+
+    When all instances start up
+
+    Then a flowfile with the content "#test_data$123$#" is placed in the monitored directory in less than 60 seconds
+    And the object on the "azure-storage" Azure storage server is "#test_data$123$#"
diff --git a/docker/test/integration/features/s3.feature b/docker/test/integration/features/s3.feature
index 529d052..4f6ee74 100644
--- a/docker/test/integration/features/s3.feature
+++ b/docker/test/integration/features/s3.feature
@@ -195,4 +195,4 @@ Feature: Sending data from MiNiFi-C++ to an AWS server
     When all instances start up
 
     Then 1 flowfile is placed in the monitored directory in 120 seconds
-    And no errors were generated on the "http-proxy" regarding "http://s3-server:9090/test_bucket/test_object_key"
+    And no errors were generated on the "http-proxy" regarding "http://s3-server:9090/test_bucket"
diff --git a/docker/test/integration/minifi/core/DockerTestCluster.py b/docker/test/integration/minifi/core/DockerTestCluster.py
index 3fa5035..8eceb67 100644
--- a/docker/test/integration/minifi/core/DockerTestCluster.py
+++ b/docker/test/integration/minifi/core/DockerTestCluster.py
@@ -49,47 +49,48 @@ class DockerTestCluster(SingleNodeDockerCluster):
             encoding = "utf8"
         return encoding
 
-    def get_app_log(self):
-        for container in self.containers.values():
-            container = self.client.containers.get(container.id)
-            if b'Segmentation fault' in container.logs():
-                logging.warn('Container segfaulted: %s', container.name)
-                self.segfault=True
-            if container.status == 'running':
-                apps = [("MiNiFi", self.minifi_root + '/logs/minifi-app.log'), ("NiFi", self.nifi_root + '/logs/nifi-app.log'), ("Kafka", self.kafka_broker_root + '/logs/server.log')]
-                for app in apps:
-                    app_log_status, app_log = container.exec_run('/bin/sh -c \'cat ' + app[1] + '\'')
-                    if app_log_status == 0:
-                        logging.info('%s app logs for container \'%s\':\n', app[0], container.name)
-                        return app_log
-                        break
-                else:
-                    logging.warning("The container is running, but none of %s logs were found", " or ".join([x[0] for x in apps]))
+    def get_app_log(self, container_id):
+        container = self.client.containers.get(container_id)
+        if b'Segmentation fault' in container.logs():
+            logging.warn('Container segfaulted: %s', container.name)
+            self.segfault = True
+        if container.status == 'running':
+            apps = [("MiNiFi", self.minifi_root + '/logs/minifi-app.log'), ("NiFi", self.nifi_root + '/logs/nifi-app.log'), ("Kafka", self.kafka_broker_root + '/logs/server.log')]
+            for app in apps:
+                app_log_status, app_log = container.exec_run('/bin/sh -c \'cat ' + app[1] + '\'')
+                if app_log_status == 0:
+                    logging.info('%s app logs for container \'%s\':\n', app[0], container.name)
+                    return app_log
             else:
-                logging.info(container.status)
-                logging.info('Could not cat app logs for container \'%s\' because it is not running', container.name)
+                logging.warning("The container is running, but none of %s logs were found, presuming application logs to stdout, returning docker logs",
+                                " or ".join([x[0] for x in apps]))
+                logging.info('Docker logs for container \'%s\':\n', container.name)
+                return container.logs()
+        else:
+            logging.info(container.status)
+            logging.info('Could not cat app logs for container \'%s\' because it is not running', container.name)
         return None
 
     def wait_for_app_logs(self, log, timeout_seconds, count=1):
         wait_start_time = time.perf_counter()
-        for container_name, container in self.containers.items():
-            logging.info('Waiting for app-logs `%s` in container `%s`', log, container_name)
-            while (time.perf_counter() - wait_start_time) < timeout_seconds:
-                logs = self.get_app_log()
+        while (time.perf_counter() - wait_start_time) < timeout_seconds:
+            for container_name, container in self.containers.items():
+                logging.info('Waiting for app-logs `%s` in container `%s`', log, container_name)
+                logs = self.get_app_log(container.id)
                 if logs is not None and count <= logs.decode("utf-8").count(log):
                     return True
-                if logs is not None:
-                    for line in logs.decode("utf-8").splitlines():
-                        logging.info("App-log: %s", line)
-                time.sleep(1)
+            time.sleep(1)
+
+        logging.error('Waiting for app-log failed. Current logs:')
+        self.log_nifi_output()
         return False
 
     def log_nifi_output(self):
-        app_log = self.get_app_log()
-        if app_log is None:
-            return
-        for line in app_log.decode("utf-8").splitlines():
-            logging.info(line)
+        for container_name, container in self.containers.items():
+            logs = self.get_app_log(container.id)
+            if logs is not None:
+                for line in logs.decode("utf-8").splitlines():
+                    logging.info(line)
 
     def check_minifi_container_started(self):
         for container in self.containers.values():
@@ -100,9 +101,12 @@ class DockerTestCluster(SingleNodeDockerCluster):
 
     def check_http_proxy_access(self, url):
         output = subprocess.check_output(["docker", "exec", "http-proxy", "cat", "/var/log/squid/access.log"]).decode(self.get_stdout_encoding())
+        print(output)
+        print(output.count("TCP_DENIED/407"))
+        print(output.count("TCP_MISS"))
         return url in output and \
             ((output.count("TCP_DENIED/407") != 0 and \
-              output.count("TCP_MISS/200") == output.count("TCP_DENIED/407")) or \
+              output.count("TCP_MISS") == output.count("TCP_DENIED/407")) or \
              output.count("TCP_DENIED/407") == 0 and "TCP_MISS" in output)
 
     @retry_check()
@@ -119,6 +123,12 @@ class DockerTestCluster(SingleNodeDockerCluster):
         return server_metadata["contentType"] == content_type and metadata == server_metadata["userMetadata"]
 
     @retry_check()
+    def check_azure_storage_server_data(self, test_data):
+        data_file = subprocess.check_output(["docker", "exec", "azure-storage-server", "find", "/data/__blobstorage__", "-type", "f"]).decode(self.get_stdout_encoding()).strip()
+        file_data = subprocess.check_output(["docker", "exec", "azure-storage-server", "cat", data_file]).decode(self.get_stdout_encoding())
+        return test_data in file_data
+
+    @retry_check()
     def is_s3_bucket_empty(self):
         s3_mock_dir = subprocess.check_output(["docker", "exec", "s3-server", "find", "/tmp/", "-type", "d", "-name", "s3mock*"]).decode(self.get_stdout_encoding()).strip()
         ls_result = subprocess.check_output(["docker", "exec", "s3-server", "ls", s3_mock_dir + "/test_bucket/"]).decode(self.get_stdout_encoding())
diff --git a/docker/test/integration/minifi/core/SingleNodeDockerCluster.py b/docker/test/integration/minifi/core/SingleNodeDockerCluster.py
index 79ff8ec..083340f 100644
--- a/docker/test/integration/minifi/core/SingleNodeDockerCluster.py
+++ b/docker/test/integration/minifi/core/SingleNodeDockerCluster.py
@@ -112,6 +112,8 @@ class SingleNodeDockerCluster(Cluster):
             self.deploy_http_proxy()
         elif self.engine == 's3-server':
             self.deploy_s3_server()
+        elif self.engine == 'azure-storage-server':
+            self.deploy_azure_storage_server()
         else:
             raise Exception('invalid flow engine: \'%s\'' % self.engine)
 
@@ -273,7 +275,7 @@ class SingleNodeDockerCluster(Cluster):
         self.containers[consumer.name] = consumer
 
     def deploy_s3_server(self):
-        consumer = self.client.containers.run(
+        server = self.client.containers.run(
                     "adobe/s3mock:2.1.28",
                     detach=True,
                     name='s3-server',
@@ -281,7 +283,17 @@ class SingleNodeDockerCluster(Cluster):
                     ports={'9090/tcp': 9090, '9191/tcp': 9191},
                     environment=["initialBuckets=test_bucket"],
                     )
-        self.containers[consumer.name] = consumer
+        self.containers[server.name] = server
+
+    def deploy_azure_storage_server(self):
+        server = self.client.containers.run(
+                    "mcr.microsoft.com/azure-storage/azurite",
+                    detach=True,
+                    name='azure-storage-server',
+                    network=self.network.name,
+                    ports={'10000/tcp': 10000, '10001/tcp': 10001},
+                    )
+        self.containers[server.name] = server
 
     def build_image(self, dockerfile, context_files):
         conf_dockerfile_buffer = BytesIO()
diff --git a/docker/test/integration/minifi/processors/PutAzureBlobStorage.py b/docker/test/integration/minifi/processors/PutAzureBlobStorage.py
new file mode 100644
index 0000000..42d3015
--- /dev/null
+++ b/docker/test/integration/minifi/processors/PutAzureBlobStorage.py
@@ -0,0 +1,13 @@
+from ..core.Processor import Processor
+
+
+class PutAzureBlobStorage(Processor):
+    def __init__(self):
+        super(PutAzureBlobStorage, self).__init__('PutAzureBlobStorage',
+                                                  properties={
+                                                      'Container Name': 'test_container',
+                                                      'Connection String': 'DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azure-storage-server:10000/devstoreaccount1;QueueEndpoint=http://azure-storage-server:10001/devstoreaccount1;',
+                                                      'Blob': 'test_blob',
+                                                      'Create Container': 'true',
+                                                  },
+                                                  auto_terminate=['success', 'failure'])
diff --git a/docker/test/integration/steps/steps.py b/docker/test/integration/steps/steps.py
index bfa4feb..53742a9 100644
--- a/docker/test/integration/steps/steps.py
+++ b/docker/test/integration/steps/steps.py
@@ -12,6 +12,7 @@ from minifi.processors.PutS3Object import PutS3Object
 from minifi.processors.DeleteS3Object import DeleteS3Object
 from minifi.processors.FetchS3Object import FetchS3Object
 from minifi.processors.ListS3 import ListS3
+from minifi.processors.PutAzureBlobStorage import PutAzureBlobStorage
 
 
 from behave import given, then, when
@@ -107,6 +108,12 @@ def step_impl(context):
     fetch_s3.set_name("FetchS3Object")
     context.test.add_node(fetch_s3)
 
+@given("a PutAzureBlobStorage processor set up to communicate with an Azure blob storage")
+def step_impl(context):
+    put_azure_blob_storage = PutAzureBlobStorage()
+    put_azure_blob_storage.set_name("PutAzureBlobStorage")
+    context.test.add_node(put_azure_blob_storage)
+
 @given("a PublishKafka processor set up to communicate with a kafka broker instance")
 def step_impl(context):
     # PublishKafka is never the first node of a flow potential cluster-flow setup is omitted
@@ -232,6 +239,15 @@ def step_impl(context, cluster_name):
     cluster.set_engine("s3-server")
     cluster.set_flow(None)
 
+# azure storage setup
+
+@given("an Azure storage server \"{cluster_name}\" is set up in correspondence with the PutAzureBlobStorage")
+def step_impl(context, cluster_name):
+    cluster = context.test.acquire_cluster(cluster_name)
+    cluster.set_name(cluster_name)
+    cluster.set_engine("azure-storage-server")
+    cluster.set_flow(None)
+
 @when("the MiNiFi instance starts up")
 @when("both instances start up")
 @when("all instances start up")
@@ -275,3 +291,7 @@ def step_impl(context, cluster_name, content_type):
 @then("the object bucket on the \"{cluster_name}\" s3 server is empty")
 def step_impl(context, cluster_name):
     context.test.check_empty_s3_bucket(cluster_name)
+
+@then("the object on the \"{cluster_name}\" Azure storage server is \"{object_data}\"")
+def step_impl(context, cluster_name, object_data):
+    context.test.check_azure_storage_server_data(cluster_name, object_data)
diff --git a/extensions/aws/processors/PutS3Object.h b/extensions/aws/processors/PutS3Object.h
index 4198a5a..e9cf600 100644
--- a/extensions/aws/processors/PutS3Object.h
+++ b/extensions/aws/processors/PutS3Object.h
@@ -92,11 +92,10 @@ class PutS3Object : public S3Processor {
       }
       std::vector<uint8_t> buffer;
       auto data_stream = std::make_shared<std::stringstream>();
-      buffer.reserve(BUFFER_SIZE);
       read_size_ = 0;
       while (read_size_ < flow_size_) {
         auto next_read_size = (std::min)(flow_size_ - read_size_, BUFFER_SIZE);
-        int read_ret = stream->read(buffer.data(), next_read_size);
+        int read_ret = stream->read(buffer, next_read_size);
         if (read_ret < 0) {
           return -1;
         }
diff --git a/extensions/aws/processors/S3Processor.cpp b/extensions/aws/processors/S3Processor.cpp
index 9c4fe7b..617dc2b 100644
--- a/extensions/aws/processors/S3Processor.cpp
+++ b/extensions/aws/processors/S3Processor.cpp
@@ -137,11 +137,13 @@ minifi::utils::optional<Aws::Auth::AWSCredentials> S3Processor::getAWSCredential
 
   std::shared_ptr<core::controller::ControllerService> service = context->getControllerService(service_name);
   if (!service) {
+    logger_->log_error("AWS credentials service with name: '%s' could not be found", service_name);
     return minifi::utils::nullopt;
   }
 
   auto aws_credentials_service = std::dynamic_pointer_cast<minifi::aws::controllers::AWSCredentialsService>(service);
   if (!aws_credentials_service) {
+    logger_->log_error("Controller service with name: '%s' is not an AWS credentials service", service_name);
     return minifi::utils::nullopt;
   }
 
diff --git a/extensions/aws/s3/S3Wrapper.cpp b/extensions/aws/s3/S3Wrapper.cpp
index 107de57..d248cfb 100644
--- a/extensions/aws/s3/S3Wrapper.cpp
+++ b/extensions/aws/s3/S3Wrapper.cpp
@@ -143,13 +143,10 @@ bool S3Wrapper::deleteObject(const std::string& bucket, const std::string& objec
 }
 
 int64_t S3Wrapper::writeFetchedBody(Aws::IOStream& source, const int64_t data_size, io::BaseStream& output) {
-  static const int64_t BUFFER_SIZE = 4096;
-  std::vector<uint8_t> buffer;
-  buffer.resize(BUFFER_SIZE);
-
+  std::vector<uint8_t> buffer(4096);
   int64_t write_size = 0;
   while (write_size < data_size) {
-    auto next_write_size = (std::min)(data_size - write_size, BUFFER_SIZE);
+    auto next_write_size = (std::min)(data_size - write_size, static_cast<int64_t>(4096));
     if (!source.read(reinterpret_cast<char*>(buffer.data()), next_write_size)) {
       return -1;
     }
diff --git a/extensions/azure/AzureLoader.cpp b/extensions/azure/AzureLoader.cpp
new file mode 100644
index 0000000..2bde8c1
--- /dev/null
+++ b/extensions/azure/AzureLoader.cpp
@@ -0,0 +1,28 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "AzureLoader.h"
+#include "core/FlowConfiguration.h"
+
+bool AzureObjectFactory::added = core::FlowConfiguration::add_static_func("createAzureFactory");
+extern "C" {
+
+void *createAzureFactory(void) {
+  return new AzureObjectFactory();
+}
+
+}
diff --git a/extensions/azure/AzureLoader.h b/extensions/azure/AzureLoader.h
new file mode 100644
index 0000000..11224d8
--- /dev/null
+++ b/extensions/azure/AzureLoader.h
@@ -0,0 +1,69 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <vector>
+#include <string>
+#include <memory>
+
+#include "core/ClassLoader.h"
+#include "utils/StringUtils.h"
+#include "utils/GeneralUtils.h"
+#include "controllerservices/AzureStorageCredentialsService.h"
+
+class AzureObjectFactory : public core::ObjectFactory {
+ public:
+  AzureObjectFactory() = default;
+
+  /**
+   * Gets the name of the object.
+   * @return class name of processor
+   */
+  std::string getName() override {
+    return "AzureObjectFactory";
+  }
+
+  std::string getClassName() override {
+    return "AzureObjectFactory";
+  }
+
+  /**
+   * Gets the class name for the object
+   * @return class name for the processor.
+   */
+  std::vector<std::string> getClassNames() override {
+    std::vector<std::string> class_names;
+    class_names.push_back("AzureStorageCredentialsService");
+    return class_names;
+  }
+
+  std::unique_ptr<ObjectFactory> assign(const std::string &class_name) override {
+    if (utils::StringUtils::equalsIgnoreCase(class_name, "AzureStorageCredentialsService")) {
+      return minifi::utils::make_unique<core::DefautObjectFactory<minifi::azure::controllers::AzureStorageCredentialsService>>();
+    } else {
+      return nullptr;
+    }
+  }
+
+  static bool added;
+};
+
+extern "C" {
+DLL_EXPORT void *createAzureFactory(void);
+}
diff --git a/extensions/azure/CMakeLists.txt b/extensions/azure/CMakeLists.txt
new file mode 100644
index 0000000..6478a3e
--- /dev/null
+++ b/extensions/azure/CMakeLists.txt
@@ -0,0 +1,40 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+include(${CMAKE_SOURCE_DIR}/extensions/ExtensionHeader.txt)
+
+file(GLOB SOURCES "*.cpp" "storage/*.cpp" "controllerservices/*.cpp" "processors/*.cpp")
+
+add_library(minifi-azure STATIC ${SOURCES})
+
+target_compile_features(minifi-azure PUBLIC cxx_std_14)
+
+target_include_directories(minifi-azure BEFORE PRIVATE controllerservices)
+target_include_directories(minifi-azure BEFORE PRIVATE processors)
+target_include_directories(minifi-azure BEFORE PRIVATE storage)
+target_include_directories(minifi-azure BEFORE PRIVATE ${CMAKE_SOURCE_DIR}/extensions/azure)
+
+target_link_libraries(minifi-azure ${LIBMINIFI} Threads::Threads)
+target_link_libraries(minifi-azure CURL::libcurl LibXml2::LibXml2)
+target_link_libraries(minifi-azure AZURE::azure-storage-blobs AZURE::azure-storage-common AZURE::azure-core)
+
+SET (AZURE-EXTENSION minifi-azure PARENT_SCOPE)
+register_extension(minifi-azure)
+
+register_extension_linter(minifi-azure-extensions-linter)
diff --git a/extensions/azure/controllerservices/AzureStorageCredentialsService.cpp b/extensions/azure/controllerservices/AzureStorageCredentialsService.cpp
new file mode 100644
index 0000000..7fea3c8
--- /dev/null
+++ b/extensions/azure/controllerservices/AzureStorageCredentialsService.cpp
@@ -0,0 +1,70 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "AzureStorageCredentialsService.h"
+
+#include <set>
+
+namespace org {
+namespace apache {
+namespace nifi {
+namespace minifi {
+namespace azure {
+namespace controllers {
+
+const core::Property AzureStorageCredentialsService::StorageAccountName(
+    core::PropertyBuilder::createProperty("Storage Account Name")
+      ->withDescription("The storage account name.")
+      ->build());
+const core::Property AzureStorageCredentialsService::StorageAccountKey(
+    core::PropertyBuilder::createProperty("Storage Account Key")
+      ->withDescription("The storage account key. This is an admin-like password providing access to every container in this account. "
+                        "It is recommended one uses Shared Access Signature (SAS) token instead for fine-grained control with policies.")
+      ->build());
+const core::Property AzureStorageCredentialsService::SASToken(
+    core::PropertyBuilder::createProperty("SAS Token")
+      ->withDescription("Shared Access Signature token. Specify either SAS Token (recommended) or Account Key.")
+      ->build());
+const core::Property AzureStorageCredentialsService::CommonStorageAccountEndpointSuffix(
+    core::PropertyBuilder::createProperty("Common Storage Account Endpoint Suffix")
+      ->withDescription("Storage accounts in public Azure always use a common FQDN suffix. Override this endpoint suffix with a "
+                        "different suffix in certain circumstances (like Azure Stack or non-public Azure regions).")
+      ->build());
+const core::Property AzureStorageCredentialsService::ConnectionString(
+  core::PropertyBuilder::createProperty("Connection String")
+    ->withDescription("Connection string used to connect to Azure Storage service. This overrides all other set credential properties.")
+    ->build());
+
+void AzureStorageCredentialsService::initialize() {
+  setSupportedProperties({StorageAccountName, StorageAccountKey, SASToken, CommonStorageAccountEndpointSuffix, ConnectionString});
+}
+
+void AzureStorageCredentialsService::onEnable() {
+  getProperty(StorageAccountName.getName(), credentials_.storage_account_name);
+  getProperty(StorageAccountKey.getName(), credentials_.storage_account_key);
+  getProperty(SASToken.getName(), credentials_.sas_token);
+  getProperty(CommonStorageAccountEndpointSuffix.getName(), credentials_.endpoint_suffix);
+  getProperty(ConnectionString.getName(), credentials_.connection_string);
+}
+
+}  // namespace controllers
+}  // namespace azure
+}  // namespace minifi
+}  // namespace nifi
+}  // namespace apache
+}  // namespace org
diff --git a/extensions/azure/controllerservices/AzureStorageCredentialsService.h b/extensions/azure/controllerservices/AzureStorageCredentialsService.h
new file mode 100644
index 0000000..9e55e55
--- /dev/null
+++ b/extensions/azure/controllerservices/AzureStorageCredentialsService.h
@@ -0,0 +1,85 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <string>
+#include <memory>
+
+#include "core/Resource.h"
+#include "core/controller/ControllerService.h"
+#include "core/logging/LoggerConfiguration.h"
+#include "storage/AzureStorageCredentials.h"
+
+namespace org {
+namespace apache {
+namespace nifi {
+namespace minifi {
+namespace azure {
+namespace controllers {
+
+class AzureStorageCredentialsService : public core::controller::ControllerService {
+ public:
+  static const core::Property StorageAccountName;
+  static const core::Property StorageAccountKey;
+  static const core::Property SASToken;
+  static const core::Property CommonStorageAccountEndpointSuffix;
+  static const core::Property ConnectionString;
+
+  explicit AzureStorageCredentialsService(const std::string &name, const minifi::utils::Identifier& uuid = {})
+      : ControllerService(name, uuid),
+        logger_(logging::LoggerFactory<AzureStorageCredentialsService>::getLogger()) {
+  }
+
+  explicit AzureStorageCredentialsService(const std::string& name, const std::shared_ptr<Configure>& /*configuration*/)
+      : ControllerService(name),
+        logger_(logging::LoggerFactory<AzureStorageCredentialsService>::getLogger()) {
+  }
+
+  void initialize() override;
+
+  void yield() override {
+  }
+
+  bool isWorkAvailable() override {
+    return false;
+  }
+
+  bool isRunning() override {
+    return getState() == core::controller::ControllerServiceState::ENABLED;
+  }
+
+  void onEnable() override;
+
+  std::string getConnectionString() const {
+    return credentials_.getConnectionString();
+  }
+
+ private:
+  storage::AzureStorageCredentials credentials_;
+  std::shared_ptr<logging::Logger> logger_;
+};
+
+REGISTER_RESOURCE(AzureStorageCredentialsService, "Azure Storage Credentials Management Service");
+
+}  // namespace controllers
+}  // namespace azure
+}  // namespace minifi
+}  // namespace nifi
+}  // namespace apache
+}  // namespace org
diff --git a/extensions/azure/processors/PutAzureBlobStorage.cpp b/extensions/azure/processors/PutAzureBlobStorage.cpp
new file mode 100644
index 0000000..275e23a
--- /dev/null
+++ b/extensions/azure/processors/PutAzureBlobStorage.cpp
@@ -0,0 +1,265 @@
+/**
+ * @file PutAzureBlobStorage.cpp
+ * PutAzureBlobStorage class implementation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PutAzureBlobStorage.h"
+
+#include <memory>
+#include <string>
+
+#include "storage/AzureBlobStorage.h"
+#include "controllerservices/AzureStorageCredentialsService.h"
+
+namespace org {
+namespace apache {
+namespace nifi {
+namespace minifi {
+namespace azure {
+namespace processors {
+
+const core::Property PutAzureBlobStorage::ContainerName(
+  core::PropertyBuilder::createProperty("Container Name")
+    ->withDescription("Name of the Azure Storage container. In case of PutAzureBlobStorage processor, container can be created if it does not exist.")
+    ->supportsExpressionLanguage(true)
+    ->isRequired(true)
+    ->build());
+const core::Property PutAzureBlobStorage::AzureStorageCredentialsService(
+  core::PropertyBuilder::createProperty("Azure Storage Credentials Service")
+    ->withDescription("Name of the Azure Storage Credentials Service used to retrieve the connection string from.")
+    ->build());
+const core::Property PutAzureBlobStorage::StorageAccountName(
+    core::PropertyBuilder::createProperty("Storage Account Name")
+      ->withDescription("The storage account name.")
+      ->supportsExpressionLanguage(true)
+      ->build());
+const core::Property PutAzureBlobStorage::StorageAccountKey(
+    core::PropertyBuilder::createProperty("Storage Account Key")
+      ->withDescription("The storage account key. This is an admin-like password providing access to every container in this account. "
+                        "It is recommended one uses Shared Access Signature (SAS) token instead for fine-grained control with policies.")
+      ->supportsExpressionLanguage(true)
+      ->build());
+const core::Property PutAzureBlobStorage::SASToken(
+    core::PropertyBuilder::createProperty("SAS Token")
+      ->withDescription("Shared Access Signature token. Specify either SAS Token (recommended) or Account Key.")
+      ->supportsExpressionLanguage(true)
+      ->build());
+const core::Property PutAzureBlobStorage::CommonStorageAccountEndpointSuffix(
+    core::PropertyBuilder::createProperty("Common Storage Account Endpoint Suffix")
+      ->withDescription("Storage accounts in public Azure always use a common FQDN suffix. Override this endpoint suffix with a "
+                        "different suffix in certain circumstances (like Azure Stack or non-public Azure regions). ")
+      ->supportsExpressionLanguage(true)
+      ->build());
+const core::Property PutAzureBlobStorage::ConnectionString(
+  core::PropertyBuilder::createProperty("Connection String")
+    ->withDescription("Connection string used to connect to Azure Storage service. This overrides all other set credential properties.")
+    ->supportsExpressionLanguage(true)
+    ->build());
+const core::Property PutAzureBlobStorage::Blob(
+  core::PropertyBuilder::createProperty("Blob")
+    ->withDescription("The filename of the blob.")
+    ->supportsExpressionLanguage(true)
+    ->isRequired(true)
+    ->build());
+const core::Property PutAzureBlobStorage::CreateContainer(
+  core::PropertyBuilder::createProperty("Create Container")
+    ->withDescription("Specifies whether to check if the container exists and to automatically create it if it does not. "
+                      "Permission to list containers is required. If false, this check is not made, but the Put operation will "
+                      "fail if the container does not exist.")
+    ->isRequired(true)
+    ->withDefaultValue<bool>(false)
+    ->build());
+
+const core::Relationship PutAzureBlobStorage::Success("success", "All successfully processed FlowFiles are routed to this relationship");
+const core::Relationship PutAzureBlobStorage::Failure("failure", "Unsuccessful operations will be transferred to the failure relationship");
+
+void PutAzureBlobStorage::initialize() {
+  // Set the supported properties
+  setSupportedProperties({
+    ContainerName,
+    StorageAccountName,
+    StorageAccountKey,
+    SASToken,
+    CommonStorageAccountEndpointSuffix,
+    ConnectionString,
+    AzureStorageCredentialsService,
+    Blob,
+    CreateContainer
+  });
+  // Set the supported relationships
+  setSupportedRelationships({
+    Success,
+    Failure
+  });
+}
+
+void PutAzureBlobStorage::onSchedule(const std::shared_ptr<core::ProcessContext>& context, const std::shared_ptr<core::ProcessSessionFactory>& /*sessionFactory*/) {
+  context->getProperty(CreateContainer.getName(), create_container_);
+
+  std::string value;
+  if (!context->getProperty(ContainerName.getName(), value) || value.empty()) {
+    throw Exception(PROCESS_SCHEDULE_EXCEPTION, "Container Name property missing or invalid");
+  }
+
+  if (!context->getProperty(Blob.getName(), value) || value.empty()) {
+    throw Exception(PROCESS_SCHEDULE_EXCEPTION, "Blob property missing or invalid");
+  }
+
+  if (context->getProperty(AzureStorageCredentialsService.getName(), value) && !value.empty()) {
+    logger_->log_info("Getting Azure Storage credentials from controller service with name: '%s'", value);
+    return;
+  }
+
+  if (context->getProperty(ConnectionString.getName(), value) && !value.empty()) {
+    logger_->log_info("Using connectionstring directly for Azure Storage authentication");
+    return;
+  }
+
+  if (!context->getProperty(StorageAccountName.getName(), value) || value.empty()) {
+    throw Exception(PROCESS_SCHEDULE_EXCEPTION, "Storage Account Name property missing or invalid");
+  }
+
+  if (context->getProperty(StorageAccountKey.getName(), value) && !value.empty()) {
+    logger_->log_info("Using storage account name and key for authentication");
+    return;
+  }
+
+  if (!context->getProperty(SASToken.getName(), value) || value.empty()) {
+    throw Exception(PROCESS_SCHEDULE_EXCEPTION, "Neither Storage Account Key nor SAS Token property was set.");
+  }
+
+  logger_->log_info("Using storage account name and SAS token for authentication");
+}
+
+std::string PutAzureBlobStorage::getConnectionStringFromControllerService(const std::shared_ptr<core::ProcessContext> &context) const {
+  std::string service_name;
+  if (!context->getProperty(AzureStorageCredentialsService.getName(), service_name) || service_name.empty()) {
+    return "";
+  }
+
+  std::shared_ptr<core::controller::ControllerService> service = context->getControllerService(service_name);
+  if (nullptr == service) {
+    logger_->log_error("Azure Storage credentials service with name: '%s' could not be found", service_name.c_str());
+    return "";
+  }
+
+  auto azure_credentials_service = std::dynamic_pointer_cast<minifi::azure::controllers::AzureStorageCredentialsService>(service);
+  if (!azure_credentials_service) {
+    logger_->log_error("Controller service with name: '%s' is not an Azure Storage credentials service", service_name.c_str());
+    return "";
+  }
+
+  return azure_credentials_service->getConnectionString();
+}
+
+std::string PutAzureBlobStorage::getAzureConnectionStringFromProperties(
+    const std::shared_ptr<core::ProcessContext> &context,
+    const std::shared_ptr<core::FlowFile> &flow_file) {
+  azure::storage::AzureStorageCredentials credentials;
+  context->getProperty(StorageAccountName, credentials.storage_account_name, flow_file);
+  context->getProperty(StorageAccountKey, credentials.storage_account_key, flow_file);
+  context->getProperty(SASToken, credentials.sas_token, flow_file);
+  context->getProperty(CommonStorageAccountEndpointSuffix, credentials.endpoint_suffix, flow_file);
+  context->getProperty(ConnectionString, credentials.connection_string, flow_file);
+  return credentials.getConnectionString();
+}
+
+void PutAzureBlobStorage::createAzureStorageClient(const std::string &connection_string, const std::string &container_name) {
+  // When used in multithreaded environment make sure to use the azure_storage_mutex_ to lock the wrapper so the
+  // client is not reset with different configuration while another thread is using it.
+  if (blob_storage_wrapper_ == nullptr) {
+    blob_storage_wrapper_ = minifi::utils::make_unique<storage::AzureBlobStorage>(connection_string, container_name);
+    return;
+  }
+
+  blob_storage_wrapper_->resetClientIfNeeded(connection_string, container_name);
+}
+
+std::string PutAzureBlobStorage::getConnectionString(
+    const std::shared_ptr<core::ProcessContext> &context,
+    const std::shared_ptr<core::FlowFile> &flow_file) const {
+  auto connection_string = getAzureConnectionStringFromProperties(context, flow_file);
+  if (!connection_string.empty()) {
+    return connection_string;
+  }
+
+  return getConnectionStringFromControllerService(context);
+}
+
+void PutAzureBlobStorage::onTrigger(const std::shared_ptr<core::ProcessContext> &context, const std::shared_ptr<core::ProcessSession> &session) {
+  logger_->log_debug("PutAzureBlobStorage onTrigger");
+  std::shared_ptr<core::FlowFile> flow_file = session->get();
+  if (!flow_file) {
+    return;
+  }
+
+  auto connection_string = getConnectionString(context, flow_file);
+  if (connection_string.empty()) {
+    logger_->log_error("Connection string is empty!");
+    session->transfer(flow_file, Failure);
+    return;
+  }
+
+  std::string container_name;
+  if (!context->getProperty(ContainerName, container_name, flow_file) || container_name.empty()) {
+    logger_->log_error("Container Name is invalid or empty!");
+    session->transfer(flow_file, Failure);
+    return;
+  }
+
+  std::string blob_name;
+  if (!context->getProperty(Blob, blob_name, flow_file) || blob_name.empty()) {
+    logger_->log_error("Blob name is invalid or empty!");
+    session->transfer(flow_file, Failure);
+    return;
+  }
+
+  utils::optional<azure::storage::UploadBlobResult> upload_result;
+  {
+    std::lock_guard<std::mutex> lock(azure_storage_mutex_);
+    createAzureStorageClient(connection_string, container_name);
+    if (create_container_) {
+      blob_storage_wrapper_->createContainer();
+    }
+    PutAzureBlobStorage::ReadCallback callback(flow_file->getSize(), *blob_storage_wrapper_, blob_name);
+    session->read(flow_file, &callback);
+    upload_result = callback.getResult();
+  }
+
+  if (!upload_result) {
+    logger_->log_error("Failed to upload blob '%s' to Azure Storage container '%s'", blob_name, container_name);
+    session->transfer(flow_file, Failure);
+    return;
+  }
+
+  session->putAttribute(flow_file, "azure.container", container_name);
+  session->putAttribute(flow_file, "azure.blobname", blob_name);
+  session->putAttribute(flow_file, "azure.primaryUri", upload_result->primary_uri);
+  session->putAttribute(flow_file, "azure.etag", upload_result->etag);
+  session->putAttribute(flow_file, "azure.length", std::to_string(upload_result->length));
+  session->putAttribute(flow_file, "azure.timestamp", upload_result->timestamp);
+  logger_->log_debug("Successfully uploaded blob '%s' to Azure Storage container '%s'", blob_name, container_name);
+  session->transfer(flow_file, Success);
+}
+
+}  // namespace processors
+}  // namespace azure
+}  // namespace minifi
+}  // namespace nifi
+}  // namespace apache
+}  // namespace org
diff --git a/extensions/azure/processors/PutAzureBlobStorage.h b/extensions/azure/processors/PutAzureBlobStorage.h
new file mode 100644
index 0000000..06a02f4
--- /dev/null
+++ b/extensions/azure/processors/PutAzureBlobStorage.h
@@ -0,0 +1,136 @@
+/**
+ * @file PutAzureBlobStorage.h
+ * PutAzureBlobStorage class declaration
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <utility>
+#include <vector>
+#include <string>
+#include <memory>
+
+#include "core/Property.h"
+#include "core/Processor.h"
+#include "core/logging/Logger.h"
+#include "core/logging/LoggerConfiguration.h"
+#include "storage/BlobStorage.h"
+#include "utils/OptionalUtils.h"
+
+class PutAzureBlobStorageTestsFixture;
+
+namespace org {
+namespace apache {
+namespace nifi {
+namespace minifi {
+namespace azure {
+namespace processors {
+
+class PutAzureBlobStorage : public core::Processor {
+ public:
+  static constexpr char const* ProcessorName = "PutAzureBlobStorage";
+
+  // Supported Properties
+  static const core::Property ContainerName;
+  static const core::Property AzureStorageCredentialsService;
+  static const core::Property StorageAccountName;
+  static const core::Property StorageAccountKey;
+  static const core::Property SASToken;
+  static const core::Property CommonStorageAccountEndpointSuffix;
+  static const core::Property ConnectionString;
+  static const core::Property Blob;
+  static const core::Property CreateContainer;
+
+  // Supported Relationships
+  static const core::Relationship Failure;
+  static const core::Relationship Success;
+
+  explicit PutAzureBlobStorage(const std::string& name, const minifi::utils::Identifier& uuid = minifi::utils::Identifier())
+    : PutAzureBlobStorage(name, uuid, nullptr) {
+  }
+
+  ~PutAzureBlobStorage() override = default;
+
+  void initialize() override;
+  void onSchedule(const std::shared_ptr<core::ProcessContext> &context, const std::shared_ptr<core::ProcessSessionFactory> &sessionFactory) override;
+  void onTrigger(const std::shared_ptr<core::ProcessContext> &context, const std::shared_ptr<core::ProcessSession> &session) override;
+
+  class ReadCallback : public InputStreamCallback {
+   public:
+    ReadCallback(uint64_t flow_size, azure::storage::BlobStorage& blob_storage_wrapper, const std::string &blob_name)
+      : flow_size_(flow_size)
+      , blob_storage_wrapper_(blob_storage_wrapper)
+      , blob_name_(blob_name) {
+    }
+
+    int64_t process(const std::shared_ptr<io::BaseStream>& stream) override {
+      std::vector<uint8_t> buffer;
+      int read_ret = stream->read(buffer, flow_size_);
+      if (read_ret < 0) {
+        return -1;
+      }
+
+      result_ = blob_storage_wrapper_.uploadBlob(blob_name_, buffer.data(), flow_size_);
+      if (!result_) {
+        return -1;
+      }
+      return result_->length;
+    }
+
+    utils::optional<azure::storage::UploadBlobResult> getResult() const {
+      return result_;
+    }
+
+   private:
+    uint64_t flow_size_;
+    azure::storage::BlobStorage &blob_storage_wrapper_;
+    std::string blob_name_;
+    utils::optional<azure::storage::UploadBlobResult> result_ = utils::nullopt;
+  };
+
+ private:
+  friend class ::PutAzureBlobStorageTestsFixture;
+
+  explicit PutAzureBlobStorage(const std::string& name, const minifi::utils::Identifier& uuid, std::unique_ptr<storage::BlobStorage> blob_storage_wrapper)
+    : core::Processor(name, uuid)
+    , blob_storage_wrapper_(std::move(blob_storage_wrapper)) {
+  }
+
+  std::string getConnectionStringFromControllerService(const std::shared_ptr<core::ProcessContext> &context) const;
+  static std::string getAzureConnectionStringFromProperties(
+    const std::shared_ptr<core::ProcessContext> &context,
+    const std::shared_ptr<core::FlowFile> &flow_file);
+  std::string getConnectionString(
+    const std::shared_ptr<core::ProcessContext> &context,
+    const std::shared_ptr<core::FlowFile> &flow_file) const;
+  void createAzureStorageClient(const std::string &connection_string, const std::string &container_name);
+
+  std::mutex azure_storage_mutex_;
+  std::unique_ptr<storage::BlobStorage> blob_storage_wrapper_;
+  bool create_container_ = false;
+  std::shared_ptr<logging::Logger> logger_{logging::LoggerFactory<PutAzureBlobStorage>::getLogger()};
+};
+
+REGISTER_RESOURCE(PutAzureBlobStorage, "Puts content into an Azure Storage Blob");
+
+}  // namespace processors
+}  // namespace azure
+}  // namespace minifi
+}  // namespace nifi
+}  // namespace apache
+}  // namespace org
diff --git a/extensions/azure/storage/AzureBlobStorage.cpp b/extensions/azure/storage/AzureBlobStorage.cpp
new file mode 100644
index 0000000..2427dcf
--- /dev/null
+++ b/extensions/azure/storage/AzureBlobStorage.cpp
@@ -0,0 +1,87 @@
+/**
+ * @file AzureBlobStorage.cpp
+ * AzureBlobStorage class implementation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "AzureBlobStorage.h"
+
+#include <utility>
+
+#include "utils/GeneralUtils.h"
+
+namespace org {
+namespace apache {
+namespace nifi {
+namespace minifi {
+namespace azure {
+namespace storage {
+
+AzureBlobStorage::AzureBlobStorage(std::string connection_string, std::string container_name)
+  : BlobStorage(std::move(connection_string), std::move(container_name))
+  , container_client_(minifi::utils::make_unique<Azure::Storage::Blobs::BlobContainerClient>(
+      Azure::Storage::Blobs::BlobContainerClient::CreateFromConnectionString(connection_string_, container_name_))) {
+}
+
+void AzureBlobStorage::resetClientIfNeeded(const std::string &connection_string, const std::string &container_name) {
+  if (connection_string == connection_string_ && container_name_ == container_name) {
+    logger_->log_debug("Client credentials have not changed, no need to reset client");
+    return;
+  }
+  connection_string_ = connection_string;
+  container_name_ = container_name;
+  logger_->log_debug("Client has been reset with new credentials");
+  container_client_ = minifi::utils::make_unique<Azure::Storage::Blobs::BlobContainerClient>(Azure::Storage::Blobs::BlobContainerClient::CreateFromConnectionString(connection_string, container_name));
+}
+
+void AzureBlobStorage::createContainer() {
+  try {
+    auto blob_client = container_client_->Create();
+    logger_->log_debug("Container created");
+  } catch (const std::runtime_error&) {
+    logger_->log_debug("Container creation failed, the container may already exist.");
+  }
+}
+
+utils::optional<UploadBlobResult> AzureBlobStorage::uploadBlob(const std::string &blob_name, const uint8_t* buffer, std::size_t buffer_size) {
+  try {
+    auto blob_client = container_client_->GetBlockBlobClient(blob_name);
+    auto response = blob_client.UploadFrom(buffer, buffer_size);
+    if (!response.HasValue()) {
+      return utils::nullopt;
+    }
+
+    UploadBlobResult result;
+    result.length = buffer_size;
+    result.primary_uri = container_client_->GetUrl();
+    if (response->ETag.HasValue()) {
+      result.etag = response->ETag.ToString();
+    }
+    result.timestamp = response->LastModified.GetString(Azure::Core::DateTime::DateFormat::Rfc1123);
+    return result;
+  } catch (const std::runtime_error& err) {
+    logger_->log_error("A runtime error occurred while uploading blob: %s", err.what());
+    return utils::nullopt;
+  }
+}
+
+}  // namespace storage
+}  // namespace azure
+}  // namespace minifi
+}  // namespace nifi
+}  // namespace apache
+}  // namespace org
diff --git a/extensions/azure/storage/AzureBlobStorage.h b/extensions/azure/storage/AzureBlobStorage.h
new file mode 100644
index 0000000..3b2a602
--- /dev/null
+++ b/extensions/azure/storage/AzureBlobStorage.h
@@ -0,0 +1,55 @@
+/**
+ * @file AzureBlobStorage.h
+ * AzureBlobStorage class declaration
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <string>
+#include <vector>
+#include <memory>
+
+#include "BlobStorage.h"
+#include "azure/storage/blobs.hpp"
+#include "core/logging/Logger.h"
+#include "core/logging/LoggerConfiguration.h"
+
+namespace org {
+namespace apache {
+namespace nifi {
+namespace minifi {
+namespace azure {
+namespace storage {
+
+class AzureBlobStorage : public BlobStorage {
+ public:
+  AzureBlobStorage(std::string connection_string, std::string container_name);
+  void createContainer() override;
+  void resetClientIfNeeded(const std::string &connection_string, const std::string &container_name) override;
+  utils::optional<UploadBlobResult> uploadBlob(const std::string &blob_name, const uint8_t* buffer, std::size_t buffer_size) override;
+
+ private:
+  std::unique_ptr<Azure::Storage::Blobs::BlobContainerClient> container_client_;
+  std::shared_ptr<logging::Logger> logger_{logging::LoggerFactory<AzureBlobStorage>::getLogger()};
+};
+
+}  // namespace storage
+}  // namespace azure
+}  // namespace minifi
+}  // namespace nifi
+}  // namespace apache
+}  // namespace org
diff --git a/extensions/azure/storage/AzureStorageCredentials.h b/extensions/azure/storage/AzureStorageCredentials.h
new file mode 100644
index 0000000..b6b4b8d
--- /dev/null
+++ b/extensions/azure/storage/AzureStorageCredentials.h
@@ -0,0 +1,73 @@
+/**
+ * @file AzureStorageCredentials.h
+ * AzureStorageCredentials class declaration
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <string>
+
+#include "utils/StringUtils.h"
+
+namespace org {
+namespace apache {
+namespace nifi {
+namespace minifi {
+namespace azure {
+namespace storage {
+
+struct AzureStorageCredentials {
+  std::string storage_account_name;
+  std::string storage_account_key;
+  std::string sas_token;
+  std::string endpoint_suffix;
+  std::string connection_string;
+
+  std::string getConnectionString() const {
+    if (!connection_string.empty()) {
+      return connection_string;
+    }
+
+    if (storage_account_name.empty() || (storage_account_key.empty() && sas_token.empty())) {
+      return "";
+    }
+
+    std::string credentials;
+    credentials += "AccountName=" + storage_account_name;
+
+    if (!storage_account_key.empty()) {
+      credentials += ";AccountKey=" + storage_account_key;
+    }
+
+    if (!sas_token.empty()) {
+      credentials += ";SharedAccessSignature=" + (sas_token[0] == '?' ? sas_token.substr(1) : sas_token);
+    }
+
+    if (!endpoint_suffix.empty()) {
+      credentials += ";EndpointSuffix=" + endpoint_suffix;
+    }
+
+    return credentials;
+  }
+};
+
+}  // namespace storage
+}  // namespace azure
+}  // namespace minifi
+}  // namespace nifi
+}  // namespace apache
+}  // namespace org
diff --git a/extensions/azure/storage/BlobStorage.h b/extensions/azure/storage/BlobStorage.h
new file mode 100644
index 0000000..fe85cb1
--- /dev/null
+++ b/extensions/azure/storage/BlobStorage.h
@@ -0,0 +1,64 @@
+/**
+ * @file BlobStorage.h
+ * BlobStorage class declaration
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <string>
+#include <vector>
+#include <utility>
+
+#include "utils/OptionalUtils.h"
+
+namespace org {
+namespace apache {
+namespace nifi {
+namespace minifi {
+namespace azure {
+namespace storage {
+
+struct UploadBlobResult {
+  std::string primary_uri;
+  std::string etag;
+  std::size_t length;
+  std::string timestamp;
+};
+
+class BlobStorage {
+ public:
+  BlobStorage(std::string connection_string, std::string container_name)
+    : connection_string_(std::move(connection_string))
+    , container_name_(std::move(container_name)) {
+  }
+
+  virtual void createContainer() = 0;
+  virtual void resetClientIfNeeded(const std::string &connection_string, const std::string &container_name) = 0;
+  virtual utils::optional<UploadBlobResult> uploadBlob(const std::string &blob_name, const uint8_t* buffer, std::size_t buffer_size) = 0;
+  virtual ~BlobStorage() = default;
+
+ protected:
+  std::string connection_string_;
+  std::string container_name_;
+};
+
+}  // namespace storage
+}  // namespace azure
+}  // namespace minifi
+}  // namespace nifi
+}  // namespace apache
+}  // namespace org
diff --git a/extensions/mqtt/processors/PublishMQTT.h b/extensions/mqtt/processors/PublishMQTT.h
index b166a1a..d60c819 100644
--- a/extensions/mqtt/processors/PublishMQTT.h
+++ b/extensions/mqtt/processors/PublishMQTT.h
@@ -78,8 +78,7 @@ class PublishMQTT : public processors::AbstractMQTTProcessor {
     int64_t process(const std::shared_ptr<io::BaseStream>& stream) {
       if (flow_size_ < max_seg_size_)
         max_seg_size_ = flow_size_;
-      std::vector<unsigned char> buffer;
-      buffer.reserve(max_seg_size_);
+      std::vector<unsigned char> buffer(max_seg_size_);
       read_size_ = 0;
       status_ = 0;
       while (read_size_ < flow_size_) {
diff --git a/libminifi/test/azure-tests/CMakeLists.txt b/libminifi/test/azure-tests/CMakeLists.txt
new file mode 100644
index 0000000..3c4a02f
--- /dev/null
+++ b/libminifi/test/azure-tests/CMakeLists.txt
@@ -0,0 +1,39 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+file(GLOB AZURE_INTEGRATION_TESTS  "*.cpp")
+
+SET(AZURE_TEST_COUNT 0)
+FOREACH(testfile ${AZURE_INTEGRATION_TESTS})
+	get_filename_component(testfilename "${testfile}" NAME_WE)
+  add_executable("${testfilename}" "${testfile}")
+  target_include_directories(${testfilename} PRIVATE BEFORE "${CMAKE_SOURCE_DIR}/extensions/standard-processors")
+	target_include_directories(${testfilename} PRIVATE BEFORE "${CMAKE_SOURCE_DIR}/extensions/azure")
+	target_include_directories(${testfilename} PRIVATE BEFORE "${CMAKE_SOURCE_DIR}/extensions/azure/storage")
+	target_include_directories(${testfilename} PRIVATE BEFORE "${CMAKE_SOURCE_DIR}/extensions/expression-language")
+	target_compile_features(${testfilename} PRIVATE cxx_std_14)
+	createTests("${testfilename}")
+	target_link_libraries(${testfilename} ${CATCH_MAIN_LIB})
+  target_wholearchive_library(${testfilename} minifi-azure)
+	target_wholearchive_library(${testfilename} minifi-standard-processors)
+	target_wholearchive_library(${testfilename} minifi-expression-language-extensions)
+	MATH(EXPR AZURE_TEST_COUNT "${AZURE_TEST_COUNT}+1")
+	add_test(NAME "${testfilename}" COMMAND "${testfilename}" WORKING_DIRECTORY ${TEST_DIR})
+ENDFOREACH()
+message("-- Finished building ${AZURE_TEST_COUNT} Azure related test file(s)...")
diff --git a/libminifi/test/azure-tests/PutAzureBlobStorageTests.cpp b/libminifi/test/azure-tests/PutAzureBlobStorageTests.cpp
new file mode 100644
index 0000000..ba53b0d
--- /dev/null
+++ b/libminifi/test/azure-tests/PutAzureBlobStorageTests.cpp
@@ -0,0 +1,276 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "../TestBase.h"
+#include "core/Processor.h"
+#include "processors/PutAzureBlobStorage.h"
+#include "processors/GetFile.h"
+#include "processors/LogAttribute.h"
+#include "processors/UpdateAttribute.h"
+#include "storage/BlobStorage.h"
+#include "utils/file/FileUtils.h"
+
+const std::string CONTAINER_NAME = "test-container";
+const std::string STORAGE_ACCOUNT_NAME = "test-account";
+const std::string STORAGE_ACCOUNT_KEY = "test-key";
+const std::string SAS_TOKEN = "test-sas-token";
+const std::string ENDPOINT_SUFFIX = "test.suffix.com";
+const std::string CONNECTION_STRING = "test-connectionstring";
+const std::string BLOB_NAME = "test-blob.txt";
+const std::string TEST_DATA = "data";
+
+class MockBlobStorage : public minifi::azure::storage::BlobStorage {
+ public:
+  const std::string ETAG = "test-etag";
+  const std::string PRIMARY_URI = "test-uri";
+  const std::string TEST_TIMESTAMP = "test-timestamp";
+
+  MockBlobStorage()
+    : BlobStorage("", "") {
+  }
+
+  void createContainer() override {
+    container_created_ = true;
+  }
+
+  void resetClientIfNeeded(const std::string &connection_string, const std::string &container_name) override {
+    connection_string_ = connection_string;
+    container_name_ = container_name;
+  }
+
+  utils::optional<minifi::azure::storage::UploadBlobResult> uploadBlob(const std::string& /*blob_name*/, const uint8_t* buffer, std::size_t buffer_size) override {
+    input_data = std::string(buffer, buffer + buffer_size);
+    minifi::azure::storage::UploadBlobResult result;
+    result.etag = ETAG;
+    result.length = buffer_size;
+    result.primary_uri = PRIMARY_URI;
+    result.timestamp = TEST_TIMESTAMP;
+    return result;
+  }
+
+  std::string getConnectionString() const {
+    return connection_string_;
+  }
+
+  std::string getContainerName() const {
+    return container_name_;
+  }
+
+  bool getContainerCreated() const {
+    return container_created_;
+  }
+
+  std::string input_data;
+
+ private:
+  bool container_created_ = false;
+};
+
+class PutAzureBlobStorageTestsFixture {
+ public:
+  PutAzureBlobStorageTestsFixture() {
+    LogTestController::getInstance().setDebug<TestPlan>();
+    LogTestController::getInstance().setDebug<minifi::core::Processor>();
+    LogTestController::getInstance().setTrace<minifi::core::ProcessSession>();
+    LogTestController::getInstance().setTrace<processors::GetFile>();
+    LogTestController::getInstance().setDebug<processors::UpdateAttribute>();
+    LogTestController::getInstance().setDebug<processors::LogAttribute>();
+    LogTestController::getInstance().setTrace<minifi::azure::processors::PutAzureBlobStorage>();
+
+    // Build MiNiFi processing graph
+    plan = test_controller.createPlan();
+    auto mock_blob_storage = utils::make_unique<MockBlobStorage>();
+    mock_blob_storage_ptr = mock_blob_storage.get();
+    put_azure_blob_storage = std::shared_ptr<minifi::azure::processors::PutAzureBlobStorage>(
+      new minifi::azure::processors::PutAzureBlobStorage("PutAzureBlobStorage", utils::Identifier(), std::move(mock_blob_storage)));
+    char input_dir_mask[] = "/tmp/gt.XXXXXX";
+    auto input_dir = test_controller.createTempDirectory(input_dir_mask);
+    std::ofstream input_file_stream(input_dir + utils::file::FileUtils::get_separator() + "input_data.log");
+    input_file_stream << TEST_DATA;
+    input_file_stream.close();
+    get_file = plan->addProcessor("GetFile", "GetFile");
+    plan->setProperty(get_file, processors::GetFile::Directory.getName(), input_dir);
+    plan->setProperty(get_file, processors::GetFile::KeepSourceFile.getName(), "false");
+    update_attribute = plan->addProcessor("UpdateAttribute", "UpdateAttribute", { {"success", "d"} },  true);
+    plan->addProcessor(put_azure_blob_storage, "PutAzureBlobStorage", { {"success", "d"} }, true);
+    plan->addProcessor("LogAttribute", "LogAttribute", { {"success", "d"} }, true);
+  }
+
+  void setDefaultCredentials() {
+    plan->setProperty(update_attribute, "test.account_name", STORAGE_ACCOUNT_NAME, true);
+    plan->setProperty(put_azure_blob_storage, "Storage Account Name", "${test.account_name}");
+    plan->setProperty(update_attribute, "test.account_key", STORAGE_ACCOUNT_KEY, true);
+    plan->setProperty(put_azure_blob_storage, "Storage Account Key", "${test.account_key}");
+  }
+
+  virtual ~PutAzureBlobStorageTestsFixture() {
+    LogTestController::getInstance().reset();
+  }
+
+ protected:
+  TestController test_controller;
+  std::shared_ptr<TestPlan> plan;
+  MockBlobStorage* mock_blob_storage_ptr;
+  std::shared_ptr<core::Processor> put_azure_blob_storage;
+  std::shared_ptr<core::Processor> get_file;
+  std::shared_ptr<core::Processor> update_attribute;
+};
+
+TEST_CASE_METHOD(PutAzureBlobStorageTestsFixture, "Test required parameters", "[azureStorageParameters]") {
+  SECTION("Container name not set") {
+  }
+
+  SECTION("Blob name not set") {
+    plan->setProperty(update_attribute, "test.container", CONTAINER_NAME, true);
+    plan->setProperty(put_azure_blob_storage, "Container Name", "${test.container}");
+  }
+
+  REQUIRE_THROWS_AS(test_controller.runSession(plan, true), minifi::Exception&);
+}
+
+TEST_CASE_METHOD(PutAzureBlobStorageTestsFixture, "Test credentials settings", "[azureStorageCredentials]") {
+  plan->setProperty(update_attribute, "test.container", CONTAINER_NAME, true);
+  plan->setProperty(put_azure_blob_storage, "Container Name", "${test.container}");
+  plan->setProperty(update_attribute, "test.blob", BLOB_NAME, true);
+  plan->setProperty(put_azure_blob_storage, "Blob", "${test.blob}");
+
+  SECTION("No credentials are set") {
+    REQUIRE_THROWS_AS(test_controller.runSession(plan, true), minifi::Exception&);
+  }
+
+  SECTION("No account key or SAS is set") {
+    plan->setProperty(put_azure_blob_storage, "Storage Account Name", STORAGE_ACCOUNT_NAME);
+    REQUIRE_THROWS_AS(test_controller.runSession(plan, true), minifi::Exception&);
+  }
+
+  SECTION("Credentials set in Azure Storage Credentials Service") {
+    auto azure_storage_cred_service = plan->addController("AzureStorageCredentialsService", "AzureStorageCredentialsService");
+    plan->setProperty(azure_storage_cred_service, "Storage Account Name", STORAGE_ACCOUNT_NAME);
+    plan->setProperty(azure_storage_cred_service, "Storage Account Key", STORAGE_ACCOUNT_KEY);
+    plan->setProperty(put_azure_blob_storage, "Azure Storage Credentials Service", "AzureStorageCredentialsService");
+    test_controller.runSession(plan, true);
+    REQUIRE(mock_blob_storage_ptr->getConnectionString() == "AccountName=" + STORAGE_ACCOUNT_NAME + ";AccountKey=" + STORAGE_ACCOUNT_KEY);
+  }
+
+  SECTION("Overriding credentials set in Azure Storage Credentials Service with connection string") {
+    auto azure_storage_cred_service = plan->addController("AzureStorageCredentialsService", "AzureStorageCredentialsService");
+    plan->setProperty(azure_storage_cred_service, "Storage Account Name", STORAGE_ACCOUNT_NAME);
+    plan->setProperty(azure_storage_cred_service, "Storage Account Key", STORAGE_ACCOUNT_KEY);
+    plan->setProperty(azure_storage_cred_service, "Connection String", CONNECTION_STRING);
+    plan->setProperty(put_azure_blob_storage, "Azure Storage Credentials Service", "AzureStorageCredentialsService");
+    test_controller.runSession(plan, true);
+    REQUIRE(mock_blob_storage_ptr->getConnectionString() == CONNECTION_STRING);
+  }
+
+  SECTION("Account name and key set in properties") {
+    plan->setProperty(update_attribute, "test.account_name", STORAGE_ACCOUNT_NAME, true);
+    plan->setProperty(put_azure_blob_storage, "Storage Account Name", "${test.account_name}");
+    plan->setProperty(update_attribute, "test.account_key", STORAGE_ACCOUNT_KEY, true);
+    plan->setProperty(put_azure_blob_storage, "Storage Account Key", "${test.account_key}");
+    test_controller.runSession(plan, true);
+    REQUIRE(mock_blob_storage_ptr->getConnectionString() == "AccountName=" + STORAGE_ACCOUNT_NAME + ";AccountKey=" + STORAGE_ACCOUNT_KEY);
+  }
+
+  SECTION("Account name and SAS token set in properties") {
+    plan->setProperty(update_attribute, "test.account_name", STORAGE_ACCOUNT_NAME, true);
+    plan->setProperty(put_azure_blob_storage, "Storage Account Name", "${test.account_name}");
+    plan->setProperty(update_attribute, "test.sas_token", SAS_TOKEN, true);
+    plan->setProperty(put_azure_blob_storage, "SAS Token", "${test.sas_token}");
+    test_controller.runSession(plan, true);
+    REQUIRE(mock_blob_storage_ptr->getConnectionString() == "AccountName=" + STORAGE_ACCOUNT_NAME + ";SharedAccessSignature=" + SAS_TOKEN);
+  }
+
+  SECTION("Account name and SAS token with question mark set in properties") {
+    plan->setProperty(update_attribute, "test.account_name", STORAGE_ACCOUNT_NAME, true);
+    plan->setProperty(put_azure_blob_storage, "Storage Account Name", "${test.account_name}");
+    plan->setProperty(update_attribute, "test.sas_token", "?" + SAS_TOKEN, true);
+    plan->setProperty(put_azure_blob_storage, "SAS Token", "${test.sas_token}");
+    test_controller.runSession(plan, true);
+    REQUIRE(mock_blob_storage_ptr->getConnectionString() == "AccountName=" + STORAGE_ACCOUNT_NAME + ";SharedAccessSignature=" + SAS_TOKEN);
+  }
+
+  SECTION("Endpoint suffix overridden") {
+    plan->setProperty(update_attribute, "test.account_name", STORAGE_ACCOUNT_NAME, true);
+    plan->setProperty(put_azure_blob_storage, "Storage Account Name", "${test.account_name}");
+    plan->setProperty(update_attribute, "test.account_key", STORAGE_ACCOUNT_KEY, true);
+    plan->setProperty(put_azure_blob_storage, "Storage Account Key", "${test.account_key}");
+    plan->setProperty(update_attribute, "test.endpoint_suffix", ENDPOINT_SUFFIX, true);
+    plan->setProperty(put_azure_blob_storage, "Common Storage Account Endpoint Suffix", "${test.endpoint_suffix}");
+    test_controller.runSession(plan, true);
+    REQUIRE(mock_blob_storage_ptr->getConnectionString() == "AccountName=" + STORAGE_ACCOUNT_NAME + ";AccountKey=" + STORAGE_ACCOUNT_KEY + ";EndpointSuffix=" + ENDPOINT_SUFFIX);
+  }
+
+  SECTION("Use connection string") {
+    plan->setProperty(update_attribute, "test.connection_string", CONNECTION_STRING, true);
+    plan->setProperty(put_azure_blob_storage, "Connection String", "${test.connection_string}");
+    test_controller.runSession(plan, true);
+    REQUIRE(mock_blob_storage_ptr->getConnectionString() == CONNECTION_STRING);
+  }
+
+  SECTION("Overriding credentials with connection string") {
+    auto azure_storage_cred_service = plan->addController("AzureStorageCredentialsService", "AzureStorageCredentialsService");
+    plan->setProperty(azure_storage_cred_service, "Storage Account Name", STORAGE_ACCOUNT_NAME);
+    plan->setProperty(azure_storage_cred_service, "Storage Account Key", STORAGE_ACCOUNT_KEY);
+    plan->setProperty(put_azure_blob_storage, "Azure Storage Credentials Service", "AzureStorageCredentialsService");
+    plan->setProperty(update_attribute, "test.account_name", STORAGE_ACCOUNT_NAME, true);
+    plan->setProperty(put_azure_blob_storage, "Storage Account Name", "${test.account_name}");
+    plan->setProperty(update_attribute, "test.account_key", STORAGE_ACCOUNT_KEY, true);
+    plan->setProperty(put_azure_blob_storage, "Storage Account Key", "${test.account_key}");
+    plan->setProperty(update_attribute, "test.connection_string", CONNECTION_STRING, true);
+    plan->setProperty(put_azure_blob_storage, "Connection String", "${test.connection_string}");
+    test_controller.runSession(plan, true);
+    REQUIRE(mock_blob_storage_ptr->getConnectionString() == CONNECTION_STRING);
+  }
+}
+
+TEST_CASE_METHOD(PutAzureBlobStorageTestsFixture, "Test Azure blob upload", "[azureBlobStorageUpload]") {
+  plan->setProperty(update_attribute, "test.container", CONTAINER_NAME, true);
+  plan->setProperty(put_azure_blob_storage, "Container Name", "${test.container}");
+  plan->setProperty(update_attribute, "test.blob", BLOB_NAME, true);
+  plan->setProperty(put_azure_blob_storage, "Blob", "${test.blob}");
+  setDefaultCredentials();
+  test_controller.runSession(plan, true);
+  REQUIRE(LogTestController::getInstance().contains("key:azure.container value:" + CONTAINER_NAME));
+  REQUIRE(LogTestController::getInstance().contains("key:azure.blobname value:" + BLOB_NAME));
+  REQUIRE(LogTestController::getInstance().contains("key:azure.primaryUri value:" + mock_blob_storage_ptr->PRIMARY_URI));
+  REQUIRE(LogTestController::getInstance().contains("key:azure.etag value:" + mock_blob_storage_ptr->ETAG));
+  REQUIRE(LogTestController::getInstance().contains("key:azure.length value:" + std::to_string(TEST_DATA.size())));
+  REQUIRE(LogTestController::getInstance().contains("key:azure.timestamp value:" + mock_blob_storage_ptr->TEST_TIMESTAMP));
+  REQUIRE(mock_blob_storage_ptr->input_data == TEST_DATA);
+  REQUIRE(mock_blob_storage_ptr->getContainerCreated() == false);
+  REQUIRE(mock_blob_storage_ptr->getContainerName() == CONTAINER_NAME);
+}
+
+TEST_CASE_METHOD(PutAzureBlobStorageTestsFixture, "Test Azure blob upload with container creation", "[azureBlobStorageUpload]") {
+  plan->setProperty(update_attribute, "test.container", CONTAINER_NAME, true);
+  plan->setProperty(put_azure_blob_storage, "Container Name", "${test.container}");
+  plan->setProperty(update_attribute, "test.blob", BLOB_NAME, true);
+  plan->setProperty(put_azure_blob_storage, "Blob", "${test.blob}");
+  plan->setProperty(put_azure_blob_storage, "Create Container", "true");
+  setDefaultCredentials();
+  test_controller.runSession(plan, true);
+  REQUIRE(LogTestController::getInstance().contains("key:azure.container value:" + CONTAINER_NAME));
+  REQUIRE(LogTestController::getInstance().contains("key:azure.blobname value:" + BLOB_NAME));
+  REQUIRE(LogTestController::getInstance().contains("key:azure.primaryUri value:" + mock_blob_storage_ptr->PRIMARY_URI));
+  REQUIRE(LogTestController::getInstance().contains("key:azure.etag value:" + mock_blob_storage_ptr->ETAG));
+  REQUIRE(LogTestController::getInstance().contains("key:azure.length value:" + std::to_string(TEST_DATA.size())));
+  REQUIRE(LogTestController::getInstance().contains("key:azure.timestamp value:" + mock_blob_storage_ptr->TEST_TIMESTAMP));
+  REQUIRE(mock_blob_storage_ptr->input_data == TEST_DATA);
+  REQUIRE(mock_blob_storage_ptr->getContainerCreated() == true);
+  REQUIRE(mock_blob_storage_ptr->getContainerName() == CONTAINER_NAME);
+}
diff --git a/msi/LICENSE.txt b/msi/LICENSE.txt
index 0f30eab..e48765f 100644
--- a/msi/LICENSE.txt
+++ b/msi/LICENSE.txt
@@ -214,14 +214,14 @@ notices and license terms. Your use of the source code for the these
 subcomponents is subject to the terms and conditions of the following
 licenses.
 
-This product bundles 'cpplint.py' which is  available under a 3-Clause BSD License. 
+This product bundles 'cpplint.py' which is  available under a 3-Clause BSD License.
 
 	 Copyright (c) 2009 Google Inc. All rights reserved.
-	
+
 	 Redistribution and use in source and binary forms, with or without
 	 modification, are permitted provided that the following conditions are
 	 met:
-	
+
 	    * Redistributions of source code must retain the above copyright
 	 notice, this list of conditions and the following disclaimer.
 	    * Redistributions in binary form must reproduce the above
@@ -231,7 +231,7 @@ This product bundles 'cpplint.py' which is  available under a 3-Clause BSD Licen
 	    * Neither the name of Google Inc. nor the names of its
 	 contributors may be used to endorse or promote products derived from
 	 this software without specific prior written permission.
-	
+
 	 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 	 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 	 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -251,11 +251,11 @@ This product bundles 'spdlog' which is available under an MIT license.
   Copyright (c) 2019 ZVYAGIN.Alexander@.gmail.com
 
   Copyright(c) 2015 Ruslan Baratov.
-  
+
   Copyright (c) 2015-2019 Gabi Melman.
 
   Copyright (c) 2016-2019 spdlog contributors.
-	
+
   Permission is hereby granted, free of charge, to any person obtaining a copy
   of this software and associated documentation files (the "Software"), to deal
   in the Software without restriction, including without limitation the rights
@@ -277,7 +277,7 @@ This product bundles 'spdlog' which is available under an MIT license.
   -- NOTE: Third party dependency used by this software --
   This software depends on the fmt lib (MIT License),
   and users must comply to its license: https://github.com/fmtlib/fmt/blob/master/LICENSE.rst
-		
+
 This product bundles 'fmt' which is available under the following license.
 
   Copyright (c) 2012 - present, Victor Zverovich
@@ -653,7 +653,7 @@ For more information, please refer to <http://unlicense.org>
 This projects includes libarchive bundle (https://www.libarchive.org)
 which is available under a BSD License by Tim Kientzle and others
 
-Copyright (c) 2003-2009 Tim Kientzle and other authors 
+Copyright (c) 2003-2009 Tim Kientzle and other authors
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -685,10 +685,10 @@ above.
 The following source files are in the public domain and have no copyright:
 libarchive/archive_getdate.c
 
-This libarchive includes below files 
+This libarchive includes below files
 libarchive/archive_entry.c
 libarchive/archive_read_support_filter_compress.c
-libarchive/archive_write_add_filter_compress.c 
+libarchive/archive_write_add_filter_compress.c
 which under a 3-clause UC Regents copyright as below
 /*-
  * Copyright (c) 1993
@@ -976,7 +976,7 @@ For sys/queue.h:
  *
  *  @(#)queue.h 8.5 (Berkeley) 8/20/94
  * $FreeBSD$
- 
+
 This product bundles regexp.c and regexp.h within librdkafka offered under a public domain license as below.
 
 LICENSE.regexp
@@ -1056,7 +1056,7 @@ freely, subject to the following restrictions:
     3. This notice may not be removed or altered from any source
     distribution.
 
-This product bundles wingetopt.c and wingetopt.h within librdkafka under the licenses below. 
+This product bundles wingetopt.c and wingetopt.h within librdkafka under the licenses below.
 
 For the files wingetopt.c wingetopt.h downloaded from https://github.com/alex85k/wingetopt
 
@@ -1109,7 +1109,7 @@ LICENSE.wingetopt
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
- 
+
 This product bundles LightPcapNg within PcapPlusPlus under the MIT license below.
 
 Copyright (c) 2016 Radu Velea, radu.velea@gmail.com
@@ -1193,7 +1193,7 @@ Redistribution and use in source and binary forms, with or without modification,
 
     Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
     Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
-    Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
+    Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
 
 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFI [...]
 
@@ -1238,7 +1238,7 @@ The OpenSSL toolkit stays under a dual license, i.e. both the conditions of
  * are met:
  *
  * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer. 
+ *    notice, this list of conditions and the following disclaimer.
  *
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in
@@ -1293,21 +1293,21 @@ The OpenSSL toolkit stays under a dual license, i.e. both the conditions of
  * This package is an SSL implementation written
  * by Eric Young (eay@cryptsoft.com).
  * The implementation was written so as to conform with Netscapes SSL.
- * 
+ *
  * This library is free for commercial and non-commercial use as long as
  * the following conditions are aheared to.  The following conditions
  * apply to all code found in this distribution, be it the RC4, RSA,
  * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
  * included with this distribution is covered by the same copyright terms
  * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- * 
+ *
  * Copyright remains Eric Young's, and as such any Copyright notices in
  * the code are not to be removed.
  * If this package is used in a product, Eric Young should be given attribution
  * as the author of the parts of the library used.
  * This can be in the form of a textual message at program startup or
  * in documentation (online or textual) provided with the package.
- * 
+ *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
@@ -1322,10 +1322,10 @@ The OpenSSL toolkit stays under a dual license, i.e. both the conditions of
  *     Eric Young (eay@cryptsoft.com)"
  *    The word 'cryptographic' can be left out if the rouines from the library
  *    being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from 
+ * 4. If you include any Windows specific code (or a derivative thereof) from
  *    the apps directory (application code) you must include an acknowledgement:
  *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- * 
+ *
  * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@@ -1337,7 +1337,7 @@ The OpenSSL toolkit stays under a dual license, i.e. both the conditions of
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
- * 
+ *
  * The licence and distribution terms for any publically available version or
  * derivative of this code cannot be changed.  i.e. this code cannot simply be
  * copied and put under another distribution licence
@@ -1462,8 +1462,8 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
 
 This product bundles RapidJSON:
-Tencent is pleased to support the open source community by making RapidJSON available. 
- 
+Tencent is pleased to support the open source community by making RapidJSON available.
+
 Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip.  All rights reserved.
 
 If you have downloaded a copy of the RapidJSON binary from Tencent, please note that the RapidJSON binary is licensed under the MIT License.
@@ -1474,24 +1474,24 @@ This product bundles RTIMULib2, which is offered under the license, below:
 
 This file is part of RTIMULib
 Copyright (c) 2014-2015, richards-tech, LLC
-Permission is hereby granted, free of charge, 
-to any person obtaining a copy of 
+Permission is hereby granted, free of charge,
+to any person obtaining a copy of
 this software and associated documentation files
 (the "Software"), to deal in the Software without
-restriction, including without limitation the rights 
-to use, copy, modify, merge, publish, distribute, 
-sublicense, and/or sell copies of the Software, and 
-to permit persons to whom the Software is furnished 
+restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute,
+sublicense, and/or sell copies of the Software, and
+to permit persons to whom the Software is furnished
 to do so, subject to the following conditions:
 
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 
-ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 
-TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 
-PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 
-THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 
-IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 DEALINGS IN THE SOFTWARE.
 
 
@@ -1500,13 +1500,13 @@ Other dependencies and licenses:
 Open Source Software Licensed Under the BSD License:
 --------------------------------------------------------------------
 
-The msinttypes r29 
-Copyright (c) 2006-2013 Alexander Chemeris 
+The msinttypes r29
+Copyright (c) 2006-2013 Alexander Chemeris
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
 
-* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 
+* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
 * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
 * Neither the name of  copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
 
@@ -1573,16 +1573,16 @@ This product bundles 'bsdiff' which is available under a "2-clause BSD" license.
 	 Copyright 2003-2005 Colin Percival
 	 Copyright 2012 Matthew Endsley
 	 All rights reserved
-	
+
 	 Redistribution and use in source and binary forms, with or without
-	 modification, are permitted providing that the following conditions 
+	 modification, are permitted providing that the following conditions
 	 are met:
 	 1. Redistributions of source code must retain the above copyright
 	    notice, this list of conditions and the following disclaimer.
 	 2. Redistributions in binary form must reproduce the above copyright
 	    notice, this list of conditions and the following disclaimer in the
 	    documentation and/or other materials provided with the distribution.
-	
+
 	 THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 	 IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 	 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@@ -2958,3 +2958,53 @@ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 --------------------------------------------------------------------------
 
 This product bundles the IANA timezone database which is in the public domain.
+
+--------------------------------------------------------------------------
+
+This project bundles 'azure-sdk-for-cpp', which is available under an MIT License:
+MIT License
+
+Copyright (c) Microsoft Corporation.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+--------------------------------------------------------------------------
+
+This product bundles 'JSON for Modern C++' which is available under an MIT license:
+MIT License
+
+Copyright (c) 2013-2021 Niels Lohmann
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/thirdparty/azure-sdk-cpp-for-cpp/azure-sdk-for-cpp-old-compiler.patch b/thirdparty/azure-sdk-cpp-for-cpp/azure-sdk-for-cpp-old-compiler.patch
new file mode 100644
index 0000000..c54af17
--- /dev/null
+++ b/thirdparty/azure-sdk-cpp-for-cpp/azure-sdk-for-cpp-old-compiler.patch
@@ -0,0 +1,42 @@
+diff -rupN orig/sdk/core/azure-core/inc/azure/core/context.hpp patched/sdk/core/azure-core/inc/azure/core/context.hpp
+--- orig/sdk/core/azure-core/inc/azure/core/context.hpp	2021-02-03 09:06:18.580502882 +0000
++++ patched/sdk/core/azure-core/inc/azure/core/context.hpp	2021-02-03 09:07:11.302899054 +0000
+@@ -255,7 +255,7 @@ namespace Azure { namespace Core {
+     struct ContextSharedState
+     {
+       std::shared_ptr<ContextSharedState> Parent;
+-      std::atomic_int64_t CancelAtMsecSinceEpoch;
++      std::atomic<int64_t> CancelAtMsecSinceEpoch;
+       std::string Key;
+       ContextValue Value;
+
+diff -rupN orig/sdk/core/azure-core/src/http/policy.cpp patched/sdk/core/azure-core/src/http/policy.cpp
+--- orig/sdk/core/azure-core/src/http/policy.cpp	2021-02-03 09:10:44.454678199 +0000
++++ patched/sdk/core/azure-core/src/http/policy.cpp	2021-02-03 09:11:15.535238932 +0000
+@@ -10,10 +10,10 @@ using namespace Azure::Core::Http;
+ #ifndef _MSC_VER
+ // Non-MSVC compilers do require allocation of statics, even if they are const constexpr.
+ // MSVC, on the other hand, has problem if you "redefine" static constexprs.
+-Azure::Core::Logging::LogClassification const Azure::Core::Http::LogClassification::Request;
+-Azure::Core::Logging::LogClassification const Azure::Core::Http::LogClassification::Response;
+-Azure::Core::Logging::LogClassification const Azure::Core::Http::LogClassification::Retry;
+-Azure::Core::Logging::LogClassification const
++constexpr Azure::Core::Logging::LogClassification const Azure::Core::Http::LogClassification::Request;
++constexpr Azure::Core::Logging::LogClassification const Azure::Core::Http::LogClassification::Response;
++constexpr Azure::Core::Logging::LogClassification const Azure::Core::Http::LogClassification::Retry;
++constexpr Azure::Core::Logging::LogClassification const
+     Azure::Core::Http::LogClassification::HttpTransportAdapter;
+ #endif
+
+diff -rupN orig/sdk/keyvault/azure-security-keyvault-keys/src/key_client.cpp patched/sdk/keyvault/azure-security-keyvault-keys/src/key_client.cpp
+--- orig/sdk/keyvault/azure-security-keyvault-keys/src/key_client.cpp	2021-02-10 10:35:03.305252930 +0100
++++ patched/sdk/keyvault/azure-security-keyvault-keys/src/key_client.cpp	2021-02-10 16:11:25.139169400 +0100
+@@ -16,7 +16,7 @@ using namespace Azure::Core::Http;
+
+ KeyClient::KeyClient(
+     std::string const& vaultUrl,
+-    std::shared_ptr<Core::TokenCredential const> credential,
++    std::shared_ptr<Azure::Core::TokenCredential const> credential,
+     KeyClientOptions options)
+ {
+   auto apiVersion = options.GetVersionString();
diff --git a/win_build_vs.bat b/win_build_vs.bat
index 61ab032..cfef2c0 100755
--- a/win_build_vs.bat
+++ b/win_build_vs.bat
@@ -29,6 +29,7 @@ set build_jni=OFF
 set build_SQL=OFF
 set build_AWS=OFF
 set build_SFTP=OFF
+set build_azure=OFF
 set test_custom_wel_provider=OFF
 set generator="Visual Studio 15 2017"
 set cpack=OFF
@@ -49,6 +50,7 @@ for %%x in (%*) do (
     if [%%~x] EQU [/A]           set build_AWS=ON
     if [%%~x] EQU [/SFTP]        set build_SFTP=ON
     if [%%~x] EQU [/M]           set installer_merge_modules=ON
+    if [%%~x] EQU [/Z]           set build_azure=ON
     if [%%~x] EQU [/2019]        set generator="Visual Studio 16 2019"
     if [%%~x] EQU [/64]          set build_platform=x64
     if [%%~x] EQU [/D]           set cmake_build_type=RelWithDebInfo
@@ -60,7 +62,7 @@ for %%x in (%*) do (
 mkdir %builddir%
 pushd %builddir%\
 
-cmake -G %generator% -A %build_platform% -DINSTALLER_MERGE_MODULES=%installer_merge_modules% -DTEST_CUSTOM_WEL_PROVIDER=%test_custom_wel_provider% -DENABLE_SQL=%build_SQL% -DCMAKE_BUILD_TYPE_INIT=%cmake_build_type% -DCMAKE_BUILD_TYPE=%cmake_build_type% -DWIN32=WIN32 -DENABLE_LIBRDKAFKA=%build_kafka% -DENABLE_JNI=%build_jni% -DOPENSSL_OFF=OFF -DENABLE_COAP=%build_coap% -DENABLE_AWS=%build_AWS% -DENABLE_SFTP=%build_SFTP%  -DUSE_SHARED_LIBS=OFF -DDISABLE_CONTROLLER=ON  -DBUILD_ROCKSDB=ON -D [...]
+cmake -G %generator% -A %build_platform% -DINSTALLER_MERGE_MODULES=%installer_merge_modules% -DTEST_CUSTOM_WEL_PROVIDER=%test_custom_wel_provider% -DENABLE_SQL=%build_SQL% -DCMAKE_BUILD_TYPE_INIT=%cmake_build_type% -DCMAKE_BUILD_TYPE=%cmake_build_type% -DWIN32=WIN32 -DENABLE_LIBRDKAFKA=%build_kafka% -DENABLE_JNI=%build_jni% -DOPENSSL_OFF=OFF -DENABLE_COAP=%build_coap% -DENABLE_AWS=%build_AWS% -DENABLE_AZURE=%build_azure% -DENABLE_SFTP=%build_SFTP%  -DUSE_SHARED_LIBS=OFF -DDISABLE_CONTROL [...]
 IF %ERRORLEVEL% NEQ 0 EXIT /b %ERRORLEVEL%
 if [%cpack%] EQU [ON] (
     cpack -C %cmake_build_type%