Posted to commits@airflow.apache.org by po...@apache.org on 2021/06/22 19:24:48 UTC

[airflow] 10/47: Make scripts/ci/libraries Google Shell Guide Compliant (#15973)

This is an automated email from the ASF dual-hosted git repository.

potiuk pushed a commit to branch v2-1-test
in repository https://gitbox.apache.org/repos/asf/airflow.git

commit 612504dce712f17c691da920db2def39f05c2a60
Author: Katsunori Kanda <po...@gmail.com>
AuthorDate: Tue May 25 05:25:12 2021 +0900

    Make scripts/ci/libraries Google Shell Guide Compliant (#15973)
    
    * Make scripts/ci/libraries Google Shell Guide Compliant
    
    Part of #10576
    
    (cherry picked from commit fcde2123d34766f204c92f58d0f1f3d2d72cd942)
---
 scripts/ci/libraries/_build_images.sh            | 59 ++++++++--------
 scripts/ci/libraries/_kind.sh                    | 89 +++++++++++++-----------
 scripts/ci/libraries/_md5sum.sh                  | 72 +++++++++----------
 scripts/ci/libraries/_parameters.sh              | 27 +++----
 scripts/ci/libraries/_push_pull_remove_images.sh | 80 ++++++++++-----------
 scripts/ci/libraries/_pylint.sh                  | 10 +--
 scripts/ci/libraries/_sanity_checks.sh           | 14 ++--
 scripts/ci/libraries/_spinner.sh                 | 32 +++++----
 scripts/ci/libraries/_start_end.sh               |  8 ++-
 9 files changed, 206 insertions(+), 185 deletions(-)
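
For reference, the convention this commit applies comes from the Google Shell Style Guide: variables scoped to a function become lowercase locals, exported variables and constants stay UPPER_CASE (and are marked readonly where possible), and declaration is kept separate from command-substitution assignment so that the 'local' builtin does not mask the command's exit status. A minimal standalone sketch of the pattern (not part of this commit, hypothetical names):

    #!/usr/bin/env bash
    set -euo pipefail

    # Exported/constant values keep UPPER_CASE and are frozen with readonly.
    export KIND_CLUSTER_NAME="${KIND_CLUSTER_NAME:-example-cluster}"
    readonly KIND_CLUSTER_NAME

    function example::report_system() {
        # Function-scope variables are lowercase locals; declare first, then
        # assign, so a failing $(...) is not hidden by the 'local' builtin.
        local system
        system=$(uname -s)
        echo "Running on ${system} with cluster ${KIND_CLUSTER_NAME}"
    }

    example::report_system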

diff --git a/scripts/ci/libraries/_build_images.sh b/scripts/ci/libraries/_build_images.sh
index 6f7f8a1..775a6bf 100644
--- a/scripts/ci/libraries/_build_images.sh
+++ b/scripts/ci/libraries/_build_images.sh
@@ -558,10 +558,12 @@ function build_images::rebuild_ci_image_if_needed() {
             build_images::confirm_image_rebuild
         fi
         if [[ ${SKIP_REBUILD} != "true" ]]; then
-            SYSTEM=$(uname -s)
-            if [[ ${SYSTEM} != "Darwin" ]]; then
-                ROOT_FILES_COUNT=$(find "airflow" "tests" -user root | wc -l | xargs)
-                if [[ ${ROOT_FILES_COUNT} != "0" ]]; then
+            local system
+            system=$(uname -s)
+            if [[ ${system} != "Darwin" ]]; then
+                local root_files_count
+                root_files_count=$(find "airflow" "tests" -user root | wc -l | xargs)
+                if [[ ${root_files_count} != "0" ]]; then
                     ./scripts/ci/tools/ci_fix_ownership.sh
                 fi
             fi
@@ -649,14 +651,15 @@ function get_github_container_registry_image_prefix() {
 # it also passes the right Build args depending on the configuration of the build
 # selected by Breeze flags or environment variables.
 function build_images::build_ci_image() {
+    local spin_pid
     build_images::print_build_info
     if [[ -n ${DETECTED_TERMINAL=} ]]; then
         echo -n "Preparing ${AIRFLOW_CI_IMAGE}.
         " >"${DETECTED_TERMINAL}"
         spinner::spin "${OUTPUT_LOG}" &
-        SPIN_PID=$!
+        spin_pid=$!
         # shellcheck disable=SC2064,SC2016
-        traps::add_trap '$(kill '${SPIN_PID}' || true)' EXIT HUP INT TERM
+        traps::add_trap '$(kill '${spin_pid}' || true)' EXIT HUP INT TERM
     fi
     push_pull_remove_images::pull_ci_images_if_needed
     if [[ "${DOCKER_CACHE}" == "disabled" ]]; then
@@ -686,18 +689,18 @@ function build_images::build_ci_image() {
         )
     fi
 
-    if [[ -n ${SPIN_PID=} ]]; then
-        kill -HUP "${SPIN_PID}" || true
-        wait "${SPIN_PID}" || true
+    if [[ -n ${spin_pid=} ]]; then
+        kill -HUP "${spin_pid}" || true
+        wait "${spin_pid}" || true
         echo >"${DETECTED_TERMINAL}"
     fi
     if [[ -n ${DETECTED_TERMINAL=} ]]; then
         echo -n "Preparing ${AIRFLOW_CI_IMAGE}.
         " >"${DETECTED_TERMINAL}"
         spinner::spin "${OUTPUT_LOG}" &
-        SPIN_PID=$!
+        spin_pid=$!
         # shellcheck disable=SC2064,SC2016
-        traps::add_trap '$(kill '${SPIN_PID}' || true)' EXIT HUP INT TERM
+        traps::add_trap '$(kill '${spin_pid}' || true)' EXIT HUP INT TERM
     fi
     if [[ -n ${DETECTED_TERMINAL=} ]]; then
         echo -n "
@@ -760,9 +763,9 @@ Docker building ${AIRFLOW_CI_IMAGE}.
         echo "Tagging additionally image ${AIRFLOW_CI_IMAGE} with ${IMAGE_TAG}"
         docker_v tag "${AIRFLOW_CI_IMAGE}" "${IMAGE_TAG}"
     fi
-    if [[ -n ${SPIN_PID=} ]]; then
-        kill -HUP "${SPIN_PID}" || true
-        wait "${SPIN_PID}" || true
+    if [[ -n ${spin_pid=} ]]; then
+        kill -HUP "${spin_pid}" || true
+        wait "${spin_pid}" || true
         echo >"${DETECTED_TERMINAL}"
     fi
 }
@@ -959,23 +962,23 @@ function build_images::build_prod_images() {
 #  $3, $4, ... - target tags to tag the image with
 function build_images::wait_for_image_tag() {
 
-    IMAGE_NAME="${1}"
-    IMAGE_SUFFIX=${2}
+    local image_name="${1}"
+    local image_suffix="${2}"
     shift 2
 
-    IMAGE_TO_WAIT_FOR="${IMAGE_NAME}${IMAGE_SUFFIX}"
-    start_end::group_start "Wait for image tag ${IMAGE_TO_WAIT_FOR}"
+    local image_to_wait_for="${image_name}${image_suffix}"
+    start_end::group_start "Wait for image tag ${image_to_wait_for}"
     while true; do
         set +e
-        echo "${COLOR_BLUE}Docker pull ${IMAGE_TO_WAIT_FOR} ${COLOR_RESET}" >"${OUTPUT_LOG}"
-        docker_v pull "${IMAGE_TO_WAIT_FOR}" >>"${OUTPUT_LOG}" 2>&1
+        echo "${COLOR_BLUE}Docker pull ${image_to_wait_for} ${COLOR_RESET}" >"${OUTPUT_LOG}"
+        docker_v pull "${image_to_wait_for}" >>"${OUTPUT_LOG}" 2>&1
         set -e
         local image_hash
-        echo "${COLOR_BLUE} Docker images -q ${IMAGE_TO_WAIT_FOR}${COLOR_RESET}" >>"${OUTPUT_LOG}"
-        image_hash="$(docker images -q "${IMAGE_TO_WAIT_FOR}" 2>>"${OUTPUT_LOG}" || true)"
+        echo "${COLOR_BLUE} Docker images -q ${image_to_wait_for}${COLOR_RESET}" >>"${OUTPUT_LOG}"
+        image_hash="$(docker images -q "${image_to_wait_for}" 2>>"${OUTPUT_LOG}" || true)"
         if [[ -z "${image_hash}" ]]; then
             echo
-            echo "The image ${IMAGE_TO_WAIT_FOR} is not yet available. No local hash for the image. Waiting."
+            echo "The image ${image_to_wait_for} is not yet available. No local hash for the image. Waiting."
             echo
             echo "Last log:"
             cat "${OUTPUT_LOG}" || true
@@ -983,17 +986,17 @@ function build_images::wait_for_image_tag() {
             sleep 10
         else
             echo
-            echo "The image ${IMAGE_TO_WAIT_FOR} with '${IMAGE_NAME}' tag"
+            echo "The image ${image_to_wait_for} with '${image_name}' tag"
             echo
             echo
-            echo "Tagging ${IMAGE_TO_WAIT_FOR} as ${IMAGE_NAME}."
+            echo "Tagging ${image_to_wait_for} as ${image_name}."
             echo
-            docker_v tag "${IMAGE_TO_WAIT_FOR}" "${IMAGE_NAME}"
+            docker_v tag "${image_to_wait_for}" "${image_name}"
             for TARGET_TAG in "${@}"; do
                 echo
-                echo "Tagging ${IMAGE_TO_WAIT_FOR} as ${TARGET_TAG}."
+                echo "Tagging ${image_to_wait_for} as ${TARGET_TAG}."
                 echo
-                docker_v tag "${IMAGE_TO_WAIT_FOR}" "${TARGET_TAG}"
+                docker_v tag "${image_to_wait_for}" "${TARGET_TAG}"
             done
             break
         fi
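
The build_ci_image changes above keep the background-spinner handling, now with a lowercase local spin_pid. The underlying pattern is: start the spinner in the background, remember its PID, register a trap so it is killed even on an abnormal exit, and stop it explicitly once the long-running step finishes. A standalone sketch of that pattern (hypothetical helper names, not the Breeze implementation):

    #!/usr/bin/env bash
    set -euo pipefail

    function demo::spin() {
        # Tiny spinner; the real spinner::spin also watches the build log file.
        while true; do
            for c in '-' '\' '|' '/'; do
                printf '\r%s' "${c}"
                sleep 0.2
            done
        done
    }

    function demo::long_running_step() {
        local spin_pid
        demo::spin &
        spin_pid=$!
        # Make sure the spinner dies with the script, whatever the exit path.
        # shellcheck disable=SC2064
        trap "kill ${spin_pid} 2>/dev/null || true" EXIT HUP INT TERM

        sleep 3   # stand-in for the actual docker build
        kill -HUP "${spin_pid}" || true
        wait "${spin_pid}" || true
        printf '\rdone\n'
    }

    demo::long_running_step
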
diff --git a/scripts/ci/libraries/_kind.sh b/scripts/ci/libraries/_kind.sh
index cc42bf7..085dfac 100644
--- a/scripts/ci/libraries/_kind.sh
+++ b/scripts/ci/libraries/_kind.sh
@@ -19,67 +19,74 @@
 function kind::get_kind_cluster_name() {
     # Name of the KinD cluster to connect to
     export KIND_CLUSTER_NAME=${KIND_CLUSTER_NAME:="airflow-python-${PYTHON_MAJOR_MINOR_VERSION}-${KUBERNETES_VERSION}"}
+    readonly KIND_CLUSTER_NAME
     # Name of the KinD cluster to connect to when referred to via kubectl
     export KUBECTL_CLUSTER_NAME=kind-${KIND_CLUSTER_NAME}
+    readonly KUBECTL_CLUSTER_NAME
     export KUBECONFIG="${BUILD_CACHE_DIR}/${KIND_CLUSTER_NAME}/.kube/config"
+    readonly KUBECONFIG
     mkdir -pv "${BUILD_CACHE_DIR}/${KIND_CLUSTER_NAME}/.kube/"
     touch "${KUBECONFIG}"
 }
 
 function kind::dump_kind_logs() {
     verbosity::print_info "Dumping logs from KinD"
-    local DUMP_DIR_NAME DUMP_DIR
-    DUMP_DIR_NAME=kind_logs_$(date "+%Y-%m-%d")_${CI_BUILD_ID}_${CI_JOB_ID}
-    DUMP_DIR="/tmp/${DUMP_DIR_NAME}"
-    kind --name "${KIND_CLUSTER_NAME}" export logs "${DUMP_DIR}"
+    local dump_dir_name dump_dir
+    dump_dir_name=kind_logs_$(date "+%Y-%m-%d")_${CI_BUILD_ID}_${CI_JOB_ID}
+    dump_dir="/tmp/${dump_dir_name}"
+    kind --name "${KIND_CLUSTER_NAME}" export logs "${dump_dir}"
 }
 
 function kind::make_sure_kubernetes_tools_are_installed() {
-    SYSTEM=$(uname -s | tr '[:upper:]' '[:lower:]')
+    local system
+    system=$(uname -s | tr '[:upper:]' '[:lower:]')
 
-    KIND_URL="https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-${SYSTEM}-amd64"
+    local kind_url="https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-${system}-amd64"
     mkdir -pv "${BUILD_CACHE_DIR}/kubernetes-bin/${KUBERNETES_VERSION}"
     if [[ -f "${KIND_BINARY_PATH}" ]]; then
-        DOWNLOADED_KIND_VERSION=v"$(${KIND_BINARY_PATH} --version | awk '{ print $3 }')"
-        echo "Currently downloaded kind version = ${DOWNLOADED_KIND_VERSION}"
+        local downloaded_kind_version
+        downloaded_kind_version=v"$(${KIND_BINARY_PATH} --version | awk '{ print $3 }')"
+        echo "Currently downloaded kind version = ${downloaded_kind_version}"
     fi
-    if [[ ! -f "${KIND_BINARY_PATH}" || ${DOWNLOADED_KIND_VERSION} != "${KIND_VERSION}" ]]; then
+    if [[ ! -f "${KIND_BINARY_PATH}" || ${downloaded_kind_version} != "${KIND_VERSION}" ]]; then
         echo
         echo "Downloading Kind version ${KIND_VERSION}"
         repeats::run_with_retry 4 \
-            "curl --connect-timeout 60  --max-time 180 --fail --location '${KIND_URL}' --output '${KIND_BINARY_PATH}'"
+            "curl --connect-timeout 60  --max-time 180 --fail --location '${kind_url}' --output '${KIND_BINARY_PATH}'"
         chmod a+x "${KIND_BINARY_PATH}"
     else
         echo "Kind version ok"
         echo
     fi
 
-    KUBECTL_URL="https://storage.googleapis.com/kubernetes-release/release/${KUBECTL_VERSION}/bin/${SYSTEM}/amd64/kubectl"
+    local kubectl_url="https://storage.googleapis.com/kubernetes-release/release/${KUBECTL_VERSION}/bin/${system}/amd64/kubectl"
     if [[ -f "${KUBECTL_BINARY_PATH}" ]]; then
-        DOWNLOADED_KUBECTL_VERSION="$(${KUBECTL_BINARY_PATH} version --client=true --short | awk '{ print $3 }')"
-        echo "Currently downloaded kubectl version = ${DOWNLOADED_KUBECTL_VERSION}"
+        local downloaded_kubectl_version
+        downloaded_kubectl_version="$(${KUBECTL_BINARY_PATH} version --client=true --short | awk '{ print $3 }')"
+        echo "Currently downloaded kubectl version = ${downloaded_kubectl_version}"
     fi
-    if [[ ! -f "${KUBECTL_BINARY_PATH}" || ${DOWNLOADED_KUBECTL_VERSION} != "${KUBECTL_VERSION}" ]]; then
+    if [[ ! -f "${KUBECTL_BINARY_PATH}" || ${downloaded_kubectl_version} != "${KUBECTL_VERSION}" ]]; then
         echo
         echo "Downloading Kubectl version ${KUBECTL_VERSION}"
         repeats::run_with_retry 4 \
-            "curl --connect-timeout 60 --max-time 180 --fail --location '${KUBECTL_URL}' --output '${KUBECTL_BINARY_PATH}'"
+            "curl --connect-timeout 60 --max-time 180 --fail --location '${kubectl_url}' --output '${KUBECTL_BINARY_PATH}'"
         chmod a+x "${KUBECTL_BINARY_PATH}"
     else
         echo "Kubectl version ok"
         echo
     fi
 
-    HELM_URL="https://get.helm.sh/helm-${HELM_VERSION}-${SYSTEM}-amd64.tar.gz"
+    local helm_url="https://get.helm.sh/helm-${HELM_VERSION}-${system}-amd64.tar.gz"
     if [[ -f "${HELM_BINARY_PATH}" ]]; then
-        DOWNLOADED_HELM_VERSION="$(${HELM_BINARY_PATH} version --template '{{.Version}}')"
-        echo "Currently downloaded helm version = ${DOWNLOADED_HELM_VERSION}"
+        local downloaded_helm_version
+        downloaded_helm_version="$(${HELM_BINARY_PATH} version --template '{{.Version}}')"
+        echo "Currently downloaded helm version = ${downloaded_helm_version}"
     fi
-    if [[ ! -f "${HELM_BINARY_PATH}" || ${DOWNLOADED_HELM_VERSION} != "${HELM_VERSION}" ]]; then
+    if [[ ! -f "${HELM_BINARY_PATH}" || ${downloaded_helm_version} != "${HELM_VERSION}" ]]; then
         echo
         echo "Downloading Helm version ${HELM_VERSION}"
         repeats::run_with_retry 4 \
-            "curl --connect-timeout 60  --max-time 180 --location '${HELM_URL}' | tar -xvz -O '${SYSTEM}-amd64/helm' >'${HELM_BINARY_PATH}'"
+            "curl --connect-timeout 60  --max-time 180 --location '${helm_url}' | tar -xvz -O '${system}-amd64/helm' >'${HELM_BINARY_PATH}'"
         chmod a+x "${HELM_BINARY_PATH}"
     else
         echo "Helm version ok"
@@ -115,6 +122,7 @@ function kind::set_current_context() {
 
 function kind::perform_kind_cluster_operation() {
     ALLOWED_KIND_OPERATIONS="[ start restart stop deploy test shell recreate k9s]"
+    readonly ALLOWED_KIND_OPERATIONS
     set +u
     if [[ -z "${1=}" ]]; then
         echo
@@ -124,19 +132,16 @@ function kind::perform_kind_cluster_operation() {
     fi
 
     set -u
-    OPERATION="${1}"
-    ALL_CLUSTERS=$(kind get clusters || true)
+    local operation="${1}"
+    local all_clusters
+    all_clusters=$(kind get clusters || true)
 
     echo
     echo "Kubernetes mode: ${KUBERNETES_MODE}"
     echo
 
-    echo
-    echo "Executor: ${EXECUTOR}"
-    echo
-
-    if [[ ${OPERATION} == "status" ]]; then
-        if [[ ${ALL_CLUSTERS} == *"${KIND_CLUSTER_NAME}"* ]]; then
+    if [[ ${operation} == "status" ]]; then
+        if [[ ${all_clusters} == *"${KIND_CLUSTER_NAME}"* ]]; then
             echo
             echo "Cluster name: ${KIND_CLUSTER_NAME}"
             echo
@@ -150,26 +155,26 @@ function kind::perform_kind_cluster_operation() {
             exit
         fi
     fi
-    if [[ ${ALL_CLUSTERS} == *"${KIND_CLUSTER_NAME}"* ]]; then
-        if [[ ${OPERATION} == "start" ]]; then
+    if [[ ${all_clusters} == *"${KIND_CLUSTER_NAME}"* ]]; then
+        if [[ ${operation} == "start" ]]; then
             echo
             echo "Cluster ${KIND_CLUSTER_NAME} is already created"
             echo "Reusing previously created cluster"
             echo
-        elif [[ ${OPERATION} == "restart" ]]; then
+        elif [[ ${operation} == "restart" ]]; then
             echo
             echo "Recreating cluster"
             echo
             kind::delete_cluster
             kind::create_cluster
             kind::set_current_context
-        elif [[ ${OPERATION} == "stop" ]]; then
+        elif [[ ${operation} == "stop" ]]; then
             echo
             echo "Deleting cluster"
             echo
             kind::delete_cluster
             exit
-        elif [[ ${OPERATION} == "deploy" ]]; then
+        elif [[ ${operation} == "deploy" ]]; then
             echo
             echo "Deploying Airflow to KinD"
             echo
@@ -178,19 +183,19 @@ function kind::perform_kind_cluster_operation() {
             kind::deploy_airflow_with_helm
             kind::deploy_test_kubernetes_resources
             kind::wait_for_webserver_healthy
-        elif [[ ${OPERATION} == "test" ]]; then
+        elif [[ ${operation} == "test" ]]; then
             echo
             echo "Testing with KinD"
             echo
             kind::set_current_context
             "${AIRFLOW_SOURCES}/scripts/ci/kubernetes/ci_run_kubernetes_tests.sh"
-        elif [[ ${OPERATION} == "shell" ]]; then
+        elif [[ ${operation} == "shell" ]]; then
             echo
             echo "Entering an interactive shell for kubernetes testing"
             echo
             kind::set_current_context
             "${AIRFLOW_SOURCES}/scripts/ci/kubernetes/ci_run_kubernetes_tests.sh" "-i"
-        elif [[ ${OPERATION} == "k9s" ]]; then
+        elif [[ ${operation} == "k9s" ]]; then
             echo
             echo "Starting k9s CLI"
             echo
@@ -204,30 +209,30 @@ function kind::perform_kind_cluster_operation() {
                 -v "${KUBECONFIG}:/root/.kube/config" quay.io/derailed/k9s
         else
             echo
-            echo  "${COLOR_RED}ERROR: Wrong cluster operation: ${OPERATION}. Should be one of: ${ALLOWED_KIND_OPERATIONS}  ${COLOR_RESET}"
+            echo  "${COLOR_RED}ERROR: Wrong cluster operation: ${operation}. Should be one of: ${ALLOWED_KIND_OPERATIONS}  ${COLOR_RESET}"
             echo
             exit 1
         fi
     else
-        if [[ ${OPERATION} == "start" ]]; then
+        if [[ ${operation} == "start" ]]; then
             echo
             echo "Creating cluster"
             echo
             kind::create_cluster
-        elif [[ ${OPERATION} == "recreate" ]]; then
+        elif [[ ${operation} == "recreate" ]]; then
             echo
             echo "Cluster ${KIND_CLUSTER_NAME} does not exist. Creating rather than recreating"
             echo "Creating cluster"
             echo
             kind::create_cluster
-        elif [[ ${OPERATION} == "stop" || ${OPERATION} == "deploy" || ${OPERATION} == "test" || ${OPERATION} == "shell" ]]; then
+        elif [[ ${operation} == "stop" || ${operation} == "deploy" || ${operation} == "test" || ${operation} == "shell" ]]; then
             echo
-            echo  "${COLOR_RED}ERROR: Cluster ${KIND_CLUSTER_NAME} does not exist. It should exist for ${OPERATION} operation  ${COLOR_RESET}"
+            echo  "${COLOR_RED}ERROR: Cluster ${KIND_CLUSTER_NAME} does not exist. It should exist for ${operation} operation  ${COLOR_RESET}"
             echo
             exit 1
         else
             echo
-            echo  "${COLOR_RED}ERROR: Wrong cluster operation: ${OPERATION}. Should be one of ${ALLOWED_KIND_OPERATIONS}  ${COLOR_RESET}"
+            echo  "${COLOR_RED}ERROR: Wrong cluster operation: ${operation}. Should be one of ${ALLOWED_KIND_OPERATIONS}  ${COLOR_RESET}"
             echo
             exit 1
         fi
diff --git a/scripts/ci/libraries/_md5sum.sh b/scripts/ci/libraries/_md5sum.sh
index 4d4a1e7..de9d376 100644
--- a/scripts/ci/libraries/_md5sum.sh
+++ b/scripts/ci/libraries/_md5sum.sh
@@ -22,35 +22,35 @@
 # If you want to rebuild everything from the scratch
 #
 function md5sum::calculate_file_md5sum {
-    local FILE="${1}"
-    local MD5SUM
-    local MD5SUM_CACHE_DIR="${BUILD_CACHE_DIR}/${BRANCH_NAME}/${PYTHON_MAJOR_MINOR_VERSION}/${THE_IMAGE_TYPE}"
-    mkdir -pv "${MD5SUM_CACHE_DIR}"
-    MD5SUM=$(md5sum "${FILE}")
-    local MD5SUM_FILE
-    MD5SUM_FILE="${MD5SUM_CACHE_DIR}"/$(basename "$(dirname "${FILE}")")-$(basename "${FILE}").md5sum
-    local MD5SUM_FILE_NEW
-    MD5SUM_FILE_NEW=${CACHE_TMP_FILE_DIR}/$(basename "$(dirname "${FILE}")")-$(basename "${FILE}").md5sum.new
-    echo "${MD5SUM}" > "${MD5SUM_FILE_NEW}"
-    local RET_CODE=0
-    if [[ ! -f "${MD5SUM_FILE}" ]]; then
-        verbosity::print_info "Missing md5sum for ${FILE#${AIRFLOW_SOURCES}} (${MD5SUM_FILE#${AIRFLOW_SOURCES}})"
-        RET_CODE=1
+    local file="${1}"
+    local md5sum
+    local md5sum_cache_dir="${BUILD_CACHE_DIR}/${BRANCH_NAME}/${PYTHON_MAJOR_MINOR_VERSION}/${THE_IMAGE_TYPE}"
+    mkdir -pv "${md5sum_cache_dir}"
+    md5sum=$(md5sum "${file}")
+    local md5sum_file
+    md5sum_file="${md5sum_cache_dir}"/$(basename "$(dirname "${file}")")-$(basename "${file}").md5sum
+    local md5sum_file_new
+    md5sum_file_new=${CACHE_TMP_FILE_DIR}/$(basename "$(dirname "${file}")")-$(basename "${file}").md5sum.new
+    echo "${md5sum}" > "${md5sum_file_new}"
+    local ret_code=0
+    if [[ ! -f "${md5sum_file}" ]]; then
+        verbosity::print_info "Missing md5sum for ${file#${AIRFLOW_SOURCES}} (${md5sum_file#${AIRFLOW_SOURCES}})"
+        ret_code=1
     else
-        diff "${MD5SUM_FILE_NEW}" "${MD5SUM_FILE}" >/dev/null
-        RES=$?
-        if [[ "${RES}" != "0" ]]; then
-            verbosity::print_info "The md5sum changed for ${FILE}: was $(cat "${MD5SUM_FILE}") now it is $(cat "${MD5SUM_FILE_NEW}")"
+        diff "${md5sum_file_new}" "${md5sum_file}" >/dev/null
+        local res=$?
+        if [[ "${res}" != "0" ]]; then
+            verbosity::print_info "The md5sum changed for ${file}: was $(cat "${md5sum_file}") now it is $(cat "${md5sum_file_new}")"
             if [[ ${CI} == "true" ]]; then
-                echo "${COLOR_RED}The file has changed: ${FILE}${COLOR_RESET}"
+                echo "${COLOR_RED}The file has changed: ${file}${COLOR_RESET}"
                 echo "${COLOR_BLUE}==============================${COLOR_RESET}"
-                cat "${FILE}"
+                cat "${file}"
                 echo "${COLOR_BLUE}==============================${COLOR_RESET}"
             fi
-            RET_CODE=1
+            ret_code=1
         fi
     fi
-    return ${RET_CODE}
+    return ${ret_code}
 }
 
 #
@@ -58,16 +58,16 @@ function md5sum::calculate_file_md5sum {
 # BUILD_CACHE_DIR - thus updating stored MD5 sum for the file
 #
 function md5sum::move_file_md5sum {
-    local FILE="${1}"
-    local MD5SUM_FILE
-    local MD5SUM_CACHE_DIR="${BUILD_CACHE_DIR}/${BRANCH_NAME}/${PYTHON_MAJOR_MINOR_VERSION}/${THE_IMAGE_TYPE}"
-    mkdir -pv "${MD5SUM_CACHE_DIR}"
-    MD5SUM_FILE="${MD5SUM_CACHE_DIR}"/$(basename "$(dirname "${FILE}")")-$(basename "${FILE}").md5sum
-    local MD5SUM_FILE_NEW
-    MD5SUM_FILE_NEW=${CACHE_TMP_FILE_DIR}/$(basename "$(dirname "${FILE}")")-$(basename "${FILE}").md5sum.new
-    if [[ -f "${MD5SUM_FILE_NEW}" ]]; then
-        mv "${MD5SUM_FILE_NEW}" "${MD5SUM_FILE}"
-        verbosity::print_info "Updated md5sum file ${MD5SUM_FILE} for ${FILE}: $(cat "${MD5SUM_FILE}")"
+    local file="${1}"
+    local md5sum_file
+    local md5sum_cache_dir="${BUILD_CACHE_DIR}/${BRANCH_NAME}/${PYTHON_MAJOR_MINOR_VERSION}/${THE_IMAGE_TYPE}"
+    mkdir -pv "${md5sum_cache_dir}"
+    md5sum_file="${md5sum_cache_dir}"/$(basename "$(dirname "${file}")")-$(basename "${file}").md5sum
+    local md5sum_file_new
+    md5sum_file_new=${CACHE_TMP_FILE_DIR}/$(basename "$(dirname "${file}")")-$(basename "${file}").md5sum.new
+    if [[ -f "${md5sum_file_new}" ]]; then
+        mv "${md5sum_file_new}" "${md5sum_file}"
+        verbosity::print_info "Updated md5sum file ${md5sum_file} for ${file}: $(cat "${md5sum_file}")"
     fi
 }
 
@@ -80,9 +80,9 @@ function md5sum::update_all_md5() {
     verbosity::print_info
     verbosity::print_info "Updating md5sum files"
     verbosity::print_info
-    for FILE in "${FILES_FOR_REBUILD_CHECK[@]}"
+    for file in "${FILES_FOR_REBUILD_CHECK[@]}"
     do
-        md5sum::move_file_md5sum "${AIRFLOW_SOURCES}/${FILE}"
+        md5sum::move_file_md5sum "${AIRFLOW_SOURCES}/${file}"
     done
     mkdir -pv "${BUILD_CACHE_DIR}/${BRANCH_NAME}"
     touch "${BUILT_CI_IMAGE_FLAG_FILE}"
@@ -97,9 +97,9 @@ function md5sum::update_all_md5_with_group() {
 function md5sum::calculate_md5sum_for_all_files() {
     FILES_MODIFIED="false"
     set +e
-    for FILE in "${FILES_FOR_REBUILD_CHECK[@]}"
+    for file in "${FILES_FOR_REBUILD_CHECK[@]}"
     do
-        if ! md5sum::calculate_file_md5sum "${AIRFLOW_SOURCES}/${FILE}"; then
+        if ! md5sum::calculate_file_md5sum "${AIRFLOW_SOURCES}/${file}"; then
             FILES_MODIFIED="true"
         fi
     done
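
The md5sum helpers above implement Breeze's rebuild check: the checksum of each watched file is compared with the one stored during the previous build, a non-zero return flags a change, and the stored checksum is only refreshed afterwards via move_file_md5sum. A self-contained sketch of the same idea (hypothetical paths, not the Breeze code):

    #!/usr/bin/env bash
    set -euo pipefail

    readonly CACHE_DIR="/tmp/demo-md5-cache"
    mkdir -p "${CACHE_DIR}"

    function demo::file_unchanged() {
        # Returns 0 if the stored checksum matches, 1 if missing or different.
        local file="${1}"
        local cache_file
        cache_file="${CACHE_DIR}/$(basename "${file}").md5sum"
        local current
        current=$(md5sum "${file}")
        [[ -f "${cache_file}" && "$(cat "${cache_file}")" == "${current}" ]]
    }

    function demo::store_checksum() {
        local file="${1}"
        md5sum "${file}" > "${CACHE_DIR}/$(basename "${file}").md5sum"
    }

    demo_file=$(mktemp)
    echo "first version" > "${demo_file}"
    if ! demo::file_unchanged "${demo_file}"; then
        echo "${demo_file} changed - a rebuild would be triggered"
        demo::store_checksum "${demo_file}"
    fi
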
diff --git a/scripts/ci/libraries/_parameters.sh b/scripts/ci/libraries/_parameters.sh
index c1ee8ee..5648929 100644
--- a/scripts/ci/libraries/_parameters.sh
+++ b/scripts/ci/libraries/_parameters.sh
@@ -33,24 +33,27 @@ function parameters::save_to_file() {
 # $2 - descriptive name of the parameter
 # $3 - flag used to set the parameter
 function parameters::check_allowed_param() {
-    _VARIABLE_NAME="${1}"
-    _VARIABLE_DESCRIPTIVE_NAME="${2}"
-    _FLAG="${3}"
-    _ALLOWED_VALUES_ENV_NAME="_breeze_allowed_$(echo "${_VARIABLE_NAME}" | tr '[:upper:]' '[:lower:]')s"
-    _ALLOWED_VALUES=" ${!_ALLOWED_VALUES_ENV_NAME//$'\n'/ } "
-    _VALUE=${!_VARIABLE_NAME}
-    if [[ ${_ALLOWED_VALUES:=} != *" ${_VALUE} "* ]]; then
+    local _variable_name="${1}"
+    local _variable_descriptive_name="${2}"
+    local _flag="${3}"
+    local _allowed_values_env_name
+    local _allowed_values
+    local _value
+    _allowed_values_env_name="_breeze_allowed_$(echo "${_variable_name}" | tr '[:upper:]' '[:lower:]')s"
+    _allowed_values=" ${!_allowed_values_env_name//$'\n'/ } "
+    _value=${!_variable_name}
+    if [[ ${_allowed_values:=} != *" ${_value} "* ]]; then
         echo
-        echo  "${COLOR_RED}ERROR: Allowed ${_VARIABLE_DESCRIPTIVE_NAME}: [${_ALLOWED_VALUES}]. Passed: '${!_VARIABLE_NAME}'  ${COLOR_RESET}"
+        echo  "${COLOR_RED}ERROR: Allowed ${_variable_descriptive_name}: [${_allowed_values}]. Passed: '${!_variable_name}'  ${COLOR_RESET}"
         echo
-        echo "Switch to supported value with ${_FLAG} flag."
+        echo "Switch to supported value with ${_flag} flag."
         echo
-        if [[ -n ${!_VARIABLE_NAME} && -f "${BUILD_CACHE_DIR}/.${_VARIABLE_NAME}" && ${!_VARIABLE_NAME} == $(cat "${BUILD_CACHE_DIR}/.${_VARIABLE_NAME}") ]]; then
+        if [[ -n ${!_variable_name} && -f "${BUILD_CACHE_DIR}/.${_variable_name}" && ${!_variable_name} == $(cat "${BUILD_CACHE_DIR}/.${_variable_name}") ]]; then
             echo
-            echo  "${COLOR_YELLOW}WARNING: Removing ${BUILD_CACHE_DIR}/.${_VARIABLE_NAME}. Next time you run it, it should be OK.  ${COLOR_RESET}"
+            echo  "${COLOR_YELLOW}WARNING: Removing ${BUILD_CACHE_DIR}/.${_variable_name}. Next time you run it, it should be OK.  ${COLOR_RESET}"
             echo
             echo
-            rm -f "${BUILD_CACHE_DIR}/.${_VARIABLE_NAME}"
+            rm -f "${BUILD_CACHE_DIR}/.${_variable_name}"
         fi
         exit 1
     fi
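
check_allowed_param relies on two pieces of bash: the allowed values live in a variable whose name is derived from the parameter name, and ${!name} indirect expansion reads both that list and the parameter's current value. A trimmed-down sketch of the mechanism (hypothetical names, not the Breeze code):

    #!/usr/bin/env bash
    set -euo pipefail

    # Allowed values for a hypothetical BACKEND parameter, space-separated.
    _breeze_allowed_backends="sqlite mysql postgres"

    function demo::check_allowed_param() {
        local _variable_name="${1}"
        local _allowed_values_env_name
        _allowed_values_env_name="_breeze_allowed_$(echo "${_variable_name}" | tr '[:upper:]' '[:lower:]')s"
        # ${!name} expands the variable whose name is stored in 'name'.
        local _allowed_values=" ${!_allowed_values_env_name//$'\n'/ } "
        local _value="${!_variable_name}"
        if [[ ${_allowed_values} != *" ${_value} "* ]]; then
            echo "ERROR: allowed ${_variable_name} values: [${_allowed_values}], got '${_value}'" >&2
            return 1
        fi
    }

    BACKEND="postgres"
    demo::check_allowed_param BACKEND                               # passes
    BACKEND="oracle"
    demo::check_allowed_param BACKEND || echo "rejected as expected"
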
diff --git a/scripts/ci/libraries/_push_pull_remove_images.sh b/scripts/ci/libraries/_push_pull_remove_images.sh
index 6dea2f7..932797c 100644
--- a/scripts/ci/libraries/_push_pull_remove_images.sh
+++ b/scripts/ci/libraries/_push_pull_remove_images.sh
@@ -49,38 +49,38 @@ function push_pull_remove_images::push_image_with_retries() {
 # Parameters:
 #   $1 -> image to pull
 function push_pull_remove_images::pull_image_if_not_present_or_forced() {
-    local IMAGE_TO_PULL="${1}"
-    local IMAGE_HASH
-    IMAGE_HASH=$(docker images -q "${IMAGE_TO_PULL}" 2> /dev/null || true)
-    local PULL_IMAGE=${FORCE_PULL_IMAGES}
+    local image_to_pull="${1}"
+    local image_hash
+    image_hash=$(docker images -q "${image_to_pull}" 2> /dev/null || true)
+    local pull_image=${FORCE_PULL_IMAGES}
 
-    if [[ -z "${IMAGE_HASH=}" ]]; then
-        PULL_IMAGE="true"
+    if [[ -z "${image_hash=}" ]]; then
+        pull_image="true"
     fi
-    if [[ "${PULL_IMAGE}" == "true" ]]; then
+    if [[ "${pull_image}" == "true" ]]; then
         echo
-        echo "Pulling the image ${IMAGE_TO_PULL}"
+        echo "Pulling the image ${image_to_pull}"
         echo
-        docker_v pull "${IMAGE_TO_PULL}"
-        EXIT_VALUE="$?"
-        if [[ ${EXIT_VALUE} != "0" && ${FAIL_ON_GITHUB_DOCKER_PULL_ERROR} == "true" ]]; then
+        docker_v pull "${image_to_pull}"
+        local exit_value="$?"
+        if [[ ${exit_value} != "0" && ${FAIL_ON_GITHUB_DOCKER_PULL_ERROR} == "true" ]]; then
             echo
             echo """
 ${COLOR_RED}ERROR: Exiting on docker pull error
 
 If you have authorisation problems, you might want to run:
 
-docker login ${IMAGE_TO_PULL%%\/*}
+docker login ${image_to_pull%%\/*}
 
 You need to use generate token as the password, not your personal password.
 You can generate one at https://github.com/settings/tokens
 Make sure to choose 'read:packages' scope.
 ${COLOR_RESET}
 """
-            exit ${EXIT_VALUE}
+            exit ${exit_value}
         fi
         echo
-        return ${EXIT_VALUE}
+        return ${exit_value}
     fi
 }
 
@@ -91,15 +91,15 @@ ${COLOR_RESET}
 #   $1 -> DockerHub image to pull
 #   $2 -> GitHub image to try to pull first
 function push_pull_remove_images::pull_image_github_dockerhub() {
-    local DOCKERHUB_IMAGE="${1}"
-    local GITHUB_IMAGE="${2}"
+    local dockerhub_image="${1}"
+    local github_image="${2}"
 
     set +e
-    if push_pull_remove_images::pull_image_if_not_present_or_forced "${GITHUB_IMAGE}"; then
+    if push_pull_remove_images::pull_image_if_not_present_or_forced "${github_image}"; then
         # Tag the image to be the DockerHub one
-        docker_v tag "${GITHUB_IMAGE}" "${DOCKERHUB_IMAGE}"
+        docker_v tag "${github_image}" "${dockerhub_image}"
     else
-        push_pull_remove_images::pull_image_if_not_present_or_forced "${DOCKERHUB_IMAGE}"
+        push_pull_remove_images::pull_image_if_not_present_or_forced "${dockerhub_image}"
     fi
     set -e
 }
@@ -137,12 +137,12 @@ function push_pull_remove_images::pull_base_python_image() {
 " > "${DETECTED_TERMINAL}"
     fi
     if [[ ${USE_GITHUB_REGISTRY} == "true" ]]; then
-        PYTHON_TAG_SUFFIX=""
+        local python_tag_suffix=""
         if [[ ${GITHUB_REGISTRY_PULL_IMAGE_TAG} != "latest" ]]; then
-            PYTHON_TAG_SUFFIX="-${GITHUB_REGISTRY_PULL_IMAGE_TAG}"
+            python_tag_suffix="-${GITHUB_REGISTRY_PULL_IMAGE_TAG}"
         fi
         push_pull_remove_images::pull_image_github_dockerhub "${AIRFLOW_PYTHON_BASE_IMAGE}" \
-            "${GITHUB_REGISTRY_PYTHON_BASE_IMAGE}${PYTHON_TAG_SUFFIX}"
+            "${GITHUB_REGISTRY_PYTHON_BASE_IMAGE}${python_tag_suffix}"
     else
         docker_v pull "${AIRFLOW_PYTHON_BASE_IMAGE}"
     fi
@@ -210,14 +210,14 @@ function push_pull_remove_images::push_ci_images_to_dockerhub() {
 #     X.Y-slim-buster-"${GITHUB_RUN_ID}" - in case of pull-request triggered 'workflow_run' builds
 #     X.Y-slim-buster                    - in case of push builds
 function push_pull_remove_images::push_python_image_to_github() {
-    PYTHON_TAG_SUFFIX=""
+    local python_tag_suffix=""
     if [[ ${GITHUB_REGISTRY_PUSH_IMAGE_TAG} != "latest" ]]; then
-        PYTHON_TAG_SUFFIX="-${GITHUB_REGISTRY_PUSH_IMAGE_TAG}"
+        python_tag_suffix="-${GITHUB_REGISTRY_PUSH_IMAGE_TAG}"
     fi
     docker_v tag "${AIRFLOW_PYTHON_BASE_IMAGE}" \
-        "${GITHUB_REGISTRY_PYTHON_BASE_IMAGE}${PYTHON_TAG_SUFFIX}"
+        "${GITHUB_REGISTRY_PYTHON_BASE_IMAGE}${python_tag_suffix}"
     push_pull_remove_images::push_image_with_retries \
-        "${GITHUB_REGISTRY_PYTHON_BASE_IMAGE}${PYTHON_TAG_SUFFIX}"
+        "${GITHUB_REGISTRY_PYTHON_BASE_IMAGE}${python_tag_suffix}"
 }
 
 # Pushes Ci images and their tags to registry in GitHub
@@ -225,14 +225,14 @@ function push_pull_remove_images::push_ci_images_to_github() {
     if [[ "${PUSH_PYTHON_BASE_IMAGE=}" != "false" ]]; then
         push_pull_remove_images::push_python_image_to_github
     fi
-    AIRFLOW_CI_TAGGED_IMAGE="${GITHUB_REGISTRY_AIRFLOW_CI_IMAGE}:${GITHUB_REGISTRY_PUSH_IMAGE_TAG}"
-    docker_v tag "${AIRFLOW_CI_IMAGE}" "${AIRFLOW_CI_TAGGED_IMAGE}"
-    push_pull_remove_images::push_image_with_retries "${AIRFLOW_CI_TAGGED_IMAGE}"
+    local airflow_ci_tagged_image="${GITHUB_REGISTRY_AIRFLOW_CI_IMAGE}:${GITHUB_REGISTRY_PUSH_IMAGE_TAG}"
+    docker_v tag "${AIRFLOW_CI_IMAGE}" "${airflow_ci_tagged_image}"
+    push_pull_remove_images::push_image_with_retries "${airflow_ci_tagged_image}"
     if [[ -n ${GITHUB_SHA=} ]]; then
         # Also push image to GitHub registry with commit SHA
-        AIRFLOW_CI_SHA_IMAGE="${GITHUB_REGISTRY_AIRFLOW_CI_IMAGE}:${COMMIT_SHA}"
-        docker_v tag "${AIRFLOW_CI_IMAGE}" "${AIRFLOW_CI_SHA_IMAGE}"
-        push_pull_remove_images::push_image_with_retries "${AIRFLOW_CI_SHA_IMAGE}"
+        local airflow_ci_sha_image="${GITHUB_REGISTRY_AIRFLOW_CI_IMAGE}:${COMMIT_SHA}"
+        docker_v tag "${AIRFLOW_CI_IMAGE}" "${airflow_ci_sha_image}"
+        push_pull_remove_images::push_image_with_retries "${airflow_ci_sha_image}"
     fi
 }
 
@@ -265,19 +265,19 @@ function push_pull_remove_images::push_prod_images_to_dockerhub () {
 #     "${GITHUB_RUN_ID}" - in case of pull-request triggered 'workflow_run' builds
 #     "latest"           - in case of push builds
 function push_pull_remove_images::push_prod_images_to_github () {
-    AIRFLOW_PROD_TAGGED_IMAGE="${GITHUB_REGISTRY_AIRFLOW_PROD_IMAGE}:${GITHUB_REGISTRY_PUSH_IMAGE_TAG}"
-    docker_v tag "${AIRFLOW_PROD_IMAGE}" "${AIRFLOW_PROD_TAGGED_IMAGE}"
+    local airflow_prod_tagged_image="${GITHUB_REGISTRY_AIRFLOW_PROD_IMAGE}:${GITHUB_REGISTRY_PUSH_IMAGE_TAG}"
+    docker_v tag "${AIRFLOW_PROD_IMAGE}" "${airflow_prod_tagged_image}"
     push_pull_remove_images::push_image_with_retries "${GITHUB_REGISTRY_AIRFLOW_PROD_IMAGE}:${GITHUB_REGISTRY_PUSH_IMAGE_TAG}"
     if [[ -n ${COMMIT_SHA=} ]]; then
         # Also push image to GitHub registry with commit SHA
-        AIRFLOW_PROD_SHA_IMAGE="${GITHUB_REGISTRY_AIRFLOW_PROD_IMAGE}:${COMMIT_SHA}"
-        docker_v tag "${AIRFLOW_PROD_IMAGE}" "${AIRFLOW_PROD_SHA_IMAGE}"
-        push_pull_remove_images::push_image_with_retries "${AIRFLOW_PROD_SHA_IMAGE}"
+        local airflow_prod_sha_image="${GITHUB_REGISTRY_AIRFLOW_PROD_IMAGE}:${COMMIT_SHA}"
+        docker_v tag "${AIRFLOW_PROD_IMAGE}" "${airflow_prod_sha_image}"
+        push_pull_remove_images::push_image_with_retries "${airflow_prod_sha_image}"
     fi
     # Also push prod build image
-    AIRFLOW_PROD_BUILD_TAGGED_IMAGE="${GITHUB_REGISTRY_AIRFLOW_PROD_BUILD_IMAGE}:${GITHUB_REGISTRY_PUSH_IMAGE_TAG}"
-    docker_v tag "${AIRFLOW_PROD_BUILD_IMAGE}" "${AIRFLOW_PROD_BUILD_TAGGED_IMAGE}"
-    push_pull_remove_images::push_image_with_retries "${AIRFLOW_PROD_BUILD_TAGGED_IMAGE}"
+    local airflow_prod_build_tagged_image="${GITHUB_REGISTRY_AIRFLOW_PROD_BUILD_IMAGE}:${GITHUB_REGISTRY_PUSH_IMAGE_TAG}"
+    docker_v tag "${AIRFLOW_PROD_BUILD_IMAGE}" "${airflow_prod_build_tagged_image}"
+    push_pull_remove_images::push_image_with_retries "${airflow_prod_build_tagged_image}"
 }
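
pull_image_github_dockerhub above tries the GitHub registry image first and, if that pull succeeds, retags it under the DockerHub name; only on failure does it fall back to pulling the DockerHub image directly. Simplified to a plain if/else, the fallback looks roughly like this (image names are hypothetical, and the real code routes pulls through pull_image_if_not_present_or_forced):

    #!/usr/bin/env bash
    set -euo pipefail

    function demo::pull_with_fallback() {
        local dockerhub_image="${1}"
        local github_image="${2}"
        if docker pull "${github_image}"; then
            # Keep the rest of the scripts working with the DockerHub name.
            docker tag "${github_image}" "${dockerhub_image}"
        else
            docker pull "${dockerhub_image}"
        fi
    }

    demo::pull_with_fallback "example/app:latest" "ghcr.io/example/app:latest"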
 
 
diff --git a/scripts/ci/libraries/_pylint.sh b/scripts/ci/libraries/_pylint.sh
index 3e08278..6299e7a 100644
--- a/scripts/ci/libraries/_pylint.sh
+++ b/scripts/ci/libraries/_pylint.sh
@@ -20,16 +20,18 @@
 function pylint::filter_out_files_from_pylint_todo_list() {
   FILTERED_FILES=()
   set +e
-  for FILE in "$@"
+  local file
+  for file in "$@"
   do
-      if [[ ${FILE} == "airflow/migrations/versions/"* ]]; then
+      if [[ ${file} == "airflow/migrations/versions/"* ]]; then
           # Skip all generated migration scripts
           continue
       fi
-      if ! grep -x "./${FILE}" <"${AIRFLOW_SOURCES}/scripts/ci/pylint_todo.txt" >/dev/null; then
-          FILTERED_FILES+=("${FILE}")
+      if ! grep -x "./${file}" <"${AIRFLOW_SOURCES}/scripts/ci/pylint_todo.txt" >/dev/null; then
+          FILTERED_FILES+=("${file}")
       fi
   done
   set -e
   export FILTERED_FILES
+  readonly FILTERED_FILES
 }
diff --git a/scripts/ci/libraries/_sanity_checks.sh b/scripts/ci/libraries/_sanity_checks.sh
index 35fa4b3..f3cc5d1 100644
--- a/scripts/ci/libraries/_sanity_checks.sh
+++ b/scripts/ci/libraries/_sanity_checks.sh
@@ -52,20 +52,24 @@ function sanity_checks::sanitize_mounted_files() {
 # Checks if core utils required in the host system are installed and explain what needs to be done if not
 #
 function sanity_checks::check_if_coreutils_installed() {
+    local getopt_retval
+    local stat_present
+    local md5sum_present
+
     set +e
     getopt -T >/dev/null
-    GETOPT_RETVAL=$?
+    getopt_retval=$?
 
     if [[ $(uname -s) == 'Darwin' ]] ; then
         command -v gstat >/dev/null
-        STAT_PRESENT=$?
+        stat_present=$?
     else
         command -v stat >/dev/null
-        STAT_PRESENT=$?
+        stat_present=$?
     fi
 
     command -v md5sum >/dev/null
-    MD5SUM_PRESENT=$?
+    md5sum_present=$?
 
     set -e
 
@@ -74,7 +78,7 @@ function sanity_checks::check_if_coreutils_installed() {
     readonly CMDNAME
 
     ####################  Parsing options/arguments
-    if [[ ${GETOPT_RETVAL} != 4 || "${STAT_PRESENT}" != "0" || "${MD5SUM_PRESENT}" != "0" ]]; then
+    if [[ ${getopt_retval} != 4 || "${stat_present}" != "0" || "${md5sum_present}" != "0" ]]; then
         verbosity::print_info
         if [[ $(uname -s) == 'Darwin' ]] ; then
             echo """
diff --git a/scripts/ci/libraries/_spinner.sh b/scripts/ci/libraries/_spinner.sh
index 363b31e..000a0a0 100644
--- a/scripts/ci/libraries/_spinner.sh
+++ b/scripts/ci/libraries/_spinner.sh
@@ -20,34 +20,36 @@
 # Pull/Build is happening. It only spins if the output log changes, so if pull/build is stalled
 # The spinner will not move.
 function spinner::spin() {
-    local FILE_TO_MONITOR=${1}
-    local SPIN=("-" "\\" "|" "/")
+    local file_to_monitor=${1}
+    SPIN=("-" "\\" "|" "/")
+    readonly SPIN
     echo -n "
-Build log: ${FILE_TO_MONITOR}
+Build log: ${file_to_monitor}
 " > "${DETECTED_TERMINAL}"
 
-    LAST_STEP=""
+    local last_step=""
     while "true"
     do
       for i in "${SPIN[@]}"
       do
-            echo -ne "\r${LAST_STEP}$i" > "${DETECTED_TERMINAL}"
-            local LAST_FILE_SIZE
-            local FILE_SIZE
-            LAST_FILE_SIZE=$(set +e; wc -c "${FILE_TO_MONITOR}" 2>/dev/null | awk '{print $1}' || true)
-            FILE_SIZE=${LAST_FILE_SIZE}
-            while [[ "${LAST_FILE_SIZE}" == "${FILE_SIZE}" ]];
+            echo -ne "\r${last_step}$i" > "${DETECTED_TERMINAL}"
+            local last_file_size
+            local file_size
+            last_file_size=$(set +e; wc -c "${file_to_monitor}" 2>/dev/null | awk '{print $1}' || true)
+            file_size=${last_file_size}
+            while [[ "${last_file_size}" == "${file_size}" ]];
             do
-                FILE_SIZE=$(set +e; wc -c "${FILE_TO_MONITOR}" 2>/dev/null | awk '{print $1}' || true)
+                file_size=$(set +e; wc -c "${file_to_monitor}" 2>/dev/null | awk '{print $1}' || true)
                 sleep 0.2
             done
-            LAST_FILE_SIZE=FILE_SIZE
+            last_file_size=file_size
             sleep 0.2
-            if [[ ! -f "${FILE_TO_MONITOR}" ]]; then
+            if [[ ! -f "${file_to_monitor}" ]]; then
                 exit
             fi
-            LAST_LINE=$(set +e; grep "Step" <"${FILE_TO_MONITOR}" | tail -1 || true)
-            [[ ${LAST_LINE} =~ ^(Step [0-9/]*)\ : ]] && LAST_STEP="${BASH_REMATCH[1]} :"
+            local last_line
+            last_line=$(set +e; grep "Step" <"${file_to_monitor}" | tail -1 || true)
+            [[ ${last_line} =~ ^(Step [0-9/]*)\ : ]] && last_step="${BASH_REMATCH[1]} :"
       done
     done
 }
diff --git a/scripts/ci/libraries/_start_end.sh b/scripts/ci/libraries/_start_end.sh
index bb00175..35d9e1c 100644
--- a/scripts/ci/libraries/_start_end.sh
+++ b/scripts/ci/libraries/_start_end.sh
@@ -125,12 +125,14 @@ function start_end::script_end {
       rm -rf -- "${FILES_TO_CLEANUP_ON_EXIT[@]}"
     fi
 
-    END_SCRIPT_TIME=$(date +%s)
-    RUN_SCRIPT_TIME=$((END_SCRIPT_TIME-START_SCRIPT_TIME))
+    local end_script_time
+    end_script_time=$(date +%s)
+    local run_script_time
+    run_script_time=$((end_script_time-START_SCRIPT_TIME))
     if [[ ${BREEZE:=} != "true" && ${RUN_TESTS=} != "true" ]]; then
         verbosity::print_info
         verbosity::print_info "Finished the script ${COLOR_GREEN}$(basename "$0")${COLOR_RESET}"
-        verbosity::print_info "Elapsed time spent in the script: ${COLOR_BLUE}${RUN_SCRIPT_TIME} seconds${COLOR_RESET}"
+        verbosity::print_info "Elapsed time spent in the script: ${COLOR_BLUE}${run_script_time} seconds${COLOR_RESET}"
         if [[ ${exit_code} == "0" ]]; then
             verbosity::print_info "Exit code ${COLOR_GREEN}${exit_code}${COLOR_RESET}"
         else