Posted to commits@accumulo.apache.org by ct...@apache.org on 2019/06/26 20:48:47 UTC

[accumulo-testing] branch master updated: Fix shellcheck issues in conf dir

This is an automated email from the ASF dual-hosted git repository.

ctubbsii pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/accumulo-testing.git


The following commit(s) were added to refs/heads/master by this push:
     new 475d713  Fix shellcheck issues in conf dir
475d713 is described below

commit 475d71308d8de5e6ecd30b6506e4230423fb8396
Author: Christopher Tubbs <ct...@apache.org>
AuthorDate: Wed Jun 26 16:48:32 2019 -0400

    Fix shellcheck issues in conf dir
---
 conf/cluster-control.sh.uno | 33 +++++++++++++++++----------------
 conf/env.sh.example         | 40 ++++++++++++++++++++--------------------
 2 files changed, 37 insertions(+), 36 deletions(-)

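For context, the hunks below follow a few recurring shellcheck recommendations: add a shebang to a sourced/configured script (SC2148), quote variable expansions (SC2086), replace legacy backticks with $(...) (SC2006), split "local x=$(cmd)" and "export x=$(cmd)" into declare-then-assign so the command's exit status is not masked (SC2155), and prefer the builtin "command -v" over "which" (SC2230). A minimal sketch of the declare-then-assign and assign-then-export pattern applied throughout (illustrative only, reusing the get_ah helper from the diff):

    #!/usr/bin/env bash
    # SC2155: "local x=$(cmd)" hides cmd's exit status; declare first, assign after.
    function get_config_file {
      local ah
      ah=$(get_ah)             # $? now reflects get_ah, not "local"
      cp "$ah/conf/$1" "$2"    # quoted expansions avoid word splitting (SC2086)
    }

    # Same idea for exported variables set from a command substitution:
    ACCUMULO_VERSION="$("$ACCUMULO_HOME"/bin/accumulo version)"
    export ACCUMULO_VERSION
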
diff --git a/conf/cluster-control.sh.uno b/conf/cluster-control.sh.uno
index a98d520..ee047cc 100644
--- a/conf/cluster-control.sh.uno
+++ b/conf/cluster-control.sh.uno
@@ -1,3 +1,4 @@
+#! /usr/bin/env bash
 # Licensed to the Apache Software Foundation (ASF) under one or more
 # contributor license agreements.  See the NOTICE file distributed with
 # this work for additional information regarding copyright ownership.
@@ -19,7 +20,7 @@ UNO_HOME=/home/ubuntu/git/uno
 UNO=$UNO_HOME/bin/uno
 
 function get_ah {
-  echo "$($UNO env | grep ACCUMULO_HOME | sed 's/export ACCUMULO_HOME=//' | sed 's/"//g')"
+  $UNO env | grep ACCUMULO_HOME | sed 's/export ACCUMULO_HOME=//' | sed 's/"//g'
 }
 
 # functions required for accumulo testing cluster control
@@ -34,34 +35,34 @@ function get_version {
     ACCUMULO)
       (
         # run following in sub shell so it does not pollute
-        if [ -f $UNO_HOME/conf/uno-local.conf ]; then
-          . $UNO_HOME/conf/uno-local.conf
+        if [[ -f "$UNO_HOME"/conf/uno-local.conf ]]; then
+          . "$UNO_HOME"/conf/uno-local.conf
         else
-          . $UNO_HOME/conf/uno.conf
+          . "$UNO_HOME"/conf/uno.conf
         fi
-        echo $ACCUMULO_VERSION
+        echo "$ACCUMULO_VERSION"
       )
       ;;
     HADOOP)
       (
         # run following in sub shell so it does not pollute
-        if [ -f $UNO_HOME/conf/uno-local.conf ]; then
-          . $UNO_HOME/conf/uno-local.conf
+        if [[ -f "$UNO_HOME"/conf/uno-local.conf ]]; then
+          . "$UNO_HOME"/conf/uno-local.conf
         else
-          . $UNO_HOME/conf/uno.conf
+          . "$UNO_HOME"/conf/uno.conf
         fi
-        echo $HADOOP_VERSION
+        echo "$HADOOP_VERSION"
       )
       ;;
     ZOOKEEPER)
       (
         # run following in sub shell so it does not pollute
-        if [ -f $UNO_HOME/conf/uno-local.conf ]; then
-          . $UNO_HOME/conf/uno-local.conf
+        if [[ -f "$UNO_HOME"/conf/uno-local.conf ]]; then
+          . "$UNO_HOME"/conf/uno-local.conf
         else
-          . $UNO_HOME/conf/uno.conf
+          . "$UNO_HOME"/conf/uno.conf
         fi
-        echo $ZOOKEEPER_VERSION
+        echo "$ZOOKEEPER_VERSION"
       )
       ;;
     *)
@@ -78,17 +79,17 @@ function setup_accumulo {
 }
 
 function get_config_file {
-  local ah=$(get_ah)
+  local ah; ah=$(get_ah)
   cp "$ah/conf/$1" "$2"
 }
 
 function put_config_file {
-  local ah=$(get_ah)
+  local ah; ah=$(get_ah)
   cp "$1" "$ah/conf"
 }
 
 function put_server_code {
-  local ah=$(get_ah)
+  local ah; ah=$(get_ah)
   cp "$1" "$ah/lib/ext"
 }
 
diff --git a/conf/env.sh.example b/conf/env.sh.example
index dde67f8..780b09f 100644
--- a/conf/env.sh.example
+++ b/conf/env.sh.example
@@ -53,9 +53,9 @@ fi
 # Shaded test jar
 # ===============
 # Versions set below will be what is included in the shaded jar
-export ACCUMULO_VERSION="`$ACCUMULO_HOME/bin/accumulo version`"
-export HADOOP_VERSION="`hadoop version | head -n1 | awk '{print $2}'`"
-export ZOOKEEPER_VERSION=3.4.9
+ACCUMULO_VERSION="$("$ACCUMULO_HOME"/bin/accumulo version)"; export ACCUMULO_VERSION
+HADOOP_VERSION="$(hadoop version | head -n1 | awk '{print $2}')"; export HADOOP_VERSION
+export ZOOKEEPER_VERSION=3.4.14
 # Path to shaded test jar
 at_home=$( cd "$( dirname "$conf_dir" )" && pwd )
 export TEST_JAR_PATH="${at_home}/target/accumulo-testing-shaded.jar"
@@ -76,27 +76,27 @@ fi
 # Agitator
 # ========
 # Accumulo user
-AGTR_ACCUMULO_USER=$(whoami)
+AGTR_ACCUMULO_USER=$(whoami); export AGTR_ACCUMULO_USER
 # Time (in minutes) between killing Accumulo masters
-AGTR_MASTER_KILL_SLEEP_TIME=60
-AGTR_MASTER_RESTART_SLEEP_TIME=2
+export AGTR_MASTER_KILL_SLEEP_TIME=60
+export AGTR_MASTER_RESTART_SLEEP_TIME=2
 # Time (in minutes) between killing Accumulo tservers
-AGTR_TSERVER_KILL_SLEEP_TIME=20
-AGTR_TSERVER_RESTART_SLEEP_TIME=10
+export AGTR_TSERVER_KILL_SLEEP_TIME=20
+export AGTR_TSERVER_RESTART_SLEEP_TIME=10
 # Min and max number of Accumulo tservers that the agitator will kill at once
-AGTR_TSERVER_MIN_KILL=1
-AGTR_TSERVER_MAX_KILL=1
+export AGTR_TSERVER_MIN_KILL=1
+export AGTR_TSERVER_MAX_KILL=1
 # Amount of time (in minutes) the agitator should sleep before killing datanodes
-AGTR_DATANODE_KILL_SLEEP_TIME=20
+export AGTR_DATANODE_KILL_SLEEP_TIME=20
 # Amount of time (in minutes) the agitator should wait before restarting datanodes
-AGTR_DATANODE_RESTART_SLEEP_TIME=10
+export AGTR_DATANODE_RESTART_SLEEP_TIME=10
 # Min and max number of datanodes the agitator will kill at once
-AGTR_DATANODE_MIN_KILL=1
-AGTR_DATANODE_MAX_KILL=1
+export AGTR_DATANODE_MIN_KILL=1
+export AGTR_DATANODE_MAX_KILL=1
 # HDFS agitation
-AGTR_HDFS_USER=$(whoami)
-AGTR_HDFS=false
-AGTR_HDFS_SLEEP_TIME=10
-AGTR_HDFS_SUPERUSER=hdfs
-AGTR_HDFS_COMMAND="${HADOOP_PREFIX:-/usr/lib/hadoop}/share/hadoop/hdfs/bin/hdfs"
-AGTR_HDFS_SUDO=$(which sudo)
+AGTR_HDFS_USER=$(whoami); export AGTR_HDFS_USER
+export AGTR_HDFS=false
+export AGTR_HDFS_SLEEP_TIME=10
+export AGTR_HDFS_SUPERUSER=hdfs
+export AGTR_HDFS_COMMAND="${HADOOP_PREFIX:-/usr/lib/hadoop}/share/hadoop/hdfs/bin/hdfs"
+AGTR_HDFS_SUDO=$(command -v sudo); export AGTR_HDFS_SUDO