Posted to commits@trafodion.apache.org by db...@apache.org on 2015/10/14 03:14:48 UTC

[05/11] incubator-trafodion git commit: TRAFODION-1521 Build Trafodion without having HBase installed

TRAFODION-1521 Build Trafodion without having HBase installed

Add a new script, core/sqf/sql/scripts/get_libhdfs_files. The script
downloads Google Protocol Buffers 2.5.0 and a Hadoop source tar file,
builds protobuf-2.5.0 and the Hadoop native libraries, and then copies
the libraries (libhadoop.so and libhdfs.so) to
$MY_SQROOT/export/lib${SQ_MBTYPE} and the include file hdfs.h to
$MY_SQROOT/export/include. Since this step is very time-consuming, the
script takes a shortcut when Hadoop is already installed (e.g. with
install_local_hadoop): in that case it copies the existing native
libraries instead. To address the remaining need for jar files during
the build, the hbasetmlib2 build is converted to Maven. We also remove
the case in sqenvcom.sh where a configuration has none of
install_local_hadoop, Cloudera, Hortonworks or MapR installed, but does
have a TOOLSDIR. Such configurations should use the recently added case
for a native Apache Hadoop installation or the case where no HBase
installation is found; the latter prints a reminder that can be
ignored.
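
For reference, a minimal invocation of the new script from a source
tree might look like the following (the --tempDir, --verbose and
--force options are the ones defined in the script itself; the scratch
directory below is just an example):

    # assumes sqenv.sh has been sourced so MY_SQROOT and SQ_MBTYPE are set
    cd $MY_SQROOT
    # downloads/builds protobuf 2.5.0 and the Hadoop native libraries,
    # then copies hdfs.h, libhdfs.so and libhadoop.so into $MY_SQROOT/export
    sql/scripts/get_libhdfs_files --verbose --tempDir /tmp/libhdfs_build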

Note: Users will need to run "sqgen" to update their classpath. This is
because a new jar, trafodion-dtm-1.2.0.jar, now needs to be on the
classpath.


Project: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/commit/77eab6ba
Tree: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/tree/77eab6ba
Diff: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/diff/77eab6ba

Branch: refs/heads/master
Commit: 77eab6badc3cebaef392a03b25c3d9e1dc5106c2
Parents: df76bab
Author: Hans Zeller <ha...@esgyn.com>
Authored: Tue Oct 13 14:40:46 2015 +0000
Committer: Hans Zeller <ha...@esgyn.com>
Committed: Tue Oct 13 15:05:04 2015 +0000

----------------------------------------------------------------------
 core/Makefile                                   |    8 +-
 core/bldenvchk.sh                               |   18 +-
 core/conn/jdbc_type2/macros.gmk                 |    2 +-
 core/conn/odbc/src/odbc/macros.gmk              |    2 +-
 core/sqf/Makefile                               |   10 +-
 core/sqf/sqenvcom.sh                            |   61 +-
 core/sqf/sql/Makefile                           |    2 +-
 core/sqf/sql/scripts/get_libhdfs_files          |  186 +++
 .../tm/hbasetmlib2/HBaseAuditControlPoint.java  |  416 ------
 .../src/seatrans/tm/hbasetmlib2/HBaseTmZK.java  |  247 ----
 .../seatrans/tm/hbasetmlib2/HBaseTxClient.java  | 1375 ------------------
 .../seatrans/tm/hbasetmlib2/HashMapArray.java   |  229 ---
 core/sqf/src/seatrans/tm/hbasetmlib2/Makefile   |   30 +-
 .../seatrans/tm/hbasetmlib2/TmAuditTlog.java    | 1201 ---------------
 .../src/seatrans/tm/hbasetmlib2/TrafInfo.java   |  204 ---
 core/sqf/src/seatrans/tm/hbasetmlib2/pom.xml    |  110 ++
 .../trafodion/dtm/HBaseAuditControlPoint.java   |  416 ++++++
 .../main/java/org/trafodion/dtm/HBaseTmZK.java  |  247 ++++
 .../java/org/trafodion/dtm/HBaseTxClient.java   | 1375 ++++++++++++++++++
 .../java/org/trafodion/dtm/HashMapArray.java    |  229 +++
 .../java/org/trafodion/dtm/TmAuditTlog.java     | 1201 +++++++++++++++
 .../main/java/org/trafodion/dtm/TrafInfo.java   |  204 +++
 core/sql/nskgmake/Makerules.build               |    1 -
 core/sql/nskgmake/Makerules.linux               |   62 +-
 core/sql/nskgmake/Makerules.mk                  |    2 +-
 core/sql/nskgmake/tdm_sqlcli/Makefile           |    2 +-
 core/sql/optimizer/mdam.cpp                     |   12 +-
 core/sql/pom.xml                                |   43 +-
 28 files changed, 4093 insertions(+), 3802 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/77eab6ba/core/Makefile
----------------------------------------------------------------------
diff --git a/core/Makefile b/core/Makefile
index 28df509..8b8fde6 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -64,7 +64,7 @@ verhdr:
 dbsecurity: $(MPI_TARGET)
 	cd dbsecurity && $(MAKE) all 2>&1 | sed -e "s/$$/	##(Security)/";exit $${PIPESTATUS[0]}
 
-foundation: dbsecurity $(MPI_TARGET) $(SEAMONSTER_TARGET)
+foundation: sqroot dbsecurity $(MPI_TARGET) $(SEAMONSTER_TARGET)
 	cd sqf && $(MAKE) all
 
 jdbc_jar: verhdr
@@ -100,7 +100,7 @@ odb: ndcs
 cleantests:
 	cd ../dcs/src/test/pytests && $(RM) -r odbc* tox.ini config.ini .tox .testrep* env.sh test_p2.pyc 
 	cd ../dcs/src/test/jdbc_test && $(RM) -r jdbcprop pom.xml  target
-	
+
 clean: sqroot
 	cd $(MPI_TARGET) &&		$(MAKE) clean-local
 	cd $(SEAMONSTER_TARGET)/src &&	$(MAKE) clean
@@ -153,9 +153,11 @@ pkg-dcs-tests: all
 version:
 	@cd sqf; unset SQ_VERBOSE; source sqenv.sh ; echo "$${TRAFODION_VER}"
 
-# Check that Environment variables are set correctly
+# Check that Environment variables are set correctly and
+# create links and files needed as prerequisite for the build
 sqroot:
 	./bldenvchk.sh;
+	cd sqf && $(MAKE) setupdir
 
 # Check for absolute filenames used as dynamic linked libraries
 find-absolute-dlls:

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/77eab6ba/core/bldenvchk.sh
----------------------------------------------------------------------
diff --git a/core/bldenvchk.sh b/core/bldenvchk.sh
index fd3a81d..6c1bfd8 100755
--- a/core/bldenvchk.sh
+++ b/core/bldenvchk.sh
@@ -57,18 +57,12 @@ done
 
 # These directories should exist.
 VARDIRLIST="JAVA_HOME PERL5LIB MPI_TMPDIR"
-VARDIRLIST="$VARDIRLIST LLVM UDIS86 ICU MPICH_ROOT ZOOKEEPER_DIR THRIFT_LIB_DIR"
-VARDIRLIST="$VARDIRLIST THRIFT_INC_DIR PROTOBUFS"
-VARDIRLIST="$VARDIRLIST HADOOP_INC_DIR HADOOP_LIB_DIR"
-# QT_TOOLKIT is optional; if it is not set then the SQL Compiler Debugger should
-# not build.  Check the value of QT_TOOLKIT variable only if set, including when
-# set to blanks or empty string.
-if [ ! -z ${QT_TOOLKIT+x} ]; then
-   VARDIRLIST="$VARDIRLIST QT_TOOLKIT"
-else
-   if [[ "$SQ_VERBOSE" == "1" ]]; then
-      echo "QT_TOOLKIT is not set.  SQL Compiler Debugger will not be built."
-   fi
+VARDIRLIST="$VARDIRLIST LLVM UDIS86 ICU MPICH_ROOT ZOOKEEPER_DIR PROTOBUFS"
+# QT_TOOLKIT is optional; if it is not set correctly then the SQL Compiler Debugger should
+# not build.
+if [ ! -d "${QT_TOOLKIT}" ]; then
+   echo "*** Warning: QT_TOOLKIT does not point to an existing directory."
+   echo "*** Warning: SQL Compiler Debugger will not be built."
 fi
 
 for AVAR in $VARDIRLIST; do

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/77eab6ba/core/conn/jdbc_type2/macros.gmk
----------------------------------------------------------------------
diff --git a/core/conn/jdbc_type2/macros.gmk b/core/conn/jdbc_type2/macros.gmk
index 81fdb8e..6c8438e 100644
--- a/core/conn/jdbc_type2/macros.gmk
+++ b/core/conn/jdbc_type2/macros.gmk
@@ -31,7 +31,7 @@ CC              = $(CC64)
 CXXFLAGS        = $(DBG_FLGS) $(OPTIM_FLGS) $(INTEL_TOOL_FLGS)
 GCCMODE         = $(GCCMODE64)
 LOC_JVMLIBS     = $(JAVA_HOME)/jre/lib/amd64/server
-HADOOP_LIBS     = -L $(HADOOP_LIB_DIR) -L $(LOC_JVMLIBS) -lhdfs -ljvm
+HADOOP_LIBS     = -L $(LOC_JVMLIBS) -lhdfs -ljvm
 endif
 
 # flags

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/77eab6ba/core/conn/odbc/src/odbc/macros.gmk
----------------------------------------------------------------------
diff --git a/core/conn/odbc/src/odbc/macros.gmk b/core/conn/odbc/src/odbc/macros.gmk
index 6a4bbac..9d77d9c 100644
--- a/core/conn/odbc/src/odbc/macros.gmk
+++ b/core/conn/odbc/src/odbc/macros.gmk
@@ -31,7 +31,7 @@ CC              = $(CC64)
 CXXFLAGS        = $(DBG_FLGS) $(OPTIM_FLGS) $(INTEL_TOOL_FLGS)
 GCCMODE         = $(GCCMODE64)
 LOC_JVMLIBS     = $(JAVA_HOME)/jre/lib/amd64/server
-HADOOP_LIBS     = -L $(HADOOP_LIB_DIR) -L $(LOC_JVMLIBS) -lhdfs -ljvm
+HADOOP_LIBS     = -L $(LOC_JVMLIBS) -lhdfs -ljvm
 endif
 
 # flags

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/77eab6ba/core/sqf/Makefile
----------------------------------------------------------------------
diff --git a/core/sqf/Makefile b/core/sqf/Makefile
index 7efd2e9..35a4509 100644
--- a/core/sqf/Makefile
+++ b/core/sqf/Makefile
@@ -48,8 +48,8 @@ SQ_COMPONENTS := make_sqevlog seabed stfs $(MISC) tm $(SEAMONSTER_TARGET) make_s
 buildinfo:
 	@echo "Building $(TRAFODION_VER_PROD) Version $(TRAFODION_VER) $(SQ_BUILD_TYPE)"
 	@echo ""
- 
-all: genverhdr setupdir $(SQ_COMPONENTS) 
+
+all: genverhdr $(SQ_COMPONENTS) 
 
 win: make_sqevlog seabed
 	cd src/win; $(MAKE) 2>&1 | sed -e "s/$$/	##(SQF)/" ; exit $${PIPESTATUS[0]}
@@ -171,7 +171,7 @@ setupdir:
 	-cd src/win; $(MAKE) setup
 	-cd src/seabed; $(MAKE) setup
 	-cd src/tm; $(MAKE) setup
-	-cd sql; $(MAKE) WROOT=$(SQL_W) setup
+	cd sql; $(MAKE) WROOT=$(SQL_W) setup
 	-cd sqevlog; $(MAKE) setup
 	-@./makelink ../../mondump $(EXPORTBIN_DIR)
 	-@./makelink ../../sqid $(EXPORTBIN_DIR)
@@ -203,7 +203,9 @@ setupdir_clean:
 	rm -rf $(EXPORTLIB_DIR)/librwtlh.so
 	rm -rf $(EXPORTLIB_DIR)/libdummy.so $(EXPORTLIB_DIR)/libtdm_measenty.so \
 		$(EXPORTLIB_DIR)/libtdm_security.so $(EXPORTLIB_DIR)/libtdm_tfds.so \
-		$(EXPORTLIB_DIR)/libtdm_tmflib.so $(EXPORTLIB_DIR)/libtdm_tmfuser.so
+		$(EXPORTLIB_DIR)/libtdm_tmflib.so $(EXPORTLIB_DIR)/libtdm_tmfuser.so \
+	        $(EXPORTLIB_DIR)/libhdfs*.so $(EXPORTLIB_DIR)/libhadoop*.so \
+	        $(EXPORTINCEVL_DIR)/hdfs.h
 	@for f in $(EXPORTSQLLIBS); do \
 	  echo rm -f $(EXPORTLIB_DIR)/$$f ; \
 	       rm -f $(EXPORTLIB_DIR)/$$f ; done

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/77eab6ba/core/sqf/sqenvcom.sh
----------------------------------------------------------------------
diff --git a/core/sqf/sqenvcom.sh b/core/sqf/sqenvcom.sh
index b9ff2d9..b33f647 100644
--- a/core/sqf/sqenvcom.sh
+++ b/core/sqf/sqenvcom.sh
@@ -263,42 +263,6 @@ if [[ -e $MY_SQROOT/sql/scripts/sw_env.sh ]]; then
   export HBASE_CNF_DIR=$MY_SQROOT/sql/local_hadoop/hbase/conf
   export HIVE_CNF_DIR=$MY_SQROOT/sql/local_hadoop/hive/conf
 
-elif [[ -f $MY_SQROOT/Makefile && -d $TOOLSDIR ]]; then
-  # we are are in a source tree - use build-time dependencies in TOOLSDIR
-  # ----------------------------------------------------------------
-
-  # native library directories and include directories
-  # Trafodion needs native libs and include file for C++ code to build
-  export HADOOP_LIB_DIR=$TOOLSDIR/hadoop-2.4.0/lib/native
-  export HADOOP_INC_DIR=$TOOLSDIR/hadoop-2.4.0/include
-  export THRIFT_LIB_DIR=$TOOLSDIR/thrift-0.9.0/lib
-  export THRIFT_INC_DIR=$TOOLSDIR/thrift-0.9.0/include
-  export CURL_INC_DIR=/usr/include
-  export CURL_LIB_DIR=/usr/lib64
-
-  # directories with jar files and list of jar files
-  export HADOOP_JAR_DIRS="$TOOLSDIR/hadoop-2.4.0/share/hadoop/common
-                          $TOOLSDIR/hadoop-2.4.0/share/hadoop/common/lib
-                          $TOOLSDIR/hadoop-2.4.0/share/hadoop/mapreduce
-                          $TOOLSDIR/hadoop-2.4.0/share/hadoop/hdfs"
-  export HBASE_JAR_FILES=
-  HBASE_JAR_DIRS="$HBASE_HOME/lib"
-  for d in $TOOLSDIR/hbase-0.98.1-cdh5.1.0/lib; do
-    HBASE_JAR_FILES="$HBASE_JAR_FILES $d/*.jar"
-  done
-
-  export HIVE_JAR_DIRS="$TOOLSDIR/apache-hive-0.13.1-bin/lib"
-  export HIVE_JAR_FILES="$TOOLSDIR/hadoop-2.4.0/share/hadoop/mapreduce/hadoop-mapreduce-client-core-*.jar"
-
-  # suffixes to suppress in the classpath (set this to ---none--- to add all files)
-  export SUFFIXES_TO_SUPPRESS="-sources.jar -tests.jar"
-
-  # Configuration directories
-
-  export HADOOP_CNF_DIR=$MY_SQROOT/sql/local_hadoop/hadoop/etc/hadoop
-  export HBASE_CNF_DIR=$MY_SQROOT/sql/local_hadoop/hbase/conf
-  export HIVE_CNF_DIR=$MY_SQROOT/sql/local_hadoop/hive/conf
-
 elif [[ -d /opt/cloudera/parcels/CDH ]]; then
   # we are on a cluster with Cloudera parcels installed
   # -------------------------------------------
@@ -521,6 +485,9 @@ else
     Yet another option is to use the install_local_hadoop script on a
     single node for evaluation or development.
 
+    If you just checked out or copied a Trafodion source tree and want to build,
+    then you can ignore the above and continue with your build.
+
 EOF
   }
 
@@ -580,6 +547,13 @@ EOF
     echo "**** ERROR: Unable to determine location of HBase lib directory"
   fi
 
+  if [[ -d $TOOLSDIR/thrift-0.9.0 ]]; then
+    # this is mostly for a build environment, where we need
+    # thrift from TOOLSDIR
+    export THRIFT_LIB_DIR=$TOOLSDIR/thrift-0.9.0/lib
+    export THRIFT_INC_DIR=$TOOLSDIR/thrift-0.9.0/include
+  fi
+
   if [ -n "$HBASE_CNF_DIR" -a -n "$HADOOP_CNF_DIR" -a \
        -d $APACHE_HADOOP_HOME/lib -a -d $APACHE_HBASE_HOME/lib ]; then
     # We are on a system with Apache HBase, probably without a distro
@@ -601,11 +575,6 @@ EOF
       export HADOOP_INC_DIR=/usr/include
     elif [ -f $APACHE_HADOOP_HOME/include/hdfs.h ]; then
       export HADOOP_INC_DIR=$APACHE_HADOOP_HOME/include
-    else
-      # ok for running Trafodion, not ok for building it
-      if [ "$SQ_VERBOSE" == 1 ]; then
-        echo '*** WARNING: Could not find hdfs.h include file'
-      fi
     fi
 
     # directories with jar files and list of jar files
@@ -631,6 +600,7 @@ EOF
     export HIVE_JAR_DIRS="$APACHE_HIVE_HOME/lib"
 
     export HBASE_TRX_JAR=hbase-trx-hbase_98_4-${TRAFODION_VER}.jar
+
     # end of code for Apache Hadoop/HBase installation w/o distro
   else
     # print usage information, not enough information about Hadoop/HBase
@@ -661,8 +631,10 @@ fi
 
 # For now, set the QT_TOOLKIT envvar if the required version exists in the
 # download location
-if [[ -d $TOOLSDIR/Qt-4.8.5-64 ]]; 
+if [[ -z "$QT_TOOLKIT" && -d $TOOLSDIR/Qt-4.8.5-64 ]]; 
 then
+   # QT_TOOLKIT is optional, if the directory doesn't exist
+   # then we won't build the compiler GUI
    export QT_TOOLKIT="$TOOLSDIR/Qt-4.8.5-64"
 fi
 
@@ -822,7 +794,9 @@ export PROTOBUFS_INC=$PROTOBUFS/include
 
 ######################
 # Library Path may include local over-rides
-export LD_LIBRARY_PATH=$CC_LIB_RUNTIME:$MPI_ROOT/lib/$MPILIB:$MY_SQROOT/export/lib"$SQ_MBTYPE":$HADOOP_LIB_DIR:$LOC_JVMLIBS:$LOG4CXX_LIB_DIR:.
+# Put Hadoop native dir before Trafodion, so that an available libhdfs will
+# be picked up there, otherwise use the libhdfs distributed with Trafodion.
+export LD_LIBRARY_PATH=$CC_LIB_RUNTIME:$MPI_ROOT/lib/$MPILIB:$HADOOP_LIB_DIR:$MY_SQROOT/export/lib"$SQ_MBTYPE":$LOC_JVMLIBS:$LOG4CXX_LIB_DIR:.
 
 ######################
 # classpath calculation may include local over-rides
@@ -878,6 +852,7 @@ if [[ -n "$SQ_CLASSPATH"   ]]; then SQ_CLASSPATH="$SQ_CLASSPATH:";   fi
 SQ_CLASSPATH=${SQ_CLASSPATH}${HBASE_TRXDIR}:\
 ${HBASE_TRXDIR}/${HBASE_TRX_JAR}:\
 $MY_SQROOT/export/lib/trafodion-sql-${TRAFODION_VER}.jar:\
+$MY_SQROOT/export/lib/trafodion-dtm-${TRAFODION_VER}.jar:\
 $MY_SQROOT/export/lib/jdbcT4.jar:\
 $MY_SQROOT/export/lib/jdbcT2.jar
 

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/77eab6ba/core/sqf/sql/Makefile
----------------------------------------------------------------------
diff --git a/core/sqf/sql/Makefile b/core/sqf/sql/Makefile
index d7d4e4b..c96fd08 100644
--- a/core/sqf/sql/Makefile
+++ b/core/sqf/sql/Makefile
@@ -35,6 +35,6 @@ clean:
 
 setup:
 	-cd stublibs; $(MAKE) setup
-	-cd $(WROOT)/nskgmake; $(MAKE) linuxmklinks$(SQ_BUILD_TYPE)
+	cd $(WROOT)/nskgmake; $(MAKE) linuxmklinks$(SQ_BUILD_TYPE)
 
 #endif

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/77eab6ba/core/sqf/sql/scripts/get_libhdfs_files
----------------------------------------------------------------------
diff --git a/core/sqf/sql/scripts/get_libhdfs_files b/core/sqf/sql/scripts/get_libhdfs_files
new file mode 100755
index 0000000..3417c09
--- /dev/null
+++ b/core/sqf/sql/scripts/get_libhdfs_files
@@ -0,0 +1,186 @@
+#!/bin/sh
+
+# This script downloads and/or makes the required libhdfs files
+# to be able to build Trafodion, which acts as a libhdfs client.
+#
+# Basically, what we need are three files:
+#
+# hdfs.h       (copied to $TGT_INC_DIR)
+# libhdfs.so   (copied to $TGT_LIB_DIR)
+# libhadoop.so (copied to $TGT_LIB_DIR)
+
+# Working dir in the Trafodion source tree to extract and build libhdfs files
+# (can be specified as an environment variable)
+if [[ -z ${LIBHDFS_TEMP_DIR} ]]; then
+  LIBHDFS_TEMP_DIR=${MY_SQROOT}/sql/libhdfs_files
+fi
+LOGFILE=${LIBHDFS_TEMP_DIR}/build.log
+
+# Hadoop source tar file to build libhdfs from
+HADOOP_SRC_MIRROR_URL=https://archive.apache.org/dist/hadoop/common/hadoop-2.6.0
+HADOOP_ID=hadoop-2.6.0
+HADOOP_SRC_ID=${HADOOP_ID}-src
+HADOOP_SRC_TAR=${HADOOP_SRC_ID}.tar.gz
+
+# files to build required version of Google Protocol Buffers
+PROTOBUF_MIRROR_URL=https://github.com/google/protobuf/releases/download/v2.5.0
+PROTOBUF_ID=protobuf-2.5.0
+PROTOBUF_TAR=${PROTOBUF_ID}.tar.gz
+
+# result of protobuf build
+PROTOBUF_TGT_ID=protobuf-tgt
+
+# Directories to copy the built libhdfs library and corresponding include file
+TGT_INC_DIR=$MY_SQROOT/export/include
+TGT_LIB_DIR=$MY_SQROOT/export/lib${SQ_MBTYPE}
+
+FORCE_BUILD=false
+VERBOSE=false
+
+USAGE="Usage $0 [ -f | --force ]\
+                [ -v | --verbose ]\
+                [ -d <temp dir> | --tempDir <temp dir> ]"
+
+while [[ $# > 0 ]]
+do
+  arg="$1"
+
+  case $arg in
+    -f|--force)
+       FORCE_BUILD=true
+       ;;
+
+    -v|--verbose)
+       VERBOSE=true
+       ;;
+
+    -d|--tempDir)
+       shift
+       if [[ $# < 1 ]]; then
+         echo "Expecting argument after -d or --tempDir"
+         exit 1
+       fi
+       LIBHDFS_TEMP_DIR="$1"
+       ;;
+
+    *)
+       echo "Unknown command line option: $arg"
+       echo "Usage $0 [ -f | --force ]"
+       exit 1
+    ;;
+  esac
+
+  shift
+done
+
+
+if [[ $FORCE_BUILD == true || \
+      ! -e ${TGT_INC_DIR}/hdfs.h || \
+      ! -e ${TGT_LIB_DIR}/libhdfs.so ]]; then
+
+  if [ ! -d $LIBHDFS_TEMP_DIR ]; then
+    mkdir $LIBHDFS_TEMP_DIR
+  fi
+
+  cd $LIBHDFS_TEMP_DIR
+
+  if [[ ! -f ${PROTOBUF_TAR} ]]; then
+    echo "Downloading Google Protocol Buffers..." | tee -a ${LOGFILE}
+    wget ${PROTOBUF_MIRROR_URL}/${PROTOBUF_TAR} >${LOGFILE}
+  fi
+
+  if [[ $FORCE_BUILD == true ]]; then
+    rm -rf ${LIBHDFS_TEMP_DIR}/${PROTOBUF_ID}
+    rm -rf ${LIBHDFS_TEMP_DIR}/${PROTOBUF_TGT_ID}
+  fi
+
+  if [[ ! -d ${PROTOBUF_ID} ]]; then
+    echo "Unpacking Google Protocol Buffer tar file..." | tee -a ${LOGFILE}
+    rm -rf ${LIBHDFS_TEMP_DIR}/${PROTOBUF_TGT_ID}
+    tar -xzf ${PROTOBUF_TAR} >>${LOGFILE}
+  fi
+
+  if [[ ! -d $PROTOBUF_TGT_ID ]]; then
+    cd ${PROTOBUF_ID}
+    echo "Building Google Protocol Buffers, this could take a while..." | tee -a ${LOGFILE}
+    if [[ $VERBOSE == true ]]; then
+      ./configure --prefix=${LIBHDFS_TEMP_DIR}/${PROTOBUF_TGT_ID} 2>&1 | tee -a ${LOGFILE}
+    else
+      ./configure --prefix=${LIBHDFS_TEMP_DIR}/${PROTOBUF_TGT_ID} 2>&1 >>${LOGFILE}
+    fi
+    if [[ $? != 0 ]]; then
+      echo "Error during configure step, exiting" | tee -a ${LOGFILE}
+      exit 1
+    fi
+    make 2>&1 >>${LOGFILE}
+    if [[ $? != 0 ]]; then
+      echo "Error during make step, exiting" | tee -a ${LOGFILE}
+      exit 1
+    fi
+    # skip the tests
+    # make check 2>&1 >>${LOGFILE}
+    if [[ $? != 0 ]]; then
+      echo "Error during check step, exiting" | tee -a ${LOGFILE}
+      exit 1
+    fi
+    make install 2>&1 >>${LOGFILE}
+    if [[ $? != 0 ]]; then
+      echo "Error during install step, exiting" | tee -a ${LOGFILE}
+      # remove partial results, if any
+      rm -rf ${LIBHDFS_TEMP_DIR}/${PROTOBUF_TGT_ID}
+      exit 1
+    fi
+  fi
+
+  cd $LIBHDFS_TEMP_DIR
+  export HADOOP_PROTOC_PATH=${LIBHDFS_TEMP_DIR}/${PROTOBUF_TGT_ID}/bin/protoc
+
+  if [[ ! -f ${HADOOP_SRC_TAR} ]]; then
+    echo "Downloading Hadoop tar file ${HADOOP_SRC_TAR}..." | tee -a ${LOGFILE}
+    wget ${HADOOP_SRC_MIRROR_URL}/${HADOOP_SRC_TAR} 2>&1 >>${LOGFILE}
+  fi
+
+  if [[ $FORCE_BUILD == true ]]; then
+    rm -rf ${LIBHDFS_TEMP_DIR}/${HADOOP_SRC_ID}
+  fi
+
+  if [[ ! -d ${HADOOP_SRC_ID} ]]; then
+    echo "Unpacking Hadoop tar file..." | tee -a ${LOGFILE}
+    tar -xzf ${HADOOP_SRC_TAR}
+  fi
+
+  if [[ ! -d ${LIBHDFS_TEMP_DIR}/${HADOOP_SRC_ID}/hadoop-dist/target ]]; then
+    cd ${HADOOP_SRC_ID}
+    echo "Building native library, this will take several minutes..." | tee -a ${LOGFILE}
+    if [[ $VERBOSE == true ]]; then
+      mvn package -Pdist,native -Dmaven.javadoc.skip=true -DskipTests -Dtar 2>&1 | tee -a ${LOGFILE}
+    else
+      mvn package -Pdist,native -Dmaven.javadoc.skip=true -DskipTests -Dtar 2>&1 >>${LOGFILE}
+    fi
+    if [[ $? != 0 ]]; then
+      echo "Error during Maven build step for libhdfs, exiting" | tee -a ${LOGFILE}
+      exit 1
+    fi
+  fi
+
+  echo "Copying include file and built libraries to Trafodion export dir..." | tee -a ${LOGFILE}
+  if [[ $VERBOSE == true ]]; then
+    set -x
+  fi
+  cp -f ${LIBHDFS_TEMP_DIR}/${HADOOP_SRC_ID}/hadoop-dist/target/${HADOOP_ID}/include/hdfs.h ${TGT_INC_DIR}
+  cp -Pf ${LIBHDFS_TEMP_DIR}/${HADOOP_SRC_ID}/hadoop-dist/target/${HADOOP_ID}/lib/native/libhdfs*.so* ${TGT_LIB_DIR}
+  cp -Pf ${LIBHDFS_TEMP_DIR}/${HADOOP_SRC_ID}/hadoop-dist/target/${HADOOP_ID}/lib/native/libhadoop*.so* ${TGT_LIB_DIR}
+
+  ls -l ${TGT_INC_DIR}/hdfs.h       >> ${LOGFILE}
+  ls -l ${TGT_LIB_DIR}/libhdfs.so   >> ${LOGFILE}
+  ls -l ${TGT_LIB_DIR}/libhadoop.so >> ${LOGFILE}
+
+  # Final check whether all the needed files are there
+  if [[ ! -r ${TGT_INC_DIR}/hdfs.h || \
+        ! -r ${TGT_LIB_DIR}/libhdfs.so ]]; then
+    echo "Error, not all files were created" | tee -a ${LOGFILE}
+    ls -l ${TGT_INC_DIR}/hdfs.h
+    ls -l ${TGT_LIB_DIR}/libhdfs.so
+    exit 1
+  fi
+fi

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/77eab6ba/core/sqf/src/seatrans/tm/hbasetmlib2/HBaseAuditControlPoint.java
----------------------------------------------------------------------
diff --git a/core/sqf/src/seatrans/tm/hbasetmlib2/HBaseAuditControlPoint.java b/core/sqf/src/seatrans/tm/hbasetmlib2/HBaseAuditControlPoint.java
deleted file mode 100644
index 11120ef..0000000
--- a/core/sqf/src/seatrans/tm/hbasetmlib2/HBaseAuditControlPoint.java
+++ /dev/null
@@ -1,416 +0,0 @@
-// @@@ START COPYRIGHT @@@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// @@@ END COPYRIGHT @@@
-
-package org.trafodion.dtm;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataInputStream;
-import java.io.IOException;
-import java.io.ObjectOutputStream;
-
-import org.apache.log4j.PropertyConfigurator;
-import org.apache.log4j.Logger;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.transactional.TransactionManager;
-import org.apache.hadoop.hbase.client.transactional.TransactionState;
-import org.apache.hadoop.hbase.client.transactional.CommitUnsuccessfulException;
-import org.apache.hadoop.hbase.client.transactional.UnknownTransactionException;
-import org.apache.hadoop.hbase.client.transactional.HBaseBackedTransactionLogger;
-import org.apache.hadoop.hbase.client.transactional.TransactionRegionLocation;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.util.Bytes;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.StringTokenizer;
-
-import java.util.concurrent.ConcurrentHashMap;
-
-import java.lang.NullPointerException;
-
-public class HBaseAuditControlPoint {
-
-    static final Log LOG = LogFactory.getLog(HBaseAuditControlPoint.class);
-    private static long currControlPt;
-    private static HBaseAdmin admin;
-    private Configuration config;
-    private static String CONTROL_POINT_TABLE_NAME;
-    private static final byte[] CONTROL_POINT_FAMILY = Bytes.toBytes("cpf");
-    private static final byte[] ASN_HIGH_WATER_MARK = Bytes.toBytes("hwm");
-    private static HTable table;
-    private boolean useAutoFlush;
-    private boolean disableBlockCache;
-
-    public HBaseAuditControlPoint(Configuration config) throws IOException {
-      if (LOG.isTraceEnabled()) LOG.trace("Enter HBaseAuditControlPoint constructor()");
-      this.config = config;
-      CONTROL_POINT_TABLE_NAME = config.get("CONTROL_POINT_TABLE_NAME");
-      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(CONTROL_POINT_TABLE_NAME));
-      HColumnDescriptor hcol = new HColumnDescriptor(CONTROL_POINT_FAMILY);
-
-      disableBlockCache = false;
-      try {
-         String blockCacheString = System.getenv("TM_TLOG_DISABLE_BLOCK_CACHE");
-         if (blockCacheString != null){
-            disableBlockCache = (Integer.parseInt(blockCacheString) != 0);
-            if (LOG.isDebugEnabled()) LOG.debug("disableBlockCache != null");
-         }
-      }
-      catch (Exception e) {
-         if (LOG.isDebugEnabled()) LOG.debug("TM_TLOG_DISABLE_BLOCK_CACHE is not in ms.env");
-      }
-      LOG.info("disableBlockCache is " + disableBlockCache);
-      if (disableBlockCache) {
-         hcol.setBlockCacheEnabled(false);
-      }
-
-      desc.addFamily(hcol);
-      admin = new HBaseAdmin(config);
-
-      useAutoFlush = true;
-      try {
-         String autoFlush = System.getenv("TM_TLOG_AUTO_FLUSH");
-         if (autoFlush != null){
-            useAutoFlush = (Integer.parseInt(autoFlush) != 0);
-            if (LOG.isDebugEnabled()) LOG.debug("autoFlush != null");
-         }
-      }
-      catch (Exception e) {
-         if (LOG.isDebugEnabled()) LOG.debug("TM_TLOG_AUTO_FLUSH is not in ms.env");
-      }
-      LOG.info("useAutoFlush is " + useAutoFlush);
-
-      boolean lvControlPointExists = admin.tableExists(CONTROL_POINT_TABLE_NAME);
-      if (LOG.isDebugEnabled()) LOG.debug("HBaseAuditControlPoint lvControlPointExists " + lvControlPointExists);
-      currControlPt = -1;
-      if (lvControlPointExists == false) {
-         try {
-            if (LOG.isDebugEnabled()) LOG.debug("Creating the table " + CONTROL_POINT_TABLE_NAME);
-            admin.createTable(desc);
-            currControlPt = 1;
-         }
-         catch (TableExistsException e) {
-            LOG.error("Table " + CONTROL_POINT_TABLE_NAME + " already exists");
-         }
-      }
-      try {
-         if (LOG.isDebugEnabled()) LOG.debug("try new HTable");
-         table = new HTable(config, desc.getName());
-         table.setAutoFlushTo(this.useAutoFlush);
-      }
-      catch (IOException e) {
-         LOG.error("new HTable IOException");
-      }
-
-      if (currControlPt == -1){
-         try {
-            currControlPt = getCurrControlPt();
-         }
-         catch (Exception e2) {
-            if (LOG.isDebugEnabled()) LOG.debug("Exit getCurrControlPoint() exception " + e2);
-         }
-      }
-      if (LOG.isDebugEnabled()) LOG.debug("currControlPt is " + currControlPt);
-
-      if (LOG.isTraceEnabled()) LOG.trace("Exit constructor()");
-      return;
-    }
-
-   public long getCurrControlPt() throws Exception {
-      if (LOG.isTraceEnabled()) LOG.trace("getCurrControlPt:  start");
-      long highKey = -1;
-      if (LOG.isDebugEnabled()) LOG.debug("new Scan");
-      Scan s = new Scan();
-      s.setCaching(10);
-      s.setCacheBlocks(false);
-      if (LOG.isDebugEnabled()) LOG.debug("resultScanner");
-      ResultScanner ss = table.getScanner(s);
-      try {
-         long currKey;
-         String rowKey;
-         if (LOG.isDebugEnabled()) LOG.debug("entering for loop" );
-         for (Result r : ss) {
-            rowKey = new String(r.getRow());
-            if (LOG.isDebugEnabled()) LOG.debug("rowKey is " + rowKey );
-            currKey = Long.parseLong(rowKey);
-            if (LOG.isDebugEnabled()) LOG.debug("value is " + Long.parseLong(Bytes.toString(r.value())));
-            if (currKey > highKey) {
-               if (LOG.isDebugEnabled()) LOG.debug("Setting highKey to " + currKey);
-               highKey = currKey;
-            }
-         }
-      }
-      catch (Exception e) {
-        LOG.error("getCurrControlPt IOException" + e);
-        e.printStackTrace();
-      } finally {
-         ss.close();
-      }
-      if (LOG.isDebugEnabled()) LOG.debug("getCurrControlPt returning " + highKey);
-      return highKey;
-   }
-
-   public long putRecord(final long ControlPt, final long startingSequenceNumber) throws Exception {
-      if (LOG.isTraceEnabled()) LOG.trace("putRecord starting sequence number ("  + String.valueOf(startingSequenceNumber) + ")");
-      String controlPtString = new String(String.valueOf(ControlPt));
-      Put p = new Put(Bytes.toBytes(controlPtString));
-      p.add(CONTROL_POINT_FAMILY, ASN_HIGH_WATER_MARK, Bytes.toBytes(String.valueOf(startingSequenceNumber)));
-      try {
-         if (LOG.isTraceEnabled()) LOG.trace("try table.put with starting sequence number " + startingSequenceNumber);
-         table.put(p);
-         if (useAutoFlush == false) {
-            if (LOG.isTraceEnabled()) LOG.trace("flushing controlpoint record");
-            table.flushCommits();
-         }
-      }
-      catch (Exception e) {
-         LOG.error("HBaseAuditControlPoint:putRecord Exception" + e);
-         throw e;
-      }
-      if (LOG.isTraceEnabled()) LOG.trace("HBaseAuditControlPoint:putRecord returning " + ControlPt);
-      return ControlPt;
-   }
-
-   public ArrayList<String> getRecordList(String controlPt) throws IOException {
-      if (LOG.isTraceEnabled()) LOG.trace("getRecord");
-      ArrayList<String> transactionList = new ArrayList<String>();
-      Get g = new Get(Bytes.toBytes(controlPt));
-      Result r = table.get(g);
-      byte [] currValue = r.getValue(CONTROL_POINT_FAMILY, ASN_HIGH_WATER_MARK);
-      String recordString = new String(currValue);
-      if (LOG.isDebugEnabled()) LOG.debug("recordString is " + recordString);
-      StringTokenizer st = new StringTokenizer(recordString, ",");
-      while (st.hasMoreElements()) {
-        String token = st.nextElement().toString() ;
-        if (LOG.isDebugEnabled()) LOG.debug("token is " + token);
-        transactionList.add(token);
-      }
-
-      if (LOG.isTraceEnabled()) LOG.trace("getRecord - exit with list size (" + transactionList.size() + ")");
-      return transactionList;
-
-    }
-
-   public long getRecord(final String controlPt) throws IOException {
-      if (LOG.isTraceEnabled()) LOG.trace("getRecord " + controlPt);
-      long lvValue = -1;
-      Get g = new Get(Bytes.toBytes(controlPt));
-      String recordString;
-      try {
-         Result r = table.get(g);
-         byte [] currValue = r.getValue(CONTROL_POINT_FAMILY, ASN_HIGH_WATER_MARK);
-         try {
-            recordString = new String (Bytes.toString(currValue));
-            if (LOG.isDebugEnabled()) LOG.debug("recordString is " + recordString);
-            lvValue = Long.parseLong(recordString, 10);
-         }
-         catch (NullPointerException e){
-            if (LOG.isDebugEnabled()) LOG.debug("control point " + controlPt + " is not in the table");
-         }
-      }
-      catch (IOException e){
-          LOG.error("getRecord IOException");
-          throw e;
-      }
-      if (LOG.isTraceEnabled()) LOG.trace("getRecord - exit " + lvValue);
-      return lvValue;
-
-    }
-
-   public long getStartingAuditSeqNum() throws IOException {
-      if (LOG.isTraceEnabled()) LOG.trace("getStartingAuditSeqNum");
-      String controlPtString = new String(String.valueOf(currControlPt));
-      long lvAsn;
-      if (LOG.isDebugEnabled()) LOG.debug("getStartingAuditSeqNum new get for control point " + currControlPt);
-      Get g = new Get(Bytes.toBytes(controlPtString));
-      if (LOG.isDebugEnabled()) LOG.debug("getStartingAuditSeqNum setting result");
-      Result r = table.get(g);
-      if (LOG.isDebugEnabled()) LOG.debug("getStartingAuditSeqNum currValue CONTROL_POINT_FAMILY is "
-                 + CONTROL_POINT_FAMILY + " ASN_HIGH_WATER_MARK " + ASN_HIGH_WATER_MARK);
-      byte [] currValue = r.getValue(CONTROL_POINT_FAMILY, ASN_HIGH_WATER_MARK);
-      if (LOG.isDebugEnabled()) LOG.debug("Starting asn setting recordString ");
-      String recordString = "";
-      try {
-         recordString = new String(currValue);
-      }
-      catch (NullPointerException e) {
-         if (LOG.isDebugEnabled()) LOG.debug("getStartingAuditSeqNum recordString is null");
-         lvAsn = 1;
-         if (LOG.isDebugEnabled()) LOG.debug("Starting asn is 1");
-         return lvAsn;
-      }
-      if (LOG.isDebugEnabled()) LOG.debug("getStartingAuditSeqNum recordString is good");
-      if (LOG.isDebugEnabled()) LOG.debug("Starting asn for control point " + currControlPt + " is " + recordString);
-      lvAsn = Long.valueOf(recordString);
-      if (LOG.isTraceEnabled()) LOG.trace("getStartingAuditSeqNum - exit returning " + lvAsn);
-      return lvAsn;
-    }
-
-   public long getNextAuditSeqNum(int nid) throws IOException {
-      if (LOG.isTraceEnabled()) LOG.trace("getNextAuditSeqNum for node: " + nid);
-
-      // We need to open the appropriate control point table and read the value from it
-      HTableInterface remoteTable;
-      String lv_tName = new String("TRAFODION._DTM_.TLOG" + String.valueOf(nid) + "_CONTROL_POINT");
-      HConnection remoteConnection = HConnectionManager.createConnection(this.config);
-      remoteTable = remoteConnection.getTable(TableName.valueOf(lv_tName));
-
-      long highValue = -1;
-      try {
-         Scan s = new Scan();
-         s.setCaching(10);
-         s.setCacheBlocks(false);
-         if (LOG.isTraceEnabled()) LOG.trace("getNextAuditSeqNum resultScanner");
-         ResultScanner ss = remoteTable.getScanner(s);
-         try {
-            long currValue;
-            String rowKey;
-            if (LOG.isTraceEnabled()) LOG.trace("getNextAuditSeqNum entering for loop" );
-            for (Result r : ss) {
-               rowKey = new String(r.getRow());
-               if (LOG.isTraceEnabled()) LOG.trace("getNextAuditSeqNum rowKey is " + rowKey );
-               currValue =  Long.parseLong(Bytes.toString(r.value()));
-               if (LOG.isTraceEnabled()) LOG.trace("getNextAuditSeqNum value is " + currValue);
-               if (currValue > highValue) {
-                  if (LOG.isTraceEnabled()) LOG.trace("getNextAuditSeqNum Setting highValue to " + currValue);
-                  highValue = currValue;
-               }
-            }
-         }
-         catch (Exception e) {
-           LOG.error("getNextAuditSeqNum IOException" + e);
-           e.printStackTrace();
-         } finally {
-            ss.close();
-         }
-      }
-      catch (IOException e) {
-         LOG.error("getNextAuditSeqNum IOException setting up scan for " + lv_tName);
-         e.printStackTrace();
-      }
-      finally {
-         try {
-            remoteTable.close();
-            remoteConnection.close();
-         }
-         catch (IOException e) {
-            LOG.error("getNextAuditSeqNum IOException closing table or connection for " + lv_tName);
-            e.printStackTrace();
-         }
-      }
-      if (LOG.isTraceEnabled()) LOG.trace("getNextAuditSeqNum returning " + (highValue + 1));
-      return (highValue + 1);
-    }
-
-
-   public long doControlPoint(final long sequenceNumber) throws IOException {
-      if (LOG.isTraceEnabled()) LOG.trace("doControlPoint start");
-      try {
-         currControlPt++;
-         if (LOG.isTraceEnabled()) LOG.trace("doControlPoint interval (" + currControlPt + "), sequenceNumber (" + sequenceNumber+ ") try putRecord");
-         putRecord(currControlPt, sequenceNumber);
-      }
-      catch (Exception e) {
-         LOG.error("doControlPoint Exception" + e);
-      }
-
-      if (LOG.isTraceEnabled()) LOG.trace("doControlPoint - exit");
-      return currControlPt;
-   }
-
-   public boolean deleteRecord(final long controlPoint) throws IOException {
-      if (LOG.isTraceEnabled()) LOG.trace("deleteRecord start for control point " + controlPoint);
-      String controlPtString = new String(String.valueOf(controlPoint));
-
-      try {
-         List<Delete> list = new ArrayList<Delete>();
-         Delete del = new Delete(Bytes.toBytes(controlPtString));
-         if (LOG.isDebugEnabled()) LOG.debug("deleteRecord  (" + controlPtString + ") ");
-         table.delete(del);
-      }
-      catch (Exception e) {
-         LOG.error("deleteRecord IOException");
-      }
-
-      if (LOG.isTraceEnabled()) LOG.trace("deleteRecord - exit");
-      return true;
-   }
-
-   public boolean deleteAgedRecords(final long controlPoint) throws IOException {
-      if (LOG.isTraceEnabled()) LOG.trace("deleteAgedRecords start - control point " + controlPoint);
-      String controlPtString = new String(String.valueOf(controlPoint));
-
-      Scan s = new Scan();
-      s.setCaching(10);
-      s.setCacheBlocks(false);
-      ArrayList<Delete> deleteList = new ArrayList<Delete>();
-      ResultScanner ss = table.getScanner(s);
-      try {
-         String rowKey;
-         for (Result r : ss) {
-            rowKey = new String(r.getRow());
-            if (Long.parseLong(rowKey) < controlPoint) {
-               if (LOG.isDebugEnabled()) LOG.debug("Adding  (" + rowKey + ") to delete list");
-               Delete del = new Delete(rowKey.getBytes());
-               deleteList.add(del);
-            }
-         }
-         if (LOG.isDebugEnabled()) LOG.debug("attempting to delete list with " + deleteList.size() + " elements");
-         table.delete(deleteList);
-      }
-      catch (Exception e) {
-         LOG.error("deleteAgedRecords IOException");
-      }finally {
-         ss.close();
-      }
-
-      if (LOG.isTraceEnabled()) LOG.trace("deleteAgedRecords - exit");
-      return true;
-   }
-}
-

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/77eab6ba/core/sqf/src/seatrans/tm/hbasetmlib2/HBaseTmZK.java
----------------------------------------------------------------------
diff --git a/core/sqf/src/seatrans/tm/hbasetmlib2/HBaseTmZK.java b/core/sqf/src/seatrans/tm/hbasetmlib2/HBaseTmZK.java
deleted file mode 100644
index 27411ec..0000000
--- a/core/sqf/src/seatrans/tm/hbasetmlib2/HBaseTmZK.java
+++ /dev/null
@@ -1,247 +0,0 @@
-// @@@ START COPYRIGHT @@@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// @@@ END COPYRIGHT @@@
-
-package org.trafodion.dtm;
-
-import java.io.ByteArrayOutputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableMap;
-import java.util.Set;
-import java.util.StringTokenizer;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import org.apache.hadoop.conf.Configuration;
-
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.ServerName;
-
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.transactional.TransactionRegionLocation;
-
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.apache.zookeeper.KeeperException;
-
-/**
- * Zookeeper client-side communication class for the DTM
- */
-public class HBaseTmZK implements Abortable{
-	private final String baseNode = "/hbase/Trafodion/recovery/TM";
-	static final Log LOG = LogFactory.getLog(HBaseTmZK.class);
-	
-	ZooKeeperWatcher zooKeeper;
-	Set<String>     nodeList;	
-	String zkNode;
-	short dtmID;
-	
-	/**
-	 * @param conf
-	 * @throws Exception
-	 */
-	public HBaseTmZK(final Configuration conf) throws Exception {
-                if (LOG.isTraceEnabled()) LOG.trace("HBaseTmZK(conf) -- ENTRY");
-		this.dtmID = 0;
-		this.zkNode = baseNode + "0";
-		this.zooKeeper = new ZooKeeperWatcher(conf, "TM Recovery", this, true);
-	}
-	
-	/**
-	 * @param conf
-	 * @param dtmID
-	 * @throws Exception
-	 */
-	public HBaseTmZK(final Configuration conf, final short dtmID) throws Exception {
-        if (LOG.isTraceEnabled()) LOG.trace("HBaseTmZK(conf, dtmID) -- ENTRY");
-		this.dtmID = dtmID;
-		this.zkNode = baseNode + String.format("%d", dtmID);
-		this.zooKeeper = new ZooKeeperWatcher(conf, "TM Recovery", this, true);
-	}
-
-	/**
-	 * @param znode
-	 * @return
-	 * @throws KeeperException
-	 */
-	private byte [] checkData (String znode) throws KeeperException, InterruptedException {
-		return ZKUtil.getData(zooKeeper, znode);
-	}
-	
-	/**
-	 * @return
-	 */
-	public short getTMID() {
-		return dtmID;
-	}
-	
-	
-	/**
-	 * @return
-	 * @throws KeeperException
-	 */
-	private List<String> getChildren() throws KeeperException {
-		return ZKUtil.listChildrenNoWatch(zooKeeper, zkNode);
-	}
-
-	/**
-	 * @return
-	 * @throws KeeperException
-	 */
-	public Map<String,byte []> checkForRecovery() throws InterruptedException, KeeperException {
-                // if (LOG.isTraceEnabled()) LOG.trace("checkForRecovery -- ENTRY");
-		if(ZKUtil.nodeHasChildren(zooKeeper, zkNode)) {
-			List<String> nodeChildren = new ArrayList<String>();
-			Map<String, byte []> nodeDataMap = new HashMap<String, byte []>();
-			nodeChildren = getChildren();
-
-			for(String node : nodeChildren) {
-
-                        if (LOG.isTraceEnabled()) LOG.trace("checkForRecovery -- found node: '" + node + "'");
-                        byte [] nodeData = checkData(zkNode +"/" + node);
-                        if (LOG.isTraceEnabled()) LOG.trace("checkForRecovery -- found node: " + node + " node data " + nodeData.toString());
-				nodeDataMap.put(node, nodeData);
-			}
-                        if (LOG.isTraceEnabled()) LOG.trace("checkForRecovery -- EXIT returning " + nodeDataMap.size() + " regions");
-                                return nodeDataMap;
-                }
-                else {
-                        // if (LOG.isTraceEnabled()) LOG.trace(zkNode + " is currently not present.");
-                        // if (LOG.isTraceEnabled()) LOG.trace("checkForRecovery -- EXIT -- node not present");
-                        return null;
-                }
-	}
-
-	/**
-	 * @param toDelete
-	 * @throws KeeperException
-	 */
-	public void deleteRegionEntry(Map.Entry<String,byte[]> toDelete ) throws KeeperException {
-           LOG.info("deleteRegionEntry -- ENTRY -- key: " + toDelete.getKey() + " value: " + new String(toDelete.getValue()));
-           ZKUtil.deleteNodeFailSilent(zooKeeper, zkNode + "/" + toDelete.getKey());
-           LOG.info("deleteRegionEntry -- EXIT ");
-        }
-
-	/**
-	 * @param node
-	 * @param hostName
-	 * @param portNumber
-	 * @param encodedName
-	 * @param data
-	 * @throws IOException
-	 */
-        public void createRecoveryzNode(String hostName, int portNumber, String encodedName, byte [] data) throws IOException {
-           LOG.info("HBaseTmZK:createRecoveryzNode: hostName: " + hostName + " port: " + portNumber +
-                     " encodedName: " + encodedName + " data: " + new String(data));
-           // default zNodePath for recovery
-           String zNodeKey = hostName + "," + portNumber + "," + encodedName;
-
-           LOG.info("HBaseTmZK:createRecoveryzNode: ZKW Post region recovery znode" + this.dtmID + " zNode Path " + zkNode);
-           // create zookeeper recovery zNode, call ZK ...
-           try {
-              if (ZKUtil.checkExists(zooKeeper, zkNode) == -1) {
-                 // create parent nodename
-                 LOG.info("HBaseTmZK:createRecoveryzNode:: ZKW create parent zNodes " + zkNode);
-                 ZKUtil.createWithParents(zooKeeper, zkNode);
-              }
-              ZKUtil.createAndFailSilent(zooKeeper, zkNode + "/" + zNodeKey, data);
-           } catch (KeeperException e) {
-              throw new IOException("HBaseTmZK:createRecoveryzNode: ZKW Unable to create recovery zNode: " + zkNode + " , throwing IOException " + e);
-           }
-        }
-        /**
-         ** @param data
-         ** #throws IOException
-         **/
-        public void createGCzNode(byte [] data) throws IOException {
-            String zNodeGCPath = "/hbase/Trafodion/GC";
-            try {
-                if (ZKUtil.checkExists(zooKeeper, zNodeGCPath) == -1) {
-                   if (LOG.isTraceEnabled()) LOG.trace("Trafodion table data clean up no znode path created " + zNodeGCPath);
-                    ZKUtil.createWithParents(zooKeeper, zNodeGCPath);
-                }
-                String zNodeKey = dtmID+"";
-                ZKUtil.createSetData(zooKeeper, zNodeGCPath + "/" + zNodeKey, data);
-            } catch (KeeperException e) {
-                throw new IOException("HBaseTmZK:createGCzNode: ZKW Unable to create GC zNode: " + zNodeGCPath +"  , throwing IOException " + e);
-            }
-        }
-
-	/**
-	 * @param node
-	 * @param recovTable
-	 * @throws IOException
-	 */
-        public void postAllRegionEntries(HTable recovTable) throws IOException {
-           LOG.info("HBaseTmZK:postAllRegionEntries: recovTable: " + recovTable );
-           NavigableMap<HRegionInfo, ServerName> regionMap = recovTable.getRegionLocations();
-           Iterator<Map.Entry<HRegionInfo, ServerName>> it =  regionMap.entrySet().iterator();
-           while(it.hasNext()) { // iterate entries.
-              NavigableMap.Entry<HRegionInfo, ServerName> pairs = it.next();
-              HRegionInfo region = pairs.getKey();
-              LOG.info("postAllRegionEntries: region: " + region.getRegionNameAsString());
-              ServerName serverValue = regionMap.get(region);
-              String hostAndPort = new String(serverValue.getHostAndPort());
-              StringTokenizer tok = new StringTokenizer(hostAndPort, ":");
-              String hostName = new String(tok.nextElement().toString());
-              int portNumber = Integer.parseInt(tok.nextElement().toString());
-              byte [] lv_byte_region_info = region.toByteArray();
-              try{
-                 LOG.info("Calling createRecoveryzNode for encoded region: " + region.getEncodedName());
-                 createRecoveryzNode(hostName, portNumber, region.getEncodedName(), lv_byte_region_info);
-              }
-              catch (Exception e2){
-                 LOG.error("postAllRegionEntries exception in createRecoveryzNode " + region.getTable().getNameAsString() +
-                           " exception: " + e2);
-              }
-           }// while
-        }
-	
-	/* (non-Javadoc)
-	 * @see org.apache.hadoop.hbase.Abortable#abort(java.lang.String, java.lang.Throwable)
-	 */
-	@Override
-	public void abort(String arg0, Throwable arg1) {
-		// TODO Auto-generated method stub
-		
-	}
-
-	/* (non-Javadoc)
-	 * @see org.apache.hadoop.hbase.Abortable#isAborted()
-	 */
-	@Override
-	public boolean isAborted() {
-		// TODO Auto-generated method stub
-		return false;
-	}
-}