Posted to commits@trafodion.apache.org by db...@apache.org on 2015/10/02 18:16:30 UTC

[1/9] incubator-trafodion git commit: Most of the Trafodion Java source files are built through Maven, using projects DCS, REST, HBase-trx and SQL. A few files remain in the core/sql/executor and core/sql/ustat directories that are built through javac co

Repository: incubator-trafodion
Updated Branches:
  refs/heads/master ed5888e56 -> ae022f188


http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/ustat/UstatUtil.java
----------------------------------------------------------------------
diff --git a/core/sql/ustat/UstatUtil.java b/core/sql/ustat/UstatUtil.java
deleted file mode 100644
index d773c4b..0000000
--- a/core/sql/ustat/UstatUtil.java
+++ /dev/null
@@ -1,442 +0,0 @@
-// @@@ START COPYRIGHT @@@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// @@@ END COPYRIGHT @@@
-
-package com.hp.mx_ustat;
-
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.io.OutputStream;
-import java.io.OutputStreamWriter;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.util.Properties;
-import java.util.Enumeration;
-
-
-public class UstatUtil extends Thread
-{
-
-   static BufferedWriter bw=null;
-   static boolean doneflag=false;
-   static StringBuffer outputStr=null;
-   static StringBuffer errStr=null;
-   static boolean nextCommand=false;
-   static boolean errorFlag=false;
-   static boolean statusFlag=false;
-   static Runtime rt=Runtime.getRuntime();
-   static boolean srunStatus=false;
-   final static String scriptIndexFile=".scriptIndex";  // script Index property file which contains the list of scripts that can be executed
-   static String lineSeperator=System.getProperty("line.separator");
-
-
-   public static void runStatsProfile(String arguments,String[] output) throws IOException
-   {
-      Process p;
-      String command=" ";
-
-      String os = System.getProperty("os.name").toLowerCase();
-
-      String cmd_path;
-
-      if ( os.indexOf("linux") >=0 ) {
-         cmd_path = "sh " + System.getenv("MY_SQROOT") + "/export/lib";
-      } else { // assume NSK
-         cmd_path = "/usr/tandem";
-      }
-
-      String cmd= cmd_path + "/mx_ustat/stats_profile ";
-      cmd=cmd+arguments;
-
-      p = rt.exec(cmd);
-      try
-      {
-         execute(command, p, output, false);
-      } catch (IOException ioe)
-      {
-         statusFlag = false;
-      }
-   }
-
-   public static void USASstop() throws IOException
-   {
-      Process p;
-      String command=" ";
-
-      String os = System.getProperty("os.name").toLowerCase();
-      String cmd_path;
-
-      if ( os.indexOf("linux") >=0 ) {
-         cmd_path = "sh " + System.getenv("MY_SQROOT") + "/export/lib";
-      } else { // assume NSK
-         cmd_path = "/usr/tandem";
-      }
-
-      String cmd= cmd_path + "/mx_ustat/StopAutoStats.sh";
-
-      String[] output=cmd.split("\\s+"); // Unused.
-
-      p = rt.exec(cmd);
-      try
-      {
-         execute(command, p, output, false);
-      } catch (IOException ioe)
-      {
-         statusFlag = false;
-      }
-   }
-
-/*
-   public static void  handleSrun(String command, String[] output) throws IOException
-   {
-
-      Properties props=null;
-      props=new Properties();
-      String[] envList={};
-
-      try
-      {
-         props.load(new FileInputStream("/usr/tandem/nvscript/admin/.scriptIndex"));
-      } catch (FileNotFoundException fnfe)
-      {
-         output[0]="Could not find the index file.";
-         return;
-      }
-
-      String[] commandArr=command.split("\\s+");
-      if (props.getProperty(commandArr[0].trim()) == null)
-      {
-         if (commandArr[0] != null && !commandArr[0].trim().equals(""))
-         {
-            output[0]= "Invalid script.";
-         }
-         if (props.size() >0)
-         {
-            output[0]="The valid scripts are:" + lineSeperator + lineSeperator;
-            Enumeration scriptNames=props.propertyNames();
-            while (scriptNames.hasMoreElements())
-            {
-               String scriptName=(String)scriptNames.nextElement();
-               output[0]+=format(scriptName,(String)props.get(scriptName))+ lineSeperator;
-            }
-         }
-         outputStr=null;
-         errStr=null;
-         return;
-      }
-
-      srunStatus = true;
-      Process p=rt.exec("/usr/bin/sh eval "+command,envList,new File("/usr/tandem/nvscript/script"));
-      try
-      {
-         execute(command, p, output, srunStatus);
-      } catch (IOException ioe)
-      {
-         statusFlag = false;
-         outputStr.append("Could not create the sub process"+ioe);
-      }
-   }
-*/
-   public static void execute(String command,Process p, String[] output, boolean cmdStatus) throws IOException
-   {
-
-      outputStr=new StringBuffer();
-      errStr=new StringBuffer();
-      output[0]="";
-      statusFlag = false;
-
-      InputStream is = p.getInputStream();
-      OutputStream os = p.getOutputStream();
-      InputStream es = p.getErrorStream();
-
-      InputStreamReader isr = new InputStreamReader(is);
-      InputStreamReader iser = new InputStreamReader(es);
-      OutputStreamWriter osw = new OutputStreamWriter(os);
-
-      final BufferedReader br=new BufferedReader(isr);
-      final BufferedReader ber=new BufferedReader(iser);
-
-      bw = new BufferedWriter(osw);
-
-      // output thread
-      class OutputThread extends Thread
-      {
-
-         StringBuffer outputBuf=null;
-         OutputThread(StringBuffer outputBuf)
-         {
-            this.outputBuf=outputBuf;
-         }
-
-         public void run()
-         {
-            int i=0;
-            try
-            {
-               while ((i=br.read()) != -1)
-               {
-                  statusFlag = !statusFlag?true:statusFlag;
-                  if (errorFlag)
-                  {
-                     Thread.yield();
-                     errorFlag=false;
-                     try
-                     {
-                        sleep(100);
-                     } catch (InterruptedException ie)
-                     {
-                     }
-                  }
-                  if (nextCommand)
-                  {
-                     br.readLine();
-                     nextCommand=false;
-                  }else
-                  {
-                     outputBuf.append((char)i);
-                  }
-               }
-               doneflag=true;
-            } catch (IOException ote)
-            {
-               System.out.println("Error occurred in output Thread "+ote);
-            }
-         }
-      };
-
-      OutputThread outputt=new OutputThread(outputStr);
-      outputt.start();
-
-      // error thread
-      class ErrorThread extends Thread
-      {
-
-         StringBuffer outputBuf=null;
-         ErrorThread(StringBuffer outputBuf)
-         {
-            this.outputBuf=outputBuf;
-         }
-
-         public void run()
-         {
-            int i=0;
-            try
-            {
-               while ((i=ber.read()) != -1)
-               {
-                  errorFlag=true;
-                  outputBuf.append((char)i);
-               }
-            }catch (IOException ete)
-            {
-               System.out.println(" Error occurred in error thread "+ete);
-            }
-         }
-      };
-
-      ErrorThread errort=new ErrorThread(errStr);
-      errort.start();
-
-      // input thread
-      try
-      {
-         p.waitFor();
-         outputt.join();
-         errort.join();
-         if (!cmdStatus)
-         {
-            if (errStr.length() > 0)
-            {
-               errStr.delete(0, errStr.length());
-	       //     errStr.append("An internal server error has occurred. Please contact support.");
-            }
-         }
-         int count = errStr.indexOf("/sh:");
-         if (count > 0)
-            errStr.delete(0, count+5);
-
-         outputStr.append(errStr);
-      } catch (InterruptedException e)
-      {
-         // TODO Auto-generated catch block
-         //.printStackTrace();
-      }
-      isr=null;
-      iser=null;
-      osw=null;
-      bw=null;
-      outputt=null;
-      errort=null;
-      cmdStatus=false;
-
-      output[0]=outputStr.toString();
-      outputStr=null;
-      errStr=null;
-   }
-/*
-   private static String format(String scriptName,String description)
-   {
-
-      if (scriptName == null)
-      {
-         return null;
-      }
-      StringBuffer sb=null;
-      sb=new StringBuffer(scriptName);
-      while (sb.length() < 12)
-      {
-         sb.append(" ");
-      }
-      if (description != null)
-      {
-         sb.append("-");
-         sb.append(description);
-      }
-      return sb.toString().replaceAll(lineSeperator,lineSeperator + "            ");
-   }
-*/
-/*
-   public static void getTaclInfo(String command,String[] output) throws IOException
-   {
-
-      String[] commandArr=command.split("\\s+");
-      Process p;
-
-      if (commandArr.length == 1 && commandArr[0].equalsIgnoreCase("sutver"))
-      {
-         p = rt.exec("/usr/bin/sh eval  gtacl -c 'sutver'");
-      }
-      else if (commandArr.length == 2 && commandArr[0].equalsIgnoreCase("vproc") && commandArr[1].equalsIgnoreCase("$SYSTEM.ZMXODBC.MXOSRVR"))
-      {
-         p = rt.exec("/usr/bin/sh eval gtacl -c 'vproc $SYSTEM.ZMXODBC.MXOSRVR'");
-      }
-      else
-      {
-         output[0] = handleExceptions(commandArr[0]);
-         return;
-      }
-      try
-      {
-         execute(command, p, output, false);
-      } catch (IOException ioe)
-      {
-         statusFlag = false;
-      }
-   }
-
-   public static void onlineDBdump(String command,String[] output) throws IOException
-   {
-      handleDbaCmd(command, output);
-   }
-
-   public static void handleDbaCmd(String command,String[] output) throws IOException
-   {
-      String[] envList = {};
-      String[] commandArr=command.split("\\s+");
-      Process p = null;
-      String dbaScriptName = null;
-
-      int len = commandArr.length;
-      if (commandArr[0].equalsIgnoreCase("dbonlinedump"))
-         dbaScriptName = "dbonlinedump";
-      else if (commandArr[0].equalsIgnoreCase("updatestats"))
-         dbaScriptName = "updatestats";
-
-      if (dbaScriptName != null)
-      {
-         switch (len)
-         {
-            case 1:
-               p = rt.exec("/usr/bin/sh eval " + dbaScriptName, envList, new File("/usr/tandem/nvscript/dbascripts"));
-               break;
-            case 2:
-               if (commandArr[1].equalsIgnoreCase("INFO"))
-                  p = rt.exec("/usr/bin/sh eval " + dbaScriptName, envList, new File("/usr/tandem/nvscript/dbascripts"));
-
-               else
-                  output[0] = handleExceptions(commandArr[0]);
-               break;
-            case 3:
-               if (commandArr[1].equalsIgnoreCase("AT"))
-                  p = rt.exec("/usr/bin/sh eval " + dbaScriptName + " AT " + commandArr[2], envList, new File("/usr/tandem/nvscript/dbascripts"));
-
-               else
-                  output[0] = handleExceptions(commandArr[0]);
-               break;
-            default:
-               output[0] = handleExceptions(commandArr[0]);
-               return;
-         }
-      }
-      else
-      {
-         output[0] = handleExceptions(commandArr[0]);
-         return;
-      }
-
-      try
-      {
-         execute(command, p, output, false);
-      } catch (IOException ioe)
-      {
-         statusFlag = false;
-      }
-   }
-*/
-
-   public static String handleExceptions(String str)
-   {
-
-      str = "Invalid Command.";
-      return str;
-   }
-
-}
-
-/*
-DROP PROCEDURE NEO.HP_USTAT.STATS_PROFILE;
-CREATE PROCEDURE NEO.HP_USTAT.STATS_PROFILE
-  (
-    IN cmd VARCHAR(4000),
-    OUT response VARCHAR(240)
-  )
-  EXTERNAL NAME 'UstatUtil.runStatsProfile'
-  EXTERNAL PATH '/usr/tandem/mx_ustat'
-  LANGUAGE JAVA
-  PARAMETER STYLE JAVA
-  NO SQL
-  DYNAMIC RESULT SETS 0
-  ;
-DROP PROCEDURE NEO.HP_USTAT.STOP_AUTOMATED_STATS;
-CREATE PROCEDURE NEO.HP_USTAT.STOP_AUTOMATED_STATS
-  ()
-  EXTERNAL NAME 'UstatUtil.USASstop'
-  EXTERNAL PATH '/usr/tandem/mx_ustat'
-  LANGUAGE JAVA
-  PARAMETER STYLE JAVA
-  NO SQL
-  DYNAMIC RESULT SETS 0
-  ;
-
-*/
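
The commented-out DDL above shows how these entry points were registered as Java stored procedures (SPJs) with PARAMETER STYLE JAVA. Under that convention an IN VARCHAR parameter arrives as a Java String and an OUT VARCHAR parameter is passed as a one-element String array that the body fills in, which is why the deleted runStatsProfile takes (String, String[]). A minimal sketch of a procedure body following the same convention (the class name and messages here are illustrative, not part of this commit):

public class StatsProfileSpj {
    // IN  cmd      VARCHAR(4000) -> String cmd
    // OUT response VARCHAR(240)  -> String[] response; the body writes response[0]
    public static void runStatsProfile(String cmd, String[] response) {
        if (cmd == null || cmd.trim().isEmpty()) {
            response[0] = "Invalid Command.";
            return;
        }
        // The real UstatUtil launched the stats_profile script here via Runtime.exec().
        response[0] = "OK: " + cmd;
    }
}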


[9/9] incubator-trafodion git commit: Merge [TRAFODION-1502] PR 98 Build remaining Java files using maven

Posted by db...@apache.org.
Merge [TRAFODION-1502] PR 98 Build remaining Java files using maven


Project: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/commit/ae022f18
Tree: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/tree/ae022f18
Diff: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/diff/ae022f18

Branch: refs/heads/master
Commit: ae022f18810e592fac3cf6eeb19db39ba7089141
Parents: ed5888e d7f1dae
Author: Dave Birdsall <db...@apache.org>
Authored: Fri Oct 2 16:15:24 2015 +0000
Committer: Dave Birdsall <db...@apache.org>
Committed: Fri Oct 2 16:15:24 2015 +0000

----------------------------------------------------------------------
 core/sqf/sqenvcom.sh                            |    1 -
 core/sqf/src/seatrans/hbase-trx/Makefile        |    2 +-
 core/sql/executor/ByteArrayList.java            |   54 -
 core/sql/executor/HBaseClient.java              | 1596 ------------------
 core/sql/executor/HBaseClient_JNI.cpp           |   30 +-
 core/sql/executor/HBaseClient_JNI.h             |    2 +-
 core/sql/executor/HBulkLoadClient.java          |  533 ------
 core/sql/executor/HTableClient.h                |   65 -
 core/sql/executor/HTableClient.java             | 1334 ---------------
 core/sql/executor/HiveClient.java               |  301 ----
 core/sql/executor/OrcFileReader.cpp             |    4 +-
 core/sql/executor/OrcFileReader.java            |  518 ------
 core/sql/executor/ResultIterator.java           |  133 --
 core/sql/executor/ResultKeyValueList.java       |  100 --
 core/sql/executor/RowToInsert.java              |   44 -
 core/sql/executor/RowsToInsert.java             |   57 -
 core/sql/executor/SequenceFileReader.cpp        |    4 +-
 core/sql/executor/SequenceFileReader.java       |  448 -----
 core/sql/executor/SequenceFileWriter.java       |  467 -----
 core/sql/executor/StringArrayList.java          |   47 -
 .../executor/org_trafodion_sql_HTableClient.h   |   43 +
 core/sql/nskgmake/Makerules.build               |   13 -
 core/sql/nskgmake/Makerules.linux               |   13 -
 core/sql/nskgmake/Makerules.mk                  |   73 +-
 core/sql/nskgmake/executor/Makefile             |   54 -
 core/sql/nskgmake/ustat/Makefile                |   13 -
 core/sql/pom.xml                                |   49 +
 core/sql/qmscommon/QRLogger.cpp                 |   18 +-
 .../java/org/trafodion/sql/ByteArrayList.java   |   54 +
 .../java/org/trafodion/sql/HBaseClient.java     | 1596 ++++++++++++++++++
 .../java/org/trafodion/sql/HBulkLoadClient.java |  533 ++++++
 .../java/org/trafodion/sql/HTableClient.java    | 1337 +++++++++++++++
 .../main/java/org/trafodion/sql/HiveClient.java |  301 ++++
 .../java/org/trafodion/sql/OrcFileReader.java   |  500 ++++++
 .../java/org/trafodion/sql/ResultIterator.java  |  133 ++
 .../org/trafodion/sql/ResultKeyValueList.java   |  100 ++
 .../java/org/trafodion/sql/RowToInsert.java     |   44 +
 .../java/org/trafodion/sql/RowsToInsert.java    |   57 +
 .../org/trafodion/sql/SequenceFileReader.java   |  448 +++++
 .../org/trafodion/sql/SequenceFileWriter.java   |  467 +++++
 .../java/org/trafodion/sql/StringArrayList.java |   47 +
 .../org/trafodion/sql/ustat/ChgAutoList.java    |  426 +++++
 .../java/org/trafodion/sql/ustat/UstatUtil.java |  442 +++++
 core/sql/ustat/ChgAutoList.java                 |  426 -----
 core/sql/ustat/UstatUtil.java                   |  442 -----
 45 files changed, 6610 insertions(+), 6759 deletions(-)
----------------------------------------------------------------------
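
The file list above shows the shape of the change: the javac-built sources under core/sql/executor and core/sql/ustat are deleted, equivalents appear under the standard Maven layout core/sql/src/main/java/org/trafodion/sql/, a core/sql/pom.xml is added, and the nskgmake Makerules that drove javac are trimmed. The Java package also moves from org.trafodion.sql.HBaseAccess to org.trafodion.sql, as the SequenceFileReader.cpp hunks further down show. A hypothetical header for one of the relocated files, assuming only its path and package declaration change:

// core/sql/src/main/java/org/trafodion/sql/SequenceFileReader.java
// (was core/sql/executor/SequenceFileReader.java in package org.trafodion.sql.HBaseAccess)
package org.trafodion.sql;

public class SequenceFileReader {
    // body as before; now compiled by Maven via core/sql/pom.xml instead of the
    // javac rules removed from core/sql/nskgmake/Makerules.*
}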



[5/9] incubator-trafodion git commit: Most of the Trafodion Java source files are built through Maven, using projects DCS, REST, HBase-trx and SQL. A few files remain in the core/sql/executor and core/sql/ustat directories that are built through javac co

Posted by db...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/executor/OrcFileReader.java
----------------------------------------------------------------------
diff --git a/core/sql/executor/OrcFileReader.java b/core/sql/executor/OrcFileReader.java
deleted file mode 100644
index 6e5eb75..0000000
--- a/core/sql/executor/OrcFileReader.java
+++ /dev/null
@@ -1,518 +0,0 @@
-// @@@ START COPYRIGHT @@@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// @@@ END COPYRIGHT @@@
-
-package org.apache.hadoop.hive.ql.io.orc;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.FileReader;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.*;
-import java.nio.ByteBuffer;
-import java.nio.ByteOrder;
-import java.lang.Integer;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-
-import org.apache.hadoop.hive.conf.*;
-import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
-import org.apache.hadoop.hive.serde2.objectinspector.*;
-
-import org.apache.hadoop.io.IntWritable;
-import org.apache.hadoop.io.Text;
-
-import org.apache.hive.common.util.HiveTestUtils;
-
-import static org.junit.Assert.assertEquals;
-import org.junit.Before;
-import org.junit.Test;
-import static org.junit.Assert.assertNull;
-
-public class OrcFileReader
-{
-
-    Configuration               m_conf;
-    Path                        m_file_path;
-    
-    Reader                      m_reader;
-    List<OrcProto.Type>         m_types;
-    StructObjectInspector       m_oi;
-    List<? extends StructField> m_fields;
-    RecordReader                m_rr;
-    String																						lastError = null;
-    Reader.Options														m_options;
-
-public class OrcRowReturnSQL
-{
-		int m_row_length;
-		int m_column_count;
-		long m_row_number;
-		byte[] m_row_ba = new byte[4096];
-}
-
-    OrcRowReturnSQL													rowData;	//TEMP!!
-
-
-    OrcFileReader() {
-	m_conf = new Configuration();
-	rowData = new OrcRowReturnSQL();	//TEMP: was in fetch
-    }
-
-//********************************************************************************
-
-//  ORIGINAL VERSION BEFORE ADDING SUPPORT FOR COLUMN SELECTION
-    public String open(String pv_file_name) throws IOException {
-//    pv_file_name= pv_file_name + "/000000_0";
-
-	m_file_path = new Path(pv_file_name);
-
-		try{
-				m_reader = OrcFile.createReader(m_file_path, OrcFile.readerOptions(m_conf));
-		} catch (java.io.FileNotFoundException e1) {
-						return "file not found";
-		}
-	if (m_reader == null)
-			return "open failed!";
-	m_types = m_reader.getTypes();
-	m_oi = (StructObjectInspector) m_reader.getObjectInspector();
-	m_fields = m_oi.getAllStructFieldRefs();
-	
-	try{
-			m_rr = m_reader.rows();
-	} catch (java.io.IOException e1) {
-					return (e1.getMessage());
-	}
-	
-	if (m_rr == null)
-			return "open:RecordReader is null";
-	return null;
-    }
-
-//********************************************************************************
-/*
-    public String open(String pv_file_name) throws Exception {
-//    pv_file_name= pv_file_name + "/000000_0";
-	m_file_path = new Path(pv_file_name);
-
-		try{
-				m_reader = OrcFile.createReader(m_file_path, OrcFile.readerOptions(m_conf));
-		} catch (java.io.FileNotFoundException e1) {
-						return "file not found";
-		}
-	if (m_reader == null)
-			return "open failed!";
-	m_types = m_reader.getTypes();
-	m_oi = (StructObjectInspector) m_reader.getObjectInspector();
-	m_fields = m_oi.getAllStructFieldRefs();
-	
-//	m_rr = m_reader.rows();		//RESTORE THIS as working code!
-//						boolean[] includes = new boolean[29];
-  						boolean[] includes = new boolean[] 					{true,true,false,false,false,false,false,false,false,false,false,false,
-  											false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,true};
-  						 m_options = new Reader.Options();
-//  						my_options.include(includes);
-//  						System.out.println("Array size: " + includes.length);
- 					m_rr = m_reader.rowsOptions(m_options.include(includes));
-// 					m_rr = m_reader.rowsOptions(m_options.include(new boolean[] {false,true,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false}));
-//{true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true}));
-
-	return null;
-    }
-*/
-//********************************************************************************
-    
-	public String close()
-	{
-				m_reader = null;
-				m_rr = null; 
-				m_file_path = null;            
-    return null;
-	}
-
-
-    public void printFileInfo() throws Exception {
-
-	System.out.println("Reader: " + m_reader);
-
-
-	System.out.println("# Rows: " + m_reader.getNumberOfRows());
-	System.out.println("# Types in the file: " + m_types.size());
-	for (int i=0; i < m_types.size(); i++) {
-	    System.out.println("Type " + i + ": " + m_types.get(i).getKind());
-	}
-
-	System.out.println("Compression: " + m_reader.getCompression());
-	if (m_reader.getCompression() != CompressionKind.NONE) {
-	    System.out.println("Compression size: " + m_reader.getCompressionSize());
-	}
-
-	m_oi = (StructObjectInspector) m_reader.getObjectInspector();
-	
-	System.out.println("object inspector type category: " + m_oi.getCategory());
-	System.out.println("object inspector type name    : " + m_oi.getTypeName());
-
-	System.out.println("Number of columns in the table: " + m_fields.size());
-
-	// Print the type info:
-	for (int i = 0; i < m_fields.size(); i++) {
-	    System.out.println("Column " + i + " name: " + m_fields.get(i).getFieldName());
-	    ObjectInspector lv_foi = m_fields.get(i).getFieldObjectInspector();
-	    System.out.println("Column " + i + " type category: " + lv_foi.getCategory());
-	    System.out.println("Column " + i + " type name: " + lv_foi.getTypeName());
-	}
-
-    }
-
-    public boolean seekToRow(long pv_rowNumber) throws IOException {
-
-	if (m_reader == null) {
-	    return false;
-	}
-
-	if ((pv_rowNumber < 0) ||
-	    (pv_rowNumber >= m_reader.getNumberOfRows())) {
-	    return false;
-	}
-
-	m_rr.seekToRow(pv_rowNumber);
-
-	return true;
-    }
-
-    public String seeknSync(long pv_rowNumber) throws IOException {
-	if (m_reader == null) {
-	    return "Looks like a file has not been opened. Call open() first.";
-	}
-
-	if ((pv_rowNumber < 0) ||
-	    (pv_rowNumber >= m_reader.getNumberOfRows())) {
-	    return "Invalid rownumber: " + pv_rowNumber + " provided.";
-	}
-
-	m_rr.seekToRow(pv_rowNumber);
-
-	return null;
-    }
-
-    public long getNumberOfRows() throws IOException {
-
-	return m_reader.getNumberOfRows();
-
-    }
-
-    public long getPosition() throws IOException {
-
-	return m_rr.getRowNumber();
-
-    }
-
-    // Dumps the content of the file. The columns are '|' separated.
-    public void readFile_String() throws Exception {
-
-	seeknSync(0);
-	OrcStruct lv_row = null;
-	Object lv_field_val = null;
-   	StringBuilder lv_row_string = new StringBuilder(1024);
-	while (m_rr.hasNext()) {
-	    lv_row = (OrcStruct) m_rr.next(lv_row);
-	    lv_row_string.setLength(0);
-	    for (int i = 0; i < m_fields.size(); i++) {
-		lv_field_val = lv_row.getFieldValue(i);
-		if (lv_field_val != null) {
-		    lv_row_string.append(lv_field_val);
-		}
-		lv_row_string.append('|');
-	    }
-	    System.out.println(lv_row_string);
-	}
-
-    }
-
-
-    // Dumps the contents of the file as ByteBuffer.
-    public void readFile_ByteBuffer() throws Exception {
-
-	OrcStruct lv_row = null;
-	Object lv_field_val = null;
-   	ByteBuffer lv_row_buffer;
-
-	seeknSync(0);
-	while (m_rr.hasNext()) {
-	    byte[] lv_row_ba = new byte[4096];
-	    lv_row_buffer = ByteBuffer.wrap(lv_row_ba);
-	    lv_row = (OrcStruct) m_rr.next(lv_row);
-	    for (int i = 0; i < m_fields.size(); i++) {
-		lv_field_val = lv_row.getFieldValue(i);
-		if (lv_field_val == null) {
-		    lv_row_buffer.putInt(0);
-		    continue;
-		}
-		String lv_field_val_str = lv_field_val.toString();
-		lv_row_buffer.putInt(lv_field_val_str.length());
-		if (lv_field_val != null) {
-		    lv_row_buffer.put(lv_field_val_str.getBytes());
-		}
-	    }
-	    System.out.println(lv_row_buffer);
-	    //	    System.out.println(new String(lv_row_buffer.array()));
-	}
-    }
-
-    public String getNext_String(char pv_ColSeparator) throws Exception {
-
-	if ( ! m_rr.hasNext()) {
-	    return null;
-	}
-
-	OrcStruct lv_row = null;
-	Object lv_field_val = null;
-   	StringBuilder lv_row_string = new StringBuilder(1024);
-
-	lv_row = (OrcStruct) m_rr.next(lv_row);
-	for (int i = 0; i < m_fields.size(); i++) {
-	    lv_field_val = lv_row.getFieldValue(i);
-	    if (lv_field_val != null) {
-		lv_row_string.append(lv_field_val);
-	    }
-	    lv_row_string.append(pv_ColSeparator);
-	}
-	
-	return lv_row_string.toString();
-    }
-
-    // returns the next row as a byte array
-    public byte[] fetchNextRow() throws Exception {
-
-	if ( ! m_rr.hasNext()) {
-	    return null;
-	}
-
-//	OrcStruct lv_row = (OrcStruct) m_rr.next(null);
- OrcStruct lv_row = (OrcStruct) m_rr.next(null);
-	Object lv_field_val = null;
-   	ByteBuffer lv_row_buffer;
-
-	byte[] lv_row_ba = new byte[4096];
-	lv_row_buffer = ByteBuffer.wrap(lv_row_ba);
-	for (int i = 0; i < m_fields.size(); i++) {
-	    lv_field_val = lv_row.getFieldValue(i);
-	    if (lv_field_val == null) {
-  		lv_row_buffer.putInt(0);
-		continue;
-	    }
-	    String lv_field_val_str = lv_field_val.toString();
-	    lv_row_buffer.putInt(lv_field_val_str.length());
-	    if (lv_field_val != null) {
-		lv_row_buffer.put(lv_field_val_str.getBytes());
-	    }
-	}
-	return lv_row_buffer.array();
-    }
-    
-    
-//****************************************************************************
-	
-//THIS IS THE ORIGINAL FORM BEFORE ADDING SUPPORT FOR COLUMN SELECTION !!!!
-public OrcRowReturnSQL fetchNextRowObj() throws Exception
-{
-//		int	lv_integerLength = Integer.Bytes;
-		int	lv_integerLength = 4;
-//		OrcRowReturnSQL rowData = new OrcRowReturnSQL();
-	 
-	 	if ( ! m_rr.hasNext()) {
-	    return null;
-	}
-
-	OrcStruct lv_row = (OrcStruct) m_rr.next(null);
-	Object lv_field_val = null;
-   	ByteBuffer lv_row_buffer;
-
-//	lv_row_buffer.order(ByteOrder.LITTLE_ENDIAN);
-	lv_row_buffer = ByteBuffer.wrap(rowData.m_row_ba);
-	lv_row_buffer.order(ByteOrder.LITTLE_ENDIAN);
-	
-	rowData.m_row_length = 0;
-	rowData.m_column_count = m_fields.size();
-	rowData.m_row_number = m_rr.getRowNumber();
-	
-	for (int i = 0; i < m_fields.size(); i++) {
-	    lv_field_val = lv_row.getFieldValue(i);
-	    if (lv_field_val == null) {
-  		lv_row_buffer.putInt(0);
-  		rowData.m_row_length = rowData.m_row_length + lv_integerLength;
-		continue;
-	    }
-	    String lv_field_val_str = lv_field_val.toString();
-	    lv_row_buffer.putInt(lv_field_val_str.length());
-  			rowData.m_row_length = rowData.m_row_length + lv_integerLength;
-	    if (lv_field_val != null) {
-		lv_row_buffer.put(lv_field_val_str.getBytes());
-  		rowData.m_row_length = rowData.m_row_length + lv_field_val_str.length();
-	    }
-	}
-    	 
-	 return rowData;
-	
-}
-
-//****************************************************************************
-/*
-public OrcRowReturnSQL fetchNextRowObj() throws Exception
-{
-//		int	lv_integerLength = Integer.Bytes;
-		int	lv_integerLength = 4;
-		boolean[]	lv_include;
-		
-		OrcRowReturnSQL rowData = new OrcRowReturnSQL();
-	 
-	 	if ( ! m_rr.hasNext()) {
-	    return null;
-	}
-
-	OrcStruct lv_row = (OrcStruct) m_rr.next(null);
-	Object lv_field_val = null;
-   	ByteBuffer lv_row_buffer;
-
-//	lv_row_buffer.order(ByteOrder.LITTLE_ENDIAN);
-	lv_row_buffer = ByteBuffer.wrap(rowData.m_row_ba);
-	lv_row_buffer.order(ByteOrder.LITTLE_ENDIAN);
-//	rowData.m_column_count = m_fields.size();
-	rowData.m_column_count = 0;;
-	rowData.m_row_number = m_rr.getRowNumber();
-	lv_include = m_options.getInclude();
-	
-	for (int i = 0; i < m_fields.size(); i++) {
-					if (lv_include[i+1] == false) continue;
-	    lv_field_val = lv_row.getFieldValue(i);
-	    if (lv_field_val == null) {
-  				lv_row_buffer.putInt(0);
-  				rowData.m_row_length = rowData.m_row_length + lv_integerLength;
-						rowData.m_column_count++;;
-						continue;
-	    }
-	    String lv_field_val_str = lv_field_val.toString();
-	    lv_row_buffer.putInt(lv_field_val_str.length());
-  			rowData.m_row_length = rowData.m_row_length + lv_integerLength;
-	    if (lv_field_val != null) {
-		lv_row_buffer.put(lv_field_val_str.getBytes());
-  		rowData.m_row_length = rowData.m_row_length + lv_field_val_str.length();
-				rowData.m_column_count++;;
-
-	    }
-	}
-    	 
-	 return rowData;
-	
-}
-*/
-//****************************************************************************
-String getLastError() {
-      return lastError;
-  }
-
-//****************************************************************************
-public boolean isEOF() throws Exception
-{ 
-	if (m_rr.hasNext())
-	{
-	    return false;
-	}
-	else
-			{
-					return true;
-			}
-}  
-//****************************************************************************
- public String fetchNextRow(char pv_ColSeparator) throws Exception {
-
-	if ( ! m_rr.hasNext()) {
-	    return null;
-	}
-
-	OrcStruct lv_row = null;
-	Object lv_field_val = null;
-   	StringBuilder lv_row_string = new StringBuilder(1024);
-
-	lv_row = (OrcStruct) m_rr.next(lv_row);
-	for (int i = 0; i < m_fields.size(); i++) {
-	    lv_field_val = lv_row.getFieldValue(i);
-	    if (lv_field_val != null) {
-		lv_row_string.append(lv_field_val);
-	    }
-	    lv_row_string.append(pv_ColSeparator);
-	}
-	
-	return lv_row_string.toString();
-    }
-    
-
-
-    public static void main(String[] args) throws Exception
-    {
-	System.out.println("OrcFile Reader main");
-
-	OrcFileReader lv_this = new OrcFileReader();
-
-	lv_this.open(args[0]);
-
-	lv_this.printFileInfo();
-
-	lv_this.readFile_String();
-
-	lv_this.readFile_ByteBuffer();
-
-	// Gets rows as byte[]  (starts at row# 4)
-	boolean lv_done = false;
-	if (lv_this.seeknSync(4) == null) {
-	    while (! lv_done) {
-		System.out.println("Next row #: " + lv_this.getPosition());
-		byte[] lv_row_bb = lv_this.fetchNextRow();
-		if (lv_row_bb != null) {
-		    System.out.println("First 100 bytes of lv_row_bb: " + new String(lv_row_bb, 0, 100));
-		    System.out.println("Length lv_row_bb: " + lv_row_bb.length);
-		}
-		else {
-		    lv_done = true;
-		}
-	    }
-	}
-
-	// Gets rows as String (starts at row# 10)
-	lv_done = false;
-	String lv_row_string;
-	if (lv_this.seeknSync(10) == null) {
-	    while (! lv_done) {
-		lv_row_string = lv_this.getNext_String('|');
-		if (lv_row_string != null) {
-		    System.out.println(lv_row_string);
-		}
-		else {
-		    lv_done = true;
-		}
-	    }
-	}
-System.out.println("Shows the change in place");
-    }
-}
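
The deleted fetchNextRow()/fetchNextRowObj() above serialize a row into a byte array as a sequence of length-prefixed column values: a 4-byte length (0 for a NULL column) followed by that many bytes of the column's string form, with fetchNextRowObj() using little-endian order and also filling in m_row_length, m_column_count and m_row_number. A minimal consumer-side sketch of that layout (illustrative only; the real consumer is presumably the native executor code such as OrcFileReader.cpp, and the charset of getBytes() is assumed here to be UTF-8):

import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.charset.StandardCharsets;

public class OrcRowDecoder {
    // Decodes the fetchNextRowObj() layout: per column, a 4-byte little-endian
    // length, then that many value bytes. Length 0 is treated as NULL here.
    public static String[] decode(byte[] rowBytes, int rowLength, int columnCount) {
        ByteBuffer buf = ByteBuffer.wrap(rowBytes, 0, rowLength).order(ByteOrder.LITTLE_ENDIAN);
        String[] cols = new String[columnCount];
        for (int i = 0; i < columnCount; i++) {
            int len = buf.getInt();
            if (len == 0) {
                cols[i] = null;   // ambiguous with an empty string in this encoding
                continue;
            }
            byte[] val = new byte[len];
            buf.get(val);
            cols[i] = new String(val, StandardCharsets.UTF_8);
        }
        return cols;
    }
}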

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/executor/ResultIterator.java
----------------------------------------------------------------------
diff --git a/core/sql/executor/ResultIterator.java b/core/sql/executor/ResultIterator.java
deleted file mode 100644
index 14ef422..0000000
--- a/core/sql/executor/ResultIterator.java
+++ /dev/null
@@ -1,133 +0,0 @@
-// @@@ START COPYRIGHT @@@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// @@@ END COPYRIGHT @@@
-
-package org.trafodion.sql.HBaseAccess;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-
-
-public class ResultIterator {
-	ResultScanner   scanner;
-	Result[]        resultSet;
-	Result          row = null;
-	scanFetchStep   step;
-	List<KeyValue>  kvList;
-	int 			listIndex = 0;
-	int             cellIndex;
-	int				numKVs;
-	boolean         isSingleRow = false;
-	
-	private enum scanFetchStep {
-		SCAN_FETCH_NEXT_ROW,
-		SCAN_FETCH_NEXT_COL,
-		SCAN_FETCH_CLOSE
-	} ;
-
-	public ResultIterator(ResultScanner scanner) {
-		this.scanner = scanner;
-		resultSet = null;
-		step = scanFetchStep.SCAN_FETCH_NEXT_ROW;
-	}
-	
-	public ResultIterator(Result[] results) {
-		this.scanner = null;
-		resultSet = results;
-		step = scanFetchStep.SCAN_FETCH_NEXT_ROW;
-	}
-	
-	public ResultIterator(Result result) {
-		this.scanner = null;
-		resultSet = null;
-		row = result;
-		isSingleRow = true;
-		step = scanFetchStep.SCAN_FETCH_NEXT_ROW;
-	}
-	
-	KeyValue nextCell() throws IOException {
-		while (true)
-		{
-			switch (step)
-			{
-				case SCAN_FETCH_NEXT_ROW:
-				{
-				        if (isSingleRow == false) {				        
-        					if (scanner != null)
-        						row = scanner.next();
-        					else {
-        						if (listIndex == resultSet.length) {
-        							step = scanFetchStep.SCAN_FETCH_CLOSE;
-        							break;
-        						}							
-        						row = resultSet[listIndex];
-        						listIndex++;
-        					}
-        				}
-					
-					if (row == null || row.isEmpty()) {
-						step = scanFetchStep.SCAN_FETCH_CLOSE;
-						break;
-					}
-					
-					kvList = row.list();
-					cellIndex = 0;
-					numKVs = kvList.size();
-	
-					step = scanFetchStep.SCAN_FETCH_NEXT_COL;
-				}
-				break;
-	
-				case SCAN_FETCH_NEXT_COL:
-				{
-					KeyValue kv = kvList.get(cellIndex);
-					cellIndex++;
-					if (kv == null) {
-					        if (isSingleRow)
-						        step = scanFetchStep.SCAN_FETCH_CLOSE;
-						else
-						        step = scanFetchStep.SCAN_FETCH_NEXT_ROW;
-						break;
-					}
-	
-					if (cellIndex == numKVs)
-					        if (isSingleRow)
-						        step = scanFetchStep.SCAN_FETCH_CLOSE;
-						else
-						        step = scanFetchStep.SCAN_FETCH_NEXT_ROW;
-	
-					return kv;
-				}
-				
-				case SCAN_FETCH_CLOSE:
-				{
-					return null;
-				}
-	
-			}// switch
-		} // while
-		
-	}
-	
-}

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/executor/ResultKeyValueList.java
----------------------------------------------------------------------
diff --git a/core/sql/executor/ResultKeyValueList.java b/core/sql/executor/ResultKeyValueList.java
deleted file mode 100644
index 54eed48..0000000
--- a/core/sql/executor/ResultKeyValueList.java
+++ /dev/null
@@ -1,100 +0,0 @@
-// @@@ START COPYRIGHT @@@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// @@@ END COPYRIGHT @@@
-
-package org.trafodion.sql.HBaseAccess;
-
-import java.util.List;
-import java.io.*;
-
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.client.Result;
-import java.nio.*;
-
-public class ResultKeyValueList {
-	Result result;
-	List<KeyValue> kvList;
-
-	public ResultKeyValueList(Result result) {
-		super();
-		this.result = result;
-		kvList = result.list();
-	}
-
-	byte[] getRowID() {
-	        if (result == null)
-	                return null;
-	        else
-		        return result.getRow();
-	}
-
-	byte[] getAllKeyValues() {
-        if (kvList == null)
-           return null;
-        int numCols = kvList.size();
-        byte[] rowID = result.getRow();
-        int bufSize = rowID.length;
-        bufSize += (64 * numCols);
-        for (int i=0; i<numCols; i++) {
-          bufSize += kvList.get(i).getLength();
-        }
-        ByteBuffer buf = ByteBuffer.allocate(bufSize);
-        buf.order(ByteOrder.LITTLE_ENDIAN);
-        // move in numCols
-        buf.putInt(numCols);
-        // move in rowID length and rowID
-        buf.putInt(rowID.length);
-        buf.put(rowID);;
-        // move in all descriptors
-        for (int i=0; i<numCols; i++) {
-          copyKVs(buf, kvList.get(i));
-        }
-        return buf.array();
-    }
-
-	void copyKVs(ByteBuffer buf, KeyValue kv)
-	{
-	    buf.putInt(kv.getLength());
-        int offset = kv.getOffset();
-		buf.putInt(kv.getValueLength());
-		buf.putInt(kv.getValueOffset() - offset);
-		buf.putInt(kv.getQualifierLength());
-		buf.putInt(kv.getQualifierOffset() - offset);
-		buf.putInt(kv.getFamilyLength());
-		buf.putInt(kv.getFamilyOffset() - offset);
-		buf.putLong(kv.getTimestamp());
-		buf.put(kv.getBuffer(), kv.getOffset(), kv.getLength());
-	}
-
-
-	int getSize() {
-	        if (kvList == null)
-	                return 0;
-	        else
-		        return kvList.size();
-	}
-
-	KeyValue getEntry(int i) {
-	        if (kvList == null)
-	                return null;
-	        else
-		        return kvList.get(i);
-	}
-}

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/executor/RowToInsert.java
----------------------------------------------------------------------
diff --git a/core/sql/executor/RowToInsert.java b/core/sql/executor/RowToInsert.java
deleted file mode 100644
index 92d8fbc..0000000
--- a/core/sql/executor/RowToInsert.java
+++ /dev/null
@@ -1,44 +0,0 @@
-// @@@ START COPYRIGHT @@@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// @@@ END COPYRIGHT @@@
-
-package org.trafodion.sql.HBaseAccess;
-
-import java.util.Vector;
-
-public class RowToInsert extends Vector<RowToInsert.ColToInsert> {
-
-	public class ColToInsert {
-		public byte[] qualName;
-		public byte[] colValue;
-	}
-
-	private static final long serialVersionUID = 5066470006717527862L;
-
-	public void addColumn(byte[] name, byte[] value) {
-		ColToInsert col = new ColToInsert();
-		col.qualName = name;
-		col.colValue = value;
-		add(col);
-	}
-
-}
-
-

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/executor/RowsToInsert.java
----------------------------------------------------------------------
diff --git a/core/sql/executor/RowsToInsert.java b/core/sql/executor/RowsToInsert.java
deleted file mode 100644
index 8ca82bf..0000000
--- a/core/sql/executor/RowsToInsert.java
+++ /dev/null
@@ -1,57 +0,0 @@
-// @@@ START COPYRIGHT @@@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// @@@ END COPYRIGHT @@@
-
-package org.trafodion.sql.HBaseAccess;
-
-import java.util.Vector;
-
-public class RowsToInsert  extends Vector<RowsToInsert.RowInfo> {
-
-    public class RowInfo {
-	public byte[] rowId;
-	public Vector<RowsToInsert.ColToInsert> columns;
-    }
-
-    public class ColToInsert {
-	public byte[] qualName;
-	public byte[] colValue;
-    }
-
-    private static final long serialVersionUID = 5066470006717527863L;
-
-    public void addRowId(byte[] rowId) {
-	RowInfo rowInfo = new RowInfo();
-	rowInfo.rowId = rowId;
-	rowInfo.columns = new Vector<RowsToInsert.ColToInsert>();
-	rowInfo.columns.clear();
-	add(rowInfo);
-    }
-
-    public void addColumn(byte[] name, byte[] value) {
-	ColToInsert col = new ColToInsert();
-	col.qualName = name;
-	col.colValue = value;
-	if (size() > 0)
-	    get(size()-1).columns.add(col);
-	//	RowInfo.columns.add(col);
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/executor/SequenceFileReader.cpp
----------------------------------------------------------------------
diff --git a/core/sql/executor/SequenceFileReader.cpp b/core/sql/executor/SequenceFileReader.cpp
index 065389f..4b93f2a 100644
--- a/core/sql/executor/SequenceFileReader.cpp
+++ b/core/sql/executor/SequenceFileReader.cpp
@@ -71,7 +71,7 @@ SequenceFileReader::~SequenceFileReader()
 //////////////////////////////////////////////////////////////////////////////
 SFR_RetCode SequenceFileReader::init()
 {
-  static char className[]="org/trafodion/sql/HBaseAccess/SequenceFileReader";
+  static char className[]="org/trafodion/sql/SequenceFileReader";
   SFR_RetCode rc; 
 
   if (javaMethodsInitialized_)
@@ -489,7 +489,7 @@ SequenceFileWriter::~SequenceFileWriter()
 //////////////////////////////////////////////////////////////////////////////
 SFW_RetCode SequenceFileWriter::init()
 {
-  static char className[]="org/trafodion/sql/HBaseAccess/SequenceFileWriter";
+  static char className[]="org/trafodion/sql/SequenceFileWriter";
   SFW_RetCode rc;
   
   if (javaMethodsInitialized_)
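
The two hunks above show the C++ side of the package move: the JNI layer looks the Java classes up by their internal (slash-separated) names, so dropping the HBaseAccess sub-package means the hard-coded className strings must change with it. The internal name is simply the fully qualified class name with '.' replaced by '/', as this small illustrative snippet shows (the actual lookup presumably happens via JNI FindClass in the native init code):

public class JniClassName {
    public static void main(String[] args) {
        String fqName = "org.trafodion.sql.SequenceFileReader";  // new Maven-era package
        // JNI internal form, matching the updated className string above:
        System.out.println(fqName.replace('.', '/'));            // org/trafodion/sql/SequenceFileReader
    }
}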

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/executor/SequenceFileReader.java
----------------------------------------------------------------------
diff --git a/core/sql/executor/SequenceFileReader.java b/core/sql/executor/SequenceFileReader.java
deleted file mode 100644
index dbbe5c6..0000000
--- a/core/sql/executor/SequenceFileReader.java
+++ /dev/null
@@ -1,448 +0,0 @@
-// @@@ START COPYRIGHT @@@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// @@@ END COPYRIGHT @@@
-
-/**
- * 
- */
-package org.trafodion.sql.HBaseAccess;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Properties;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.util.ReflectionUtils;
-//import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
-//import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-//import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
-//import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption;
-//import org.apache.hadoop.hive.serde2.objectinspector.StructField;
-//import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
-
-
-public class SequenceFileReader {
-
-  Configuration conf = null;           // File system configuration
-  SequenceFile.Reader reader = null;   // The HDFS SequenceFile reader object.
-  Writable key = null;
-  Writable row = null;
-//    LazySimpleSerDe serde = null;
-  boolean isEOF = false;
-  String lastError = null;  
-    
-	/**
-	 * Class Constructor
-	 */
-	SequenceFileReader() {
-    	conf = new Configuration();
-    	conf.set("fs.hdfs.impl","org.apache.hadoop.hdfs.DistributedFileSystem");
-  }
-    
-  String getLastError() {
-      return lastError;
-  }
-    
-	/**
-	 * Initialize the SerDe object. Needed only before calling fetchArrayOfColumns(). 
-	 * @param numColumns The number of columns in the table.
-	 * @param fieldDelim The delimiter between fields.
-	 * @param columns A comma delimited list of column names. 
-	 * @param colTypes A comma delimited list of column types.
-	 * @param nullFormat NULL representation.
-	 */
-//	public void initSerDe(String numColumns, String fieldDelim, String columns, String colTypes, String nullFormat) throws IllegalStateException {
-//		
-//            serde = new LazySimpleSerDe();
-//            Properties tbl = new Properties();
-//            tbl.setProperty("serialization.format", numColumns);
-//            tbl.setProperty("field.delim", fieldDelim);
-//            tbl.setProperty("columns", columns);
-//            tbl.setProperty("columns.types", colTypes);
-//            tbl.setProperty("serialization.null.format", colTypes);
-//            serde.initialize(conf, tbl);
-//	}
-	
-	/**
-	 * Open the SequenceFile for reading.
-	 * @param path The HDFS path to the file.
-	 */
-	public String open(String path) throws IOException {
-
-        Path filename = new Path(path);
-        	
-        reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(filename));
-	
-        key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
-        row = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
-        
-        return null;
-            
-	}
-	
-	/**
-	 * Get the current position in the file.
-	 * @return The current position or -1 if error.
-	 */
-	public long getPosition() throws IOException {
-
-    lastError = null;		
-		if (reader == null) {
-			lastError = "open() was not called first.";
-			return -1;
-		}
-		
-        return reader.getPosition();
-	}	
-	
-    /**
-     * Have we reached the end of the file yet?
-     * @return
-     */
-  public boolean isEOF() {
-		return isEOF;
-	}
-
-	/**
-	 * Seek to the specified position in the file, and then to the beginning 
-	 * of the record after the next sync mark.
-	 * @param pos Required file position.
-	 * @return null if OK, or error message.
-	 */
-	public String seeknSync(long pos) throws IOException {
-
-		if (reader == null) {
-			return "open() was not called first.";
-		}
-		
-			reader.sync(pos);
-			return null;
-	}
-	
-	/**
-	 * Fetch the next row as an array of columns.
-	 * @return An array of columns.
-	 */
-//	public String[] fetchArrayOfColumns() throws IllegalStateException {
-//		if (reader == null)
-//			throw new IllegalStateException("open() was not called first.");
-//		if (serde == null)
-//			throw new IllegalStateException("initSerDe() was not called first.");
-//		
-//		ArrayList<String> result = new ArrayList<String>();
-//            boolean theresMore = reader.next(key, row);
-//            if (!theresMore)
-//            	return null;
-//            StructObjectInspector soi = (StructObjectInspector) serde.getObjectInspector();
-//            List<? extends StructField> fieldRefs = soi.getAllStructFieldRefs();
-//            Object data = serde.deserialize(row);
-//            
-//            for (StructField fieldRef : fieldRefs) {
-//                ObjectInspector oi = fieldRef.getFieldObjectInspector();
-//                Object obj = soi.getStructFieldData(data, fieldRef);
-//                Object column = convertLazyToJava(obj, oi);
-//                if (column == null)
-//                	result.add(null);
-//                else
-//                	result.add(column.toString());
-//              }
-//    		String[] resultArray = new String[result.size()];
-//    		result.toArray(resultArray);
-//    		return resultArray;
-//	}
-	
-	/**
-	 * Fetch the next row as a single String, that still needs to be parsed.
-	 * @return The next row.
-	 */
-	public String fetchNextRow() throws IOException {
-
-    lastError = null;		
-		if (reader == null) {		
-			lastError = "open() was not called first.";
-			return null;
-		}
-		
-			boolean result = reader.next(key, row);
-			if (result)	{
-				return row.toString();
-			}
-			else {				
-				return null;
-			}
-	}
-	
-	/**
-	 * @param minSize Minimum size of the result. If the file is compressed, 
-	 * the result may be much larger. The reading starts at the current 
-	 * position in the file, and stops once the limit has been reached.
-	 * @return An array of result rows.
-	 * @throws IllegalStateException
-	 */
-	public String[] fetchArrayOfRows(int minSize) throws IOException {
-
-    lastError = "";		
-		if (reader == null) {		
-			lastError = "open() was not called first.";
-			return null;
-		}
-		
-		ArrayList<String> result = new ArrayList<String>();
-		long initialPos = getPosition();
-		boolean stop = false;
-		do {
-			String newRow = fetchNextRow();
-			
-			if (newRow==null && lastError!=null)
-			  return null;
-			  
-			boolean reachedEOF = (newRow == null || newRow == "");
-			if (!reachedEOF)
-				result.add(newRow);
-			
-			long bytesRead = getPosition() - initialPos;
-			stop = reachedEOF || (bytesRead > minSize);
-		} while (!stop);
-		
-		String[] resultArray = new String[result.size()];
-		result.toArray(resultArray);
-		return resultArray;
-	}
-	
-	/**
-	 * Read a block of data from the file and return it as an array of rows.
-	 * First sync to startOffset, and skip the first row, then keep reading
-	 * Until passing stopOffset and passing the next Sync marker.
-	 * @param startOffset
-	 * @param stopOffset
-	 * @return
-	 * @throws IllegalStateException
-	 * @throws IOException
-	 */
-	public String[] fetchArrayOfRows(int startOffset, int stopOffset)
-                  throws IOException  {
-
-    lastError = "";		
-		if (reader == null) {		
-			lastError = "open() was not called first.";
-			return null;
-		}
-		
-		seeknSync(startOffset);
-		
-		ArrayList<String> result = new ArrayList<String>();
-		boolean stop = false;
-		do {
-			long startingPosition = getPosition();
-			String newRow = fetchNextRow();
-
-			if (newRow==null && lastError!=null)
-			  return null;
-			  
-			boolean reachedEOF = (newRow == null || newRow == "");
-			
-			boolean reachedSize = (startingPosition > stopOffset);
-			boolean lastSyncSeen = (reachedSize && reader.syncSeen());
-			// Stop reading if there is no more data, or if we have read 
-			// enough bytes and have seen the Sync mark.
-			stop = reachedEOF || (reachedSize && lastSyncSeen);
-			
-			if (!stop)
-				result.add(newRow);
-			
-		} while (!stop);
-		
-		String[] resultArray = new String[result.size()];
-		result.toArray(resultArray);
-		return resultArray;
-	}
-	
-	/**
-	 * Fetch the next row from the file.
-	 * @param stopOffset File offset at which to start looking for a sync marker
-	 * @return The next row, or null if we have reached EOF or have passed stopOffset and then
-	 *         the sync marker.
-	 */
-	public String fetchNextRow(long stopOffset) throws IOException {
-
-    lastError = "";		
-		if (reader == null) {		
-			lastError = "open() was not called first.";
-			return null;
-		}
-
-		long startingPosition = getPosition();
-		
-		String newRow = fetchNextRow();
-		
-    if (newRow==null && lastError!=null)
-	    return null;
-
-		if (newRow == null)
-			isEOF = true;
-		
-		if (newRow == "")
-			newRow = null;
-		
-		// If we have already read past the stopOffset on a previous row, 
-		// and have seen the sync marker, then this row belongs to the next block.
-		if ((startingPosition > stopOffset) && reader.syncSeen())
-			newRow = null;
-		
-		return newRow;
-	}
-	
-	/**
-	 * Close the reader.
-	 */
-	public String close() {
-
-    lastError = "";		
-		if (reader == null) {		
-			lastError = "open() was not called first.";
-			return null;
-		}
-
-      IOUtils.closeStream(reader);            
-    
-    return null;
-	}
-
-	private boolean ReadnPrint(int start, int end) 
-                       throws IOException {
-		System.out.println("Beginning position: " + getPosition());
-		String[] batch;
-    batch = fetchArrayOfRows(start, end);
-    if (batch==null)
-      return false;
-      
-		boolean theresMore = (batch.length > 0);
-		for (String newRow : batch)
-			System.out.println(newRow);
-		System.out.println("Ending position: " + getPosition());
-		System.out.println("===> Buffer Split <===");
-		return theresMore;
-	}
-
-	private boolean ReadnPrint2(int start, int end) throws IOException {
-			System.out.println("Read from: " + start + " to: " + end + ".");
-			seeknSync(start);
-			System.out.println("Beginning position: " + getPosition());
-			String newRow = null;
-			do {
-				newRow = fetchNextRow(end);
-				
-				if (newRow != null)
-					System.out.println(newRow);
-			} while (newRow != null); 
-			
-		System.out.println("Ending position: " + getPosition());
-		System.out.println("===> Buffer Split <===");
-		return !isEOF();
-	}
-
-	/**
-	 * @param args
-	 * @throws IOException 
-	 */
-	public static void main(String[] args) throws IOException {
-		
-		SequenceFileReader sfReader = new SequenceFileReader();
-		byte[] fieldDelim = new byte[2];
-		fieldDelim[0] = 1;
-		fieldDelim[1] = 0;
-		//sfReader.initSerDe("19", "\01",
-                //           "p_promo_sk,p_promo_id,p_start_date_sk,p_end_date_sk,p_item_sk,p_cost,p_response_target,p_promo_name,p_channel_dmail,p_channel_email,p_channel_catalog,p_channel_tv,p_channel_radio,p_channel_press,p_channel_event,p_channel_demo,p_channel_details,p_purpose,p_discount_active",
-                //           "int,string,int,int,int,float,int,string,string,string,string,string,string,string,string,string,string,string,string",
-                //          "NULL");
-                          
-		//sfReader.open("hdfs://localhost:9000/user/hive/warehouse/promotion_seq/000000_0");
-		sfReader.seeknSync(300);
-
-		int opType = 4;
-		switch (opType)
-		{
-//		case 1:
-//			boolean theresMoreRows = true;
-//			do {
-//				String[] columns = sfReader.fetchArrayOfColumns();
-//				theresMoreRows = (columns != null);
-//				if (theresMoreRows)
-//				{
-//					for (String col : columns)
-//					{
-//						if (col == null)
-//							System.out.print("<NULL>, ");
-//						else
-//							System.out.print(col + ", ");
-//					}
-//					System.out.println();
-//				}
-//			} while (theresMoreRows); 
-//			break;
-			
-		case 2: // Return row as String
-			String row;
-			do {
-				row = sfReader.fetchNextRow();
-				if (row != null)
-					System.out.println(row);
-			} while (row != null);
-			break;
-			
-		case 3:
-		case 4:
-			int size = 3000;
-			int start = 0;
-			int end = size;
-			boolean theresMore3 = true;
-			
-			while (theresMore3) {
-				if (opType == 3)
-					theresMore3 = sfReader.ReadnPrint(start, end);
-				else
-					theresMore3 = sfReader.ReadnPrint2(start, end);
-				start += size;
-				end += size;				
-			}
-			break;
-
-		}
-		
-		sfReader.close();
-	}
-
-//	private static Object convertLazyToJava(Object o, ObjectInspector oi) {
-//	    Object obj = ObjectInspectorUtils.copyToStandardObject(o, oi, ObjectInspectorCopyOption.JAVA);
-//
-//	    // for now, expose non-primitive as a string
-//	    // TODO: expose non-primitive as a structured object while maintaining JDBC compliance
-//	    if (obj != null && oi.getCategory() != ObjectInspector.Category.PRIMITIVE) {
-//	      obj = obj.toString();
-//	    }
-//
-//	    return obj;
-//	  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/executor/SequenceFileWriter.java
----------------------------------------------------------------------
diff --git a/core/sql/executor/SequenceFileWriter.java b/core/sql/executor/SequenceFileWriter.java
deleted file mode 100644
index 5d12fbf..0000000
--- a/core/sql/executor/SequenceFileWriter.java
+++ /dev/null
@@ -1,467 +0,0 @@
-// @@@ START COPYRIGHT @@@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// @@@ END COPYRIGHT @@@
-
-/**
- * 
- */
-package org.trafodion.sql.HBaseAccess;
-
-import java.io.IOException;
-import java.io.OutputStream;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.MasterNotRunningException;
-import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.TableSnapshotScanner;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.HFileArchiveUtil;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.ByteWritable;
-import org.apache.hadoop.io.BytesWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.compress.CodecPool;
-import org.apache.hadoop.io.compress.CompressionCodec;
-import org.apache.hadoop.io.compress.Compressor;
-import org.apache.hadoop.io.compress.GzipCodec;
-import org.apache.hadoop.io.SequenceFile.CompressionType;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.hadoop.io.compress.*;
-import org.apache.hadoop.io.compress.zlib.*;
-import org.apache.hadoop.fs.*;
-
-import java.io.*;
-import java.util.List;
-
-import org.apache.hadoop.util.*;
-import org.apache.hadoop.io.*;
-import org.apache.log4j.Logger;
-
-import com.google.common.collect.Lists;
-import com.google.protobuf.ServiceException;
-
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.fs.permission.AclEntry;
-import org.apache.hadoop.fs.permission.FsPermission;
-public class SequenceFileWriter {
-
-    static Logger logger = Logger.getLogger(SequenceFileWriter.class.getName());
-    Configuration conf = null;           // File system configuration
-    HBaseAdmin admin = null;
-    
-    SequenceFile.Writer writer = null;
-
-    FSDataOutputStream fsOut = null;
-    OutputStream outStream = null;
-    
-    FileSystem  fs = null;
-    /**
-     * Class Constructor
-     */
-    SequenceFileWriter() throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException, IOException
-    {
-      init("", "");
-      conf.set("fs.hdfs.impl","org.apache.hadoop.hdfs.DistributedFileSystem");
-    }
-    
-	
-    public String open(String path)	{
-      try {
-        Path filename = new Path(path);
-        writer = SequenceFile.createWriter(conf, 
-          	       SequenceFile.Writer.file(filename),
-          	       SequenceFile.Writer.keyClass(ByteWritable.class),
-          	       SequenceFile.Writer.valueClass(BytesWritable.class),
-          	       SequenceFile.Writer.compression(CompressionType.NONE));
-        return null;
-      } catch (Exception e) {
-        //e.printStackTrace();
-        return e.getMessage();
-      }	
-    }
-	
-    public String open(String path, int compressionType)	{
-      try {
-        Path filename = new Path(path);
-        
-        CompressionType compType=null;
-        switch (compressionType) {
-          case 0:
-            compType = CompressionType.NONE;
-            break;
-            
-          case 1:
-            compType = CompressionType.RECORD;
-            break;
-            
-          case 2:
-            compType = CompressionType.BLOCK;
-            break;
-          
-          default:
-            return "Wrong argument for compression type.";
-        }
-        
-        writer = SequenceFile.createWriter(conf, 
-          	                               SequenceFile.Writer.file(filename),
-          	                               SequenceFile.Writer.keyClass(BytesWritable.class),
-          	                               SequenceFile.Writer.valueClass(Text.class),
-          	                               SequenceFile.Writer.compression(compType));
-        return null;
-      } catch (Exception e) {
-        //e.printStackTrace();
-        return e.getMessage();
-      }	
-    }
-	
-    public String write(String data) {
-		  if (writer == null)
-			  return "open() was not called first.";
-			
-      try {
-	      writer.append(new BytesWritable(), new Text(data.getBytes()));
-        return null;
-    	} catch (IOException e) {
-    	  //e.printStackTrace();
-        return e.getMessage();
-    	}
-    }
-	
-    public String close() {
-		  if (writer == null)
-			  return "open() was not called first.";
-			
-      try {
-        writer.close();
-        return null;
-      } catch (Exception e) {
-        //e.printStackTrace();
-        return e.getMessage();
-      }
-    }
-    
-    
-    
-    boolean hdfsCreate(String fname , boolean compress) throws IOException
-    {
-      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsCreate() - started" );
-      Path filePath = null;
-      if (!compress || (compress && fname.endsWith(".gz")))
-        filePath = new Path(fname);
-      else
-        filePath = new Path(fname + ".gz");
-        
-      fs = FileSystem.get(filePath.toUri(),conf);
-      fsOut = fs.create(filePath, true);
-      
-      outStream = fsOut;
-      
-      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsCreate() - file created" );
-      if (compress)
-      {
-        GzipCodec gzipCodec = (GzipCodec) ReflectionUtils.newInstance( GzipCodec.class, conf);
-        Compressor gzipCompressor = CodecPool.getCompressor(gzipCodec);
-        try 
-        {
-          outStream = gzipCodec.createOutputStream(fsOut, gzipCompressor);
-        }
-        catch (IOException e)
-        {
-        if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsCreate() --exception :" + e);
-          throw e;
-        }
-      }
-      
-      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsCreate() - compressed output stream created" );
-      return true;
-    }
-    
-    boolean hdfsWrite(byte[] buff, long len) throws Exception,OutOfMemoryError
-    {
-
-      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsWrite() - started" );
-      try
-      {
-        outStream.write(buff);
-        outStream.flush();
-      }
-      catch (Exception e)
-      {
-        if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsWrite() -- exception: " + e);
-        throw e;
-      }
-      catch (OutOfMemoryError e1)
-      {
-        logger.debug("SequenceFileWriter.hdfsWrite() -- OutOfMemory Error: " + e1);
-        throw e1;
-      }
-      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsWrite() - bytes written and flushed:" + len  );
-      
-      return true;
-    }
-    
-    boolean hdfsClose() throws IOException
-    {
-      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsClose() - started" );
-      try
-      {
-        outStream.close();
-        fsOut.close();
-      }
-      catch (IOException e)
-      {
-        if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsClose() - exception:" + e);
-        throw e;
-      }
-      return true;
-    }
-
-    
-    public boolean hdfsMergeFiles(String srcPathStr, String dstPathStr) throws Exception
-    {
-      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsMergeFiles() - start");
-      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsMergeFiles() - source Path: " + srcPathStr + 
-                                               ", destination File:" + dstPathStr );
-      try 
-      {
-        Path srcPath = new Path(srcPathStr );
-        srcPath = srcPath.makeQualified(srcPath.toUri(), null);
-        FileSystem srcFs = FileSystem.get(srcPath.toUri(),conf);
-  
-        Path dstPath = new Path(dstPathStr);
-        dstPath = dstPath.makeQualified(dstPath.toUri(), null);
-        FileSystem dstFs = FileSystem.get(dstPath.toUri(),conf);
-        
-        if (dstFs.exists(dstPath))
-        {
-          if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsMergeFiles() - destination files exists" );
-          // for this prototype we just delete the file-- will change in next code drops
-          dstFs.delete(dstPath, false);
-           // The caller should already have checked existence of file-- throw exception 
-           //throw new FileAlreadyExistsException(dstPath.toString());
-        }
-        
-        Path tmpSrcPath = new Path(srcPath, "tmp");
-
-        FileSystem.mkdirs(srcFs, tmpSrcPath,srcFs.getFileStatus(srcPath).getPermission());
-        logger.debug("SequenceFileWriter.hdfsMergeFiles() - tmp folder created." );
-        Path[] files = FileUtil.stat2Paths(srcFs.listStatus(srcPath));
-        for (Path f : files)
-        {
-          srcFs.rename(f, tmpSrcPath);
-        }
-        // copyMerge and use false for the delete option since it removes the whole directory
-        if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsMergeFiles() - copyMerge" );
-        FileUtil.copyMerge(srcFs, tmpSrcPath, dstFs, dstPath, false, conf, null);
-        
-        if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsMergeFiles() - delete intermediate files" );
-        srcFs.delete(tmpSrcPath, true);
-      }
-      catch (IOException e)
-      {
-        if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsMergeFiles() --exception:" + e);
-        throw e;
-      }
-      
-      
-      return true;
-    }
-    public boolean hdfsCleanUnloadPath(String uldPathStr
-                         /*, boolean checkExistence, String mergeFileStr*/) throws Exception
-    {
-      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsCleanUnloadPath() - start");
-      logger.debug("SequenceFileWriter.hdfsCleanUnloadPath() - unload Path: " + uldPathStr );
-      
-      try 
-      {
-      Path uldPath = new Path(uldPathStr );
-      uldPath = uldPath.makeQualified(uldPath.toUri(), null);
-      FileSystem srcFs = FileSystem.get(uldPath.toUri(),conf);
-      if (!srcFs.exists(uldPath))
-      {
-        //unload location does not exist. hdfscreate will create it later
-        //nothing to do 
-        logger.debug("SequenceFileWriter.hdfsCleanUnloadPath() -- unload location does not exist." );
-        return true;
-      }
-       
-      Path[] files = FileUtil.stat2Paths(srcFs.listStatus(uldPath));
-      logger.debug("SequenceFileWriter.hdfsCleanUnloadPath() - delete files" );
-      for (Path f : files){
-        srcFs.delete(f, false);
-      }
-      }
-      catch (IOException e)
-      {
-        logger.debug("SequenceFileWriter.hdfsCleanUnloadPath() -exception:" + e);
-        throw e;
-      }
-      
-      return true;
-    }
-
-  public boolean hdfsExists(String filePathStr) throws Exception 
-  {
-    logger.debug("SequenceFileWriter.hdfsExists() - start");
-    logger.debug("SequenceFileWriter.hdfsExists() - Path: " + filePathStr);
-
-    try 
-    {
-        //check existence of the merge Path
-       Path filePath = new Path(filePathStr );
-       filePath = filePath.makeQualified(filePath.toUri(), null);
-       FileSystem mergeFs = FileSystem.get(filePath.toUri(),conf);
-       if (mergeFs.exists( filePath))
-       {
-       logger.debug("SequenceFileWriter.hdfsExists() - Path: "
-       + filePath + " exists" );
-         return true;
-       }
-
-    } catch (IOException e) {
-      logger.debug("SequenceFileWriter.hdfsExists() -exception:" + e);
-      throw e;
-    }
-    return false;
-  }
-
-  public boolean hdfsDeletePath(String pathStr) throws Exception
-  {
-    if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsDeletePath() - start - Path: " + pathStr);
-    try 
-    {
-      Path delPath = new Path(pathStr );
-      delPath = delPath.makeQualified(delPath.toUri(), null);
-      FileSystem fs = FileSystem.get(delPath.toUri(),conf);
-      fs.delete(delPath, true);
-    }
-    catch (IOException e)
-    {
-      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsDeletePath() --exception:" + e);
-      throw e;
-    }
-    
-    return true;
-  }
-
-  private boolean init(String zkServers, String zkPort) 
-      throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException, IOException
-  {
-    logger.debug("SequenceFileWriter.init(" + zkServers + ", " + zkPort + ") called.");
-    if (conf != null)		
-       return true;		
-    conf = HBaseConfiguration.create();		
-    if (zkServers.length() > 0)		
-      conf.set("hbase.zookeeper.quorum", zkServers);		
-    if (zkPort.length() > 0)		
-      conf.set("hbase.zookeeper.property.clientPort", zkPort);		
-    HBaseAdmin.checkHBaseAvailable(conf);
-    return true;
-  }
-  
-  public boolean createSnapshot( String tableName, String snapshotName)
-      throws MasterNotRunningException, IOException, SnapshotCreationException, 
-      InterruptedException, ZooKeeperConnectionException, ServiceException
-  {
-    try 
-    {
-      if (admin == null)
-        admin = new HBaseAdmin(conf);
-      admin.snapshot(snapshotName, tableName);
-      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.createSnapshot() - Snapshot created: " + snapshotName);
-    }
-    catch (Exception e)
-    {
-      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.createSnapshot() - Exception: " + e);
-      throw e;
-    }
-    return true;
-  }
-  public boolean verifySnapshot( String tableName, String snapshotName)
-      throws MasterNotRunningException, IOException, SnapshotCreationException, 
-      InterruptedException, ZooKeeperConnectionException, ServiceException
-  {
-    try 
-    {
-      if (admin == null)
-        admin = new HBaseAdmin(conf);
-      List<SnapshotDescription>  lstSnaps = admin.listSnapshots();
-
-      for (SnapshotDescription snpd : lstSnaps) 
-      {
-        if (snpd.getName().compareTo(snapshotName) == 0 && 
-            snpd.getTable().compareTo(tableName) == 0)
-        {
-          if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.verifySnapshot() - Snapshot verified: " + snapshotName);
-          return true;
-        }
-      }
-    }
-    catch (Exception e)
-    {
-      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.verifySnapshot() - Exception: " + e);
-      throw e;
-    }
-    return false;
-  }
- 
-  public boolean deleteSnapshot( String snapshotName)
-      throws MasterNotRunningException, IOException, SnapshotCreationException, 
-      InterruptedException, ZooKeeperConnectionException, ServiceException
-  {
-    try 
-    {
-      if (admin == null)
-        admin = new HBaseAdmin(conf);
-      admin.deleteSnapshot(snapshotName);
-      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.deleteSnapshot() - Snapshot deleted: " + snapshotName);
-    }
-    catch (Exception e)
-    {
-      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.deleteSnapshot() - Exception: " + e);
-      throw e;
-    }
-
-    return true;
-  }
-
-  public boolean release()  throws IOException
-  {
-    if (admin != null)
-    {
-      admin.close();
-      admin = null;
-    }
-    return true;
-  }
-}

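For reference, the deleted SequenceFileWriter above is a thin wrapper over Hadoop's
SequenceFile.Writer API. Below is a minimal sketch of the same open/append/close
pattern (not part of this change; it assumes the Hadoop 2.x writer options used by
the deleted code, and the output path and class name are hypothetical):

    // Minimal sketch of the SequenceFile.Writer usage wrapped by the deleted class.
    // Assumes Hadoop 2.x; the path and class name are illustrative only.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.BytesWritable;
    import org.apache.hadoop.io.SequenceFile;
    import org.apache.hadoop.io.SequenceFile.CompressionType;
    import org.apache.hadoop.io.Text;

    public class SequenceFileWriteSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path path = new Path("/tmp/example_seqfile");    // hypothetical output path
        SequenceFile.Writer writer = SequenceFile.createWriter(conf,
            SequenceFile.Writer.file(path),
            SequenceFile.Writer.keyClass(BytesWritable.class),
            SequenceFile.Writer.valueClass(Text.class),
            SequenceFile.Writer.compression(CompressionType.NONE));
        try {
          // Each append writes one key/value record; the deleted class used an
          // empty key and the delimited row text as the value.
          writer.append(new BytesWritable(), new Text("one delimited row"));
        } finally {
          writer.close();    // flush buffered data and release the stream
        }
      }
    }
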
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/executor/StringArrayList.java
----------------------------------------------------------------------
diff --git a/core/sql/executor/StringArrayList.java b/core/sql/executor/StringArrayList.java
deleted file mode 100644
index b0b7ed5..0000000
--- a/core/sql/executor/StringArrayList.java
+++ /dev/null
@@ -1,47 +0,0 @@
-// @@@ START COPYRIGHT @@@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// @@@ END COPYRIGHT @@@
-
-package org.trafodion.sql.HBaseAccess;
-
-import java.util.ArrayList;
-
-public class StringArrayList extends ArrayList<String> {
-
-	private static final long serialVersionUID = -3557219338406352735L;
-
-	void addElement(String st) {
-	        add(st);
-	}
-
-	String getElement(int i) {
-	    if (size() == 0)
-		return null;
-	    else if (i < size())
-		return get(i);
-	    else
-		return null;
-	}
-
-        int getSize() {
-           return size();
-	}
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/executor/org_trafodion_sql_HTableClient.h
----------------------------------------------------------------------
diff --git a/core/sql/executor/org_trafodion_sql_HTableClient.h b/core/sql/executor/org_trafodion_sql_HTableClient.h
new file mode 100644
index 0000000..e3c8837
--- /dev/null
+++ b/core/sql/executor/org_trafodion_sql_HTableClient.h
@@ -0,0 +1,43 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class org_trafodion_sql_HTableClient */
+
+#ifndef _Included_org_trafodion_sql_HTableClient
+#define _Included_org_trafodion_sql_HTableClient
+#ifdef __cplusplus
+extern "C" {
+#endif
+#undef org_trafodion_sql_HTableClient_GET_ROW
+#define org_trafodion_sql_HTableClient_GET_ROW 1L
+#undef org_trafodion_sql_HTableClient_BATCH_GET
+#define org_trafodion_sql_HTableClient_BATCH_GET 2L
+#undef org_trafodion_sql_HTableClient_SCAN_FETCH
+#define org_trafodion_sql_HTableClient_SCAN_FETCH 3L
+/*
+ * Class:     org_trafodion_sql_HTableClient
+ * Method:    setResultInfo
+ * Signature: (J[I[I[I[I[I[I[J[[B[[B[III)I
+ */
+JNIEXPORT jint JNICALL Java_org_trafodion_sql_HTableClient_setResultInfo
+  (JNIEnv *, jobject, jlong, jintArray, jintArray, jintArray, jintArray, jintArray, jintArray, jlongArray, jobjectArray, jobjectArray, jintArray, jint, jint);
+
+/*
+ * Class:     org_trafodion_sql_HTableClient
+ * Method:    cleanup
+ * Signature: (J)V
+ */
+JNIEXPORT void JNICALL Java_org_trafodion_sql_HTableClient_cleanup
+  (JNIEnv *, jobject, jlong);
+
+/*
+ * Class:     org_trafodion_sql_HTableClient
+ * Method:    setJavaObject
+ * Signature: (J)I
+ */
+JNIEXPORT jint JNICALL Java_org_trafodion_sql_HTableClient_setJavaObject
+  (JNIEnv *, jobject, jlong);
+
+#ifdef __cplusplus
+}
+#endif
+#endif

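The generated header above maps directly onto native method declarations in
org.trafodion.sql.HTableClient. Below is a sketch of the corresponding Java side;
only the JNI signatures are taken from the header, the parameter names are
hypothetical, and the real class has many more members. Such headers are normally
regenerated with the JDK's javah tool against the compiled class rather than edited
by hand:

    // Sketch fragment only: native declarations whose JNI signatures match the
    // prototypes in org_trafodion_sql_HTableClient.h. Parameter names are invented.
    package org.trafodion.sql;

    public class HTableClient {
      // (J[I[I[I[I[I[I[J[[B[[B[III)I
      native int setResultInfo(long jniObject,
          int[] a1, int[] a2, int[] a3, int[] a4, int[] a5, int[] a6,
          long[] a7, byte[][] a8, byte[][] a9, int[] a10,
          int a11, int a12);

      // (J)V
      native void cleanup(long jniObject);

      // (J)I
      native int setJavaObject(long jniObject);
    }
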
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/nskgmake/Makerules.build
----------------------------------------------------------------------
diff --git a/core/sql/nskgmake/Makerules.build b/core/sql/nskgmake/Makerules.build
index 629cfbb..af55cbe 100755
--- a/core/sql/nskgmake/Makerules.build
+++ b/core/sql/nskgmake/Makerules.build
@@ -47,11 +47,6 @@ LSRC           :=
 YSRC           :=
 YINC           :=
 LEX_PREFIX     := yy
-JSRC           :=
-SPECIAL_JSRC   :=
-JARPREFIX      :=
-JARFILE        :=
-JAR_APPEND     :=
 CFLAGS         :=
 CXXFLAGS       :=
 LDFLAGS        :=
@@ -242,15 +237,7 @@ ifdef INSTALL_OBJ
   $(call find_first,$(srcfile),$(SRCPATH)))))
 endif
 
-# These are the rules dealing with Java.
-JAVA_OBJS :=
-
-ifdef JSRC
-  $(eval $(call JAR_BUILD_template))
-endif
-
 # These dependencies will rebuild all of the objects in a directory if the
 # makefile for that directory changes.
 # $(OBJ_PATHS): $(OBJDIR)/Makefile
 # $(INSTALL_OBJ): $(OBJDIR)/Makefile
-# $(JAVA_OBJS): $(OBJDIR)/Makefile

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/nskgmake/Makerules.linux
----------------------------------------------------------------------
diff --git a/core/sql/nskgmake/Makerules.linux b/core/sql/nskgmake/Makerules.linux
index 2d07734..8bf6257 100755
--- a/core/sql/nskgmake/Makerules.linux
+++ b/core/sql/nskgmake/Makerules.linux
@@ -123,9 +123,6 @@ endif
 #      should be set up in other makefiles (e.g. sqlci and tdm_arkcmp makefiles).
 #EARLY_DLLS :=
 
-ifeq ($(SQ_BUILD_TYPE),debug)
-   JAVA_COMPILE_FLAGS=-g
-endif
 LIBHDFS_INC=-I$(HADOOP_INC_DIR)
 LIBHDFS_LIB=-ljvm -lhdfs
 LIBHDFS_SO=libhdfs.so
@@ -410,11 +407,6 @@ linuxcleandebug linuxdebugclean linuxcleanrelease linuxreleaseclean : clean
 	   echo rm -f $(NSKBIN)/$$i ;\
 	   rm -f $(NSKBIN)/$$i ;\
 	 done;\
-	 for i in $(notdir $(FINAL_JARS));\
-	 do \
-	   echo rm -f $(NSK_SQ)/export/lib/$$i ;\
-	   rm -f $(NSK_SQ)/export/lib/$$i ;\
-	 done;\
 	 for i in $(filter $(POSSIBLE_NO_EXPORT_EXE_NAMES),\
 	            $(notdir $(FINAL_EXES)));\
 	 do \
@@ -452,11 +444,6 @@ linuxmklinksdebug linuxmklinksrelease: copytoolslibs
 	   echo ln -sf $$OUTDIR/$$i $(NSKBIN);\
 	   ln -sf $$OUTDIR/$$i $(NSKBIN);\
 	 done;\
-	 for i in $(notdir $(FINAL_JARS));\
-	 do \
-	   echo ln -sf $$OUTDIR/$$i $(NSK_SQ)/export/lib/;\
-	   ln -sf $$OUTDIR/$$i $(NSK_SQ)/export/lib/;\
-	 done;\
 	 for i in $(filter $(POSSIBLE_NO_EXPORT_EXE_NAMES),\
 	            $(notdir $(FINAL_EXES)));\
 	 do \

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/nskgmake/Makerules.mk
----------------------------------------------------------------------
diff --git a/core/sql/nskgmake/Makerules.mk b/core/sql/nskgmake/Makerules.mk
index a0e93a2..206a298 100755
--- a/core/sql/nskgmake/Makerules.mk
+++ b/core/sql/nskgmake/Makerules.mk
@@ -70,8 +70,6 @@ SHELL := sh
 YACC       = export BISON_PKGDATADIR=$(TOPDIR)/toolbin/bison; export M4=$(TOPDIR)/toolbin/m4; $(TOPDIR)/toolbin/bison.exe -p $(YACC_VAR_PREFIX)
 LEX        = $(TOPDIR)/toolbin/flex.exe -P$(YACC_VAR_PREFIX)
 AWK       := awk.exe
-JAVAC     := $(JAVA_HOME)/bin/javac
-JAR       := $(JAVA_HOME)/bin/jar
 
 # Build everything by default
 .DEFAULT_GOAL := buildall
@@ -96,9 +94,6 @@ LINK_LIB_DLL_ECHO_RULE = @echo "Creating export file and DLL .lib file $@";
 LINK_DLL_ECHO_RULE = @echo "Linking DLL library $@";
 BUILD_RC_ECHO_RULE = @echo "Building resource file $@";
 LINK_EXE_ECHO_RULE = @echo "Linking executable $@";
-JAVAC_ECHO_RULE = @echo "Compiling $<";
-JAR_APPEND_ECHO_RULE = @echo "Appending to jar file $(JARFILE)";
-JAR_ECHO_RULE = @echo "Creating jar file $(JARFILE)";
 LEX_ECHO_RULE = @echo "Generating C++ code from lex file $<";
 YACC_ECHO_RULE = @echo "Generating C++ code from yacc file $<";
 GENERATE_ECHO_RULE = @echo "Generating file $@";
@@ -158,7 +153,6 @@ FINAL_LIBS :=
 FINAL_DLLS :=
 FINAL_EXES :=
 FINAL_INSTALL_OBJS :=
-FINAL_JARS :=
 
 # These rules are used as part of a mechanism to compile the files
 # located in different source locations.  This template is called from
@@ -188,67 +182,6 @@ $(C_OBJ) : C_OBJ:=$(C_OBJ)
 $(C_OBJ) : C_INC_OVERRIDE:=$(C_INC_OVERRIDE)
 endef
 
-# The build_java_rule rules and the JAVA_BUILD_template are used for
-# compiling java classes into class files.
-compile_java_rule = $(JAVAC) ${JAVA_COMPILE_FLAGS} -d $(TARGOBJDIR)/java -classpath '$(CLASSPATH)' $<
-
-append_jar_rule = cp $(JAR_APPEND) $$(JARFILE);$(JAR) uvmf $$(JARMANIFEST) $$(JARFILE) -C $$(TARGOBJDIR)/java $$(PACKAGE)
-compile_jar_rule = $(JAR) cvmf $$(JARMANIFEST) $$(JARFILE)_temp -C $$(TARGOBJDIR)/java $$(PACKAGE) -C $$(TARGOBJDIR)/java $$(ORCPACKAGE); mv -f $$(JARFILE)_temp $$(JARFILE)
-
-build_java_rule = $(JAVAC_ECHO_RULE) \
-		HEADING="Compiling $(<) --> $(@)"; $(starting_logfile) \
-		CMD="$(compile_java_rule)"; $(capture_output)
-
-define JAVA_BUILD_template
-  _dummy := $(if $(wildcard $(TARGOBJDIR)/java),,$(shell mkdir -p $(TARGOBJDIR)/java))
-  ifneq (.,$(PACKAGE))
-    $(TARGOBJDIR)/java/$(PACKAGE)/$(basename $(notdir $1)).class: $(1)
-	$$(build_java_rule)
-  else
-    $(TARGOBJDIR)/java/$(basename $(notdir $1)).class: $(1)
-	$$(build_java_rule)
-  endif
-endef
-
-# The build_jar_rule creates the .jar file from the individual .class files.
-define JAR_BUILD_template
-  ifneq (.,$(PACKAGE))
-  JAVA_OBJS := $$(patsubst %.java,$$(TARGOBJDIR)/java/$$(PACKAGE)/%.class,\
-                 $$(JSRC) $$(SPECIAL_JSRC))
-  else
-  JAVA_OBJS := $$(patsubst %.java,$$(TARGOBJDIR)/java/%.class,\
-                 $$(JSRC) $$(SPECIAL_JSRC))
-  PACKAGE := .
-  endif
-  JARFILE := $$(RESULTDIR)/$$(JARPREFIX).jar
-  FINAL_JARS += $$(JARFILE)
-
-  # Rules for building jar files
-  ifneq (,$(JAR_APPEND))
-  $$(JARFILE): $$(JAVA_OBJS) $(JAR_APPEND) $$(JARMANIFEST)
-	$$(JAR_APPEND_ECHO_RULE) \
-	HEADING="Compiling $$(<) --> $$(@)"; $$(starting_logfile) \
-	CMD="$(append_jar_rule)"; $$(capture_output)
-  else
-  $$(JARFILE): $$(JAVA_OBJS) $$(JARMANIFEST)
-	$$(JAR_ECHO_RULE) \
-	HEADING="Compiling $$(<) --> $$(@)"; $$(starting_logfile) \
-	CMD="$(compile_jar_rule)"; $$(capture_output)
-  endif
-
-  # Rules for compiling java files
-  $$(foreach srcfile,$$(JSRC),$$(eval $$(call JAVA_BUILD_template,\
-  $$(call find_first,$$(srcfile),$$(SRCPATH)))))
-
-  # Make sure these variables are instantiated correctly.
-  $$(JARFILE): TARGOBJDIR:=$$(TARGOBJDIR)
-  $$(JARFILE): JARFILE:=$$(JARFILE)
-  $$(JARFILE): PACKAGE:=$$(PACKAGE)
-  $$(JARFILE): CLASSPATH:=$$(CLASSPATH)
-  $$(JARFILE): JAVA_OBJS := $(JAVA_OBJS)
-    $$(JARFILE): JARMANIFEST := $(JARMANIFEST)
-endef
-
 compile_c_resultobj_rule = $(CXX) $(DEBUG_FLAGS) $(SQLCLIOPT) $(ALL_INCLUDES) -o $@ -c $<
 
 build_c_resultobj_rule = $(COMPILE_ECHO_RULE) \
@@ -335,16 +268,16 @@ endif
 
 .PHONY: $(MAKECMDGOALS)
 
-# DLL's should be build before executables, so we are adding this dependency here.
+# DLLs should be built before executables, so we are adding this dependency here.
 # $(FINAL_EXES): $(FINAL_DLLS)
 
-# Some (soon maybe all) Java files get built through Maven
+# Java files get built through Maven
 mavenbuild:
 	set -o pipefail && cd ..; $(MAVEN) -f pom.xml package -DskipTests | tee maven_build.log | grep -e '\[INFO\] Building' -e '\[INFO\] BUILD SUCCESS' -e 'ERROR'
 	cp -pf ../target/*.jar $(MY_SQROOT)/export/lib
 
 # This is where the top-level is declared to build everything.
-buildall: $(FINAL_LIBS) $(FINAL_DLLS) $(FINAL_INSTALL_OBJS) $(FINAL_JARS) $(FINAL_EXES) mavenbuild
+buildall: $(FINAL_LIBS) $(FINAL_DLLS) $(FINAL_INSTALL_OBJS) $(FINAL_EXES) mavenbuild
 
 clean:
 	@echo "Removing intermediate objects for $(TARGTYPE)/$(ARCHBITS)/$(FLAVOR)"

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/nskgmake/executor/Makefile
----------------------------------------------------------------------
diff --git a/core/sql/nskgmake/executor/Makefile b/core/sql/nskgmake/executor/Makefile
index d624b11..dd23a6e 100755
--- a/core/sql/nskgmake/executor/Makefile
+++ b/core/sql/nskgmake/executor/Makefile
@@ -139,57 +139,3 @@ EXTERN_LIBS := $(SP_EXPORT_LIB)/libwrappersq.so
 endif
 SYS_LIBS := -lrt -lpthread
 SRCPATH := bin executor runtimestats porting_layer qmscommon
-
-### Java stuff - for building trafodion-HBaseAccess
-################################################
-
-PACKAGE := org/trafodion/sql/HBaseAccess
-ORCPACKAGE := org/apache/hadoop/hive/ql/io/orc
-JARPREFIX := trafodion-HBaseAccess-$(TRAFODION_VER)
-
-JSRC := RowToInsert.java \
-        RowsToInsert.java \
-        ResultIterator.java \
-        StringArrayList.java \
-        ByteArrayList.java \
-        ResultKeyValueList.java \
-        SequenceFileWriter.java \
-        SequenceFileReader.java \
-        HTableClient.java \
-        HBaseClient.java \
-        HiveClient.java \
-        HBulkLoadClient.java \
-        OrcFileReader.java
-
-# Set up explicit dependencies (necessary for parallel builds... and for correctness).
-$(TARGOBJDIR)/java/$(PACKAGE)/HTableClient.class : $(TARGOBJDIR)/java/$(PACKAGE)/RowToInsert.class
-$(TARGOBJDIR)/java/$(PACKAGE)/HTableClient.class : $(TARGOBJDIR)/java/$(PACKAGE)/RowsToInsert.class
-$(TARGOBJDIR)/java/$(PACKAGE)/HTableClient.class : $(TARGOBJDIR)/java/$(PACKAGE)/ResultIterator.class
-$(TARGOBJDIR)/java/$(PACKAGE)/HTableClient.class : $(TARGOBJDIR)/java/$(PACKAGE)/ByteArrayList.class
-$(TARGOBJDIR)/java/$(PACKAGE)/HTableClient.class : $(TARGOBJDIR)/java/$(PACKAGE)/ResultKeyValueList.class
-
-$(TARGOBJDIR)/java/$(PACKAGE)/HBulkLoadClient.class : $(TARGOBJDIR)/java/$(PACKAGE)/RowToInsert.class
-$(TARGOBJDIR)/java/$(PACKAGE)/HBulkLoadClient.class : $(TARGOBJDIR)/java/$(PACKAGE)/RowsToInsert.class
-$(TARGOBJDIR)/java/$(PACKAGE)/HBulkLoadClient.class : $(TARGOBJDIR)/java/$(PACKAGE)/HTableClient.class
-
-$(TARGOBJDIR)/java/$(PACKAGE)/HBaseClient.class : $(TARGOBJDIR)/java/$(PACKAGE)/RowToInsert.class
-$(TARGOBJDIR)/java/$(PACKAGE)/HBaseClient.class : $(TARGOBJDIR)/java/$(PACKAGE)/RowsToInsert.class
-$(TARGOBJDIR)/java/$(PACKAGE)/HBaseClient.class : $(TARGOBJDIR)/java/$(PACKAGE)/ResultIterator.class
-$(TARGOBJDIR)/java/$(PACKAGE)/HBaseClient.class : $(TARGOBJDIR)/java/$(PACKAGE)/StringArrayList.class
-$(TARGOBJDIR)/java/$(PACKAGE)/HBaseClient.class : $(TARGOBJDIR)/java/$(PACKAGE)/ByteArrayList.class
-$(TARGOBJDIR)/java/$(PACKAGE)/HBaseClient.class : $(TARGOBJDIR)/java/$(PACKAGE)/ResultKeyValueList.class
-$(TARGOBJDIR)/java/$(PACKAGE)/HBaseClient.class : $(TARGOBJDIR)/java/$(PACKAGE)/HTableClient.class
-$(TARGOBJDIR)/java/$(PACKAGE)/HBaseClient.class : $(TARGOBJDIR)/java/$(PACKAGE)/HBulkLoadClient.class
-
-
-$(TARGOBJDIR)/java/$(PACKAGE)/HiveClient.class : $(TARGOBJDIR)/java/$(PACKAGE)/StringArrayList.class
-
-  JARMANIFEST := trafodion-HBaseAccess.jar.mf
-
-JSRC_DIR  := $(MY_SQROOT)/../sql/executor
-CLASSPATH := $(TARGOBJDIR)/java:$(JSRC_DIR):$(ENV_CLASSPATH)
-
-
-$(JARMANIFEST) : $(MY_SQROOT)/export/include/SCMBuildMan.mf $(TOPDIR)/executor/trafodion-HBaseAccess.jar.version
-	cat  $(TOPDIR)/executor/trafodion-HBaseAccess.jar.version >$@
-	cat $(MY_SQROOT)/export/include/SCMBuildMan.mf >>$@

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/nskgmake/ustat/Makefile
----------------------------------------------------------------------
diff --git a/core/sql/nskgmake/ustat/Makefile b/core/sql/nskgmake/ustat/Makefile
index 6fe06a1..78c8192 100755
--- a/core/sql/nskgmake/ustat/Makefile
+++ b/core/sql/nskgmake/ustat/Makefile
@@ -33,21 +33,8 @@ CPPSRC := hs_cli.cpp \
 	hs_util.cpp \
 	vers_libustat.cpp
 
-JSRC := ChgAutoList.java \
-	UstatUtil.java
-
-PACKAGE := com/hp/mx_ustat
-JARPREFIX := mx_ustat
-  JARMANIFEST := $(TOPDIR)/ustat/mx_ustat.jar.mf
-CLASSPATH := $(TARGOBJDIR)/java
-
 YSRC := hs_yacc.y
 LSRC := hs_lex.ll
 
 YACC_VAR_PREFIX := ystat
 LEX_PREFIX := ystat
-
-# Explicit dependencies needed
-$(JARMANIFEST) : $(MY_SQROOT)/export/include/SCMBuildMan.mf $(TOPDIR)/ustat/mx_ustat.jar.version
-	cat  $(TOPDIR)/ustat/mx_ustat.jar.version >$@
-	cat $(MY_SQROOT)/export/include/SCMBuildMan.mf >>$@

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/pom.xml
----------------------------------------------------------------------
diff --git a/core/sql/pom.xml b/core/sql/pom.xml
index 53901fd..82c1d11 100755
--- a/core/sql/pom.xml
+++ b/core/sql/pom.xml
@@ -20,13 +20,62 @@
  */
 -->
   <repositories>
+    <repository>
+      <id>cloudera</id>
+      <url>https://repository.cloudera.com/artifactory/cloudera-repos/</url>
+    </repository>
   </repositories>
 
   <properties>
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+    <hadoop.version>2.5.0</hadoop.version>
+    <hbase.version>0.98.1-cdh5.1.0</hbase.version>
+    <hbase-trx.id>hbase-trx-cdh5_3</hbase-trx.id>
+    <hive.version>0.13.1</hive.version>
+    <thrift.version>0.9.0</thrift.version>
     <java.version>1.7</java.version>
   </properties>
 
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-server</artifactId>
+      <version>${hbase.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-client</artifactId>
+      <version>${hbase.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase.client.transactional</groupId>
+      <artifactId>${hbase-trx.id}</artifactId>
+      <version>${env.TRAFODION_VER}</version>
+      <scope>system</scope>
+      <systemPath>${env.MY_SQROOT}/export/lib/${hbase-trx.id}-${env.TRAFODION_VER}.jar</systemPath>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-jdbc</artifactId>
+      <version>${hive.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-metastore</artifactId>
+      <version>${hive.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.thrift</groupId>
+      <artifactId>libthrift</artifactId>
+      <version>${thrift.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>com.google.protobuf</groupId>
+      <artifactId>protobuf-java</artifactId>
+      <version>2.5.0</version>
+    </dependency>
+  </dependencies>
+
   <groupId>org.trafodion.sql</groupId>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>trafodion-sql</artifactId>

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/qmscommon/QRLogger.cpp
----------------------------------------------------------------------
diff --git a/core/sql/qmscommon/QRLogger.cpp b/core/sql/qmscommon/QRLogger.cpp
index bd775e8..364c0d2 100644
--- a/core/sql/qmscommon/QRLogger.cpp
+++ b/core/sql/qmscommon/QRLogger.cpp
@@ -61,14 +61,14 @@ std::string CAT_SQL_HBASE                     =  "SQL.HBase";
 // these categories are currently not used 
 std::string CAT_SQL_QMP                       = "SQL.Qmp";
 std::string CAT_SQL_QMM                       = "SQL.Qmm";
-std::string CAT_SQL_COMP_QR_DESC_GEN          = "SQL.Comp.DescGen";
-std::string CAT_SQL_COMP_QR_HANDLER           = "SQL.Comp.QRHandler";
-std::string CAT_SQL_COMP_QR_COMMON            = "SQL.COMP.QRCommon";
-std::string CAT_SQL_COMP_QR_IPC               = "SQL.COMP.QRCommon.IPC";
-std::string CAT_SQL_COMP_MV_REFRESH           = "SQL.COMP.MV.REFRESH";
-std::string CAT_SQL_COMP_MVCAND               = "SQL.Comp.MVCandidates";
-std::string CAT_SQL_MEMORY                    = "SQL.Memory";
-std::string CAT_SQL_COMP_RANGE                = "SQL.COMP.Range";
+std::string CAT_SQL_COMP_QR_DESC_GEN          = "SQL.COMP"; // ".DescGen";
+std::string CAT_SQL_COMP_QR_HANDLER           = "SQL.COMP"; // ".QRHandler";
+std::string CAT_SQL_COMP_QR_COMMON            = "SQL.COMP"; // ".QRCommon";
+std::string CAT_SQL_COMP_QR_IPC               = "SQL.COMP"; // ".QRCommon.IPC";
+std::string CAT_SQL_COMP_MV_REFRESH           = "SQL.COMP"; // ".MV.REFRESH";
+std::string CAT_SQL_COMP_MVCAND               = "SQL.COMP"; // ".MVCandidates";
+std::string CAT_SQL_MEMORY                    = "SQL.COMP"; // ".Memory";
+std::string CAT_SQL_COMP_RANGE                = "SQL.COMP"; // ".Range";
 std::string CAT_QR_TRACER                     = "QRCommon.Tracer";
 std::string CAT_SQL_QMS                       = "SQL.Qms";
 std::string CAT_SQL_QMS_MAIN                  = "SQL.Qms.Main";
@@ -78,7 +78,7 @@ std::string CAT_SQL_MVMEMO_STATS              = "SQL.Qms.MvmemoStats";
 std::string CAT_SQL_QMS_GRP_LATTCE_INDX       = "SQL.Qms.LatticeIndex";
 std::string CAT_SQL_QMS_MATCHTST_MVDETAILS    = "SQL.Qms.MatchTest";
 std::string CAT_SQL_QMS_XML                   = "SQL.Qms.XML";
-std::string CAT_SQL_COMP_XML                  = "SQL.Comp.XML";
+std::string CAT_SQL_COMP_XML                  = "SQL.COMP"; // ".XML";
 
 // **************************************************************************
 // **************************************************************************

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/src/main/java/org/trafodion/sql/ByteArrayList.java
----------------------------------------------------------------------
diff --git a/core/sql/src/main/java/org/trafodion/sql/ByteArrayList.java b/core/sql/src/main/java/org/trafodion/sql/ByteArrayList.java
new file mode 100644
index 0000000..46b81fe
--- /dev/null
+++ b/core/sql/src/main/java/org/trafodion/sql/ByteArrayList.java
@@ -0,0 +1,54 @@
+// @@@ START COPYRIGHT @@@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// @@@ END COPYRIGHT @@@
+
+package org.trafodion.sql;
+
+import java.util.ArrayList;
+
+public class ByteArrayList extends ArrayList<byte[]> {
+
+	private static final long serialVersionUID = -3557219337406352735L;
+
+	void addElement(byte[] ba) {
+	        add(ba);
+	}
+
+	byte[] getElement(int i) {
+	    if (size() == 0)
+		return null;
+	    else if (i < size())
+		return get(i);
+	    else
+		return null;
+	}
+
+        int getSize() {
+           return size();
+	}
+
+        int getEntrySize(int i) {
+          return get(i).length;
+        }
+
+        byte[] getEntry(int i) {
+          return get(i);
+        }
+}

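Like the removed StringArrayList, the new ByteArrayList exposes small,
non-overloaded accessors (getSize, getEntry, getEntrySize), presumably to keep the
JNI call sites in the C++ executor simple. A short usage sketch follows (not part
of this change; it sits in the same package because the accessors are
package-private, and the class name is hypothetical):

    // Illustrative use of the accessors ByteArrayList adds on top of ArrayList<byte[]>.
    package org.trafodion.sql;

    public class ByteArrayListSketch {
      public static void main(String[] args) {
        ByteArrayList list = new ByteArrayList();
        list.addElement("row-1".getBytes());                // append one byte[] entry
        System.out.println(list.getSize());                 // 1
        System.out.println(list.getEntrySize(0));           // 5, length of the entry
        System.out.println(new String(list.getEntry(0)));   // row-1
        System.out.println(list.getElement(7));             // null for an out-of-range index
      }
    }
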

[6/9] incubator-trafodion git commit: Most of the Trafodion Java source files are built through Maven, using projects DCS, REST, HBase-trx and SQL. A few files remain in the core/sql/executor and core/sql/ustat directories that are built through javac co

Posted by db...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/executor/HBulkLoadClient.java
----------------------------------------------------------------------
diff --git a/core/sql/executor/HBulkLoadClient.java b/core/sql/executor/HBulkLoadClient.java
deleted file mode 100644
index ff574d4..0000000
--- a/core/sql/executor/HBulkLoadClient.java
+++ /dev/null
@@ -1,533 +0,0 @@
-// @@@ START COPYRIGHT @@@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// @@@ END COPYRIGHT @@@
-
-package org.trafodion.sql.HBaseAccess;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Iterator;
-import java.io.File;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.log4j.PropertyConfigurator;
-import org.apache.log4j.Logger;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.security.access.AccessController;
-import org.apache.hadoop.hbase.security.access.UserPermission;
-import org.apache.hadoop.hbase.security.access.Permission;
-import org.apache.hadoop.hbase.MasterNotRunningException;
-import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type;
-import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.io.hfile.HFile;
-import org.apache.hadoop.hbase.io.hfile.HFileContext;
-import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
-import org.apache.hadoop.hbase.io.compress.*;
-import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
-import org.apache.hadoop.hbase.regionserver.BloomType; 
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.trafodion.sql.HBaseAccess.HTableClient;
-//import org.trafodion.sql.HBaseAccess.HBaseClient;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.io.compress.CodecPool;
-import org.apache.hadoop.io.compress.Compressor;
-import org.apache.hadoop.io.compress.GzipCodec;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.hadoop.hbase.TableName;
-
-import java.nio.ByteBuffer;
-import java.nio.file.Files;
-
-import org.apache.hive.jdbc.HiveDriver;
-import java.sql.Connection;
-import java.sql.Statement;
-import java.sql.DriverManager;
-import java.sql.SQLException;
-import java.lang.ClassNotFoundException;
-
-public class HBulkLoadClient
-{
-  
-  private final static FsPermission PERM_ALL_ACCESS = FsPermission.valueOf("-rwxrwxrwx");
-  private final static FsPermission PERM_HIDDEN = FsPermission.valueOf("-rwx--x--x");
-  private final static String BULKLOAD_STAGING_DIR = "hbase.bulkload.staging.dir";
-  private final static long MAX_HFILE_SIZE = 10737418240L; //10 GB
-  
-  public static int BLOCKSIZE = 64*1024;
-  public static String COMPRESSION = Compression.Algorithm.NONE.getName();
-  String lastError;
-  static Logger logger = Logger.getLogger(HBulkLoadClient.class.getName());
-  Configuration config;
-  HFile.Writer writer;
-  String hFileLocation;
-  String hFileName;
-  long maxHFileSize = MAX_HFILE_SIZE;
-  FileSystem fileSys = null;
-  String compression = COMPRESSION;
-  int blockSize = BLOCKSIZE;
-  DataBlockEncoding dataBlockEncoding = DataBlockEncoding.NONE;
-  FSDataOutputStream fsOut = null;
-
-  public HBulkLoadClient()
-  {
-    if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.HBulkLoadClient() called.");
-  }
-
-  public HBulkLoadClient(Configuration conf) throws IOException
-  {
-    if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.HBulkLoadClient(...) called.");
-    config = conf;
-  }
-
-  public String getLastError() {
-    return lastError;
-  }
-
-  void setLastError(String err) {
-      lastError = err;
-  }
-  public boolean initHFileParams(String hFileLoc, String hFileNm, long userMaxSize /*in MBs*/, String tblName,
-                                 String sampleTblName, String sampleTblDDL) 
-  throws UnsupportedOperationException, IOException, SQLException, ClassNotFoundException
-  {
-    if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.initHFileParams() called.");
-    
-    hFileLocation = hFileLoc;
-    hFileName = hFileNm;
-    
-    HTable myHTable = new HTable(config, tblName);
-    HTableDescriptor hTbaledesc = myHTable.getTableDescriptor();
-    HColumnDescriptor[] hColDescs = hTbaledesc.getColumnFamilies();
-    if (hColDescs.length > 2 )  // 2 column families: 1 for user data, 1 for transaction metadata
-    {
-      myHTable.close();
-      throw new UnsupportedOperationException ("only two families are supported.");
-    }
-    
-    compression= hColDescs[0].getCompression().getName();
-    blockSize= hColDescs[0].getBlocksize();
-    dataBlockEncoding = hColDescs[0].getDataBlockEncoding();
-    
-    if (userMaxSize == 0)
-    {
-      if (hTbaledesc.getMaxFileSize()==-1)
-      {
-        maxHFileSize = MAX_HFILE_SIZE;
-      }
-      else
-      {
-        maxHFileSize = hTbaledesc.getMaxFileSize();
-      }
-    }
-    else 
-      maxHFileSize = userMaxSize * 1024 *1024;  //maxSize is in MBs
-
-    myHTable.close();
-
-    if (sampleTblDDL.length() > 0)
-    {
-      Class.forName("org.apache.hive.jdbc.HiveDriver");
-      Connection conn = DriverManager.getConnection("jdbc:hive2://", "hive", "");
-      Statement stmt = conn.createStatement();
-      stmt.execute("drop table if exists " + sampleTblName);
-      //System.out.println("*** DDL for Hive sample table is: " + sampleTblDDL);
-      stmt.execute(sampleTblDDL);
-    }
-
-    return true;
-  }
-  public boolean doCreateHFile() throws IOException, URISyntaxException
-  {
-    if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.doCreateHFile() called.");
-    
-    if (hFileLocation == null )
-      throw new NullPointerException(hFileLocation + " is not set");
-    if (hFileName == null )
-      throw new NullPointerException(hFileName + " is not set");
-    
-    closeHFile();
-    
-    if (fileSys == null)
-     fileSys = FileSystem.get(config); 
-
-    Path hfilePath = new Path(new Path(hFileLocation ), hFileName + "_" +  System.currentTimeMillis());
-    hfilePath = hfilePath.makeQualified(hfilePath.toUri(), null);
-
-    if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.createHFile Path: " + hfilePath);
-
-    try
-    {
-      HFileContext hfileContext = new HFileContextBuilder()
-                                 .withBlockSize(blockSize)
-                                 .withCompression(Compression.getCompressionAlgorithmByName(compression))
-                                 .withDataBlockEncoding(dataBlockEncoding)
-                                 .build();
-
-      writer =    HFile.getWriterFactory(config, new CacheConfig(config))
-                     .withPath(fileSys, hfilePath)
-                     .withFileContext(hfileContext)
-                     .withComparator(KeyValue.COMPARATOR)
-                     .create();
-      if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.createHFile Path: " + writer.getPath() + "Created");
-    }
-    catch (IOException e)
-    {
-       if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.doCreateHFile Exception" + e.getMessage());
-       throw e;
-    }
-    return true;
-  }
-  
-  public boolean isNewFileNeeded() throws IOException
-  {
-    if (writer == null)
-      return true;
-    
-    if (fileSys == null)
-      fileSys = FileSystem.get(writer.getPath().toUri(),config);
-    
-    if (fileSys.getFileStatus(writer.getPath()).getLen() > maxHFileSize)
-     return true;
-
-    return false;
-  }
-
-  public boolean addToHFile(short rowIDLen, Object rowIDs,
-                Object rows) throws IOException, URISyntaxException
-  {
-     if (logger.isDebugEnabled()) logger.debug("Enter addToHFile() ");
-     Put put;
-    if (isNewFileNeeded())
-    {
-      doCreateHFile();
-    }
-     ByteBuffer bbRows, bbRowIDs;
-     short numCols, numRows;
-     short colNameLen;
-     int colValueLen;
-     byte[] colName, colValue, rowID;
-     short actRowIDLen;
-
-     bbRowIDs = (ByteBuffer)rowIDs;
-     bbRows = (ByteBuffer)rows;
-     numRows = bbRowIDs.getShort();
-     HTableClient htc = new HTableClient();
-     long now = System.currentTimeMillis();
-     for (short rowNum = 0; rowNum < numRows; rowNum++) 
-     {
-        byte rowIDSuffix  = bbRowIDs.get();
-        if (rowIDSuffix == '1')
-           actRowIDLen = (short)(rowIDLen+1);
-        else
-           actRowIDLen = rowIDLen;
-        rowID = new byte[actRowIDLen];
-        bbRowIDs.get(rowID, 0, actRowIDLen);
-        numCols = bbRows.getShort();
-        for (short colIndex = 0; colIndex < numCols; colIndex++)
-        {
-            colNameLen = bbRows.getShort();
-            colName = new byte[colNameLen];
-            bbRows.get(colName, 0, colNameLen);
-            colValueLen = bbRows.getInt();
-            colValue = new byte[colValueLen];
-            bbRows.get(colValue, 0, colValueLen);
-            KeyValue kv = new KeyValue(rowID,
-                                htc.getFamily(colName), 
-                                htc.getName(colName), 
-                                now,
-                                colValue);
-            writer.append(kv);
-        } 
-    }
-    if (logger.isDebugEnabled()) logger.debug("End addToHFile() ");
-       return true;
-  }
-
-  public boolean closeHFile() throws IOException
-  {
-    if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.closeHFile() called." + ((writer == null) ? "NULL" : "NOT NULL"));
-
-    if (writer == null)
-      return false;
-    
-    writer.close();
-    return true;
-  }
-
-  private boolean createSnapshot( String tableName, String snapshotName)
-  throws MasterNotRunningException, IOException, SnapshotCreationException, InterruptedException
-  {
-    HBaseAdmin admin = null;
-    try 
-    {
-      admin = new HBaseAdmin(config);
-      List<SnapshotDescription>  lstSnaps = admin.listSnapshots();
-      if (! lstSnaps.isEmpty())
-      {
-        for (SnapshotDescription snpd : lstSnaps) 
-        {
-            if (snpd.getName().compareTo(snapshotName) == 0)
-            {
-              if (logger.isDebugEnabled()) logger.debug("HbulkLoadClient.createSnapshot() -- deleting: " + snapshotName + " : " + snpd.getName());
-              admin.deleteSnapshot(snapshotName);
-            }
-        }
-      }
-      admin.snapshot(snapshotName, tableName);
-   }
-    catch (Exception e)
-    {
-      //log exception and throw the exception again to the parent
-      if (logger.isDebugEnabled()) logger.debug("HbulkLoadClient.createSnapshot() - Exception: " + e);
-      throw e;
-    }
-    finally
-    {
-      //close HBaseAdmin instance 
-      if (admin !=null)
-        admin.close();
-    }
-    return true;
-  }
-  
-  private boolean restoreSnapshot( String snapshotName, String tableName)
-  throws IOException, RestoreSnapshotException
-  {
-    HBaseAdmin admin = null;
-    try
-    {
-      admin = new HBaseAdmin(config);
-      if (! admin.isTableDisabled(tableName))
-          admin.disableTable(tableName);
-      
-      admin.restoreSnapshot(snapshotName);
-  
-      admin.enableTable(tableName);
-    }
-    catch (Exception e)
-    {
-      //log exception and throw the exception again to the parent
-      if (logger.isDebugEnabled()) logger.debug("HbulkLoadClient.restoreSnapshot() - Exception: " + e);
-      throw e;
-    }
-    finally
-    {
-      //close HBaseAdmin instance 
-      if (admin != null) 
-        admin.close();
-    }
-
-    return true;
-  }
-  private boolean deleteSnapshot( String snapshotName, String tableName)
-      throws IOException
-  {
-    
-    HBaseAdmin admin = null;
-    boolean snapshotExists = false;
-    try
-    {
-      admin = new HBaseAdmin(config);
-      List<SnapshotDescription>  lstSnaps = admin.listSnapshots();
-      if (! lstSnaps.isEmpty())
-      {
-        for (SnapshotDescription snpd : lstSnaps) 
-        {
-          //System.out.println("here 1: " + snapshotName + snpd.getName());
-          if (snpd.getName().compareTo(snapshotName) == 0)
-          {
-            //System.out.println("deleting: " + snapshotName + " : " + snpd.getName());
-            snapshotExists = true;
-            break;
-          }
-        }
-      }
-      if (!snapshotExists)
-        return true;
-      if (admin.isTableDisabled(tableName))
-          admin.enableTable(tableName);
-      admin.deleteSnapshot(snapshotName);
-    }
-    catch (Exception e)
-    {
-      //log exception and throw the exception again to the parent
-      if (logger.isDebugEnabled()) logger.debug("HbulkLoadClient.restoreSnapshot() - Exception: " + e);
-      throw e;
-    }
-    finally 
-    {
-      //close HBaseAdmin instance 
-      if (admin != null) 
-        admin.close();
-    }
-    return true;
-  }
-  
-  private void doSnapshotNBulkLoad(Path hFilePath, String tableName, HTable table, LoadIncrementalHFiles loader, boolean snapshot)
-  throws MasterNotRunningException, IOException, SnapshotCreationException, InterruptedException, RestoreSnapshotException
-  {
-    HBaseAdmin admin = new HBaseAdmin(config);
-    String snapshotName= null;
-    if (snapshot)
-    {
-      snapshotName = tableName + "_SNAPSHOT";
-      createSnapshot(tableName, snapshotName);
-      if (logger.isDebugEnabled()) logger.debug("HbulkLoadClient.doSnapshotNBulkLoad() - snapshot created: " + snapshotName);
-    }
-    try
-    {
-      if (logger.isDebugEnabled()) logger.debug("HbulkLoadClient.doSnapshotNBulkLoad() - bulk load started ");
-      loader.doBulkLoad(hFilePath, table);
-      if (logger.isDebugEnabled()) logger.debug("HbulkLoadClient.doSnapshotNBulkLoad() - bulk load is done ");
-    }
-    catch (IOException e)
-    {
-      if (logger.isDebugEnabled()) logger.debug("HbulkLoadClient.doSnapshotNBulkLoad() - Exception: " + e.toString());
-      if (snapshot)
-      {
-        restoreSnapshot(snapshotName, tableName);
-        if (logger.isDebugEnabled()) logger.debug("HbulkLoadClient.doSnapshotNBulkLoad() - snapshot restored: " + snapshotName);
-        deleteSnapshot(snapshotName, tableName);
-        if (logger.isDebugEnabled()) logger.debug("HbulkLoadClient.doSnapshotNBulkLoad() - snapshot deleted: " + snapshotName);
-        throw e;
-      }
-    }
-    finally
-    {
-      if  (snapshot)
-      {
-        deleteSnapshot(snapshotName, tableName);
-        if (logger.isDebugEnabled()) logger.debug("HbulkLoadClient.doSnapshotNBulkLoad() - snapshot deleted: " + snapshotName);
-      }
-    }
-    
-  }
-  public boolean doBulkLoad(String prepLocation, String tableName, boolean quasiSecure, boolean snapshot) throws Exception
-  {
-    if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.doBulkLoad() - start");
-    if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.doBulkLoad() - Prep Location: " + prepLocation + 
-                                             ", Table Name:" + tableName + 
-                                             ", quasisecure : " + quasiSecure +
-                                             ", snapshot: " + snapshot);
-
-      
-    HTable table = new HTable(config, tableName);
-    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(config);    
-    Path prepPath = new Path(prepLocation );
-    prepPath = prepPath.makeQualified(prepPath.toUri(), null);
-    FileSystem prepFs = FileSystem.get(prepPath.toUri(),config);
-    
-    Path[] hFams = FileUtil.stat2Paths(prepFs.listStatus(prepPath));
-
-    if (quasiSecure)
-    {
-      throw new Exception("HBulkLoadClient.doBulkLoad() - cannot perform load. Trafodion on secure HBase mode is not implemented yet");
-    }
-    else
-    {
-      if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.doBulkLoad() - adjusting hfiles permissions");
-      for (Path hfam : hFams) 
-      {
-         Path[] hfiles = FileUtil.stat2Paths(prepFs.listStatus(hfam));
-         prepFs.setPermission(hfam,PERM_ALL_ACCESS );
-         for (Path hfile : hfiles)
-         {
-           if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.doBulkLoad() - adjusting hfile permissions:" + hfile);
-           prepFs.setPermission(hfile,PERM_ALL_ACCESS);
-           
-         }
-         //create _tmp dir used as temp space for Hfile processing
-         FileSystem.mkdirs(prepFs, new Path(hfam,"_tmp"), PERM_ALL_ACCESS);
-      }
-      if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.doBulkLoad() - bulk load started. Loading directly from preparation directory");
-      doSnapshotNBulkLoad(prepPath,tableName,  table,  loader,  snapshot);
-      if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.doBulkLoad() - bulk load is done ");
-    }
-    return true;
-  }
-
-  public boolean bulkLoadCleanup(String location) throws Exception
-  {
-      Path dir = new Path(location );
-      dir = dir.makeQualified(dir.toUri(), null);
-      FileSystem fs = FileSystem.get(dir.toUri(),config);
-      fs.delete(dir, true);
-      
-      return true;
-
-  }
-  
-  public boolean release( ) throws IOException {
-    if (writer != null)
-    {
-       writer.close();
-       writer = null;
-    }
-    if (fileSys !=null)
-    {
-      fileSys.close();
-      fileSys = null;
-    }
-    if (config != null) 
-    {
-      config = null;
-    }
-    if (hFileLocation != null)
-    {
-      hFileLocation = null;
-    }
-    if (hFileName != null)
-    {
-      hFileName = null;
-    }
-
-    if (compression != null)
-    {
-      compression = null;
-    }
-    return true;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/executor/HTableClient.h
----------------------------------------------------------------------
diff --git a/core/sql/executor/HTableClient.h b/core/sql/executor/HTableClient.h
deleted file mode 100644
index 1a0faa6..0000000
--- a/core/sql/executor/HTableClient.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/* DO NOT EDIT THIS FILE - it is machine generated */
-#include <jni.h>
-/* Header for class org_trafodion_sql_HBaseAccess_HTableClient */
-
-#ifndef _Included_org_trafodion_sql_HBaseAccess_HTableClient
-#define _Included_org_trafodion_sql_HBaseAccess_HTableClient
-#ifdef __cplusplus
-extern "C" {
-#endif
-#undef org_trafodion_sql_HBaseAccess_HTableClient_GET_ROW
-#define org_trafodion_sql_HBaseAccess_HTableClient_GET_ROW 1L
-#undef org_trafodion_sql_HBaseAccess_HTableClient_BATCH_GET
-#define org_trafodion_sql_HBaseAccess_HTableClient_BATCH_GET 2L
-#undef org_trafodion_sql_HBaseAccess_HTableClient_SCAN_FETCH
-#define org_trafodion_sql_HBaseAccess_HTableClient_SCAN_FETCH 3L
-/*
- * Class:     org_trafodion_sql_HBaseAccess_HTableClient
- * Method:    setResultInfo
- * Signature: (J[I[I[I[I[I[I[J[[B[[B[II)I
- */
-JNIEXPORT jint JNICALL Java_org_trafodion_sql_HBaseAccess_HTableClient_setResultInfo
-  (JNIEnv *, jobject, jlong, jintArray, jintArray, jintArray, jintArray, jintArray, jintArray, jlongArray, jobjectArray, jobjectArray, jintArray, jint, jint);
-
-/*
- * Class:     org_trafodion_sql_HBaseAccess_HTableClient
- * Method:    cleanup
- * Signature: (J)V
- */
-JNIEXPORT void JNICALL Java_org_trafodion_sql_HBaseAccess_HTableClient_cleanup
-  (JNIEnv *, jobject, jlong);
-
-/*
- * Class:     org_trafodion_sql_HBaseAccess_HTableClient
- * Method:    setJavaObject
- * Signature: (J)I
- */
-JNIEXPORT jint JNICALL Java_org_trafodion_sql_HBaseAccess_HTableClient_setJavaObject
-  (JNIEnv *, jobject, jlong);
-
-#ifdef __cplusplus
-}
-#endif
-#endif
-/* Header for class org_trafodion_sql_HBaseAccess_HTableClient_ScanHelper */
-
-#ifndef _Included_org_trafodion_sql_HBaseAccess_HTableClient_ScanHelper
-#define _Included_org_trafodion_sql_HBaseAccess_HTableClient_ScanHelper
-#ifdef __cplusplus
-extern "C" {
-#endif
-#ifdef __cplusplus
-}
-#endif
-#endif
-/* Header for class org_trafodion_sql_HBaseAccess_HTableClient_SnapshotScanHelper */
-
-#ifndef _Included_org_trafodion_sql_HBaseAccess_HTableClient_SnapshotScanHelper
-#define _Included_org_trafodion_sql_HBaseAccess_HTableClient_SnapshotScanHelper
-#ifdef __cplusplus
-extern "C" {
-#endif
-#ifdef __cplusplus
-}
-#endif
-#endif

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/executor/HTableClient.java
----------------------------------------------------------------------
diff --git a/core/sql/executor/HTableClient.java b/core/sql/executor/HTableClient.java
deleted file mode 100644
index a1bb00f..0000000
--- a/core/sql/executor/HTableClient.java
+++ /dev/null
@@ -1,1334 +0,0 @@
-// @@@ START COPYRIGHT @@@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// @@@ END COPYRIGHT @@@
-
-package org.trafodion.sql.HBaseAccess;
-import org.trafodion.sql.HBaseAccess.*;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.NavigableSet;
-
-import java.util.concurrent.Callable;
-import java.util.concurrent.Future;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.nio.ByteBuffer;
-import java.nio.LongBuffer;
-import java.nio.ByteOrder;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.client.coprocessor.AggregationClient;
-import org.apache.hadoop.hbase.client.transactional.RMInterface;
-import org.apache.hadoop.hbase.client.transactional.TransactionalAggregationClient;
-import org.apache.hadoop.hbase.client.transactional.TransactionState;
-
-import org.apache.log4j.Logger;
-
-// H98 coprocessor needs
-import java.util.*;
-import org.apache.hadoop.hbase.*;
-import org.apache.hadoop.hbase.client.*;
-import org.apache.hadoop.hbase.client.coprocessor.*;
-import org.apache.hadoop.hbase.coprocessor.*;
-import org.apache.hadoop.hbase.coprocessor.example.*;
-import org.apache.hadoop.hbase.ipc.*;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.*;
-import org.apache.hadoop.hbase.util.*;
-
-//import org.apache.hadoop.hbase.client.coprocessor.AggregationClient;
-import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter;
-import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter;
-
-// classes to do column value filtering
-import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.RandomRowFilter;
-
-import org.apache.hadoop.hbase.client.TableSnapshotScanner;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileUtil;
-import java.util.UUID;
-import java.security.InvalidParameterException;
-
-public class HTableClient {
-	private static final int GET_ROW = 1;
-	private static final int BATCH_GET = 2;
-	private static final int SCAN_FETCH = 3;
-	private boolean useTRex;
-	private boolean useTRexScanner;
-	private String tableName;
-
-	private ResultScanner scanner = null;
-        private ScanHelper scanHelper = null;
-	Result[] getResultSet = null;
-	String lastError;
-        RMInterface table = null;
-        ByteArrayList coprocAggrResult = null;
-        private boolean writeToWAL = false;
-	int numRowsCached = 1;
-	int numColsInScan = 0;
-	int[] kvValLen = null;
-	int[] kvValOffset = null;
-	int[] kvQualLen = null;
-	int[] kvQualOffset = null;
-	int[] kvFamLen = null;
-	int[] kvFamOffset = null;
-	long[] kvTimestamp = null;
-	byte[][] kvBuffer = null;
-	byte[][] rowIDs = null;
-	int[] kvsPerRow = null;
-        static ExecutorService executorService = null;
-        Future future = null;
-	boolean preFetch = false;
-	int fetchType = 0;
-	long jniObject = 0;
-	SnapshotScanHelper snapHelper = null;
-
-	 class SnapshotScanHelper
-	 {
-	   Path snapRestorePath = null;
-	   HBaseAdmin admin  = null;
-	   Configuration conf = null;
-	   SnapshotDescription snpDesc = null;
-	   String tmpLocation = null;
-	   FileSystem fs  = null;
-
-	   SnapshotScanHelper( Configuration cnfg , String tmpLoc, String snapName) 
-	       throws IOException
-	   {
-	     conf = cnfg;
-	     admin = new HBaseAdmin(conf);
-	     tmpLocation = tmpLoc;
-	     setSnapshotDescription(snapName);
-	     Path rootDir = new Path(conf.get(HConstants.HBASE_DIR));
-	     fs = rootDir.getFileSystem(conf);
-	     setSnapRestorePath();
-	   }
-
-	   String getTmpLocation()
-	   {
-	     return tmpLocation;
-	   }
-	   String getSnapshotName()
-	   {
-	     if (snpDesc == null)
-	       return null;
-	     return snpDesc.getName();
-	   }
-	   void setSnapRestorePath() throws IOException
-	   {
-	     String restoreDirStr = tmpLocation + getSnapshotDescription().getName();
-	     snapRestorePath = new Path(restoreDirStr);
-	     snapRestorePath = snapRestorePath.makeQualified(fs.getUri(), snapRestorePath);
-	   }
-	   Path getSnapRestorePath() throws IOException
-	   {
-	     return snapRestorePath;
-	   }
-	   boolean snapshotExists() throws IOException
-	   {
-	     if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.snapshotExists() called. ");
-	     return !admin.listSnapshots(snpDesc.getName()).isEmpty();
-	   }
-	   void deleteSnapshot() throws IOException
-	   {
-	     if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.deleteSnapshot() called. ");
-	     if (snapshotExists())
-	     {
-	       admin.deleteSnapshot(snpDesc.getName());
-	       if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.deleteSnapshot(). snapshot: " + snpDesc.getName() + " deleted.");
-	     }
-	     else
-	     {
-	       if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.deleteSnapshot(). snapshot: " + snpDesc.getName() + " does not exist.");
-	     }
-	   }
-	   void deleteRestorePath() throws IOException
-	   {
-	     if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.deleteRestorePath() called. ");
-	     if (fs.exists(snapRestorePath))
-	     {
-	       fs.delete(snapRestorePath, true);
-	       if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.deleteRestorePath(). restorePath: " + snapRestorePath + " deleted.");
-	     }
-	     else
-	     {
-	       if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.deleteRestorePath(). restorePath: " + snapRestorePath  + " does not exist.");
-	     }
-	   }
-	   
-	   void createTableSnapshotScanner(int timeout, int slp, long nbre, Scan scan) throws InterruptedException
-	   {
-	     if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.createTableSnapshotScanner() called. ");
-	     int xx=0;
-	     while (xx < timeout)
-	     {
-         xx++;
-	       scanner = null;
-	       try
-	       {
-	         scanner = new TableSnapshotScanner(table.getConfiguration(), snapHelper.getSnapRestorePath(), snapHelper.getSnapshotName(), scan);
-	       }
-	       catch(IOException e )
-	       {
-	         if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.createTableSnapshotScanner(). espNumber: " + nbre  + 
-	             " snapshot " + snpDesc.getName() + " TableSnapshotScanner Exception :" + e);
-	         Thread.sleep(slp);
-	         continue;
-	       }
-	       if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.createTableSnapshotScanner(). espNumber: " + 
-	           nbre + " snapshot " + snpDesc.getName() +  " TableSnapshotScanner Done - Scanner:" + scanner );
-	       break;
-	     }
-	   }
-	   void setSnapshotDescription( String snapName)
-	   {
-       if (snapName == null )
-         throw new InvalidParameterException ("snapshotName is null.");
-       
-	     SnapshotDescription.Builder builder = SnapshotDescription.newBuilder();
-	     builder.setTable(Bytes.toString(table.getTableName()));
-	     builder.setName(snapName);
-	     builder.setType(SnapshotDescription.Type.FLUSH);
-	     snpDesc = builder.build();
-	   }
-	   SnapshotDescription getSnapshotDescription()
-	   {
-	     return snpDesc;
-	   }
-
-	   public void release() throws IOException
-	   {
-	     if (admin != null)
-	     {
-	       admin.close();
-	       admin = null;
-	     }
-	   }
-	 }
-
-	class ScanHelper implements Callable {
-            public Result[] call() throws Exception {
-                return scanner.next(numRowsCached);
-            }
-        }
-	 
-	static Logger logger = Logger.getLogger(HTableClient.class.getName());
-
-        static public  byte[] getFamily(byte[] qc) {
-	   byte[] family = null;
-
-	   if (qc != null && qc.length > 0) {
-	       int pos = Bytes.indexOf(qc, (byte) ':');
-	       if (pos == -1) 
-	          family = Bytes.toBytes("cf1");
-	       else
-	          family = Arrays.copyOfRange(qc, 0, pos);
-           }	
-	   return family;
-	}
-
-        static public byte[] getName(byte[] qc) {
-	   byte[] name = null;
-
-	   if (qc != null && qc.length > 0) {
-	      int pos = Bytes.indexOf(qc, (byte) ':');
-	      if (pos == -1) 
-	         name = qc;
-	      else
-	         name = Arrays.copyOfRange(qc, pos + 1, qc.length);
-	   }	
-	   return name;
-	}
-
-	public boolean setWriteBufferSize(long writeBufferSize) throws IOException {
-		if (logger.isDebugEnabled()) logger.debug("Enter HTableClient::setWriteBufferSize, size  : " + writeBufferSize);
-	    table.setWriteBufferSize(writeBufferSize);
-	    return true;
-	  }
-	 public long getWriteBufferSize() {
-		 if (logger.isDebugEnabled()) logger.debug("Enter HTableClient::getWriteBufferSize, size return : " + table.getWriteBufferSize());
-		 return table.getWriteBufferSize();
-	 }
-	public boolean setWriteToWAL(boolean v) {
-		if (logger.isDebugEnabled()) logger.debug("Enter HTableClient::setWriteToWAL, value: " + v);
-	    writeToWAL = v;
-	    return true;
-	  }
- 
-	public boolean init(String tblName,
-              boolean useTRex) throws IOException 
-        {
-	    if (logger.isDebugEnabled()) logger.debug("Enter HTableClient::init, tableName: " + tblName);
-	    this.useTRex = useTRex;
-	    tableName = tblName;
-	    
-	    if ( !this.useTRex ) {
-		this.useTRexScanner = false;
-	    }
-	    else {
-
-		// If the parameter useTRex is false, then do not go through this logic
-
-		String useTransactions = System.getenv("USE_TRANSACTIONS");
-		if (useTransactions != null) {
-		    int lv_useTransactions = (Integer.parseInt(useTransactions));
-		    if (lv_useTransactions == 0) {
-			this.useTRex = false;
-		    }
-		}
-	    
-		this.useTRexScanner = true;
-		String useTransactionsScanner = System.getenv("USE_TRANSACTIONS_SCANNER");
-		if (useTransactionsScanner != null) {
-		    int lv_useTransactionsScanner = (Integer.parseInt(useTransactionsScanner));
-		    if (lv_useTransactionsScanner == 0) {
-			this.useTRexScanner = false;
-		    }
-		}
-	    }
-
-	    table = new RMInterface(tblName);
-	    if (logger.isDebugEnabled()) logger.debug("Exit HTableClient::init, table object: " + table);
-	    return true;
-	}
-
-	public String getLastError() {
-		String ret = lastError;
-		lastError = null;
-		return ret;
-	}
-
-	void setLastError(String err) {
-		lastError = err;
-	}
-
-	String getTableName() {
-		return tableName;
-	}
-
-	String getHTableName() {
-		if (table == null)
-			return null;
-		else
-			return new String(table.getTableName());
-	}
-
-	void resetAutoFlush() {
-		table.setAutoFlush(true, true);
-	}
-
-	public boolean startScan(long transID, byte[] startRow, byte[] stopRow,
-                                 Object[]  columns, long timestamp,
-                                 boolean cacheBlocks, int numCacheRows,
-                                 Object[] colNamesToFilter, 
-                                 Object[] compareOpList, 
-                                 Object[] colValuesToCompare,
-                                 float samplePercent,
-                                 boolean inPreFetch,
-                                 boolean useSnapshotScan,
-                                 int snapTimeout,
-                                 String snapName,
-                                 String tmpLoc,
-                                 int espNum,
-                                 int versions)
-	        throws IOException, Exception {
-	  if (logger.isTraceEnabled()) logger.trace("Enter startScan() " + tableName + " txid: " + transID+ " CacheBlocks: " + cacheBlocks + " numCacheRows: " + numCacheRows + " Bulkread: " + useSnapshotScan);
-
-	  Scan scan;
-
-	  if (startRow != null && startRow.length == 0)
-	    startRow = null;
-	  if (stopRow != null && stopRow.length == 0)
-	    stopRow = null;
-
-	  if (startRow != null && stopRow != null)
-	    scan = new Scan(startRow, stopRow);
-	  else
-	    scan = new Scan();
-
-          if (versions != 0)
-            {
-              if (versions == -1)
-                scan.setMaxVersions();
-              else if (versions == -2)
-                {
-                  scan.setMaxVersions();
-                  scan.setRaw(true);
-                  columns = null;
-                }
-              else if (versions > 0)
-               {
-                 scan.setMaxVersions(versions);
-               }
-           }
-
-          if (cacheBlocks == true) {
-              scan.setCacheBlocks(true);
-              // Disable block cache for full table scan
-              if (startRow == null && stopRow == null)
-                  scan.setCacheBlocks(false);
-          }
-	  else
-              scan.setCacheBlocks(false);
-          
-	  scan.setCaching(numCacheRows);
-	  numRowsCached = numCacheRows;
-	  if (columns != null) {
-	    numColsInScan = columns.length;
-	    for (int i = 0; i < columns.length ; i++) {
-	      byte[] col = (byte[])columns[i];
-	      scan.addColumn(getFamily(col), getName(col));
-	    }
-	  }
-	  else
-	    numColsInScan = 0;
-	  if (colNamesToFilter != null) {
-	    FilterList list = new FilterList(FilterList.Operator.MUST_PASS_ALL);
-
-	    for (int i = 0; i < colNamesToFilter.length; i++) {
-	      byte[] colName = (byte[])colNamesToFilter[i];
-	      byte[] coByte = (byte[])compareOpList[i];
-	      byte[] colVal = (byte[])colValuesToCompare[i];
-
-	      if ((coByte == null) || (colVal == null)) {
-	        return false;
-	      }
-
-	      String coStr = new String(coByte);
-	      CompareOp co = CompareOp.valueOf(coStr);
-
-	      SingleColumnValueFilter filter1 = 
-	          new SingleColumnValueFilter(getFamily(colName), getName(colName), 
-	              co, colVal);
-	      list.addFilter(filter1);
-	    }
-
-	    if (samplePercent > 0.0f)
-	      list.addFilter(new RandomRowFilter(samplePercent));
-	    scan.setFilter(list);
-	  } else if (samplePercent > 0.0f) {
-	    scan.setFilter(new RandomRowFilter(samplePercent));
-	  }
-
-	  if (!useSnapshotScan || transID != 0)
-	  {
-	    if (useTRexScanner && (transID != 0)) {
-	      scanner = table.getScanner(transID, scan);
-	    } else {
-	      scanner = table.getScanner(scan);
-	    }
-	    if (logger.isTraceEnabled()) logger.trace("startScan(). After getScanner. Scanner: " + scanner);
-	  }
-	  else
-	  {
-	    snapHelper = new SnapshotScanHelper(table.getConfiguration(), tmpLoc,snapName);
-
-	    if (logger.isTraceEnabled()) 
-	      logger.trace("[Snapshot Scan] HTableClient.startScan(). useSnapshotScan: " + useSnapshotScan + 
-	                   " espNumber: " + espNum + 
-	                   " tmpLoc: " + snapHelper.getTmpLocation() + 
-	                   " snapshot name: " + snapHelper.getSnapshotName());
-	    
-	    if (!snapHelper.snapshotExists())
-	      throw new Exception ("Snapshot " + snapHelper.getSnapshotName() + " does not exist.");
-
-	    snapHelper.createTableSnapshotScanner(snapTimeout, 5, espNum, scan);
-	    if (scanner==null)
-	      throw new Exception("Cannot create Table Snapshot Scanner");
-	  }
-    
-          if (useSnapshotScan)
-             preFetch = false;
-          else
-	     preFetch = inPreFetch;
-	  if (preFetch)
-	  {
-	    scanHelper = new ScanHelper(); 
-            future = executorService.submit(scanHelper);
-	  }
-          fetchType = SCAN_FETCH;
-	  if (logger.isTraceEnabled()) logger.trace("Exit startScan().");
-	  return true;
-	}
-
-	public int  startGet(long transID, byte[] rowID, 
-                     Object[] columns,
-		     long timestamp) throws IOException {
-
-	    if (logger.isTraceEnabled()) logger.trace("Enter startGet(" + tableName + 
-			     " #cols: " + ((columns == null) ? 0:columns.length ) +
-			     " rowID: " + new String(rowID));
-		fetchType = GET_ROW;
-		Get get = new Get(rowID);
-		if (columns != null)
-		{
-			for (int i = 0; i < columns.length; i++) {
-				byte[] col = (byte[]) columns[i];
-				get.addColumn(getFamily(col), getName(col));
-			}
-			numColsInScan = columns.length;
-		}
-		else
-			numColsInScan = 0;
-			
-		Result getResult;
-		if (useTRex && (transID != 0)) {
-			getResult = table.get(transID, get);
-		} else {
-			getResult = table.get(get);
-		}
-		if (getResult == null
-                    || getResult.isEmpty()) {
-                        setJavaObject(jniObject);
-			return 0;
-		}
-		if (logger.isTraceEnabled()) logger.trace("startGet, result: " + getResult);
-		pushRowsToJni(getResult);
-		return 1;
-
-	}
-
-	// The TransactionalTable class is missing the batch get operation,
-	// so work around it.
-	private Result[] batchGet(long transactionID, List<Get> gets)
-			throws IOException {
-		if (logger.isTraceEnabled()) logger.trace("Enter batchGet(multi-row) " + tableName);
-		Result [] results = new Result[gets.size()];
-		int i=0;
-		for (Get g : gets) {
-			Result r = table.get(transactionID, g);
-			results[i++] = r;
-		}
-		return results;
-	}
-
-	public int startGet(long transID, Object[] rows,
-			Object[] columns, long timestamp)
-                        throws IOException {
-
-		if (logger.isTraceEnabled()) logger.trace("Enter startGet(multi-row) " + tableName);
-
-		List<Get> listOfGets = new ArrayList<Get>();
-		for (int i = 0; i < rows.length; i++) {
-			byte[] rowID = (byte[])rows[i]; 
-			Get get = new Get(rowID);
-			listOfGets.add(get);
-			if (columns != null)
-			{
-				for (int j = 0; j < columns.length; j++ ) {
-					byte[] col = (byte[])columns[j];
-					get.addColumn(getFamily(col), getName(col));
-				}
-			}
-		}
-		if (columns != null)
-			numColsInScan = columns.length;
-		else
-			numColsInScan = 0;
-		if (useTRex && (transID != 0)) {
-			getResultSet = batchGet(transID, listOfGets);
-                        fetchType = GET_ROW; 
-		} else {
-			getResultSet = table.get(listOfGets);
-			fetchType = BATCH_GET;
-		}
-		if (getResultSet != null && getResultSet.length > 0) {
-                	 pushRowsToJni(getResultSet);
-			return getResultSet.length;
-		}
-		else {
-			setJavaObject(jniObject);
-			return 0;
-		}
-	}
-
-	public int getRows(long transID, short rowIDLen, Object rowIDs,
-			Object[] columns)
-                        throws IOException {
-            
-		if (logger.isTraceEnabled()) logger.trace("Enter getRows " + tableName);
-
-		ByteBuffer bbRowIDs = (ByteBuffer)rowIDs;
-		List<Get> listOfGets = new ArrayList<Get>();
-		short numRows = bbRowIDs.getShort();
-		short actRowIDLen ;
-		byte rowIDSuffix;
-		byte[] rowID;
-
-		for (int i = 0; i < numRows; i++) {
-                        rowIDSuffix  = bbRowIDs.get();
-                        if (rowIDSuffix == '1')
-		           actRowIDLen = (short)(rowIDLen+1);
-                        else
-                           actRowIDLen = rowIDLen; 	
-			rowID = new byte[actRowIDLen];
-			bbRowIDs.get(rowID, 0, actRowIDLen);
-			Get get = new Get(rowID);
-			listOfGets.add(get);
-			if (columns != null) {
-				for (int j = 0; j < columns.length; j++ ) {
-					byte[] col = (byte[])columns[j];
-					get.addColumn(getFamily(col), getName(col));
-				}
-			}
-		}
-		if (columns != null)
-			numColsInScan = columns.length;
-		else
-			numColsInScan = 0;
-		if (useTRex && (transID != 0)) {
-			getResultSet = batchGet(transID, listOfGets);
-                        fetchType = GET_ROW; 
-		} else {
-			getResultSet = table.get(listOfGets);
-			fetchType = BATCH_GET;
-		}
-		if (getResultSet.length != numRows)
-                   throw new IOException("Number of rows returned is not equal to requested number of rows");
- 		pushRowsToJni(getResultSet);
-		return getResultSet.length;
-	}
-
-	public int fetchRows() throws IOException, 
-			InterruptedException, ExecutionException {
-		int rowsReturned = 0;
-
-		if (logger.isTraceEnabled()) logger.trace("Enter fetchRows(). Table: " + tableName);
-		if (getResultSet != null)
-		{
-			rowsReturned = pushRowsToJni(getResultSet);
-			getResultSet = null;
-			return rowsReturned;
-		}
-		else
-		{
-			if (scanner == null) {
-				String err = "  fetchRows() called before startScan().";
-				logger.error(err);
-				setLastError(err);
-				return -1;
-			}
-			Result[] result = null;
-			if (preFetch)
-			{
-				result = (Result[])future.get();
-				rowsReturned = pushRowsToJni(result);
-				future = null;
-				if ((rowsReturned <= 0 || rowsReturned < numRowsCached))
-					return rowsReturned;
-                                future = executorService.submit(scanHelper);
-			}
-			else
-			{
-				result = scanner.next(numRowsCached);
-				rowsReturned = pushRowsToJni(result);
-			}
-			return rowsReturned;
-		}
-	}
-
-	protected int pushRowsToJni(Result[] result) 
-			throws IOException {
-		if (result == null || result.length == 0)
-			return 0; 
-		int rowsReturned = result.length;
-		int numTotalCells = 0;
-		if (numColsInScan == 0)
-		{
-			for (int i = 0; i < result.length; i++) {	
-				numTotalCells += result[i].size();
-			}
-		}
-		else
-		// There can be a maximum of 2 versions per KV,
-		// so allocate placeholders to keep cell info
-		// for that many KVs
-			numTotalCells = 2 * rowsReturned * numColsInScan;
-		int numColsReturned;
-		Cell[] kvList;
-		Cell kv;
-
-		if (kvValLen == null ||
-	 		(kvValLen != null && numTotalCells > kvValLen.length))
-		{
-			kvValLen = new int[numTotalCells];
-			kvValOffset = new int[numTotalCells];
-			kvQualLen = new int[numTotalCells];
-			kvQualOffset = new int[numTotalCells];
-			kvFamLen = new int[numTotalCells];
-			kvFamOffset = new int[numTotalCells];
-			kvTimestamp = new long[numTotalCells];
-			kvBuffer = new byte[numTotalCells][];
-		}
-               
-		if (rowIDs == null || (rowIDs != null &&
-				rowsReturned > rowIDs.length))
-		{
-			rowIDs = new byte[rowsReturned][];
-			kvsPerRow = new int[rowsReturned];
-		}
-		int cellNum = 0;
-		boolean colFound = false;
-		for (int rowNum = 0; rowNum < rowsReturned ; rowNum++)
-		{
-			rowIDs[rowNum] = result[rowNum].getRow();
-			kvList = result[rowNum].rawCells();
-			numColsReturned = kvList.length;
-			if ((cellNum + numColsReturned) > numTotalCells)
-				throw new IOException("Insufficient cell array pre-allocated");
-			kvsPerRow[rowNum] = numColsReturned;
-			for (int colNum = 0 ; colNum < numColsReturned ; colNum++, cellNum++)
-			{ 
-				kv = kvList[colNum];
-				kvValLen[cellNum] = kv.getValueLength();
-				kvValOffset[cellNum] = kv.getValueOffset();
-				kvQualLen[cellNum] = kv.getQualifierLength();
-				kvQualOffset[cellNum] = kv.getQualifierOffset();
-				kvFamLen[cellNum] = kv.getFamilyLength();
-				kvFamOffset[cellNum] = kv.getFamilyOffset();
-				kvTimestamp[cellNum] = kv.getTimestamp();
-				kvBuffer[cellNum] = kv.getValueArray();
-				colFound = true;
-			}
-		}
-		int cellsReturned;
-		if (colFound)
-                	cellsReturned = cellNum;
-		else
-			cellsReturned = 0;
-		if (cellsReturned == 0)
-			setResultInfo(jniObject, null, null,
-				null, null, null, null,
-				null, null, rowIDs, kvsPerRow, cellsReturned, rowsReturned);
-		else 
-			setResultInfo(jniObject, kvValLen, kvValOffset,
-				kvQualLen, kvQualOffset, kvFamLen, kvFamOffset,
-				kvTimestamp, kvBuffer, rowIDs, kvsPerRow, cellsReturned, rowsReturned);
-		return rowsReturned;	
-	}		
-	
-	protected int pushRowsToJni(Result result) 
-			throws IOException {
-		int rowsReturned = 1;
-		int numTotalCells;
-		if (numColsInScan == 0)
-			numTotalCells = result.size();
-		else
-		// There can be a maximum of 2 versions per KV,
-		// so allocate placeholders to keep cell info
-		// for that many KVs
-			numTotalCells = 2 * rowsReturned * numColsInScan;
-		int numColsReturned;
-		Cell[] kvList;
-		Cell kv;
-
-		if (kvValLen == null ||
-	 		(kvValLen != null && numTotalCells > kvValLen.length))
-		{
-			kvValLen = new int[numTotalCells];
-			kvValOffset = new int[numTotalCells];
-			kvQualLen = new int[numTotalCells];
-			kvQualOffset = new int[numTotalCells];
-			kvFamLen = new int[numTotalCells];
-			kvFamOffset = new int[numTotalCells];
-			kvTimestamp = new long[numTotalCells];
-			kvBuffer = new byte[numTotalCells][];
-		}
-		if (rowIDs == null)
-		{
-			rowIDs = new byte[rowsReturned][];
-			kvsPerRow = new int[rowsReturned];
-		}
-		kvList = result.rawCells();
- 		if (kvList == null)
-			numColsReturned = 0; 
-		else
-			numColsReturned = kvList.length;
-		if ((numColsReturned) > numTotalCells)
-			throw new IOException("Insufficient cell array pre-allocated");
- 		rowIDs[0] = result.getRow();
-		kvsPerRow[0] = numColsReturned;
-		for (int colNum = 0 ; colNum < numColsReturned ; colNum++)
-		{ 
-			kv = kvList[colNum];
-			kvValLen[colNum] = kv.getValueLength();
-			kvValOffset[colNum] = kv.getValueOffset();
-			kvQualLen[colNum] = kv.getQualifierLength();
-			kvQualOffset[colNum] = kv.getQualifierOffset();
-			kvFamLen[colNum] = kv.getFamilyLength();
-			kvFamOffset[colNum] = kv.getFamilyOffset();
-			kvTimestamp[colNum] = kv.getTimestamp();
-			kvBuffer[colNum] = kv.getValueArray();
-		}
-		if (numColsReturned == 0)
-			setResultInfo(jniObject, null, null,
-				null, null, null, null,
-				null, null, rowIDs, kvsPerRow, numColsReturned, rowsReturned);
-		else
-			setResultInfo(jniObject, kvValLen, kvValOffset,
-				kvQualLen, kvQualOffset, kvFamLen, kvFamOffset,
-				kvTimestamp, kvBuffer, rowIDs, kvsPerRow, numColsReturned, rowsReturned);
-		return rowsReturned;	
-	}		
-	
-	public boolean deleteRow(final long transID, byte[] rowID, 
-				 Object[] columns,
-				 long timestamp,
-                                 boolean asyncOperation) throws IOException {
-
-		if (logger.isTraceEnabled()) logger.trace("Enter deleteRow(" + new String(rowID) + ", "
-			     + timestamp + ") " + tableName);
-
-		final Delete del;
-		if (timestamp == -1)
-			del = new Delete(rowID);
-		else
-			del = new Delete(rowID, timestamp);
-
-		if (columns != null) {
-			for (int i = 0; i < columns.length ; i++) {
-				byte[] col = (byte[]) columns[i];
-				del.deleteColumns(getFamily(col), getName(col));
-			}
-		}
-               	if (asyncOperation) {
-			future = executorService.submit(new Callable() {
- 				public Object call() throws Exception {
-					boolean res = true;
-					if (useTRex && (transID != 0)) 
-				           table.delete(transID, del);
-				        else
-				           table.delete(del);
-				        return new Boolean(res);
-				}
-			});
-			return true;
-		}
-		else {
-	          	if (useTRex && (transID != 0)) 
-				table.delete(transID, del);
-			else
-				table.delete(del);
-		}
-		if (logger.isTraceEnabled()) logger.trace("Exit deleteRow");
-		return true;
-	}
-
-	public boolean deleteRows(final long transID, short rowIDLen, Object rowIDs,
-		      long timestamp,
-                      boolean asyncOperation) throws IOException {
-
-	        if (logger.isTraceEnabled()) logger.trace("Enter deleteRows() " + tableName);
-
-		final List<Delete> listOfDeletes = new ArrayList<Delete>();
-		listOfDeletes.clear();
-		ByteBuffer bbRowIDs = (ByteBuffer)rowIDs;
-		short numRows = bbRowIDs.getShort();
-                byte[] rowID;		
-		byte rowIDSuffix;
-		short actRowIDLen;
-       
-		for (short rowNum = 0; rowNum < numRows; rowNum++) {
-                        rowIDSuffix  = bbRowIDs.get();
-                        if (rowIDSuffix == '1')
-		           actRowIDLen = (short)(rowIDLen+1);
-                        else
-                           actRowIDLen = rowIDLen; 	
-			rowID = new byte[actRowIDLen];
-			bbRowIDs.get(rowID, 0, actRowIDLen);
-
-			Delete del;
-			if (timestamp == -1)
-			    del = new Delete(rowID);
-			else
-			    del = new Delete(rowID, timestamp);
-			listOfDeletes.add(del);
-		}
-                if (asyncOperation) {
-                        future = executorService.submit(new Callable() {
-                                public Object call() throws Exception {
-                                    boolean res = true;
-				   if (useTRex && (transID != 0)) 
-				      table.delete(transID, listOfDeletes);
-				   else
-				      table.delete(listOfDeletes);
-				   return new Boolean(res);
-				}
-			});
-			return true;
-		}
-		else {
-			if (useTRex && (transID != 0)) 
-		    	   table.delete(transID, listOfDeletes);
-			else
-		  	   table.delete(listOfDeletes);
-		}
-		if (logger.isTraceEnabled()) logger.trace("Exit deleteRows");
-		return true;
-	}
-
-         public byte[] intToByteArray(int value) {
-	     return new byte[] {
-		 (byte)(value >>> 24),
-		 (byte)(value >>> 16),
-		 (byte)(value >>> 8),
-		 (byte)value};
-	 }
-    
-	public boolean checkAndDeleteRow(long transID, byte[] rowID, 
-					 byte[] columnToCheck, byte[] colValToCheck,
-					 long timestamp) throws IOException {
-
-		if (logger.isTraceEnabled()) logger.trace("Enter checkAndDeleteRow(" + new String(rowID) + ", "
-			     + new String(columnToCheck) + ", " + new String(colValToCheck) + ", " + timestamp + ") " + tableName);
-
-			Delete del;
-			if (timestamp == -1)
-				del = new Delete(rowID);
-			else
-				del = new Delete(rowID, timestamp);
-
-			byte[] family = null;
-			byte[] qualifier = null;
-
-			if (columnToCheck.length > 0) {
-				family = getFamily(columnToCheck);
-				qualifier = getName(columnToCheck);
-			}
-			
-			boolean res;
-			if (useTRex && (transID != 0)) {
-			    res = table.checkAndDelete(transID, rowID, family, qualifier, colValToCheck, del);
-			} else {
-			    res = table.checkAndDelete(rowID, family, qualifier, colValToCheck, del);
-			}
-
-			if (res == false)
-			    return false;
-		return true;
-	}
-
-	public boolean putRow(final long transID, final byte[] rowID, Object row,
-		byte[] columnToCheck, final byte[] colValToCheck,
-		final boolean checkAndPut, boolean asyncOperation) throws IOException, InterruptedException, 
-                          ExecutionException 
-	{
-		if (logger.isTraceEnabled()) logger.trace("Enter putRow() " + tableName);
-
-	 	final Put put;
-		ByteBuffer bb;
-		short numCols;
-		short colNameLen;
-                int colValueLen;
-		byte[] family = null;
-		byte[] qualifier = null;
-		byte[] colName, colValue;
-
-		bb = (ByteBuffer)row;
-		put = new Put(rowID);
-		numCols = bb.getShort();
-		for (short colIndex = 0; colIndex < numCols; colIndex++)
-		{
-			colNameLen = bb.getShort();
-			colName = new byte[colNameLen];
-			bb.get(colName, 0, colNameLen);
-			colValueLen = bb.getInt();	
-			colValue = new byte[colValueLen];
-			bb.get(colValue, 0, colValueLen);
-			put.add(getFamily(colName), getName(colName), colValue); 
-			if (checkAndPut && colIndex == 0) {
-				family = getFamily(colName);
-				qualifier = getName(colName);
-			} 
-		}
-		if (columnToCheck != null && columnToCheck.length > 0) {
-			family = getFamily(columnToCheck);
-			qualifier = getName(columnToCheck);
-		}
-		final byte[] family1 = family;
-		final byte[] qualifier1 = qualifier;
-		if (asyncOperation) {
-			future = executorService.submit(new Callable() {
-				public Object call() throws Exception {
-					boolean res = true;
-
-					if (checkAndPut) {
-		    				if (useTRex && (transID != 0)) 
-							res = table.checkAndPut(transID, rowID, 
-								family1, qualifier1, colValToCheck, put);
-		    				else 
-							res = table.checkAndPut(rowID, 
-								family1, qualifier1, colValToCheck, put);
-					}
-					else {
-		    				if (useTRex && (transID != 0)) 
-							table.put(transID, put);
-		    				else 
-							table.put(put);
-					}
-					return new Boolean(res);
-				}
-			});
-			return true;
-		} else {
-		 	boolean result = true;
-			if (checkAndPut) {
-		    		if (useTRex && (transID != 0)) 
-					result = table.checkAndPut(transID, rowID, 
-						family1, qualifier1, colValToCheck, put);
-		   		else 
-					result = table.checkAndPut(rowID, 
-						family1, qualifier1, colValToCheck, put);
-			}
-			else {
-		    		if (useTRex && (transID != 0)) 
-					table.put(transID, put);
-		    		else 
-					table.put(put);
-			}
-			return result;
-		}	
-	}
-
-	public boolean insertRow(long transID, byte[] rowID, 
-                         Object row, 
-			 long timestamp,
-                         boolean asyncOperation) throws IOException, InterruptedException, ExecutionException {
-		return putRow(transID, rowID, row, null, null, 
-				false, asyncOperation);
-	}
-
-	public boolean putRows(final long transID, short rowIDLen, Object rowIDs, 
-                       Object rows,
-                       long timestamp, boolean autoFlush, boolean asyncOperation)
-			throws IOException, InterruptedException, ExecutionException  {
-
-		if (logger.isTraceEnabled()) logger.trace("Enter putRows() " + tableName);
-
-		Put put;
-		ByteBuffer bbRows, bbRowIDs;
-		short numCols, numRows;
-		short colNameLen;
-                int colValueLen;
-		byte[] colName, colValue, rowID;
-		byte rowIDSuffix;
-                short actRowIDLen;
-		bbRowIDs = (ByteBuffer)rowIDs;
-		bbRows = (ByteBuffer)rows;
-
-		final List<Put> listOfPuts = new ArrayList<Put>();
-		numRows = bbRowIDs.getShort();
-		
-		for (short rowNum = 0; rowNum < numRows; rowNum++) {
-                        rowIDSuffix  = bbRowIDs.get();
-                        if (rowIDSuffix == '1')
-		           actRowIDLen = (short)(rowIDLen+1);
-                        else
-                           actRowIDLen = rowIDLen; 	
-			rowID = new byte[actRowIDLen];
-			bbRowIDs.get(rowID, 0, actRowIDLen);
-			put = new Put(rowID);
-			numCols = bbRows.getShort();
-			for (short colIndex = 0; colIndex < numCols; colIndex++)
-			{
-				colNameLen = bbRows.getShort();
-				colName = new byte[colNameLen];
-				bbRows.get(colName, 0, colNameLen);
-				colValueLen = bbRows.getInt();	
-				colValue = new byte[colValueLen];
-				bbRows.get(colValue, 0, colValueLen);
-				put.add(getFamily(colName), getName(colName), colValue); 
-			}
-			if (writeToWAL)  
-				put.setWriteToWAL(writeToWAL);
-			listOfPuts.add(put);
-		}
-		if (autoFlush == false)
-			table.setAutoFlush(false, true);
-		if (asyncOperation) {
-			future = executorService.submit(new Callable() {
-				public Object call() throws Exception {
-					boolean res = true;
-					if (useTRex && (transID != 0)) 
-						table.put(transID, listOfPuts);
-					else 
-						table.put(listOfPuts);
-					return new Boolean(res);
-				}
-			});
-		}
-		else {
-			if (useTRex && (transID != 0)) 
-				table.put(transID, listOfPuts);
-			else 
-				table.put(listOfPuts);
-		}
-		return true;
-	} 
-
-	public boolean completeAsyncOperation(int timeout, boolean resultArray[]) 
-			throws InterruptedException, ExecutionException
-	{
-		if (timeout == -1) {
-			if (! future.isDone()) 
-				return false;
-		}
-	 	try {			
-			Boolean result = (Boolean)future.get(timeout, TimeUnit.MILLISECONDS);
-                        // Need to enhance to return the result 
-                        // for each Put object
-			for (int i = 0; i < resultArray.length; i++)
-			    resultArray[i] = result.booleanValue();
-			future = null;
- 		} catch(TimeoutException te) {
-			return false;
-		} 
-		return true;
-	}
-
-	public boolean checkAndInsertRow(long transID, byte[] rowID, 
-                         Object row, 
-			 long timestamp,
-                         boolean asyncOperation) throws IOException, InterruptedException, ExecutionException  {
-		return putRow(transID, rowID, row, null, null, 
-				true, asyncOperation);
-	}
-
-	public boolean checkAndUpdateRow(long transID, byte[] rowID, 
-             Object columns, byte[] columnToCheck, byte[] colValToCheck,
-             long timestamp, boolean asyncOperation) throws IOException, InterruptedException, 
-                                    ExecutionException, Throwable  {
-		return putRow(transID, rowID, columns, columnToCheck, 
-			colValToCheck, 
-				true, asyncOperation);
-	}
-
-        public byte[] coProcAggr(long transID, int aggrType, 
-		byte[] startRowID, 
-              byte[] stopRowID, byte[] colFamily, byte[] colName, 
-              boolean cacheBlocks, int numCacheRows) 
-                          throws IOException, Throwable {
-
-		    Configuration customConf = table.getConfiguration();
-                    long rowCount = 0;
-
-                    if (transID > 0) {
-		      TransactionalAggregationClient aggregationClient = 
-                          new TransactionalAggregationClient(customConf);
-		      Scan scan = new Scan();
-		      scan.addFamily(colFamily);
-		      scan.setCacheBlocks(false);
-		      final ColumnInterpreter<Long, Long, EmptyMsg, LongMsg, LongMsg> ci =
-			new LongColumnInterpreter();
-		      byte[] tname = getTableName().getBytes();
-		      rowCount = aggregationClient.rowCount(transID, 
-                        org.apache.hadoop.hbase.TableName.valueOf(getTableName()),
-                        ci,
-                        scan);
-                    }
-                    else {
-		      AggregationClient aggregationClient = 
-                          new AggregationClient(customConf);
-		      Scan scan = new Scan();
-		      scan.addFamily(colFamily);
-		      scan.setCacheBlocks(false);
-		      final ColumnInterpreter<Long, Long, EmptyMsg, LongMsg, LongMsg> ci =
-			new LongColumnInterpreter();
-		      byte[] tname = getTableName().getBytes();
-		      rowCount = aggregationClient.rowCount( 
-                        org.apache.hadoop.hbase.TableName.valueOf(getTableName()),
-                        ci,
-                        scan);
-                    }
-
-		    coprocAggrResult = new ByteArrayList();
-
-		    byte[] rcBytes = 
-                      ByteBuffer.allocate(8).order(ByteOrder.LITTLE_ENDIAN).putLong(rowCount).array();
-                    return rcBytes; 
-	}
-
-	public boolean flush() throws IOException {
-		if (table != null)
-			table.flushCommits();
-		return true;
-	}
-
-	public boolean release(boolean cleanJniObject) throws IOException {
-
-           boolean retcode = false;
-          // Complete the pending IO
-           if (future != null) {
-              try {
-                 future.get(30, TimeUnit.SECONDS);
-              } catch(TimeoutException | InterruptedException e) {
-		  logger.error("Asynchronous Thread is Cancelled, " + e);
-                  retcode = true;
-                  future.cancel(true); // Interrupt the thread
-              } catch (ExecutionException ee)
-              {
-              }
-              future = null;
-          }
-	  if (table != null)
-	    table.flushCommits();
-	  if (scanner != null) {
-	    scanner.close();
-	    scanner = null;
-	  }
-	  if (snapHelper !=null)
-	  {
-	    snapHelper.release();
-	    snapHelper = null;
-	  }
-	  cleanScan();		
-	  getResultSet = null;
-	  if (cleanJniObject) {
-	    if (jniObject != 0)
-	      cleanup(jniObject);
-            tableName = null;
-	  }
-          scanHelper = null;
-	  jniObject = 0;
-	  return retcode;
-	}
-
-	public boolean close(boolean clearRegionCache, boolean cleanJniObject) throws IOException {
-           if (logger.isTraceEnabled()) logger.trace("Enter close() " + tableName);
-           if (table != null) 
-           {
-              if (clearRegionCache)
-              {
-                 HConnection connection = table.getConnection();
-                 connection.clearRegionCache(tableName.getBytes());
-              }
-              table.close();
-              table = null;
-           }
-           return true;
-	}
-
-	public ByteArrayList getEndKeys() throws IOException {
-	    if (logger.isTraceEnabled()) logger.trace("Enter getEndKeys() " + tableName);
-            ByteArrayList result = new ByteArrayList();
-            if (table == null) {
-                return null;
-            }
-            byte[][] htableResult = table.getEndKeys();
-
-            // transfer the HTable result to ByteArrayList
-            for (int i=0; i<htableResult.length; i++ ) {
-                if (logger.isTraceEnabled()) logger.trace("Inside getEndKeys(), result[i]: " + 
-                             htableResult[i]);
-                if (logger.isTraceEnabled()) logger.trace("Inside getEndKeys(), result[i]: " + 
-                             new String(htableResult[i]));
-                result.add(htableResult[i]);
-            }
-
-            if (logger.isTraceEnabled()) logger.trace("Exit getEndKeys(), result size: " + result.getSize());
-            return result;
-	}
-
-    public ByteArrayList getStartKeys() throws IOException {
-        if (logger.isTraceEnabled()) logger.trace("Enter getStartKeys() " + tableName);
-        ByteArrayList result = new ByteArrayList();
-        if (table == null) {
-            return null;
-        }
-        byte[][] htableResult = table.getStartKeys();
-
-        // transfer the HTable result to ByteArrayList
-        for (int i=0; i<htableResult.length; i++ ) {
-            if (logger.isTraceEnabled()) logger.trace("Inside getStartKeys(), result[i]: " + 
-                         htableResult[i]);
-            if (logger.isTraceEnabled()) logger.trace("Inside getStartKeys(), result[i]: " + 
-                         new String(htableResult[i]));
-            result.add(htableResult[i]);
-        }
-
-        if (logger.isTraceEnabled()) logger.trace("Exit getStartKeys(), result size: " + result.getSize());
-        return result;
-    }
-
-    private void cleanScan()
-    {
-        if (fetchType == GET_ROW || fetchType == BATCH_GET)
-           return;
-        numRowsCached = 1;
-        numColsInScan = 0;
-        kvValLen = null;
-        kvValOffset = null;
-        kvQualLen = null;
-        kvQualOffset = null;
-        kvFamLen = null;
-        kvFamOffset = null;
-        kvTimestamp = null;
-        kvBuffer = null;
-        rowIDs = null;
-        kvsPerRow = null;
-    }
-
-    protected void setJniObject(long inJniObject) {
-       jniObject = inJniObject;
-    }    
-
-    private native int setResultInfo(long jniObject,
-				int[] kvValLen, int[] kvValOffset,
-				int[] kvQualLen, int[] kvQualOffset,
-				int[] kvFamLen, int[] kvFamOffset,
-  				long[] timestamp, 
-				byte[][] kvBuffer, byte[][] rowIDs,
-				int[] kvsPerRow, int numCellsReturned,
-				int rowsReturned);
-
-   private native void cleanup(long jniObject);
-
-   protected native int setJavaObject(long jniObject);
- 
-   static {
-     executorService = Executors.newCachedThreadPool();
-     System.loadLibrary("executor");
-   }
-}

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/executor/HiveClient.java
----------------------------------------------------------------------
diff --git a/core/sql/executor/HiveClient.java b/core/sql/executor/HiveClient.java
deleted file mode 100755
index 5cedcc8..0000000
--- a/core/sql/executor/HiveClient.java
+++ /dev/null
@@ -1,301 +0,0 @@
-// @@@ START COPYRIGHT @@@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// @@@ END COPYRIGHT @@@
-
-package org.trafodion.sql.HBaseAccess;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.ArrayList;
-import java.util.List;
-import java.lang.reflect.Field;
-
-import org.apache.log4j.PropertyConfigurator;
-import org.apache.log4j.Logger;
-import org.apache.thrift.TException;
-
-import org.apache.hadoop.util.StringUtils;
-
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.UnknownDBException;
-// These are needed for the DDL_TIME constant. This class is different in Hive 0.10.
-// We use Java reflection instead of importing the class statically. 
-// For Hive 0.9 or lower
-// import org.apache.hadoop.hive.metastore.api.Constants;
-// For Hive 0.10 or higher
-// import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FSDataOutputStream;
-
-import java.sql.SQLException;
-import java.sql.Connection;
-import java.sql.ResultSet;
-import java.sql.Statement;
-import java.sql.DriverManager;
-
-
-public class HiveClient {
-    static Logger logger = Logger.getLogger(HiveClient.class.getName());
-    static String ddlTimeConst = null;
-    String lastError;
-    HiveConf hiveConf = null;
-    HiveMetaStoreClient hmsClient  ;
-    FSDataOutputStream fsOut = null;
-
-    public HiveClient() {
-   
-    }
-
-    public String getLastError() {
-        return lastError;
-    }
-
-    void setLastError(String err) {
-        lastError = err;
-    }
-
-    void setupLog4j() {
-        String confFile = System.getenv("MY_SQROOT")
-            + "/conf/log4j.hdfs.config";
-        PropertyConfigurator.configure(confFile);
-    }
-
-    public boolean init(String metastoreURI) 
-              throws MetaException {
-         setupLog4j();
-         if (logger.isDebugEnabled()) logger.debug("HiveClient.init(" + metastoreURI + " " + ") called.");
-         ddlTimeConst = getDDLTimeConstant();
-         hiveConf = new HiveConf();
-	 if (metastoreURI.length() > 0) {
-             hiveConf.set("hive.metastore.local", "false");
-             hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, metastoreURI);
-         }
-         hmsClient = new HiveMetaStoreClient(hiveConf, null);
-         return true;
-    }
-
-    public boolean close() {
-        hmsClient.close();
-        return true;
-    }
-
-    public boolean exists(String schName, String tblName)  
-        throws MetaException, TException, UnknownDBException {
-            if (logger.isDebugEnabled()) logger.debug("HiveClient.exists(" + schName + " , " + tblName + ") called.");
-            boolean result = hmsClient.tableExists(schName, tblName);
-            return result;
-    }
-
-    public String getHiveTableString(String schName, String tblName)
-        throws MetaException, TException {
-        Table table;
-        if (logger.isDebugEnabled()) logger.debug("HiveClient.getHiveTableString(" + schName + " , " + 
-                     tblName + ") called.");
-        try {
-            table = hmsClient.getTable(schName, tblName);
-        }
-        catch (NoSuchObjectException x) {
-            if (logger.isDebugEnabled()) logger.debug("HiveTable not found");
-            return new String("");
-        }
-        if (logger.isDebugEnabled()) logger.debug("HiveTable is " + table.toString());
-        return table.toString() ;
-    }
-
-    public long getRedefTime(String schName, String tblName)
-        throws MetaException, TException, ClassCastException, NullPointerException, NumberFormatException {
-        Table table;
-        if (logger.isDebugEnabled()) logger.debug("HiveClient.getRedefTime(" + schName + " , " + 
-                     tblName + ") called.");
-        try {
-            table = hmsClient.getTable(schName, tblName);
-            if (table == null) {
-                if (logger.isDebugEnabled()) logger.debug("getTable returns null for " + schName + "." + tblName + ".");
-                return 0;
-            }
-        }
-        catch (NoSuchObjectException x) {
-            if (logger.isDebugEnabled()) logger.debug("Hive table no longer exists.");
-            return 0;
-        }
-
-        long redefTime = table.getCreateTime();
-        if (table.getParameters() != null){
-            // these would be used without reflection:
-            //String rfTime = table.getParameters().get(Constants.DDL_TIME);
-            //String rfTime = table.getParameters().get(hive_metastoreConstants.DDL_TIME);
-            // determine the constant using reflection instead
-            String rfTime = table.getParameters().get(ddlTimeConst);
-            if (rfTime != null)
-                redefTime = Long.parseLong(rfTime);
-        }
-        if (logger.isDebugEnabled()) logger.debug("RedefTime is " + redefTime);
-        return redefTime ;
-    }
-
-    public Object[] getAllSchemas() throws MetaException {
-        List<String> schemaList = (hmsClient.getAllDatabases());
-        if (schemaList != null)
-           return schemaList.toArray();
-        else
-           return null; 
-    }
-
-    public Object[] getAllTables(String schName) 
-        throws MetaException {
-        List<String> tableList = hmsClient.getAllTables(schName);
-        if (tableList != null)
-           return tableList.toArray();
-        else
-           return null;
-    }
-
-    // Because Hive changed the name of the class containing internal constants
-    // in Hive 0.10, we use Java reflection to get the value of the DDL_TIME constant.
-    public static String getDDLTimeConstant()
-        throws MetaException {
-
-        Class constsClass = null;
-        Object constsFromReflection = null; 
-        Field ddlTimeField = null;
-        Object fieldVal = null;
-
-        // Using the class loader, try to load either class by name.
-        // Note that both classes have a default constructor and both have a static
-        // String field DDL_TIME, so the rest of the code is the same for both.
-        try { 
-            try {
-                constsClass = Class.forName(
-                   // Name in Hive 0.10 and higher
-                   "org.apache.hadoop.hive.metastore.api.hive_metastoreConstants");
-            } catch (ClassNotFoundException e) { 
-                // probably not found because we are using Hive 0.9 or lower
-                constsClass = null;
-            } 
-            if (constsClass == null) {
-                constsClass = Class.forName(
-                    // Name in Hive 0.9 and lower
-                    "org.apache.hadoop.hive.metastore.api.Constants");
-            }
-
-            // Make a new object for this class, using the default constructor
-            constsFromReflection = constsClass.newInstance(); 
-        } catch (InstantiationException e) { 
-            throw new MetaException("Instantiation error for metastore constants class");
-        } catch (IllegalAccessException e) { 
-            throw new MetaException("Illegal access exception");
-        } catch (ClassNotFoundException e) { 
-            throw new MetaException("Could not find Hive Metastore constants class");
-        } 
-
-        // Using Java reflection, get a reference to the DDL_TIME field
-        try {
-            ddlTimeField = constsClass.getField("DDL_TIME");
-        } catch (NoSuchFieldException e) {
-            throw new MetaException("Could not find DDL_TIME constant field");
-        }
-
-        // get the String object that represents the value of this field
-        try {
-            fieldVal = ddlTimeField.get(constsFromReflection);
-        } catch (IllegalAccessException e) {
-            throw new MetaException("Could not get value for DDL_TIME constant field");
-        }
-
-        return fieldVal.toString();
-    }
-
-  ///////////////////   
-  boolean hdfsCreateFile(String fname) throws IOException
-  {
-    HiveConf  config = new HiveConf();
-    if (logger.isDebugEnabled()) logger.debug("HiveClient.hdfsCreateFile() - started" );
-    Path filePath = new Path(fname);
-    FileSystem fs = FileSystem.get(filePath.toUri(),config);
-    fsOut = fs.create(filePath, true);
-    
-    if (logger.isDebugEnabled()) logger.debug("HiveClient.hdfsCreateFile() - file created" );
-
-    return true;
-  }
-  
-  boolean hdfsWrite(byte[] buff, long len) throws Exception
-  {
-
-    if (logger.isDebugEnabled()) logger.debug("HiveClient.hdfsWrite() - started" );
-    try
-    {
-      fsOut.write(buff);
-      fsOut.flush();
-    }
-    catch (Exception e)
-    {
-      if (logger.isDebugEnabled()) logger.debug("HiveClient.hdfsWrite() -- exception: " + e);
-      throw e;
-    }
-    if (logger.isDebugEnabled()) logger.debug("HiveClient.hdfsWrite() - bytes written and flushed:" + len  );
-    
-    return true;
-  }
-  
-  boolean hdfsClose() throws IOException
-  {
-    if (logger.isDebugEnabled()) logger.debug("HiveClient.hdfsClose() - started" );
-    try
-    {
-      fsOut.close();
-    }
-    catch (IOException e)
-    {
-      if (logger.isDebugEnabled()) logger.debug("HiveClient.hdfsClose() - exception:" + e);
-      throw e;
-    }
-    return true;
-  }
-  
-  public void executeHiveSQL(String ddl) throws ClassNotFoundException, SQLException
-  {
-      try
-      {
-          Class.forName("org.apache.hive.jdbc.HiveDriver");
-      }
- 
-      catch(ClassNotFoundException e) 
-      {
-          throw e;
-      }
-
-      try 
-      {
-          Connection con = DriverManager.getConnection("jdbc:hive2://", "hive", "");
-          Statement stmt = con.createStatement();
-          stmt.execute(ddl);
-      }
- 
-      catch(SQLException e)
-      {
-	  throw e;
-      }
-  }
-}
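
A note on the reflection trick in getDDLTimeConstant() above: the lookup can be sketched
in isolation as below. This is only an illustration, not part of the commit; the two Hive
metastore class names are real, while the wrapper class and method names are invented for
the example.

    import java.lang.reflect.Field;

    public class DdlTimeLookup {
        // Resolve the DDL_TIME constant without compiling against a specific
        // Hive version: try the Hive 0.10+ constants class first, then fall
        // back to the pre-0.10 class.  Both declare a public static String
        // field named DDL_TIME.
        public static String ddlTimeConstant() throws Exception {
            Class<?> constsClass;
            try {
                constsClass = Class.forName(
                    "org.apache.hadoop.hive.metastore.api.hive_metastoreConstants");
            } catch (ClassNotFoundException e) {
                constsClass = Class.forName(
                    "org.apache.hadoop.hive.metastore.api.Constants");
            }
            Field ddlTimeField = constsClass.getField("DDL_TIME");
            return (String) ddlTimeField.get(null);  // static field, no instance needed
        }
    }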

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/executor/OrcFileReader.cpp
----------------------------------------------------------------------
diff --git a/core/sql/executor/OrcFileReader.cpp b/core/sql/executor/OrcFileReader.cpp
index 25a431a..9dfafd5 100644
--- a/core/sql/executor/OrcFileReader.cpp
+++ b/core/sql/executor/OrcFileReader.cpp
@@ -69,7 +69,7 @@ OrcFileReader::~OrcFileReader()
 //////////////////////////////////////////////////////////////////////////////
 OFR_RetCode OrcFileReader::init()
 {
-  static char className[]="org/apache/hadoop/hive/ql/io/orc/OrcFileReader";
+  static char className[]="org/trafodion/sql/OrcFileReader";
   
   if (JavaMethods_)
     return (OFR_RetCode)JavaObjectInterface::init(className, javaClass_, JavaMethods_, (Int32)JM_LAST, TRUE);       
@@ -102,7 +102,7 @@ OFR_RetCode OrcFileReader::init()
 //    JavaMethods_[JM_FETCHROW2 ].jm_name      = "fetchNextRow";
 //    JavaMethods_[JM_FETCHROW2 ].jm_signature = "()[B";
     JavaMethods_[JM_FETCHROW2 ].jm_name      = "fetchNextRowObj";
-    JavaMethods_[JM_FETCHROW2 ].jm_signature = "()Lorg/apache/hadoop/hive/ql/io/orc/OrcFileReader$OrcRowReturnSQL;";
+    JavaMethods_[JM_FETCHROW2 ].jm_signature = "()Lorg/trafodion/sql/OrcFileReader$OrcRowReturnSQL;";
     JavaMethods_[JM_GETNUMROWS ].jm_name      = "getNumberOfRows";
     JavaMethods_[JM_GETNUMROWS ].jm_signature = "()J";
 //    JavaMethods_[JM_FETCHBUFF1].jm_name      = "fetchArrayOfRows";
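
A note on the signature strings in this hunk: a JNI method descriptor embeds the binary
name of the Java class with '.' replaced by '/', so moving OrcFileReader from
org.apache.hadoop.hive.ql.io.orc to org.trafodion.sql forces every hard-coded jm_signature
on the C++ side to change as well. A small, purely illustrative Java check (class and
variable names invented for the example):

    public class SignatureCheck {
        public static void main(String[] args) {
            String className = "org.trafodion.sql.OrcFileReader";
            // Descriptor of a method taking no arguments and returning the
            // nested OrcRowReturnSQL class.
            String sig = "()L" + className.replace('.', '/') + "$OrcRowReturnSQL;";
            // Prints ()Lorg/trafodion/sql/OrcFileReader$OrcRowReturnSQL;
            System.out.println(sig);
        }
    }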


[4/9] incubator-trafodion git commit: Most of the Trafodion Java source files are built through Maven, using projects DCS, REST, HBase-trx and SQL. A few files remain in the core/sql/executor and core/sql/ustat directories that are built through javac co

Posted by db...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/src/main/java/org/trafodion/sql/HBaseClient.java
----------------------------------------------------------------------
diff --git a/core/sql/src/main/java/org/trafodion/sql/HBaseClient.java b/core/sql/src/main/java/org/trafodion/sql/HBaseClient.java
new file mode 100644
index 0000000..36c4e05
--- /dev/null
+++ b/core/sql/src/main/java/org/trafodion/sql/HBaseClient.java
@@ -0,0 +1,1596 @@
+// @@@ START COPYRIGHT @@@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// @@@ END COPYRIGHT @@@
+
+package org.trafodion.sql;
+
+import com.google.protobuf.ServiceException;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.NavigableMap;
+import java.util.Map;
+import java.util.Arrays;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+import org.apache.log4j.PropertyConfigurator;
+import org.apache.log4j.Logger;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.transactional.RMInterface;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.HFileArchiveUtil;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.PoolMap;
+import org.apache.hadoop.hbase.util.PoolMap.PoolType;
+import org.apache.hadoop.hbase.security.access.AccessController;
+import org.apache.hadoop.hbase.security.access.UserPermission;
+import org.apache.hadoop.hbase.security.access.Permission;
+import org.apache.hadoop.hbase.MasterNotRunningException;
+import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type;
+//import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+
+import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
+//import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.regionserver.BloomType; 
+//import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType ;
+import org.apache.hadoop.hbase.regionserver.KeyPrefixRegionSplitPolicy;
+import org.apache.hadoop.hbase.client.Durability;
+import org.trafodion.sql.HTableClient;
+import org.apache.hadoop.hbase.ServerLoad;
+import org.apache.hadoop.hbase.RegionLoad;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.ServerName;
+
+import java.util.concurrent.ExecutionException;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.io.hfile.HFile;
+import org.apache.hadoop.hbase.io.hfile.HFileScanner;
+import org.apache.hadoop.hbase.io.hfile.FixedFileTrailer;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.DtmConst;
+import org.apache.commons.codec.binary.Hex;
+
+import com.google.protobuf.ServiceException;
+
+public class HBaseClient {
+
+    static Logger logger = Logger.getLogger(HBaseClient.class.getName());
+    public static Configuration config = HBaseConfiguration.create();
+    String lastError;
+    RMInterface table = null;
+
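+    // Pool of HTableClient objects keyed by table name: hTableClientsFree holds
+    // idle clients available for reuse, hTableClientsInUse tracks the clients
+    // currently handed out by getHTableClient().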
+    private PoolMap<String, HTableClient> hTableClientsFree;
+    private PoolMap<String, HTableClient> hTableClientsInUse;
+    // this set of constants MUST be kept in sync with the C++ enum in
+    // ExpHbaseDefs.h
+    public static final int HBASE_NAME = 0;
+    public static final int HBASE_MAX_VERSIONS = 1;
+    public static final int HBASE_MIN_VERSIONS = 2;
+    public static final int HBASE_TTL = 3;
+    public static final int HBASE_BLOCKCACHE = 4;
+    public static final int HBASE_IN_MEMORY = 5;
+    public static final int HBASE_COMPRESSION = 6;
+    public static final int HBASE_BLOOMFILTER = 7;
+    public static final int HBASE_BLOCKSIZE = 8;
+    public static final int HBASE_DATA_BLOCK_ENCODING = 9;
+    public static final int HBASE_CACHE_BLOOMS_ON_WRITE = 10;
+    public static final int HBASE_CACHE_DATA_ON_WRITE = 11;
+    public static final int HBASE_CACHE_INDEXES_ON_WRITE = 12;
+    public static final int HBASE_COMPACT_COMPRESSION = 13;
+    public static final int HBASE_PREFIX_LENGTH_KEY = 14;
+    public static final int HBASE_EVICT_BLOCKS_ON_CLOSE = 15;
+    public static final int HBASE_KEEP_DELETED_CELLS = 16;
+    public static final int HBASE_REPLICATION_SCOPE = 17;
+    public static final int HBASE_MAX_FILESIZE = 18;
+    public static final int HBASE_COMPACT = 19;
+    public static final int HBASE_DURABILITY = 20;
+    public static final int HBASE_MEMSTORE_FLUSH_SIZE = 21;
+    public static final int HBASE_SPLIT_POLICY = 22;
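+
+    // The tableOptions array passed to createk() and alter() is indexed by the
+    // constants above: the HBASE_NAME slot carries the column family name(s);
+    // for the remaining slots an empty string means the option was not specified.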
+
+    
+    public HBaseClient() {
+      if (hTableClientsFree == null)
+         hTableClientsFree = new PoolMap<String, HTableClient>
+                 (PoolType.Reusable, Integer.MAX_VALUE);
+      hTableClientsInUse = new PoolMap<String, HTableClient>
+               (PoolType.Reusable, Integer.MAX_VALUE);
+    }
+
+    public String getLastError() {
+        return lastError;
+    }
+
+    void setLastError(String err) {
+        lastError = err;
+    }
+
+    static {
+    	//Some clients of this class, e.g. DcsServer/JdbcT2,
+    	//want to use their own log4j.properties file instead
+    	//of the /conf/log4j.hdfs.config so they can see their
+    	//log events in their own log files or console.
+    	//So, check for alternate log4j.properties otherwise
+    	//use the default HBaseClient config.
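+    	//For example (illustrative path only), a client could start its JVM with
+    	//  -Dhbaseclient.log4j.properties=/path/to/client-log4j.properties
+    	//to route these log events to its own configuration.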
+    	String confFile = System.getProperty("hbaseclient.log4j.properties");
+    	if(confFile == null) {
+    		System.setProperty("trafodion.hdfs.log", System.getenv("MY_SQROOT") + "/logs/trafodion.hdfs.log");
+    		confFile = System.getenv("MY_SQROOT") + "/conf/log4j.hdfs.config";
+    	}
+    	PropertyConfigurator.configure(confFile);
+    }
+
+    public boolean init(String zkServers, String zkPort) 
+	throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException, IOException
+    {
+        if (logger.isDebugEnabled()) logger.debug("HBaseClient.init(" + zkServers + ", " + zkPort
+                         + ") called.");
+        HBaseAdmin.checkHBaseAvailable(config);
+
+        try {
+            table = new RMInterface();
+        } catch (Exception e) {
+            if (logger.isDebugEnabled()) logger.debug("HBaseClient.init: Error in RMInterface instance creation.");
+        }
+        
+        return true;
+    }
+ 
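+    // Closes every HTableClient in the given pool and empties the pool.  Unlike
+    // cleanupCache() below, this also releases the JNI-side object but leaves
+    // the region cache alone.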
+    private void  cleanup(PoolMap hTableClientsPool) throws IOException
+    {
+       Collection hTableClients;
+       Iterator<HTableClient> iter;
+       HTableClient htable;
+       boolean clearRegionCache = false;
+       boolean cleanJniObject = true;
+
+       hTableClients = hTableClientsPool.values();
+       iter = hTableClients.iterator();
+       while (iter.hasNext())
+       {
+         htable = iter.next();
+         htable.close(clearRegionCache, cleanJniObject);          
+       }
+       hTableClientsPool.clear();
+    }
+
+    public boolean cleanup() throws IOException {
+       cleanup(hTableClientsInUse);
+       cleanup(hTableClientsFree);
+       return true;
+    }
+
+   public void cleanupCache(Collection hTableClients) throws IOException
+    {
+       Iterator<HTableClient> iter;
+       HTableClient htable;
+       boolean clearRegionCache = true;
+       boolean cleanJniObject = false;
+ 
+       iter = hTableClients.iterator();
+       while (iter.hasNext())
+       {
+          htable = iter.next();
+          htable.close(clearRegionCache, cleanJniObject);     
+       }
+    }
+
+    public boolean cleanupCache(String tblName) throws IOException
+    {
+       Collection hTableClients;
+       hTableClients = hTableClientsFree.values(tblName);
+       cleanupCache(hTableClients);  
+       hTableClientsFree.remove(tblName);
+       hTableClients = hTableClientsInUse.values(tblName);
+       cleanupCache(hTableClients);  
+       hTableClientsInUse.remove(tblName);
+       return true;
+    }
+
+    public boolean create(String tblName, Object[]  colFamNameList,
+                          boolean isMVCC) 
+        throws IOException, MasterNotRunningException {
+            if (logger.isDebugEnabled()) logger.debug("HBaseClient.create(" + tblName + ") called, and MVCC is " + isMVCC + ".");
+            cleanupCache(tblName);
+            HTableDescriptor desc = new HTableDescriptor(tblName);
+            for (int i = 0; i < colFamNameList.length ; i++) {
+		String  colFam = (String)colFamNameList[i];
+                HColumnDescriptor colDesc = new HColumnDescriptor(colFam);
+                if (isMVCC)
+                  colDesc.setMaxVersions(DtmConst.MVCC_MAX_VERSION);
+                else
+                  colDesc.setMaxVersions(DtmConst.SSCC_MAX_VERSION);
+                desc.addFamily(colDesc);
+            }
+            HColumnDescriptor metaColDesc = new HColumnDescriptor(DtmConst.TRANSACTION_META_FAMILY);
+            if (isMVCC)
+              metaColDesc.setMaxVersions(DtmConst.MVCC_MAX_DATA_VERSION);
+            else
+              metaColDesc.setMaxVersions(DtmConst.SSCC_MAX_DATA_VERSION);
+            metaColDesc.setInMemory(true);
+            desc.addFamily(metaColDesc);
+            HBaseAdmin admin = new HBaseAdmin(config);
+            admin.createTable(desc);
+            admin.close();
+            return true;
+   } 
+
+   // used for returning two flags from setDescriptors method
+
+   private class ChangeFlags {
+       boolean tableDescriptorChanged;
+       boolean columnDescriptorChanged;
+
+       ChangeFlags() {
+           tableDescriptorChanged = false;
+           columnDescriptorChanged = false;
+       }
+
+       void setTableDescriptorChanged() {
+           tableDescriptorChanged = true;
+       }
+
+       void setColumnDescriptorChanged() {
+           columnDescriptorChanged = true;
+       }
+
+       boolean tableDescriptorChanged() {
+           return tableDescriptorChanged;
+       }
+
+       boolean columnDescriptorChanged() {
+           return columnDescriptorChanged;
+       }
+   }
+
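+   // Applies the string-valued tableOptions (indexed by the HBASE_* constants
+   // above) to the table and column descriptors, and reports which of the two
+   // was changed.  An empty HBASE_MAX_VERSIONS entry resets the column family
+   // to defaultVersionsValue.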
+   private ChangeFlags setDescriptors(Object[] tableOptions,
+                                      HTableDescriptor desc,
+                                      HColumnDescriptor colDesc,
+                                      int defaultVersionsValue) {
+       ChangeFlags returnStatus = new ChangeFlags();
+       String trueStr = "TRUE";
+       for (int i = 0; i < tableOptions.length; i++) {
+           if (i == HBASE_NAME)	
+               continue ;
+           String tableOption = (String)tableOptions[i];
+           if ((i != HBASE_MAX_VERSIONS) && (tableOption.isEmpty()))
+               continue ;
+           switch (i) {
+           case HBASE_MAX_VERSIONS:
+               if (tableOption.isEmpty()) {
+                   if (colDesc.getMaxVersions() != defaultVersionsValue) {
+                       colDesc.setMaxVersions(defaultVersionsValue);
+                       returnStatus.setColumnDescriptorChanged();
+                   }
+               }
+               else {
+                   colDesc.setMaxVersions
+                       (Integer.parseInt(tableOption));
+                   returnStatus.setColumnDescriptorChanged();
+               }
+               break ;
+           case HBASE_MIN_VERSIONS:
+               colDesc.setMinVersions
+                   (Integer.parseInt(tableOption));
+               returnStatus.setColumnDescriptorChanged();
+               break ;
+           case HBASE_TTL:
+               colDesc.setTimeToLive
+                   (Integer.parseInt(tableOption));
+               returnStatus.setColumnDescriptorChanged();
+               break ;
+           case HBASE_BLOCKCACHE:
+               if (tableOption.equalsIgnoreCase(trueStr))
+                   colDesc.setBlockCacheEnabled(true);
+               else
+                   colDesc.setBlockCacheEnabled(false);
+               returnStatus.setColumnDescriptorChanged();
+               break ;
+           case HBASE_IN_MEMORY:
+               if (tableOption.equalsIgnoreCase(trueStr))
+                   colDesc.setInMemory(true);
+               else
+                   colDesc.setInMemory(false);
+               returnStatus.setColumnDescriptorChanged();
+               break ;
+           case HBASE_COMPRESSION:
+               if (tableOption.equalsIgnoreCase("GZ"))
+                   colDesc.setCompressionType(Algorithm.GZ);
+               else if (tableOption.equalsIgnoreCase("LZ4"))
+                   colDesc.setCompressionType(Algorithm.LZ4);
+               else if (tableOption.equalsIgnoreCase("LZO"))
+                   colDesc.setCompressionType(Algorithm.LZO);
+               else if (tableOption.equalsIgnoreCase("NONE"))
+                   colDesc.setCompressionType(Algorithm.NONE);
+               else if (tableOption.equalsIgnoreCase("SNAPPY"))
+                   colDesc.setCompressionType(Algorithm.SNAPPY); 
+               returnStatus.setColumnDescriptorChanged();
+               break ;
+           case HBASE_BLOOMFILTER:
+               if (tableOption.equalsIgnoreCase("NONE"))
+                   colDesc.setBloomFilterType(BloomType.NONE);
+               else if (tableOption.equalsIgnoreCase("ROW"))
+                   colDesc.setBloomFilterType(BloomType.ROW);
+               else if (tableOption.equalsIgnoreCase("ROWCOL"))
+                   colDesc.setBloomFilterType(BloomType.ROWCOL); 
+               returnStatus.setColumnDescriptorChanged();
+               break ;
+           case HBASE_BLOCKSIZE:
+               colDesc.setBlocksize
+                   (Integer.parseInt(tableOption));
+               returnStatus.setColumnDescriptorChanged();
+               break ;
+           case HBASE_DATA_BLOCK_ENCODING:
+               if (tableOption.equalsIgnoreCase("DIFF"))
+                   colDesc.setDataBlockEncoding(DataBlockEncoding.DIFF);
+               else if (tableOption.equalsIgnoreCase("FAST_DIFF"))
+                   colDesc.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
+               else if (tableOption.equalsIgnoreCase("NONE"))
+                   colDesc.setDataBlockEncoding(DataBlockEncoding.NONE);
+               else if (tableOption.equalsIgnoreCase("PREFIX"))
+                   colDesc.setDataBlockEncoding(DataBlockEncoding.PREFIX);
+               else if (tableOption.equalsIgnoreCase("PREFIX_TREE"))
+                   colDesc.setDataBlockEncoding(DataBlockEncoding.PREFIX_TREE);
+               returnStatus.setColumnDescriptorChanged();
+               break ;
+           case HBASE_CACHE_BLOOMS_ON_WRITE:
+               if (tableOption.equalsIgnoreCase(trueStr))
+                   colDesc.setCacheBloomsOnWrite(true);
+               else
+                   colDesc.setCacheBloomsOnWrite(false);
+               returnStatus.setColumnDescriptorChanged();
+               break ;
+           case HBASE_CACHE_DATA_ON_WRITE:
+               if (tableOption.equalsIgnoreCase(trueStr))
+                   colDesc.setCacheDataOnWrite(true);
+               else
+                   colDesc.setCacheDataOnWrite(false);
+               returnStatus.setColumnDescriptorChanged();
+               break ;
+           case HBASE_CACHE_INDEXES_ON_WRITE:
+               if (tableOption.equalsIgnoreCase(trueStr))
+                   colDesc.setCacheIndexesOnWrite(true);
+               else
+                   colDesc.setCacheIndexesOnWrite(false);
+               returnStatus.setColumnDescriptorChanged();
+               break ;
+           case HBASE_COMPACT_COMPRESSION:
+               if (tableOption.equalsIgnoreCase("GZ"))
+                   colDesc.setCompactionCompressionType(Algorithm.GZ);
+               else if (tableOption.equalsIgnoreCase("LZ4"))
+                   colDesc.setCompactionCompressionType(Algorithm.LZ4);
+               else if (tableOption.equalsIgnoreCase("LZO"))
+                   colDesc.setCompactionCompressionType(Algorithm.LZO);
+               else if (tableOption.equalsIgnoreCase("NONE"))
+                   colDesc.setCompactionCompressionType(Algorithm.NONE);
+               else if (tableOption.equalsIgnoreCase("SNAPPY"))
+                   colDesc.setCompactionCompressionType(Algorithm.SNAPPY); 
+               returnStatus.setColumnDescriptorChanged();
+               break ;
+           case HBASE_PREFIX_LENGTH_KEY:
+               desc.setValue(KeyPrefixRegionSplitPolicy.PREFIX_LENGTH_KEY,
+                             tableOption);
+               returnStatus.setTableDescriptorChanged();
+               break ;
+           case HBASE_EVICT_BLOCKS_ON_CLOSE:
+               if (tableOption.equalsIgnoreCase(trueStr))
+                   colDesc.setEvictBlocksOnClose(true);
+               else
+                   colDesc.setEvictBlocksOnClose(false);
+               returnStatus.setColumnDescriptorChanged();
+               break ;
+           case HBASE_KEEP_DELETED_CELLS:
+               if (tableOption.equalsIgnoreCase(trueStr))
+                   colDesc.setKeepDeletedCells(true);
+               else
+                   colDesc.setKeepDeletedCells(false);
+               returnStatus.setColumnDescriptorChanged();
+               break ;
+           case HBASE_REPLICATION_SCOPE:
+               colDesc.setScope
+                   (Integer.parseInt(tableOption));
+               returnStatus.setColumnDescriptorChanged();
+               break ;
+           case HBASE_MAX_FILESIZE:
+               desc.setMaxFileSize
+                   (Long.parseLong(tableOption));
+               returnStatus.setTableDescriptorChanged();
+               break ;
+           case HBASE_COMPACT:
+              if (tableOption.equalsIgnoreCase(trueStr))
+                   desc.setCompactionEnabled(true);
+               else
+                   desc.setCompactionEnabled(false); 
+               returnStatus.setTableDescriptorChanged();
+               break ;
+           case HBASE_DURABILITY:
+               if (tableOption.equalsIgnoreCase("ASYNC_WAL"))
+                   desc.setDurability(Durability.ASYNC_WAL);
+               else if (tableOption.equalsIgnoreCase("FSYNC_WAL"))
+                   desc.setDurability(Durability.FSYNC_WAL);
+               else if (tableOption.equalsIgnoreCase("SKIP_WAL"))
+                   desc.setDurability(Durability.SKIP_WAL);
+               else if (tableOption.equalsIgnoreCase("SYNC_WAL"))
+                   desc.setDurability(Durability.SYNC_WAL);
+               else if (tableOption.equalsIgnoreCase("USE_DEFAULT"))
+                   desc.setDurability(Durability.USE_DEFAULT);
+               returnStatus.setTableDescriptorChanged(); 
+               break ;
+           case HBASE_MEMSTORE_FLUSH_SIZE:
+               desc.setMemStoreFlushSize
+                   (Long.parseLong(tableOption));
+               returnStatus.setTableDescriptorChanged();
+               break ;
+           case HBASE_SPLIT_POLICY:
+               // This method is not yet available in earlier versions
+               // desc.setRegionSplitPolicyClassName(tableOption));
+               desc.setValue(desc.SPLIT_POLICY, tableOption);
+               returnStatus.setTableDescriptorChanged();
+               break ;
+           default:
+               break;
+           }
+       }
+
+       return returnStatus;
+   }
+   
+
+   public boolean createk(String tblName, Object[] tableOptions,
+       Object[]  beginEndKeys, long transID, int numSplits, int keyLength,
+       boolean isMVCC)
+       throws IOException, MasterNotRunningException {
+            if (logger.isDebugEnabled()) logger.debug("HBaseClient.createk(" + tblName + ") called.");
+            String trueStr = "TRUE";
+            cleanupCache(tblName);
+            HTableDescriptor desc = new HTableDescriptor(tblName);
+
+            int defaultVersionsValue = 0;
+            if (isMVCC)
+                defaultVersionsValue = DtmConst.MVCC_MAX_VERSION;
+            else
+                defaultVersionsValue = DtmConst.SSCC_MAX_VERSION;
+
+            // column family names are a space-delimited list of names.
+            // extract all family names and add to table descriptor.
+            // All other default and specified options remain the same for all families.
+            String colFamsStr = (String)tableOptions[HBASE_NAME];
+            String[] colFamsArr = colFamsStr.split("\\s+"); 
+
+            for (int i = 0; i < colFamsArr.length; i++){            
+                String colFam = colFamsArr[i];
+
+                HColumnDescriptor colDesc = new HColumnDescriptor(colFam);
+
+                // change the descriptors based on the tableOptions; 
+                setDescriptors(tableOptions,desc /*out*/,colDesc /*out*/, defaultVersionsValue);
+                
+                desc.addFamily(colDesc);
+            }
+
+            HColumnDescriptor metaColDesc = new HColumnDescriptor(DtmConst.TRANSACTION_META_FAMILY);
+            if (isMVCC)
+              metaColDesc.setMaxVersions(DtmConst.MVCC_MAX_DATA_VERSION);
+            else
+              metaColDesc.setMaxVersions(DtmConst.SSCC_MAX_DATA_VERSION);
+            metaColDesc.setInMemory(true);
+            desc.addFamily(metaColDesc);
+            HBaseAdmin admin = new HBaseAdmin(config);
+
+            try {
+               if (beginEndKeys != null && beginEndKeys.length > 0)
+               {
+                  byte[][] keys = new byte[beginEndKeys.length][];
+                  for (int i = 0; i < beginEndKeys.length; i++){
+                     keys[i] = (byte[])beginEndKeys[i]; 
+                     if (logger.isDebugEnabled()) logger.debug("HBaseClient.createk key #" + i + " value " + keys[i] + ".");
+                  }
+                  if (transID != 0) {
+                     table.createTable(desc, keys, numSplits, keyLength, transID);
+                     if (logger.isDebugEnabled()) logger.debug("HBaseClient.createk beginEndKeys(" + beginEndKeys + ") called.");
+                  } else {
+                     admin.createTable(desc, keys);
+                  }
+               }
+               else {
+                  if (transID != 0) {
+                     table.createTable(desc, null, numSplits, keyLength, transID);
+                  } else {
+                     admin.createTable(desc);
+                  }
+               }
+            }
+            catch (IOException e)
+            {
+               if (logger.isDebugEnabled()) logger.debug("HbaseClient.createk : createTable error" + e);
+               throw e;
+            }
+            admin.close();
+        return true;
+    }
+
+    public boolean registerTruncateOnAbort(String tblName, long transID)
+        throws MasterNotRunningException, IOException {
+
+        try {
+           if(transID != 0) {
+              table.truncateTableOnAbort(tblName, transID);
+           }
+        }
+        catch (IOException e) {
+           if (logger.isDebugEnabled()) logger.debug("HbaseClient.registerTruncateOnAbort error" + e);
+           throw e;
+        }
+        return true;
+    }
+
+    private void waitForCompletion(String tblName,HBaseAdmin admin) 
+        throws IOException {
+        // poll for completion of an asynchronous operation
+        boolean keepPolling = true;
+        while (keepPolling) {
+            // status.getFirst() returns the number of regions yet to be updated
+            // status.getSecond() returns the total number of regions
+            Pair<Integer,Integer> status = admin.getAlterStatus(tblName.getBytes());
+
+            keepPolling = (status.getFirst() > 0) && (status.getSecond() > 0);
+            if (keepPolling) {
+                try {
+                    Thread.sleep(2000);  // sleep two seconds or until interrupted
+                }
+                catch (InterruptedException e) {
+                    // ignore the interruption and keep going
+                }    
+            }
+        }
+    }
+
+    public boolean alter(String tblName, Object[] tableOptions, long transID)
+        throws IOException, MasterNotRunningException {
+
+        if (logger.isDebugEnabled()) logger.debug("HBaseClient.alter(" + tblName + ") called.");
+        cleanupCache(tblName);
+        HBaseAdmin admin = new HBaseAdmin(config);
+        HTableDescriptor htblDesc = admin.getTableDescriptor(tblName.getBytes());       
+        HColumnDescriptor[] families = htblDesc.getColumnFamilies();
+
+        String colFam = (String)tableOptions[HBASE_NAME];
+        if (colFam == null)
+            return true; // must have col fam name
+
+        // if the only option specified is the column family name and this family
+        // doesn't already exist, then add it.
+        boolean onlyColFamOptionSpecified = true;
+        for (int i = 0; (onlyColFamOptionSpecified && (i < tableOptions.length)); i++) {
+            if (i == HBASE_NAME)	
+                continue ;
+
+            if (((String)tableOptions[i]).length() != 0)
+                {
+                    onlyColFamOptionSpecified = false;
+                }
+        }
+
+        HColumnDescriptor colDesc = htblDesc.getFamily(colFam.getBytes());
+
+        ChangeFlags status = new ChangeFlags();
+        if (onlyColFamOptionSpecified) {
+            if (colDesc == null) {
+                colDesc = new HColumnDescriptor(colFam);
+                
+                htblDesc.addFamily(colDesc);
+                
+                status.setTableDescriptorChanged();
+            } else
+                return true; // col fam already exists
+        }
+        else {
+            if (colDesc == null)
+                return true; // colDesc must exist
+
+            int defaultVersionsValue = colDesc.getMaxVersions(); 
+
+            status = 
+                setDescriptors(tableOptions,htblDesc /*out*/,colDesc /*out*/, defaultVersionsValue);
+        }
+
+        try {
+            if (transID != 0) {
+                // Transactional alter support
+                table.alter(tblName, tableOptions, transID);
+                if (logger.isDebugEnabled()) logger.debug("HBaseClient.alter(" + tblName + ") called with object length: " + java.lang.reflect.Array.getLength(tableOptions));
+            }
+            else {
+                // the modifyTable and modifyColumn operations are asynchronous,
+                // so we have to have additional code to poll for their completion
+                // (I hear that synchronous versions will be available in HBase 1.x)
+                if (status.tableDescriptorChanged()) {
+                    admin.modifyTable(tblName,htblDesc);
+                    waitForCompletion(tblName,admin);
+                }
+                else if (status.columnDescriptorChanged()) {
+                    admin.modifyColumn(tblName,colDesc);                  
+                    waitForCompletion(tblName,admin);
+                }
+                admin.close();
+            }
+        }
+        catch (IOException e) {
+            if (logger.isDebugEnabled()) logger.debug("HBaseClient.alter error: " + e);
+            throw e;
+        }
+
+        cleanupCache(tblName);
+        return true;
+    }
+
+    public boolean drop(String tblName, long transID)
+             throws MasterNotRunningException, IOException {
+        if (logger.isDebugEnabled()) logger.debug("HBaseClient.drop(" + tblName + ") called.");
+        HBaseAdmin admin = new HBaseAdmin(config);
+        //			admin.disableTableAsync(tblName);
+
+        try {
+           if(transID != 0) {
+              table.dropTable(tblName, transID);
+           }
+           else {
+              admin.disableTable(tblName);
+              admin.deleteTable(tblName);
+              admin.close();
+           }
+        }
+        catch (IOException e) {
+           if (logger.isDebugEnabled()) logger.debug("HbaseClient.drop  error" + e);
+           throw e;
+        }
+
+        return cleanupCache(tblName);
+    }
+
+    public boolean dropAll(String pattern) 
+             throws MasterNotRunningException, IOException {
+            if (logger.isDebugEnabled()) logger.debug("HBaseClient.dropAll(" + pattern + ") called.");
+            HBaseAdmin admin = new HBaseAdmin(config);
+
+	    HTableDescriptor[] htdl = admin.listTables(pattern);
+	    if (htdl == null) // no tables match the given pattern.
+		return true;
+
+	    for (HTableDescriptor htd : htdl) {
+		String tblName = htd.getNameAsString();
+
+                // do not drop DTM log files which have the format: TRAFODION._DTM_.*
+                int idx = tblName.indexOf("TRAFODION._DTM_");
+                if (idx == 0)
+                    continue;
+                
+                //                System.out.println(tblName);
+                admin.disableTable(tblName);
+                admin.deleteTable(tblName);
+	    }
+ 	    
+            admin.close();
+            return cleanup();
+    }
+
+    public ByteArrayList listAll(String pattern) 
+             throws MasterNotRunningException, IOException {
+            if (logger.isDebugEnabled()) logger.debug("HBaseClient.listAll(" + pattern + ") called.");
+            HBaseAdmin admin = new HBaseAdmin(config);
+
+            ByteArrayList hbaseTables = new ByteArrayList();
+
+	    HTableDescriptor[] htdl = 
+                (pattern.isEmpty() ? admin.listTables() : admin.listTables(pattern));
+
+	    for (HTableDescriptor htd : htdl) {
+		String tblName = htd.getNameAsString();
+
+                //                System.out.println(tblName);
+
+                byte[] b = tblName.getBytes();
+                hbaseTables.add(b);
+	    }
+ 	    
+            admin.close();
+            cleanup();
+            
+            return hbaseTables;
+    }
+
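+    // Copies currTblName by snapshotting it and cloning the snapshot under
+    // oldTblName; any existing snapshot of the same name is removed first, and
+    // the snapshot is deleted and the source table re-enabled when done.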
+    public boolean copy(String currTblName, String oldTblName)
+	throws MasterNotRunningException, IOException, SnapshotCreationException, InterruptedException {
+            if (logger.isDebugEnabled()) logger.debug("HBaseClient.copy(" + currTblName + oldTblName + ") called.");
+            HBaseAdmin admin = new HBaseAdmin(config);
+	    
+	    String snapshotName = currTblName + "_SNAPSHOT";
+	    
+	    List<SnapshotDescription> l = new ArrayList<SnapshotDescription>(); 
+	    //	    l = admin.listSnapshots(snapshotName);
+	    l = admin.listSnapshots();
+	    if (! l.isEmpty())
+		{
+		    for (SnapshotDescription sd : l) {
+			//			System.out.println("here 1");
+			//			System.out.println(snapshotName);
+			//			System.out.println(sd.getName());
+			if (sd.getName().compareTo(snapshotName) == 0)
+			    {
+				//				System.out.println("here 2");
+				//			    admin.enableTable(snapshotName);
+				//				System.out.println("here 3");
+				admin.deleteSnapshot(snapshotName);
+				//				System.out.println("here 4");
+			    }
+		    }
+		}
+	    //	    System.out.println(snapshotName);
+	    if (! admin.isTableDisabled(currTblName))
+		admin.disableTable(currTblName);
+	    //	    System.out.println("here 5");
+	    admin.snapshot(snapshotName, currTblName);
+	    admin.cloneSnapshot(snapshotName, oldTblName);
+	    admin.deleteSnapshot(snapshotName);
+	    //	    System.out.println("here 6");
+	    admin.enableTable(currTblName);
+            admin.close();
+            return true;
+    }
+
+    public boolean exists(String tblName)  
+           throws MasterNotRunningException, IOException {
+            if (logger.isDebugEnabled()) logger.debug("HBaseClient.exists(" + tblName + ") called.");
+            HBaseAdmin admin = new HBaseAdmin(config);
+            boolean result = admin.tableExists(tblName);
+            admin.close();
+            return result;
+    }
+
+    public HTableClient getHTableClient(long jniObject, String tblName, 
+                  boolean useTRex) throws IOException 
+    {
+       if (logger.isDebugEnabled()) logger.debug("HBaseClient.getHTableClient(" + tblName
+                         + (useTRex ? ", use TRX" : ", no TRX") + ") called.");
+       HTableClient htable = hTableClientsFree.get(tblName);
+       if (htable == null) {
+          htable = new HTableClient();
+          if (htable.init(tblName, useTRex) == false) {
+             if (logger.isDebugEnabled()) logger.debug("  ==> Error in init(), returning empty.");
+             return null;
+          }
+          if (logger.isDebugEnabled()) logger.debug("  ==> Created new object.");
+          hTableClientsInUse.put(htable.getTableName(), htable);
+          htable.setJniObject(jniObject);
+          return htable;
+       } else {
+            if (logger.isDebugEnabled()) logger.debug("  ==> Returning existing object, removing from container.");
+            hTableClientsInUse.put(htable.getTableName(), htable);
+            htable.resetAutoFlush();
+           htable.setJniObject(jniObject);
+            return htable;
+       }
+    }
+
+
+    public void releaseHTableClient(HTableClient htable) 
+                    throws IOException {
+        if (htable == null)
+            return;
+	                
+        if (logger.isDebugEnabled()) logger.debug("HBaseClient.releaseHTableClient(" + htable.getTableName() + ").");
+        boolean cleanJniObject = false;
+        // If the thread was interrupted, remove the table from the cache
+        // because the table connection is retried the next time the table
+        // is used.
+        if (htable.release(cleanJniObject))
+           cleanupCache(htable.getTableName());
+        else
+        {
+           if (hTableClientsInUse.remove(htable.getTableName(), htable))
+              hTableClientsFree.put(htable.getTableName(), htable);
+           else
+              if (logger.isDebugEnabled()) logger.debug("Table not found in inUse Pool");
+        }
+    }
+
+    public boolean flushAllTables() throws IOException {
+        if (logger.isDebugEnabled()) logger.debug("HBaseClient.flushAllTables() called.");
+       if (hTableClientsInUse.isEmpty()) {
+          return true;
+        }
+        for (HTableClient htable : hTableClientsInUse.values()) {
+		  htable.flush();
+        }
+	return true; 
+    }
+
+    public boolean grant(byte[] user, byte[] tblName,
+                         Object[] actionCodes) throws IOException {
+        if (logger.isDebugEnabled()) logger.debug("HBaseClient.grant(" + new String(user) + ", "
+                     + new String(tblName) + ") called.");
+		byte[] colFamily = null;
+
+		Permission.Action[] assigned = new Permission.Action[actionCodes.length];
+		for (int i = 0 ; i < actionCodes.length; i++) {
+			String actionCode = (String)actionCodes[i];
+			assigned[i] = Permission.Action.valueOf(actionCode);
+		}
+
+	    //HB98
+	    TableName htblName = TableName.valueOf(new String(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME)
+						   ,new String(tblName));
+            UserPermission userPerm = new UserPermission(user, htblName,
+                                                         colFamily, assigned);
+
+            AccessController accessController = new AccessController();
+	    //HB98 The grant() method is very different in HB98 (commenting out for now)
+            //accessController.grant(userPerm);
+        return true;
+    }
+
+   public boolean revoke(byte[] user, byte[] tblName,
+                          Object[] actionCodes) 
+                     throws IOException {
+        if (logger.isDebugEnabled()) logger.debug("HBaseClient.revoke(" + new String(user) + ", "
+                     + new String(tblName) + ") called.");
+        byte[] colFamily = null;
+
+        Permission.Action[] assigned = new Permission.Action[actionCodes.length];
+        for (int i = 0 ; i < actionCodes.length; i++) {
+            String actionCode = (String)actionCodes[i];
+            assigned[i] = Permission.Action.valueOf(actionCode);
+        }
+
+	    //HB98
+	    TableName htblName = TableName.valueOf(new String(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME)
+						   ,new String(tblName));
+            UserPermission userPerm = new UserPermission(user, htblName,
+                                                         colFamily, assigned);
+
+            AccessController accessController = new AccessController();
+	    
+	    //HB98 The revoke() method is very different in HB98 (commenting out for now)
+            //accessController.revoke(userPerm);
+        return true;
+    }
+
+    // Debugging method to display initial set of KeyValues and sequence
+    // of column qualifiers.
+    private void printQualifiers(HFile.Reader reader, int maxKeys) 
+                 throws IOException {
+      String qualifiers = new String();
+      HFileScanner scanner = reader.getScanner(false, false, false);
+      scanner.seekTo();
+      int kvCount = 0;
+      int nonPuts = 0;
+      do {
+        KeyValue kv = scanner.getKeyValue();
+        System.out.println(kv.toString());
+        if (kv.getType() == KeyValue.Type.Put.getCode())
+          qualifiers = qualifiers + kv.getQualifier()[0] + " ";
+        else
+          nonPuts++;
+      } while (++kvCount < maxKeys && scanner.next());
+      System.out.println("First " + kvCount + " column qualifiers: " + qualifiers);
+      if (nonPuts > 0)
+        System.out.println("Encountered " + nonPuts + " non-PUT KeyValue types.");
+    }
+
+    // Estimates the number of rows still in the MemStores of the regions
+    // associated with the passed table name. The number of bytes in the
+    // MemStores is divided by the passed row size in bytes, which is
+    // derived by comparing the row count for an HFile (which in turn is
+    // derived by the number of KeyValues in the file and the number of
+    // columns in the table) to the size of the HFile.
+    private long estimateMemStoreRows(String tblName, int rowSize)
+                 throws MasterNotRunningException, IOException {
+      if (rowSize == 0)
+        return 0;
+
+      HBaseAdmin admin = new HBaseAdmin(config);
+      HTable htbl = new HTable(config, tblName);
+      long totalMemStoreBytes = 0;
+      try {
+        // Get a set of all the regions for the table.
+        Set<HRegionInfo> tableRegionInfos = htbl.getRegionLocations().keySet();
+        Set tableRegions = new TreeSet(Bytes.BYTES_COMPARATOR);
+        for (HRegionInfo regionInfo : tableRegionInfos) {
+          tableRegions.add(regionInfo.getRegionName());
+        }
+     
+        // Get collection of all servers in the cluster.
+        ClusterStatus clusterStatus = admin.getClusterStatus();
+        Collection<ServerName> servers = clusterStatus.getServers();
+        final long bytesPerMeg = 1024L * 1024L;
+     
+        // For each server, look at each region it contains and see if
+        // it is in the set of regions for the table. If so, add its
+        // MemStore size to the running total.
+        for (ServerName serverName : servers) {
+          ServerLoad serverLoad = clusterStatus.getLoad(serverName);
+          for (RegionLoad regionLoad: serverLoad.getRegionsLoad().values()) {
+            byte[] regionId = regionLoad.getName();
+            if (tableRegions.contains(regionId)) {
+              long regionMemStoreBytes = bytesPerMeg * regionLoad.getMemStoreSizeMB();
+              if (logger.isDebugEnabled()) logger.debug("Region " + regionLoad.getNameAsString()
+                           + " has MemStore size " + regionMemStoreBytes);
+              totalMemStoreBytes += regionMemStoreBytes;
+            }
+          }
+        }
+      }
+      finally {
+        admin.close();
+      }
+
+      // Divide the total MemStore size by the size of a single row.
+      if (logger.isDebugEnabled()) logger.debug("Estimating " + (totalMemStoreBytes / rowSize)
+                   + " rows in MemStores of table's regions.");
+      return totalMemStoreBytes / rowSize;
+    }
+
+
+    public float getBlockCacheFraction()
+    {
+        float defCacheFraction = 0.4f;
+        return config.getFloat("hfile.block.cache.size",defCacheFraction);
+    }
+    // Estimates row count for tblName by iterating over the HFiles for
+    // the table, extracting the KeyValue entry count from the file's
+    // trailer block, summing the counts, and dividing by the number of
+    // columns in the table. An adjustment is made for the estimated
+    // number of missing (null) values by sampling the first several
+    // hundred KeyValues to see how many are missing.
+    public boolean estimateRowCount(String tblName, int partialRowSize,
+                                    int numCols, long[] rc)
+                   throws MasterNotRunningException, IOException, ClassNotFoundException, URISyntaxException {
+      if (logger.isDebugEnabled()) logger.debug("HBaseClient.estimateRowCount(" + tblName + ") called.");
+
+      final String REGION_NAME_PATTERN = "[0-9a-f]*";
+      final String HFILE_NAME_PATTERN  = "[0-9a-f]*";
+
+      // To estimate incidence of nulls, read the first 500 rows worth
+      // of KeyValues.
+      final int ROWS_TO_SAMPLE = 500;
+      int putKVsSampled = 0;
+      int nonPutKVsSampled = 0;
+      int nullCount = 0;
+      long totalEntries = 0;   // KeyValues in all HFiles for table
+      long totalSizeBytes = 0; // Size of all HFiles for table 
+      long estimatedTotalPuts = 0;
+      boolean more = true;
+
+      // Access the file system to go directly to the table's HFiles.
+      // Create a reader for the file to access the entry count stored
+      // in the trailer block, and a scanner to iterate over a few
+      // hundred KeyValues to estimate the incidence of nulls.
+      long nano1, nano2;
+      nano1 = System.nanoTime();
+      FileSystem fileSystem = FileSystem.get(config);
+      nano2 = System.nanoTime();
+      if (logger.isDebugEnabled()) logger.debug("FileSystem.get() took " + ((nano2 - nano1) + 500000) / 1000000 + " milliseconds.");
+      CacheConfig cacheConf = new CacheConfig(config);
+      String hbaseRootPath = config.get(HConstants.HBASE_DIR).trim();
+      if (hbaseRootPath.charAt(0) != '/')
+        hbaseRootPath = new URI(hbaseRootPath).getPath();
+      if (logger.isDebugEnabled()) logger.debug("hbaseRootPath = " + hbaseRootPath);
+      FileStatus[] fsArr = fileSystem.globStatus(new Path(
+                               hbaseRootPath + "/data/default/" +
+                               tblName + "/" + REGION_NAME_PATTERN +
+                               "/#1/" + HFILE_NAME_PATTERN));
+      for (FileStatus fs : fsArr) {
+        // Make sure the file name conforms to HFile name pattern.
+        if (!StoreFileInfo.isHFile(fs.getPath())) {
+          if (logger.isDebugEnabled()) logger.debug("Skipped file " + fs.getPath() + " -- not a valid HFile name.");
+          continue;
+        }
+        HFile.Reader reader = HFile.createReader(fileSystem, fs.getPath(), cacheConf, config);
+        try {
+          totalEntries += reader.getEntries();
+          totalSizeBytes += reader.length();
+          //printQualifiers(reader, 100);
+          if (ROWS_TO_SAMPLE > 0 &&
+              totalEntries == reader.getEntries()) {  // first file only
+            // Trafodion column qualifiers are ordinal numbers, which
+            // makes it easy to count missing (null) values. We also count
+            // the non-Put KVs (typically delete-row markers) to estimate
+            // their frequency in the full file set.
+            HFileScanner scanner = reader.getScanner(false, false, false);
+            scanner.seekTo();  //position at beginning of first data block
+            byte currQual = 0;
+            byte nextQual;
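+            // Column qualifiers increase within a row, so a qualifier that is
+            // not greater than the previous one marks the start of a new row.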
+            do {
+              KeyValue kv = scanner.getKeyValue();
+              if (kv.getType() == KeyValue.Type.Put.getCode()) {
+                nextQual = kv.getQualifier()[0];
+                if (nextQual <= currQual)
+                  nullCount += ((numCols - currQual)  // nulls at end of this row
+                              + (nextQual - 1));      // nulls at start of next row
+                else
+                  nullCount += (nextQual - currQual - 1);
+                currQual = nextQual;
+                putKVsSampled++;
+              } else {
+                nonPutKVsSampled++;  // don't count these toward the number
+              }                      //   we want to scan
+            } while ((putKVsSampled + nullCount) < (numCols * ROWS_TO_SAMPLE)
+                     && (more = scanner.next()));
+
+            // If all rows were read, count any nulls at end of last row.
+            if (!more && putKVsSampled > 0)
+              nullCount += (numCols - currQual);
+
+            if (logger.isDebugEnabled()) logger.debug("Sampled " + nullCount + " nulls.");
+          }  // code for first file
+        } finally {
+          reader.close(false);
+        }
+      } // for
+
+      long estimatedEntries = (ROWS_TO_SAMPLE > 0
+                                 ? 0               // get from sample data, below
+                                 : totalEntries);  // no sampling, use stored value
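+      // Scale the stored entry count by the sampled fraction of Put KVs, then
+      // inflate it by the sampled null incidence so that missing (null) cells
+      // are counted as if they were present.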
+      if (putKVsSampled > 0) // avoid div by 0 if no Put KVs in sample
+        {
+          estimatedTotalPuts = (putKVsSampled * totalEntries) / 
+                               (putKVsSampled + nonPutKVsSampled);
+          estimatedEntries = ((putKVsSampled + nullCount) * estimatedTotalPuts)
+                                   / putKVsSampled;
+        }
+
+      // Calculate estimate of rows in all HFiles of table.
+      rc[0] = (estimatedEntries + (numCols/2)) / numCols; // round instead of truncate
+
+      // Estimate # of rows in MemStores of all regions of table. Pass
+      // a value to divide the size of the MemStore by. Base this on the
+      // ratio of bytes-to-rows in the HFiles, or the actual row size if
+      // the HFiles were empty.
+      int rowSize;
+      if (rc[0] > 0)
+        rowSize = (int)(totalSizeBytes / rc[0]);
+      else {
+        // From Traf metadata we have calculated and passed in part of the row
+        // size, including size of column qualifiers (col names), which are not
+        // known to HBase.  Add to this the length of the fixed part of the
+        // KeyValue format, times the number of columns.
+        int fixedSizePartOfKV = KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE // key len + value len
+                              + KeyValue.KEY_INFRASTRUCTURE_SIZE;     // rowkey & col family len, timestamp, key type
+        rowSize = partialRowSize   // for all cols: row key + col qualifiers + values
+                      + (fixedSizePartOfKV * numCols);
+
+        // Trafodion tables have a single col family at present, so we only look
+        // at the first family name, and multiply its length times the number of
+        // columns. Even if more than one family is used in the future, presumably
+        // they will all be the same short size.
+        HTable htbl = new HTable(config, tblName);
+        HTableDescriptor htblDesc = htbl.getTableDescriptor();
+        HColumnDescriptor[] families = htblDesc.getColumnFamilies();
+        rowSize += (families[0].getName().length * numCols);
+      }
+
+      // Get the estimate of MemStore rows. Add to total after logging
+      // of individual sums below.
+      long memStoreRows = estimateMemStoreRows(tblName, rowSize);
+
+      if (logger.isDebugEnabled()) logger.debug(tblName + " contains a total of " + totalEntries + " KeyValues in all HFiles.");
+      if (logger.isDebugEnabled()) logger.debug("Based on a sample, it is estimated that " + estimatedTotalPuts +
+                   " of these KeyValues are of type Put.");
+      if (putKVsSampled + nullCount > 0)
+        if (logger.isDebugEnabled()) logger.debug("Sampling indicates a null incidence of " + 
+                     (nullCount * 100)/(putKVsSampled + nullCount) +
+                     " percent.");
+      if (logger.isDebugEnabled()) logger.debug("Estimated number of actual values (including nulls) is " + estimatedEntries);
+      if (logger.isDebugEnabled()) logger.debug("Estimated row count in HFiles = " + estimatedEntries +
+                   " / " + numCols + " (# columns) = " + rc[0]);
+      if (logger.isDebugEnabled()) logger.debug("Estimated row count from MemStores = " + memStoreRows);
+
+      rc[0] += memStoreRows;  // Add memstore estimate to total
+      if (logger.isDebugEnabled()) logger.debug("Total estimated row count for " + tblName + " = " + rc[0]);
+      return true;
+    }
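
The scaling arithmetic in the method above is easy to lose among the logging calls. Below is a minimal plain-Java sketch of the same computation; the sample counts are hypothetical stand-ins for the values that the real code gathers from the HFile trailers and the sampled first HFile of each region.

    public class HFileRowEstimateSketch {
        public static void main(String[] args) {
            // Hypothetical inputs (in the real code these come from the HFile scan).
            long totalEntries     = 1000000L; // KeyValues reported by all HFile trailers
            long putKVsSampled    = 9000L;    // Put-type KVs seen in the sample
            long nonPutKVsSampled = 1000L;    // delete markers etc. seen in the sample
            long nullCount        = 3000L;    // nulls inferred from qualifier gaps
            int  numCols          = 12;       // columns per SQL row

            // Scale the sampled Puts up to the whole table, add back the nulls
            // (which have no KeyValue), then divide by the column count with rounding.
            long estimatedTotalPuts = (putKVsSampled * totalEntries)
                                    / (putKVsSampled + nonPutKVsSampled);
            long estimatedEntries = ((putKVsSampled + nullCount) * estimatedTotalPuts)
                                  / putKVsSampled;
            long hfileRowEstimate = (estimatedEntries + (numCols / 2)) / numCols;

            System.out.println("Estimated rows in HFiles: " + hfileRowEstimate);
        }
    }
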
+
+
+    /**
+    This method returns the node names where the HBase table's regions reside.
+    **/
+    public boolean getRegionsNodeName(String tblName, String[] nodeNames)
+                   throws IOException
+    {
+      if (logger.isDebugEnabled()) 
+        logger.debug("HBaseClient.getRegionsNodeName(" + tblName + ") called.");
+
+      HRegionInfo regInfo = null;
+
+
+      HTable htbl = new HTable(config, tblName);
+      if (logger.isDebugEnabled())
+         logger.debug("after HTable call in getRegionsNodeName");
+
+      try {
+        NavigableMap<HRegionInfo, ServerName> locations = htbl.getRegionLocations();
+        if (logger.isDebugEnabled())
+           logger.debug("after htable.getRegionLocations call in getRegionsNodeName");
+
+      
+        String hostName;
+        int regCount = 0;
+
+        for (Map.Entry<HRegionInfo, ServerName> entry: locations.entrySet()) {
+          if (logger.isDebugEnabled()) logger.debug("Entered for loop in getRegionsNodeName");
+          regInfo = entry.getKey();
+          hostName = entry.getValue().getHostname();
+          nodeNames[regCount] = hostName;
+          if (logger.isDebugEnabled()) logger.debug("Hostname for region " + regCount + " is " + hostName);
+          regCount++;
+        }
+      } catch (Exception ie) {
+        if (logger.isDebugEnabled())
+          logger.debug("getRegionLocations throws exception " + ie.getMessage());
+        return false;
+      }
+
+      return true;
+    }
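
A hedged caller sketch for this method: the caller supplies a pre-sized array that is filled in place, and the method returns false if the region lookup fails. The HBaseClient instance, the table name, and the region count below are assumptions for illustration only.

    String[] nodeNames = new String[regionCount];   // regionCount obtained elsewhere
    if (hbaseClient.getRegionsNodeName("TRAFODION.SEABASE.T1", nodeNames)) {
        for (String node : nodeNames)
            System.out.println("region host: " + node);
    }
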
+
+
+
+    /**
+    This method returns the index levels and block size of an HBase table.
+    The index level is read from each HFile's trailer block: one region is chosen
+    pseudo-randomly, all HFiles in that region are examined, and the maximum index level is kept.
+    The block size is read from the HColumnDescriptor.
+    **/
+    public boolean getHbaseTableInfo(String tblName, int[] tblInfo)
+                   throws MasterNotRunningException, IOException, ClassNotFoundException, URISyntaxException {
+
+      if (logger.isDebugEnabled()) logger.debug("HBaseClient.getHbaseTableInfo(" + tblName + ") called.");
+      final String REGION_NAME_PATTERN = "[0-9a-f]*";
+      final String HFILE_NAME_PATTERN  = "[0-9a-f]*";
+
+      // initialize 
+      int indexLevel = 0;
+      int currIndLevel = 0;
+      int blockSize = 0;
+      tblInfo[0] = indexLevel;
+      tblInfo[1] = blockSize;
+
+      // get block size
+      HTable htbl = new HTable(config, tblName);
+      HTableDescriptor htblDesc = htbl.getTableDescriptor();
+      HColumnDescriptor[] families = htblDesc.getColumnFamilies();
+      blockSize = families[0].getBlocksize();
+      tblInfo[1] = blockSize;
+
+      // Access the file system to go directly to the table's HFiles.
+      long nano1 = 0, nano2 = 0;
+      if (logger.isDebugEnabled())
+        nano1 = System.nanoTime();
+      FileSystem fileSystem = FileSystem.get(config);
+
+      if (logger.isDebugEnabled()) {
+        nano2 = System.nanoTime();
+        logger.debug("FileSystem.get() took " + ((nano2 - nano1) + 500000) / 1000000 + " milliseconds.");
+      }
+      CacheConfig cacheConf = new CacheConfig(config);
+      String hbaseRootPath = config.get(HConstants.HBASE_DIR).trim();
+      if (hbaseRootPath.charAt(0) != '/')
+        hbaseRootPath = new URI(hbaseRootPath).getPath();
+      if (logger.isDebugEnabled()) logger.debug("hbaseRootPath = " + hbaseRootPath);
+
+      String regDir = hbaseRootPath + "/data/default/" + 
+                      tblName + "/" + REGION_NAME_PATTERN + "/#1";
+      if (logger.isDebugEnabled()) logger.debug("region dir = " + regDir);
+
+      //get random region from the list of regions and look at all Hfiles in that region
+      FileStatus[] regArr;
+      try {
+        regArr = fileSystem.globStatus(new Path(regDir));
+      } catch (IOException ioe) {
+        if (logger.isDebugEnabled()) logger.debug("fs.globStatus on region throws IOException");
+        return false; // return index level = 0; and  block size
+      }
+      
+      // logging
+      if (logger.isDebugEnabled()) {
+        for (int i =0; i < regArr.length; i++) 
+          logger.debug("Region Path is " + regArr[i].getPath());
+      }
+      // choose a pseudo-random region from the region array; take the absolute
+      // value because String.hashCode() may be negative
+      int regInd = Math.abs(tblName.hashCode() % regArr.length);
+
+      Path regName = regArr[regInd].getPath();
+      // Extract the MD5 hash name of the chosen region from its path, including the colFam name.
+      // We just need part2, which looks something like /c8fe2d575de62d5d5ffc530bda497bca/#1
+      String strRegPath = regName.toString();
+      String parts[] = strRegPath.split(tblName);
+      String part2 = parts[1];
+
+      // now remove regular expression from the region path.
+      // would look something like /hbase/data/default/<cat.sch.tab>/[0-9a-f]*/#1
+      int j = regDir.indexOf("/[");
+      String regPrefix = regDir.substring(0,j);
+      if (logger.isDebugEnabled()) logger.debug("Region Path prefix = " + regPrefix);
+      String hfilePath = regPrefix + part2 + "/" + HFILE_NAME_PATTERN;
+      
+      if (logger.isDebugEnabled()) logger.debug("Random = " + regInd + ", region is " + regName);
+      if (logger.isDebugEnabled()) logger.debug("Hfile path = " + hfilePath);
+
+      FileStatus[] fsArr;
+      try {
+        fsArr = fileSystem.globStatus(new Path(hfilePath));
+      } catch (IOException ioe) {
+        if (logger.isDebugEnabled()) logger.debug("fs.globStatus on Hfile throws IOException");
+        return false; // return index level = 0; and  block size
+      }
+
+      if (logger.isDebugEnabled()) {
+        for (int i =0; i < fsArr.length; i++)
+          logger.debug("Hfile Path is " + fsArr[i].getPath());
+      }
+     
+      // no HFiles; return from here
+      if (fsArr.length == 0)
+        return true; // return index level = 0; and  block size
+
+      // get maximum index level going through all Hfiles of randomly chosen region
+      if (logger.isDebugEnabled())
+        nano1 = System.nanoTime();
+      for (FileStatus fs : fsArr) {
+        // Make sure the file name conforms to HFile name pattern.
+        if (!StoreFileInfo.isHFile(fs.getPath())) {
+          if (logger.isDebugEnabled()) logger.debug("Skipped file " + fs.getPath() + " -- not a valid HFile name.");
+          continue;
+        }
+
+        // Create a reader for the file to access the index levels stored
+        // in the trailer block
+        HFile.Reader reader = HFile.createReader(fileSystem, fs.getPath(), cacheConf, config);
+        try {
+          FixedFileTrailer trailer = reader.getTrailer();
+          currIndLevel = trailer.getNumDataIndexLevels();
+          // index levels also count the data block level, which should be excluded
+          if (currIndLevel > 0)
+            currIndLevel = currIndLevel - 1;
+          if (logger.isDebugEnabled()) 
+            logger.debug("currIndLevel = " + currIndLevel+ ", indexLevel = " + indexLevel);
+          if (currIndLevel > indexLevel)
+            indexLevel = currIndLevel;
+       } finally {
+         reader.close(false);
+       }
+      } // for
+
+      if (logger.isDebugEnabled()) {
+        nano2 = System.nanoTime();
+        logger.debug("get index level took " + ((nano2 - nano1) + 500000) / 1000000 + " milliseconds.");
+      }
+
+      tblInfo[0] = indexLevel;
+      if (logger.isDebugEnabled()) {
+        logger.debug("Index Levels for " + tblName + " = " + tblInfo[0]);
+        logger.debug("Block Size for " + tblName + " = " + tblInfo[1]);
+      }
+      
+      return true;
+    }
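
A similar hedged caller sketch (HBaseClient construction elided, table name a placeholder): the two-element array receives the maximum index level and the column-family block size.

    int[] tblInfo = new int[2];
    if (hbaseClient.getHbaseTableInfo("TRAFODION.SEABASE.T1", tblInfo))
        System.out.println("index levels = " + tblInfo[0] +
                           ", block size = " + tblInfo[1]);
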
+
+    void printCell(KeyValue kv) {
+        String rowID = new String(kv.getRow());
+        String colFamily = new String(kv.getFamily());
+        String colName = new String(kv.getQualifier());
+        String colValue = new String(kv.getValue());
+        String row = rowID + ", " + colFamily + ", " + colName + ", "
+            + colValue + ", " + kv.getTimestamp();
+        System.out.println(row);
+    }
+
+    
+  public  HBulkLoadClient getHBulkLoadClient() throws IOException 
+  {
+    if (logger.isDebugEnabled()) logger.debug("HBaseClient.getHBulkLoadClient() called.");
+    HBulkLoadClient hblc = null;
+    try 
+    {
+       hblc = new HBulkLoadClient( config);
+    
+    if (hblc == null)
+      throw new IOException ("hblc is null");
+    }
+    catch (IOException e)
+    {
+      return null;
+    }
+    
+    return hblc;
+    
+  }
+  public void releaseHBulkLoadClient(HBulkLoadClient hblc) 
+      throws IOException 
+  {
+     if (hblc == null)
+       return;
+          
+      if (logger.isDebugEnabled()) logger.debug("HBaseClient.releaseHBulkLoadClient().");
+      hblc.release();
+   }
+  
+  // Returns the latest snapshot name for a table, or null if the table has no
+  // snapshots associated with it.
+  public String getLatestSnapshot(String tabName) throws IOException
+  {
+    HBaseAdmin admin = new HBaseAdmin(config);
+    List<SnapshotDescription> snapDescs = admin.listSnapshots();
+    long maxTimeStamp = 0;
+    String latestsnpName = null;
+    for (SnapshotDescription snp :snapDescs )
+    {
+      if (snp.getTable().compareTo(tabName) == 0 && 
+          snp.getCreationTime() > maxTimeStamp)
+      {
+        latestsnpName= snp.getName();
+        maxTimeStamp = snp.getCreationTime();
+      }
+      
+    }
+    admin.close();
+    admin = null;
+    return latestsnpName;
+  }
+  public boolean cleanSnpScanTmpLocation(String pathStr) throws Exception
+  {
+    if (logger.isDebugEnabled()) logger.debug("HbaseClient.cleanSnpScanTmpLocation() - start - Path: " + pathStr);
+    try 
+    {
+      Path delPath = new Path(pathStr );
+      delPath = delPath.makeQualified(delPath.toUri(), null);
+      FileSystem fs = FileSystem.get(delPath.toUri(),config);
+      fs.delete(delPath, true);
+    }
+    catch (IOException e)
+    {
+      if (logger.isDebugEnabled()) logger.debug("HbaseClient.cleanSnpScanTmpLocation() --exception:" + e);
+      throw e;
+    }
+    
+    return true;
+  }
+  private boolean updatePermissionForEntries(FileStatus[] entries, String hbaseUser, FileSystem fs) throws IOException 
+  {
+    if (entries == null) {
+      return true;
+    }
+    
+    for (FileStatus child : entries) {
+      Path path = child.getPath();
+      List<AclEntry> lacl = AclEntry.parseAclSpec("user:" + hbaseUser + ":rwx", true) ;
+      try 
+      {
+        fs.modifyAclEntries(path, lacl);
+      }
+      catch (IOException e)
+      {
+        // on failure, just log the exception and continue
+        if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.updatePermissionForEntries() exception. " + e);
+      }
+      if (child.isDir()) 
+      {
+        FileStatus[] files = FSUtils.listStatus(fs,path);
+        updatePermissionForEntries(files,hbaseUser, fs);
+      } 
+    }
+    return true;
+  }
+  
+  public boolean setArchivePermissions( String tabName) throws IOException,ServiceException
+  {
+    if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.setArchivePermissions() called. ");
+    Path rootDir = FSUtils.getRootDir(config);
+    FileSystem myfs = FileSystem.get(rootDir.toUri(),config);
+    FileStatus fstatus = myfs.getFileStatus(rootDir);
+    String hbaseUser = fstatus.getOwner(); 
+    assert (hbaseUser != null && hbaseUser.length() != 0);
+    Path tabArcPath = HFileArchiveUtil.getTableArchivePath(config,  TableName.valueOf(tabName));
+    if (tabArcPath == null)
+      return true;
+    List<AclEntry> lacl = AclEntry.parseAclSpec("user:" + hbaseUser + ":rwx", true) ;
+    try
+    {
+      myfs.modifyAclEntries(tabArcPath, lacl);
+    }
+    catch (IOException e)
+    {
+      // on failure, just log the exception and continue
+      if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.setArchivePermissions() exception. " + e);
+    }
+    FileStatus[] files = FSUtils.listStatus(myfs,tabArcPath);
+    updatePermissionForEntries(files,  hbaseUser, myfs); 
+    return true;
+  }
+
+  public int startGet(long jniObject, String tblName, boolean useTRex, long transID, byte[] rowID,
+                        Object[] columns, long timestamp)
+                        throws IOException {
+      HTableClient htc = getHTableClient(jniObject, tblName, useTRex);
+      return htc.startGet(transID, rowID, columns, timestamp);
+  }
+
+  public int startGet(long jniObject, String tblName, boolean useTRex, long transID, Object[] rowIDs,
+                        Object[] columns, long timestamp)
+                        throws IOException {
+      HTableClient htc = getHTableClient(jniObject, tblName, useTRex);
+      return htc.startGet(transID, rowIDs, columns, timestamp);
+  }
+
+  public int startGet(long jniObject, String tblName, boolean useTRex, long transID, short rowIDLen, Object rowIDs,
+                        Object[] columns)
+                        throws IOException {
+      HTableClient htc = getHTableClient(jniObject, tblName, useTRex);
+      return htc.getRows(transID, rowIDLen, rowIDs, columns);
+  }
+
+  public boolean insertRow(long jniObject, String tblName, boolean useTRex, long transID, byte[] rowID,
+                         Object row,
+                         long timestamp,
+                         boolean checkAndPut,
+                         boolean asyncOperation) throws IOException, InterruptedException, ExecutionException {
+
+      HTableClient htc = getHTableClient(jniObject, tblName, useTRex);
+      boolean ret = htc.putRow(transID, rowID, row, null, null,
+                                checkAndPut, asyncOperation);
+      if (asyncOperation == true)
+         htc.setJavaObject(jniObject);
+      else
+         releaseHTableClient(htc);
+      return ret;
+  }
+
+  public boolean checkAndUpdateRow(long jniObject, String tblName, boolean useTRex, long transID, byte[] rowID,
+                         Object columnsToUpdate,
+                         byte[] columnToCheck, byte[] columnValToCheck,
+                         long timestamp,
+                         boolean asyncOperation) throws IOException, InterruptedException, ExecutionException {
+      boolean checkAndPut = true;
+      HTableClient htc = getHTableClient(jniObject, tblName, useTRex);
+      boolean ret = htc.putRow(transID, rowID, columnsToUpdate, columnToCheck, columnValToCheck,
+                                checkAndPut, asyncOperation);
+      if (asyncOperation == true)
+         htc.setJavaObject(jniObject);
+      else
+         releaseHTableClient(htc);
+      return ret;
+  }
+
+  public boolean insertRows(long jniObject, String tblName, boolean useTRex, long transID, 
+			 short rowIDLen,
+                         Object rowIDs,
+                         Object rows,
+                         long timestamp,
+                         boolean autoFlush,
+                         boolean asyncOperation) throws IOException, InterruptedException, ExecutionException {
+      HTableClient htc = getHTableClient(jniObject, tblName, useTRex);
+      boolean ret = htc.putRows(transID, rowIDLen, rowIDs, rows, timestamp, autoFlush, asyncOperation);
+      if (asyncOperation == true)
+         htc.setJavaObject(jniObject);
+      else
+         releaseHTableClient(htc);
+      return ret;
+  }
+
+  public boolean deleteRow(long jniObject, String tblName, boolean useTRex, long transID, 
+                                 byte[] rowID,
+                                 Object[] columns,
+                                 long timestamp, boolean asyncOperation) throws IOException {
+      HTableClient htc = getHTableClient(jniObject, tblName, useTRex);
+      boolean ret = htc.deleteRow(transID, rowID, columns, timestamp, asyncOperation);
+      if (asyncOperation == true)
+         htc.setJavaObject(jniObject);
+      else
+         releaseHTableClient(htc);
+      return ret;
+  }
+
+  public boolean deleteRows(long jniObject, String tblName, boolean useTRex, long transID, short rowIDLen, Object rowIDs,
+                      long timestamp, 
+                      boolean asyncOperation) throws IOException, InterruptedException, ExecutionException {
+      HTableClient htc = getHTableClient(jniObject, tblName, useTRex);
+      boolean ret = htc.deleteRows(transID, rowIDLen, rowIDs, timestamp, asyncOperation);
+      if (asyncOperation == true)
+         htc.setJavaObject(jniObject);
+      else
+         releaseHTableClient(htc);
+      return ret;
+  }
+
+  public boolean checkAndDeleteRow(long jniObject, String tblName, boolean useTRex, long transID, 
+                                 byte[] rowID,
+                                 byte[] columnToCheck, byte[] colValToCheck,
+                                 long timestamp, boolean asyncOperation) throws IOException {
+      HTableClient htc = getHTableClient(jniObject, tblName, useTRex);
+      boolean ret = htc.checkAndDeleteRow(transID, rowID, columnToCheck, colValToCheck, timestamp);
+      if (asyncOperation == true)
+         htc.setJavaObject(jniObject);
+      else
+         releaseHTableClient(htc);
+      return ret;
+  }
+
+  public boolean  createCounterTable(String tabName,  String famName) throws IOException, MasterNotRunningException
+  {
+    if (logger.isDebugEnabled()) logger.debug("HBaseClient.createCounterTable() - start");
+    HBaseAdmin admin = new HBaseAdmin(config);
+    TableName tn =  TableName.valueOf (tabName);
+    if (admin.tableExists(tabName)) {
+        admin.close();
+        return true;
+    }
+    HTableDescriptor desc = new HTableDescriptor(tn);
+    HColumnDescriptor colDesc = new HColumnDescriptor(famName);
+    // A counter table is non-DTM-transactional.
+    // Use the default maximum versions for MVCC.
+    colDesc.setMaxVersions(DtmConst.MVCC_MAX_VERSION);
+    desc.addFamily(colDesc);
+    admin.createTable(desc);
+    admin.close();
+    if (logger.isDebugEnabled()) logger.debug("HBaseClient.createCounterTable() - end");
+    return true;
+  }
+
+  public long incrCounter(String tabName, String rowId, String famName, String qualName, long incrVal) throws Exception
+  {
+    if (logger.isDebugEnabled()) logger.debug("HBaseClient.incrCounter() - start");
+
+    HTable myHTable = new HTable(config, tabName);
+    long count = myHTable.incrementColumnValue(Bytes.toBytes(rowId), Bytes.toBytes(famName), Bytes.toBytes(qualName), incrVal);
+    myHTable.close();
+    return count;
+  }
+
+}
+    
+
+

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/src/main/java/org/trafodion/sql/HBulkLoadClient.java
----------------------------------------------------------------------
diff --git a/core/sql/src/main/java/org/trafodion/sql/HBulkLoadClient.java b/core/sql/src/main/java/org/trafodion/sql/HBulkLoadClient.java
new file mode 100644
index 0000000..31d8cac
--- /dev/null
+++ b/core/sql/src/main/java/org/trafodion/sql/HBulkLoadClient.java
@@ -0,0 +1,533 @@
+// @@@ START COPYRIGHT @@@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// @@@ END COPYRIGHT @@@
+
+package org.trafodion.sql;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Iterator;
+import java.io.File;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.log4j.PropertyConfigurator;
+import org.apache.log4j.Logger;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.security.access.AccessController;
+import org.apache.hadoop.hbase.security.access.UserPermission;
+import org.apache.hadoop.hbase.security.access.Permission;
+import org.apache.hadoop.hbase.MasterNotRunningException;
+import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type;
+import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.io.hfile.HFile;
+import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
+import org.apache.hadoop.hbase.io.compress.*;
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
+import org.apache.hadoop.hbase.regionserver.BloomType; 
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.trafodion.sql.HTableClient;
+//import org.trafodion.sql.HBaseClient;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.io.compress.CodecPool;
+import org.apache.hadoop.io.compress.Compressor;
+import org.apache.hadoop.io.compress.GzipCodec;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.hbase.TableName;
+
+import java.nio.ByteBuffer;
+import java.nio.file.Files;
+
+import org.apache.hive.jdbc.HiveDriver;
+import java.sql.Connection;
+import java.sql.Statement;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.lang.ClassNotFoundException;
+
+public class HBulkLoadClient
+{
+  
+  private final static FsPermission PERM_ALL_ACCESS = FsPermission.valueOf("-rwxrwxrwx");
+  private final static FsPermission PERM_HIDDEN = FsPermission.valueOf("-rwx--x--x");
+  private final static String BULKLOAD_STAGING_DIR = "hbase.bulkload.staging.dir";
+  private final static long MAX_HFILE_SIZE = 10737418240L; //10 GB
+  
+  public static int BLOCKSIZE = 64*1024;
+  public static String COMPRESSION = Compression.Algorithm.NONE.getName();
+  String lastError;
+  static Logger logger = Logger.getLogger(HBulkLoadClient.class.getName());
+  Configuration config;
+  HFile.Writer writer;
+  String hFileLocation;
+  String hFileName;
+  long maxHFileSize = MAX_HFILE_SIZE;
+  FileSystem fileSys = null;
+  String compression = COMPRESSION;
+  int blockSize = BLOCKSIZE;
+  DataBlockEncoding dataBlockEncoding = DataBlockEncoding.NONE;
+  FSDataOutputStream fsOut = null;
+
+  public HBulkLoadClient()
+  {
+    if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.HBulkLoadClient() called.");
+  }
+
+  public HBulkLoadClient(Configuration conf) throws IOException
+  {
+    if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.HBulkLoadClient(...) called.");
+    config = conf;
+  }
+
+  public String getLastError() {
+    return lastError;
+  }
+
+  void setLastError(String err) {
+      lastError = err;
+  }
+  public boolean initHFileParams(String hFileLoc, String hFileNm, long userMaxSize /*in MBs*/, String tblName,
+                                 String sampleTblName, String sampleTblDDL) 
+  throws UnsupportedOperationException, IOException, SQLException, ClassNotFoundException
+  {
+    if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.initHFileParams() called.");
+    
+    hFileLocation = hFileLoc;
+    hFileName = hFileNm;
+    
+    HTable myHTable = new HTable(config, tblName);
+    HTableDescriptor hTableDesc = myHTable.getTableDescriptor();
+    HColumnDescriptor[] hColDescs = hTableDesc.getColumnFamilies();
+    if (hColDescs.length > 2)  // 2 column families: 1 for user data, 1 for transaction metadata
+    {
+      myHTable.close();
+      throw new UnsupportedOperationException ("only two families are supported.");
+    }
+    
+    compression= hColDescs[0].getCompression().getName();
+    blockSize= hColDescs[0].getBlocksize();
+    dataBlockEncoding = hColDescs[0].getDataBlockEncoding();
+    
+    if (userMaxSize == 0)
+    {
+      if (hTableDesc.getMaxFileSize() == -1)
+      {
+        maxHFileSize = MAX_HFILE_SIZE;
+      }
+      else
+      {
+        maxHFileSize = hTableDesc.getMaxFileSize();
+      }
+    }
+    else 
+      maxHFileSize = userMaxSize * 1024 *1024;  //maxSize is in MBs
+
+    myHTable.close();
+
+    if (sampleTblDDL.length() > 0)
+    {
+      Class.forName("org.apache.hive.jdbc.HiveDriver");
+      Connection conn = DriverManager.getConnection("jdbc:hive2://", "hive", "");
+      Statement stmt = conn.createStatement();
+      stmt.execute("drop table if exists " + sampleTblName);
+      //System.out.println("*** DDL for Hive sample table is: " + sampleTblDDL);
+      stmt.execute(sampleTblDDL);
+    }
+
+    return true;
+  }
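
initHFileParams() drives the optional Hive sample table through an embedded HiveServer2 JDBC connection. The following is a minimal standalone sketch of that pattern; the table name and DDL are placeholders, not the statements Trafodion actually generates.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class HiveSampleTableSketch {
        public static void main(String[] args) throws Exception {
            // Embedded-mode HiveServer2 connection, as used above.
            Class.forName("org.apache.hive.jdbc.HiveDriver");
            Connection conn = DriverManager.getConnection("jdbc:hive2://", "hive", "");
            Statement stmt = conn.createStatement();
            stmt.execute("drop table if exists traf_sample_t1");       // placeholder name
            stmt.execute("create table traf_sample_t1 (c1 string)");   // placeholder DDL
            stmt.close();
            conn.close();
        }
    }
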
+  public boolean doCreateHFile() throws IOException, URISyntaxException
+  {
+    if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.doCreateHFile() called.");
+    
+    if (hFileLocation == null )
+      throw new NullPointerException(hFileLocation + " is not set");
+    if (hFileName == null )
+      throw new NullPointerException(hFileName + " is not set");
+    
+    closeHFile();
+    
+    if (fileSys == null)
+     fileSys = FileSystem.get(config); 
+
+    Path hfilePath = new Path(new Path(hFileLocation ), hFileName + "_" +  System.currentTimeMillis());
+    hfilePath = hfilePath.makeQualified(hfilePath.toUri(), null);
+
+    if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.createHFile Path: " + hfilePath);
+
+    try
+    {
+      HFileContext hfileContext = new HFileContextBuilder()
+                                 .withBlockSize(blockSize)
+                                 .withCompression(Compression.getCompressionAlgorithmByName(compression))
+                                 .withDataBlockEncoding(dataBlockEncoding)
+                                 .build();
+
+      writer =    HFile.getWriterFactory(config, new CacheConfig(config))
+                     .withPath(fileSys, hfilePath)
+                     .withFileContext(hfileContext)
+                     .withComparator(KeyValue.COMPARATOR)
+                     .create();
+      if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.createHFile Path: " + writer.getPath() + "Created");
+    }
+    catch (IOException e)
+    {
+       if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.doCreateHFile Exception" + e.getMessage());
+       throw e;
+    }
+    return true;
+  }
+  
+  public boolean isNewFileNeeded() throws IOException
+  {
+    if (writer == null)
+      return true;
+    
+    if (fileSys == null)
+      fileSys = FileSystem.get(writer.getPath().toUri(),config);
+    
+    if (fileSys.getFileStatus(writer.getPath()).getLen() > maxHFileSize)
+     return true;
+
+    return false;
+  }
+
+  public boolean addToHFile(short rowIDLen, Object rowIDs,
+                Object rows) throws IOException, URISyntaxException
+  {
+     if (logger.isDebugEnabled()) logger.debug("Enter addToHFile() ");
+     Put put;
+    if (isNewFileNeeded())
+    {
+      doCreateHFile();
+    }
+     ByteBuffer bbRows, bbRowIDs;
+     short numCols, numRows;
+     short colNameLen;
+     int colValueLen;
+     byte[] colName, colValue, rowID;
+     short actRowIDLen;
+
+     bbRowIDs = (ByteBuffer)rowIDs;
+     bbRows = (ByteBuffer)rows;
+     numRows = bbRowIDs.getShort();
+     HTableClient htc = new HTableClient();
+     long now = System.currentTimeMillis();
+     for (short rowNum = 0; rowNum < numRows; rowNum++) 
+     {
+        byte rowIDSuffix  = bbRowIDs.get();
+        if (rowIDSuffix == '1')
+           actRowIDLen = (short)(rowIDLen+1);
+        else
+           actRowIDLen = rowIDLen;
+        rowID = new byte[actRowIDLen];
+        bbRowIDs.get(rowID, 0, actRowIDLen);
+        numCols = bbRows.getShort();
+        for (short colIndex = 0; colIndex < numCols; colIndex++)
+        {
+            colNameLen = bbRows.getShort();
+            colName = new byte[colNameLen];
+            bbRows.get(colName, 0, colNameLen);
+            colValueLen = bbRows.getInt();
+            colValue = new byte[colValueLen];
+            bbRows.get(colValue, 0, colValueLen);
+            KeyValue kv = new KeyValue(rowID,
+                                htc.getFamily(colName), 
+                                htc.getName(colName), 
+                                now,
+                                colValue);
+            writer.append(kv);
+        } 
+    }
+    if (logger.isDebugEnabled()) logger.debug("End addToHFile() ");
+    return true;
+  }
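
addToHFile() decodes two serialized buffers handed over from the native side. The plain-Java sketch below builds buffers in the layout the loop above expects; the row ID, column name, and value are hypothetical, and the real buffers are produced by the Trafodion executor.

    import java.nio.ByteBuffer;

    public class BulkLoadBufferSketch {
        public static void main(String[] args) {
            byte[] rowID    = { 0x01, 0x02 };
            byte[] colName  = "cf1:C1".getBytes();   // qualified column name
            byte[] colValue = "hello".getBytes();

            // Row-ID buffer: [short numRows], then per row [byte suffixFlag][rowID bytes].
            // A suffix flag of '1' means the row ID carries one extra trailing byte.
            ByteBuffer rowIDs = ByteBuffer.allocate(2 + 1 + rowID.length);
            rowIDs.putShort((short) 1);
            rowIDs.put((byte) '0');
            rowIDs.put(rowID);
            rowIDs.flip();

            // Rows buffer: per row [short numCols], then per column
            // [short nameLen][name bytes][int valueLen][value bytes].
            ByteBuffer rows = ByteBuffer.allocate(2 + 2 + colName.length + 4 + colValue.length);
            rows.putShort((short) 1);
            rows.putShort((short) colName.length);
            rows.put(colName);
            rows.putInt(colValue.length);
            rows.put(colValue);
            rows.flip();
            // These buffers would then be passed to addToHFile((short) rowID.length, rowIDs, rows).
        }
    }
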
+
+  public boolean closeHFile() throws IOException
+  {
+    if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.closeHFile() called." + ((writer == null) ? "NULL" : "NOT NULL"));
+
+    if (writer == null)
+      return false;
+    
+    writer.close();
+    return true;
+  }
+
+  private boolean createSnapshot( String tableName, String snapshotName)
+      throws MasterNotRunningException, IOException, SnapshotCreationException, InterruptedException, Exception
+  {
+    HBaseAdmin admin = null;
+    try 
+    {
+      admin = new HBaseAdmin(config);
+      List<SnapshotDescription>  lstSnaps = admin.listSnapshots();
+      if (! lstSnaps.isEmpty())
+      {
+        for (SnapshotDescription snpd : lstSnaps) 
+        {
+            if (snpd.getName().compareTo(snapshotName) == 0)
+            {
+              if (logger.isDebugEnabled()) logger.debug("HbulkLoadClient.createSnapshot() -- deleting: " + snapshotName + " : " + snpd.getName());
+              admin.deleteSnapshot(snapshotName);
+            }
+        }
+      }
+      admin.snapshot(snapshotName, tableName);
+   }
+    catch (Exception e)
+    {
+      // log the exception and rethrow it to the parent
+      if (logger.isDebugEnabled()) logger.debug("HbulkLoadClient.createSnapshot() - Exception: " + e);
+      throw e;
+    }
+    finally
+    {
+      //close HBaseAdmin instance 
+      if (admin !=null)
+        admin.close();
+    }
+    return true;
+  }
+  
+  private boolean restoreSnapshot( String snapshotName, String tableName)
+      throws IOException, RestoreSnapshotException, Exception
+  {
+    HBaseAdmin admin = null;
+    try
+    {
+      admin = new HBaseAdmin(config);
+      if (! admin.isTableDisabled(tableName))
+          admin.disableTable(tableName);
+      
+      admin.restoreSnapshot(snapshotName);
+  
+      admin.enableTable(tableName);
+    }
+    catch (Exception e)
+    {
+      // log the exception and rethrow it to the parent
+      if (logger.isDebugEnabled()) logger.debug("HbulkLoadClient.restoreSnapshot() - Exception: " + e);
+      throw e;
+    }
+    finally
+    {
+      //close HBaseAdmin instance 
+      if (admin != null) 
+        admin.close();
+    }
+
+    return true;
+  }
+  private boolean deleteSnapshot( String snapshotName, String tableName)
+      throws IOException, Exception
+  {
+    
+    HBaseAdmin admin = null;
+    boolean snapshotExists = false;
+    try
+    {
+      admin = new HBaseAdmin(config);
+      List<SnapshotDescription>  lstSnaps = admin.listSnapshots();
+      if (! lstSnaps.isEmpty())
+      {
+        for (SnapshotDescription snpd : lstSnaps) 
+        {
+          //System.out.println("here 1: " + snapshotName + snpd.getName());
+          if (snpd.getName().compareTo(snapshotName) == 0)
+          {
+            //System.out.println("deleting: " + snapshotName + " : " + snpd.getName());
+            snapshotExists = true;
+            break;
+          }
+        }
+      }
+      if (!snapshotExists)
+        return true;
+      if (admin.isTableDisabled(tableName))
+          admin.enableTable(tableName);
+      admin.deleteSnapshot(snapshotName);
+    }
+    catch (Exception e)
+    {
+      // log the exception and rethrow it to the parent
+      if (logger.isDebugEnabled()) logger.debug("HbulkLoadClient.deleteSnapshot() - Exception: " + e);
+      throw e;
+    }
+    finally 
+    {
+      //close HBaseAdmin instance 
+      if (admin != null) 
+        admin.close();
+    }
+    return true;
+  }
+  
+  private void doSnapshotNBulkLoad(Path hFilePath, String tableName, HTable table, LoadIncrementalHFiles loader, boolean snapshot)
+      throws MasterNotRunningException, IOException, SnapshotCreationException, InterruptedException, RestoreSnapshotException, Exception
+  {
+    HBaseAdmin admin = new HBaseAdmin(config);
+    String snapshotName= null;
+    if (snapshot)
+    {
+      snapshotName = tableName + "_SNAPSHOT";
+      createSnapshot(tableName, snapshotName);
+      if (logger.isDebugEnabled()) logger.debug("HbulkLoadClient.doSnapshotNBulkLoad() - snapshot created: " + snapshotName);
+    }
+    try
+    {
+      if (logger.isDebugEnabled()) logger.debug("HbulkLoadClient.doSnapshotNBulkLoad() - bulk load started ");
+      loader.doBulkLoad(hFilePath, table);
+      if (logger.isDebugEnabled()) logger.debug("HbulkLoadClient.doSnapshotNBulkLoad() - bulk load is done ");
+    }
+    catch (IOException e)
+    {
+      if (logger.isDebugEnabled()) logger.debug("HbulkLoadClient.doSnapshotNBulkLoad() - Exception: " + e.toString());
+      if (snapshot)
+      {
+        restoreSnapshot(snapshotName, tableName);
+        if (logger.isDebugEnabled()) logger.debug("HbulkLoadClient.doSnapshotNBulkLoad() - snapshot restored: " + snapshotName);
+        deleteSnapshot(snapshotName, tableName);
+        if (logger.isDebugEnabled()) logger.debug("HbulkLoadClient.doSnapshotNBulkLoad() - snapshot deleted: " + snapshotName);
+        throw e;
+      }
+    }
+    finally
+    {
+      if  (snapshot)
+      {
+        deleteSnapshot(snapshotName, tableName);
+        if (logger.isDebugEnabled()) logger.debug("HbulkLoadClient.doSnapshotNBulkLoad() - snapshot deleted: " + snapshotName);
+      }
+    }
+    
+  }
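
The snapshot and bulk-load helpers above wrap the stock LoadIncrementalHFiles tool. Below is a hedged standalone sketch of the underlying call, using the same HBase client classes this file imports; the table name and staging path are placeholders.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;

    public class BulkLoadSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            HTable table = new HTable(conf, "my_table");          // placeholder table name
            try {
                // Moves prepared HFiles under the staging directory into the table's regions.
                new LoadIncrementalHFiles(conf).doBulkLoad(new Path("/bulkload/prep"), table);
            } finally {
                table.close();
            }
        }
    }
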
+  public boolean doBulkLoad(String prepLocation, String tableName, boolean quasiSecure, boolean snapshot) throws Exception
+  {
+    if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.doBulkLoad() - start");
+    if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.doBulkLoad() - Prep Location: " + prepLocation + 
+                                             ", Table Name:" + tableName + 
+                                             ", quasisecure : " + quasiSecure +
+                                             ", snapshot: " + snapshot);
+
+      
+    HTable table = new HTable(config, tableName);
+    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(config);    
+    Path prepPath = new Path(prepLocation );
+    prepPath = prepPath.makeQualified(prepPath.toUri(), null);
+    FileSystem prepFs = FileSystem.get(prepPath.toUri(),config);
+    
+    Path[] hFams = FileUtil.stat2Paths(prepFs.listStatus(prepPath));
+
+    if (quasiSecure)
+    {
+      throw new Exception("HBulkLoadClient.doBulkLoad() - cannot perform load. Trafodion on secure HBase is not implemented yet");
+    }
+    else
+    {
+      if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.doBulkLoad() - adjusting hfiles permissions");
+      for (Path hfam : hFams) 
+      {
+         Path[] hfiles = FileUtil.stat2Paths(prepFs.listStatus(hfam));
+         prepFs.setPermission(hfam,PERM_ALL_ACCESS );
+         for (Path hfile : hfiles)
+         {
+           if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.doBulkLoad() - adjusting hfile permissions:" + hfile);
+           prepFs.setPermission(hfile,PERM_ALL_ACCESS);
+           
+         }
+         //create _tmp dir used as temp space for Hfile processing
+         FileSystem.mkdirs(prepFs, new Path(hfam,"_tmp"), PERM_ALL_ACCESS);
+      }
+      if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.doBulkLoad() - bulk load started. Loading directly from preparation directory");
+      doSnapshotNBulkLoad(prepPath,tableName,  table,  loader,  snapshot);
+      if (logger.isDebugEnabled()) logger.debug("HBulkLoadClient.doBulkLoad() - bulk load is done ");
+    }
+    return true;
+  }
+
+  public boolean bulkLoadCleanup(String location) throws Exception
+  {
+      Path dir = new Path(location );
+      dir = dir.makeQualified(dir.toUri(), null);
+      FileSystem fs = FileSystem.get(dir.toUri(),config);
+      fs.delete(dir, true);
+      
+      return true;
+
+  }
+  
+  public boolean release( ) throws IOException {
+    if (writer != null)
+    {
+       writer.close();
+       writer = null;
+    }
+    if (fileSys !=null)
+    {
+      fileSys.close();
+      fileSys = null;
+    }
+    if (config != null) 
+    {
+      config = null;
+    }
+    if (hFileLocation != null)
+    {
+      hFileLocation = null;
+    }
+    if (hFileName != null)
+    {
+      hFileName = null;
+    }
+
+    if (compression != null)
+    {
+      compression = null;
+    }
+    return true;
+  }
+}



[3/9] incubator-trafodion git commit: Most of the Trafodion Java source files are built through Maven, using projects DCS, REST, HBase-trx and SQL. A few files remain in the core/sql/executor and core/sql/ustat directories that are built through javac co

Posted by db...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/src/main/java/org/trafodion/sql/HTableClient.java
----------------------------------------------------------------------
diff --git a/core/sql/src/main/java/org/trafodion/sql/HTableClient.java b/core/sql/src/main/java/org/trafodion/sql/HTableClient.java
new file mode 100644
index 0000000..3f3334e
--- /dev/null
+++ b/core/sql/src/main/java/org/trafodion/sql/HTableClient.java
@@ -0,0 +1,1337 @@
+// @@@ START COPYRIGHT @@@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// @@@ END COPYRIGHT @@@
+
+package org.trafodion.sql;
+import org.trafodion.sql.*;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.NavigableSet;
+
+import java.util.concurrent.Callable;
+import java.util.concurrent.Future;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.nio.ByteBuffer;
+import java.nio.LongBuffer;
+import java.nio.ByteOrder;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.client.coprocessor.AggregationClient;
+import org.apache.hadoop.hbase.client.transactional.RMInterface;
+import org.apache.hadoop.hbase.client.transactional.TransactionalAggregationClient;
+import org.apache.hadoop.hbase.client.transactional.TransactionState;
+
+import org.apache.log4j.Logger;
+
+// H98 coprocessor needs
+import java.util.*;
+import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.client.*;
+import org.apache.hadoop.hbase.client.coprocessor.*;
+import org.apache.hadoop.hbase.coprocessor.*;
+import org.apache.hadoop.hbase.ipc.*;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.*;
+import org.apache.hadoop.hbase.util.*;
+
+//import org.apache.hadoop.hbase.client.coprocessor.AggregationClient;
+import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter;
+import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter;
+
+// classes to do column value filtering
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.RandomRowFilter;
+
+import org.apache.hadoop.hbase.client.TableSnapshotScanner;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileUtil;
+import java.util.UUID;
+import java.security.InvalidParameterException;
+
+public class HTableClient {
+	private static final int GET_ROW = 1;
+	private static final int BATCH_GET = 2;
+	private static final int SCAN_FETCH = 3;
+	private boolean useTRex;
+	private boolean useTRexScanner;
+	private String tableName;
+
+	private ResultScanner scanner = null;
+        private ScanHelper scanHelper = null;
+	Result[] getResultSet = null;
+	String lastError;
+        RMInterface table = null;
+        ByteArrayList coprocAggrResult = null;
+        private boolean writeToWAL = false;
+	int numRowsCached = 1;
+	int numColsInScan = 0;
+	int[] kvValLen = null;
+	int[] kvValOffset = null;
+	int[] kvQualLen = null;
+	int[] kvQualOffset = null;
+	int[] kvFamLen = null;
+	int[] kvFamOffset = null;
+	long[] kvTimestamp = null;
+	byte[][] kvBuffer = null;
+	byte[][] rowIDs = null;
+	int[] kvsPerRow = null;
+        static ExecutorService executorService = null;
+        Future future = null;
+	boolean preFetch = false;
+	int fetchType = 0;
+	long jniObject = 0;
+	SnapshotScanHelper snapHelper = null;
+
+	 class SnapshotScanHelper
+	 {
+	   Path snapRestorePath = null;
+	   HBaseAdmin admin  = null;
+	   Configuration conf = null;
+	   SnapshotDescription snpDesc = null;
+	   String tmpLocation = null;
+	   FileSystem fs  = null;
+
+	   SnapshotScanHelper( Configuration cnfg , String tmpLoc, String snapName) 
+	       throws IOException
+	   {
+	     conf = cnfg;
+	     admin = new HBaseAdmin(conf);
+	     tmpLocation = tmpLoc;
+	     setSnapshotDescription(snapName);
+	     Path rootDir = new Path(conf.get(HConstants.HBASE_DIR));
+	     fs = rootDir.getFileSystem(conf);
+	     setSnapRestorePath();
+	   }
+
+	   String getTmpLocation()
+	   {
+	     return tmpLocation;
+	   }
+	   String getSnapshotName()
+	   {
+	     if (snpDesc == null)
+	       return null;
+	     return snpDesc.getName();
+	   }
+	   void setSnapRestorePath() throws IOException
+	   {
+	     String restoreDirStr = tmpLocation + getSnapshotDescription().getName();
+	     snapRestorePath = new Path(restoreDirStr);
+	     snapRestorePath = snapRestorePath.makeQualified(fs.getUri(), snapRestorePath);
+	   }
+	   Path getSnapRestorePath() throws IOException
+	   {
+	     return snapRestorePath;
+	   }
+	   boolean snapshotExists() throws IOException
+	   {
+	     if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.snapshotExists() called. ");
+	     return !admin.listSnapshots(snpDesc.getName()).isEmpty();
+	   }
+	   void deleteSnapshot() throws IOException
+	   {
+	     if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.deleteSnapshot() called. ");
+	     if (snapshotExists())
+	     {
+	       admin.deleteSnapshot(snpDesc.getName());
+	       if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.deleteSnapshot(). snapshot: " + snpDesc.getName() + " deleted.");
+	     }
+	     else
+	     {
+	       if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.deleteSnapshot(). snapshot: " + snpDesc.getName() + " does not exist.");
+	     }
+	   }
+	   void deleteRestorePath() throws IOException
+	   {
+	     if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.deleteRestorePath() called. ");
+	     if (fs.exists(snapRestorePath))
+	     {
+	       fs.delete(snapRestorePath, true);
+	       if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.deleteRestorePath(). restorePath: " + snapRestorePath + " deleted.");
+	     }
+	     else
+	     {
+	       if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.deleteRestorePath(). restorePath: " + snapRestorePath  + " does not exist.");
+	     }
+	   }
+	   
+	   void createTableSnapshotScanner(int timeout, int slp, long nbre, Scan scan) throws InterruptedException
+	   {
+	     if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.createTableSnapshotScanner() called. ");
+	     int xx=0;
+	     while (xx < timeout)
+	     {
+         xx++;
+	       scanner = null;
+	       try
+	       {
+	         scanner = new TableSnapshotScanner(table.getConfiguration(), snapHelper.getSnapRestorePath(), snapHelper.getSnapshotName(), scan);
+	       }
+	       catch(IOException e )
+	       {
+	         if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.createTableSnapshotScanner(). espNumber: " + nbre  + 
+	             " snapshot " + snpDesc.getName() + " TableSnapshotScanner Exception :" + e);
+	         Thread.sleep(slp);
+	         continue;
+	       }
+	       if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.createTableSnapshotScanner(). espNumber: " + 
+	           nbre + " snapshot " + snpDesc.getName() +  " TableSnapshotScanner Done - Scanner:" + scanner );
+	       break;
+	     }
+	   }
+	   void setSnapshotDescription( String snapName)
+	   {
+       if (snapName == null )
+         throw new InvalidParameterException ("snapshotName is null.");
+       
+	     SnapshotDescription.Builder builder = SnapshotDescription.newBuilder();
+	     builder.setTable(Bytes.toString(table.getTableName()));
+	     builder.setName(snapName);
+	     builder.setType(SnapshotDescription.Type.FLUSH);
+	     snpDesc = builder.build();
+	   }
+	   SnapshotDescription getSnapshotDescription()
+	   {
+	     return snpDesc;
+	   }
+
+	   public void release() throws IOException
+	   {
+	     if (admin != null)
+	     {
+	       admin.close();
+	       admin = null;
+	     }
+	   }
+	 }
+
+	class ScanHelper implements Callable {
+            public Result[] call() throws Exception {
+                return scanner.next(numRowsCached);
+            }
+        }
+	 
+	static Logger logger = Logger.getLogger(HTableClient.class.getName());
+
+        static public  byte[] getFamily(byte[] qc) {
+	   byte[] family = null;
+
+	   if (qc != null && qc.length > 0) {
+	       int pos = Bytes.indexOf(qc, (byte) ':');
+	       if (pos == -1) 
+	          family = Bytes.toBytes("cf1");
+	       else
+	          family = Arrays.copyOfRange(qc, 0, pos);
+           }	
+	   return family;
+	}
+
+        static public byte[] getName(byte[] qc) {
+	   byte[] name = null;
+
+	   if (qc != null && qc.length > 0) {
+	      int pos = Bytes.indexOf(qc, (byte) ':');
+	      if (pos == -1) 
+	         name = qc;
+	      else
+	         name = Arrays.copyOfRange(qc, pos + 1, qc.length);
+	   }	
+	   return name;
+	}
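
A short usage sketch of the qualifier convention encoded by these two helpers: a qualified column name is "family:name", and a name without ':' falls back to the default family "cf1". The column name below is hypothetical.

    byte[] qc   = "cf1:SYSKEY".getBytes();
    byte[] fam  = HTableClient.getFamily(qc);   // "cf1"
    byte[] name = HTableClient.getName(qc);     // "SYSKEY"
    System.out.println(new String(fam) + " / " + new String(name));
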
+
+	public boolean setWriteBufferSize(long writeBufferSize) throws IOException {
+		if (logger.isDebugEnabled()) logger.debug("Enter HTableClient::setWriteBufferSize, size  : " + writeBufferSize);
+	    table.setWriteBufferSize(writeBufferSize);
+	    return true;
+	  }
+	 public long getWriteBufferSize() {
+		 if (logger.isDebugEnabled()) logger.debug("Enter HTableClient::getWriteBufferSize, size return : " + table.getWriteBufferSize());
+		 return table.getWriteBufferSize();
+	 }
+	public boolean setWriteToWAL(boolean v) {
+		if (logger.isDebugEnabled()) logger.debug("Enter HTableClient::setWriteToWAL, value: " + v);
+	    writeToWAL = v;
+	    return true;
+	  }
+ 
+	public boolean init(String tblName,
+              boolean useTRex) throws IOException 
+        {
+	    if (logger.isDebugEnabled()) logger.debug("Enter HTableClient::init, tableName: " + tblName);
+	    this.useTRex = useTRex;
+	    tableName = tblName;
+	    
+	    if ( !this.useTRex ) {
+		this.useTRexScanner = false;
+	    }
+	    else {
+
+		// If the parameter useTRex is false, then do not go thru this logic
+
+		String useTransactions = System.getenv("USE_TRANSACTIONS");
+		if (useTransactions != null) {
+		    int lv_useTransactions = (Integer.parseInt(useTransactions));
+		    if (lv_useTransactions == 0) {
+			this.useTRex = false;
+		    }
+		}
+	    
+		this.useTRexScanner = true;
+		String useTransactionsScanner = System.getenv("USE_TRANSACTIONS_SCANNER");
+		if (useTransactionsScanner != null) {
+		    int lv_useTransactionsScanner = (Integer.parseInt(useTransactionsScanner));
+		    if (lv_useTransactionsScanner == 0) {
+			this.useTRexScanner = false;
+		    }
+		}
+	    }
+
+	    table = new RMInterface(tblName);
+	    if (logger.isDebugEnabled()) logger.debug("Exit HTableClient::init, table object: " + table);
+	    return true;
+	}
+
+	public String getLastError() {
+		String ret = lastError;
+		lastError = null;
+		return ret;
+	}
+
+	void setLastError(String err) {
+		lastError = err;
+	}
+
+	String getTableName() {
+		return tableName;
+	}
+
+	String getHTableName() {
+		if (table == null)
+			return null;
+		else
+			return new String(table.getTableName());
+	}
+
+	void resetAutoFlush() {
+		table.setAutoFlush(true, true);
+	}
+
+	public boolean startScan(long transID, byte[] startRow, byte[] stopRow,
+                                 Object[]  columns, long timestamp,
+                                 boolean cacheBlocks, int numCacheRows,
+                                 Object[] colNamesToFilter, 
+                                 Object[] compareOpList, 
+                                 Object[] colValuesToCompare,
+                                 float samplePercent,
+                                 boolean inPreFetch,
+                                 boolean useSnapshotScan,
+                                 int snapTimeout,
+                                 String snapName,
+                                 String tmpLoc,
+                                 int espNum,
+                                 int versions)
+	        throws IOException, Exception {
+	  if (logger.isTraceEnabled()) logger.trace("Enter startScan() " + tableName + " txid: " + transID+ " CacheBlocks: " + cacheBlocks + " numCacheRows: " + numCacheRows + " Bulkread: " + useSnapshotScan);
+
+	  Scan scan;
+
+	  // Treat empty row keys as unbounded; byte[].toString() never equals "",
+	  // so compare the array length instead.
+	  if (startRow != null && startRow.length == 0)
+	    startRow = null;
+	  if (stopRow != null && stopRow.length == 0)
+	    stopRow = null;
+
+	  if (startRow != null && stopRow != null)
+	    scan = new Scan(startRow, stopRow);
+	  else
+	    scan = new Scan();
+
+          if (versions != 0)
+            {
+              if (versions == -1)
+                scan.setMaxVersions();
+              else if (versions == -2)
+                {
+                  scan.setMaxVersions();
+                  scan.setRaw(true);
+                  columns = null;
+                }
+              else if (versions > 0)
+               {
+                 scan.setMaxVersions(versions);
+               }
+           }
+
+          if (cacheBlocks == true) {
+              scan.setCacheBlocks(true);
+              // Disable block cache for full table scan
+              if (startRow == null && stopRow == null)
+                  scan.setCacheBlocks(false);
+          }
+	  else
+              scan.setCacheBlocks(false);
+          
+	  scan.setCaching(numCacheRows);
+	  numRowsCached = numCacheRows;
+	  if (columns != null) {
+	    numColsInScan = columns.length;
+	    for (int i = 0; i < columns.length ; i++) {
+	      byte[] col = (byte[])columns[i];
+	      scan.addColumn(getFamily(col), getName(col));
+	    }
+	  }
+	  else
+	    numColsInScan = 0;
+	  if (colNamesToFilter != null) {
+	    FilterList list = new FilterList(FilterList.Operator.MUST_PASS_ALL);
+
+	    for (int i = 0; i < colNamesToFilter.length; i++) {
+	      byte[] colName = (byte[])colNamesToFilter[i];
+	      byte[] coByte = (byte[])compareOpList[i];
+	      byte[] colVal = (byte[])colValuesToCompare[i];
+
+	      if ((coByte == null) || (colVal == null)) {
+	        return false;
+	      }
+
+	      String coStr = new String(coByte);
+	      CompareOp co = CompareOp.valueOf(coStr);
+
+	      SingleColumnValueFilter filter1 = 
+	          new SingleColumnValueFilter(getFamily(colName), getName(colName), 
+	              co, colVal);
+	      list.addFilter(filter1);
+	    }
+
+	    if (samplePercent > 0.0f)
+	      list.addFilter(new RandomRowFilter(samplePercent));
+	    scan.setFilter(list);
+	  } else if (samplePercent > 0.0f) {
+	    scan.setFilter(new RandomRowFilter(samplePercent));
+	  }
+
+	  if (!useSnapshotScan || transID != 0)
+	  {
+	    if (useTRexScanner && (transID != 0)) {
+	      scanner = table.getScanner(transID, scan);
+	    } else {
+	      scanner = table.getScanner(scan);
+	    }
+	    if (logger.isTraceEnabled()) logger.trace("startScan(). After getScanner. Scanner: " + scanner);
+	  }
+	  else
+	  {
+	    snapHelper = new SnapshotScanHelper(table.getConfiguration(), tmpLoc,snapName);
+
+	    if (logger.isTraceEnabled()) 
+	      logger.trace("[Snapshot Scan] HTableClient.startScan(). useSnapshotScan: " + useSnapshotScan + 
+	                   " espNumber: " + espNum + 
+	                   " tmpLoc: " + snapHelper.getTmpLocation() + 
+	                   " snapshot name: " + snapHelper.getSnapshotName());
+	    
+	    if (!snapHelper.snapshotExists())
+	      throw new Exception ("Snapshot " + snapHelper.getSnapshotName() + " does not exist.");
+
+	    snapHelper.createTableSnapshotScanner(snapTimeout, 5, espNum, scan);
+	    if (scanner==null)
+	      throw new Exception("Cannot create Table Snapshot Scanner");
+	  }
+    
+          if (useSnapshotScan)
+             preFetch = false;
+          else
+	     preFetch = inPreFetch;
+	  if (preFetch)
+	  {
+	    scanHelper = new ScanHelper(); 
+            future = executorService.submit(scanHelper);
+	  }
+          fetchType = SCAN_FETCH;
+	  if (logger.isTraceEnabled()) logger.trace("Exit startScan().");
+	  return true;
+	}
+
+	public int  startGet(long transID, byte[] rowID, 
+                     Object[] columns,
+		     long timestamp) throws IOException {
+
+	    if (logger.isTraceEnabled()) logger.trace("Enter startGet(" + tableName + 
+			     " #cols: " + ((columns == null) ? 0:columns.length ) +
+			     " rowID: " + new String(rowID));
+		fetchType = GET_ROW;
+		Get get = new Get(rowID);
+		if (columns != null)
+		{
+			for (int i = 0; i < columns.length; i++) {
+				byte[] col = (byte[]) columns[i];
+				get.addColumn(getFamily(col), getName(col));
+			}
+			numColsInScan = columns.length;
+		}
+		else
+			numColsInScan = 0;
+			
+		Result getResult;
+		if (useTRex && (transID != 0)) {
+			getResult = table.get(transID, get);
+		} else {
+			getResult = table.get(get);
+		}
+		if (getResult == null
+                    || getResult.isEmpty()) {
+                        setJavaObject(jniObject);
+			return 0;
+		}
+		if (logger.isTraceEnabled()) logger.trace("startGet, result: " + getResult);
+		pushRowsToJni(getResult);
+		return 1;
+
+	}
+
+	// The TransactionalTable class is missing the batch get operation,
+	// so work around it.
+	private Result[] batchGet(long transactionID, List<Get> gets)
+			throws IOException {
+		if (logger.isTraceEnabled()) logger.trace("Enter batchGet(multi-row) " + tableName);
+		Result [] results = new Result[gets.size()];
+		int i=0;
+		for (Get g : gets) {
+			Result r = table.get(transactionID, g);
+			results[i++] = r;
+		}
+		return results;
+	}
+
+	public int startGet(long transID, Object[] rows,
+			Object[] columns, long timestamp)
+                        throws IOException {
+
+		if (logger.isTraceEnabled()) logger.trace("Enter startGet(multi-row) " + tableName);
+
+		List<Get> listOfGets = new ArrayList<Get>();
+		for (int i = 0; i < rows.length; i++) {
+			byte[] rowID = (byte[])rows[i]; 
+			Get get = new Get(rowID);
+			listOfGets.add(get);
+			if (columns != null)
+			{
+				for (int j = 0; j < columns.length; j++ ) {
+					byte[] col = (byte[])columns[j];
+					get.addColumn(getFamily(col), getName(col));
+				}
+			}
+		}
+		if (columns != null)
+			numColsInScan = columns.length;
+		else
+			numColsInScan = 0;
+		if (useTRex && (transID != 0)) {
+			getResultSet = batchGet(transID, listOfGets);
+                        fetchType = GET_ROW; 
+		} else {
+			getResultSet = table.get(listOfGets);
+			fetchType = BATCH_GET;
+		}
+		if (getResultSet != null && getResultSet.length > 0) {
+                	 pushRowsToJni(getResultSet);
+			return getResultSet.length;
+		}
+		else {
+			setJavaObject(jniObject);
+			return 0;
+		}
+	}
+
+	public int getRows(long transID, short rowIDLen, Object rowIDs,
+			Object[] columns)
+                        throws IOException {
+            
+		if (logger.isTraceEnabled()) logger.trace("Enter getRows " + tableName);
+
+		ByteBuffer bbRowIDs = (ByteBuffer)rowIDs;
+		List<Get> listOfGets = new ArrayList<Get>();
+		short numRows = bbRowIDs.getShort();
+		short actRowIDLen ;
+		byte rowIDSuffix;
+		byte[] rowID;
+
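+		// Layout of the rowIDs buffer: numRows (short), then for each row a
+		// one-byte suffix flag ('1' means the key is rowIDLen + 1 bytes long)
+		// followed by the row key bytes.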
+		for (int i = 0; i < numRows; i++) {
+                        rowIDSuffix  = bbRowIDs.get();
+                        if (rowIDSuffix == '1')
+		           actRowIDLen = (short)(rowIDLen+1);
+                        else
+                           actRowIDLen = rowIDLen; 	
+			rowID = new byte[actRowIDLen];
+			bbRowIDs.get(rowID, 0, actRowIDLen);
+			Get get = new Get(rowID);
+			listOfGets.add(get);
+			if (columns != null) {
+				for (int j = 0; j < columns.length; j++ ) {
+					byte[] col = (byte[])columns[j];
+					get.addColumn(getFamily(col), getName(col));
+				}
+			}
+		}
+		if (columns != null)
+			numColsInScan = columns.length;
+		else
+			numColsInScan = 0;
+		if (useTRex && (transID != 0)) {
+			getResultSet = batchGet(transID, listOfGets);
+                        fetchType = GET_ROW; 
+		} else {
+			getResultSet = table.get(listOfGets);
+			fetchType = BATCH_GET;
+		}
+		if (getResultSet.length != numRows)
+                   throw new IOException("Number of rows returned is not equal to requested number of rows");
+ 		pushRowsToJni(getResultSet);
+		return getResultSet.length;
+	}
+
+	public int fetchRows() throws IOException, 
+			InterruptedException, ExecutionException {
+		int rowsReturned = 0;
+
+		if (logger.isTraceEnabled()) logger.trace("Enter fetchRows(). Table: " + tableName);
+		if (getResultSet != null)
+		{
+			rowsReturned = pushRowsToJni(getResultSet);
+			getResultSet = null;
+			return rowsReturned;
+		}
+		else
+		{
+			if (scanner == null) {
+				String err = "  fetchRows() called before scanOpen().";
+				logger.error(err);
+				setLastError(err);
+				return -1;
+			}
+			Result[] result = null;
+			if (preFetch)
+			{
+				result = (Result[])future.get();
+				rowsReturned = pushRowsToJni(result);
+				future = null;
+				if ((rowsReturned <= 0 || rowsReturned < numRowsCached))
+					return rowsReturned;
+                                future = executorService.submit(scanHelper);
+			}
+			else
+			{
+				result = scanner.next(numRowsCached);
+				rowsReturned = pushRowsToJni(result);
+			}
+			return rowsReturned;
+		}
+	}
+
+	protected int pushRowsToJni(Result[] result) 
+			throws IOException {
+		if (result == null || result.length == 0)
+			return 0; 
+		int rowsReturned = result.length;
+		int numTotalCells = 0;
+		if (numColsInScan == 0)
+		{
+			for (int i = 0; i < result.length; i++) {	
+				numTotalCells += result[i].size();
+			}
+		}
+		else
+		// There can be a maximum of 2 versions per KV,
+		// so allocate placeholders to keep cell info
+		// for that many KVs.
+			numTotalCells = 2 * rowsReturned * numColsInScan;
+		int numColsReturned;
+		Cell[] kvList;
+		Cell kv;
+
+		if (kvValLen == null ||
+	 		(kvValLen != null && numTotalCells > kvValLen.length))
+		{
+			kvValLen = new int[numTotalCells];
+			kvValOffset = new int[numTotalCells];
+			kvQualLen = new int[numTotalCells];
+			kvQualOffset = new int[numTotalCells];
+			kvFamLen = new int[numTotalCells];
+			kvFamOffset = new int[numTotalCells];
+			kvTimestamp = new long[numTotalCells];
+			kvBuffer = new byte[numTotalCells][];
+		}
+               
+		if (rowIDs == null || (rowIDs != null &&
+				rowsReturned > rowIDs.length))
+		{
+			rowIDs = new byte[rowsReturned][];
+			kvsPerRow = new int[rowsReturned];
+		}
+		int cellNum = 0;
+		boolean colFound = false;
+		for (int rowNum = 0; rowNum < rowsReturned ; rowNum++)
+		{
+			rowIDs[rowNum] = result[rowNum].getRow();
+			kvList = result[rowNum].rawCells();
+			numColsReturned = kvList.length;
+			if ((cellNum + numColsReturned) > numTotalCells)
+				throw new IOException("Insufficient cell array pre-allocated");
+			kvsPerRow[rowNum] = numColsReturned;
+			for (int colNum = 0 ; colNum < numColsReturned ; colNum++, cellNum++)
+			{ 
+				kv = kvList[colNum];
+				kvValLen[cellNum] = kv.getValueLength();
+				kvValOffset[cellNum] = kv.getValueOffset();
+				kvQualLen[cellNum] = kv.getQualifierLength();
+				kvQualOffset[cellNum] = kv.getQualifierOffset();
+				kvFamLen[cellNum] = kv.getFamilyLength();
+				kvFamOffset[cellNum] = kv.getFamilyOffset();
+				kvTimestamp[cellNum] = kv.getTimestamp();
+				kvBuffer[cellNum] = kv.getValueArray();
+				colFound = true;
+			}
+		}
+		int cellsReturned;
+		if (colFound)
+                	cellsReturned = cellNum++;
+		else
+			cellsReturned = 0;
+		if (cellsReturned == 0)
+			setResultInfo(jniObject, null, null,
+				null, null, null, null,
+				null, null, rowIDs, kvsPerRow, cellsReturned, rowsReturned);
+		else 
+			setResultInfo(jniObject, kvValLen, kvValOffset,
+				kvQualLen, kvQualOffset, kvFamLen, kvFamOffset,
+				kvTimestamp, kvBuffer, rowIDs, kvsPerRow, cellsReturned, rowsReturned);
+		return rowsReturned;	
+	}		
+	
+	protected int pushRowsToJni(Result result) 
+			throws IOException {
+		int rowsReturned = 1;
+		int numTotalCells;
+		if (numColsInScan == 0)
+			numTotalCells = result.size();
+		else
+		// There can be a maximum of 2 versions per KV,
+		// so allocate placeholders to keep cell info
+		// for that many KVs.
+			numTotalCells = 2 * rowsReturned * numColsInScan;
+		int numColsReturned;
+		Cell[] kvList;
+		Cell kv;
+
+		if (kvValLen == null ||
+	 		(kvValLen != null && numTotalCells > kvValLen.length))
+		{
+			kvValLen = new int[numTotalCells];
+			kvValOffset = new int[numTotalCells];
+			kvQualLen = new int[numTotalCells];
+			kvQualOffset = new int[numTotalCells];
+			kvFamLen = new int[numTotalCells];
+			kvFamOffset = new int[numTotalCells];
+			kvTimestamp = new long[numTotalCells];
+			kvBuffer = new byte[numTotalCells][];
+		}
+		if (rowIDs == null)
+		{
+			rowIDs = new byte[rowsReturned][];
+			kvsPerRow = new int[rowsReturned];
+		}
+		kvList = result.rawCells();
+ 		if (kvList == null)
+			numColsReturned = 0; 
+		else
+			numColsReturned = kvList.length;
+		if ((numColsReturned) > numTotalCells)
+			throw new IOException("Insufficient cell array pre-allocated");
+ 		rowIDs[0] = result.getRow();
+		kvsPerRow[0] = numColsReturned;
+		for (int colNum = 0 ; colNum < numColsReturned ; colNum++)
+		{ 
+			kv = kvList[colNum];
+			kvValLen[colNum] = kv.getValueLength();
+			kvValOffset[colNum] = kv.getValueOffset();
+			kvQualLen[colNum] = kv.getQualifierLength();
+			kvQualOffset[colNum] = kv.getQualifierOffset();
+			kvFamLen[colNum] = kv.getFamilyLength();
+			kvFamOffset[colNum] = kv.getFamilyOffset();
+			kvTimestamp[colNum] = kv.getTimestamp();
+			kvBuffer[colNum] = kv.getValueArray();
+		}
+		if (numColsReturned == 0)
+			setResultInfo(jniObject, null, null,
+				null, null, null, null,
+				null, null, rowIDs, kvsPerRow, numColsReturned, rowsReturned);
+		else
+			setResultInfo(jniObject, kvValLen, kvValOffset,
+				kvQualLen, kvQualOffset, kvFamLen, kvFamOffset,
+				kvTimestamp, kvBuffer, rowIDs, kvsPerRow, numColsReturned, rowsReturned);
+		return rowsReturned;	
+	}		
+	
+	public boolean deleteRow(final long transID, byte[] rowID, 
+				 Object[] columns,
+				 long timestamp,
+                                 boolean asyncOperation) throws IOException {
+
+		if (logger.isTraceEnabled()) logger.trace("Enter deleteRow(" + new String(rowID) + ", "
+			     + timestamp + ") " + tableName);
+
+		final Delete del;
+		if (timestamp == -1)
+			del = new Delete(rowID);
+		else
+			del = new Delete(rowID, timestamp);
+
+		if (columns != null) {
+			for (int i = 0; i < columns.length ; i++) {
+				byte[] col = (byte[]) columns[i];
+				del.deleteColumns(getFamily(col), getName(col));
+			}
+		}
+               	if (asyncOperation) {
+			future = executorService.submit(new Callable() {
+ 				public Object call() throws Exception {
+					boolean res = true;
+					if (useTRex && (transID != 0)) 
+				           table.delete(transID, del);
+				        else
+				           table.delete(del);
+				        return new Boolean(res);
+				}
+			});
+			return true;
+		}
+		else {
+	          	if (useTRex && (transID != 0)) 
+				table.delete(transID, del);
+			else
+				table.delete(del);
+		}
+		if (logger.isTraceEnabled()) logger.trace("Exit deleteRow");
+		return true;
+	}
+
+	public boolean deleteRows(final long transID, short rowIDLen, Object rowIDs,
+		      long timestamp,
+                      boolean asyncOperation) throws IOException {
+
+	        if (logger.isTraceEnabled()) logger.trace("Enter deleteRows() " + tableName);
+
+		final List<Delete> listOfDeletes = new ArrayList<Delete>();
+		listOfDeletes.clear();
+		ByteBuffer bbRowIDs = (ByteBuffer)rowIDs;
+		short numRows = bbRowIDs.getShort();
+                byte[] rowID;		
+		byte rowIDSuffix;
+		short actRowIDLen;
+       
+		for (short rowNum = 0; rowNum < numRows; rowNum++) {
+                        rowIDSuffix  = bbRowIDs.get();
+                        if (rowIDSuffix == '1')
+		           actRowIDLen = (short)(rowIDLen+1);
+                        else
+                           actRowIDLen = rowIDLen; 	
+			rowID = new byte[actRowIDLen];
+			bbRowIDs.get(rowID, 0, actRowIDLen);
+
+			Delete del;
+			if (timestamp == -1)
+			    del = new Delete(rowID);
+			else
+			    del = new Delete(rowID, timestamp);
+			listOfDeletes.add(del);
+		}
+                if (asyncOperation) {
+                        future = executorService.submit(new Callable() {
+                                public Object call() throws Exception {
+                                    boolean res = true;
+				   if (useTRex && (transID != 0)) 
+				      table.delete(transID, listOfDeletes);
+				   else
+				      table.delete(listOfDeletes);
+				   return new Boolean(res);
+				}
+			});
+			return true;
+		}
+		else {
+			if (useTRex && (transID != 0)) 
+		    	   table.delete(transID, listOfDeletes);
+			else
+		  	   table.delete(listOfDeletes);
+		}
+		if (logger.isTraceEnabled()) logger.trace("Exit deleteRows");
+		return true;
+	}
+
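+         // Packs an int into four bytes, most significant byte first (big-endian).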
+         public byte[] intToByteArray(int value) {
+	     return new byte[] {
+		 (byte)(value >>> 24),
+		 (byte)(value >>> 16),
+		 (byte)(value >>> 8),
+		 (byte)value};
+	 }
+    
+	public boolean checkAndDeleteRow(long transID, byte[] rowID, 
+					 byte[] columnToCheck, byte[] colValToCheck,
+					 long timestamp) throws IOException {
+
+		if (logger.isTraceEnabled()) logger.trace("Enter checkAndDeleteRow(" + new String(rowID) + ", "
+			     + new String(columnToCheck) + ", " + new String(colValToCheck) + ", " + timestamp + ") " + tableName);
+
+			Delete del;
+			if (timestamp == -1)
+				del = new Delete(rowID);
+			else
+				del = new Delete(rowID, timestamp);
+
+			byte[] family = null;
+			byte[] qualifier = null;
+
+			if (columnToCheck.length > 0) {
+				family = getFamily(columnToCheck);
+				qualifier = getName(columnToCheck);
+			}
+			
+			boolean res;
+			if (useTRex && (transID != 0)) {
+			    res = table.checkAndDelete(transID, rowID, family, qualifier, colValToCheck, del);
+			} else {
+			    res = table.checkAndDelete(rowID, family, qualifier, colValToCheck, del);
+			}
+
+			if (res == false)
+			    return false;
+		return true;
+	}
+
+	public boolean putRow(final long transID, final byte[] rowID, Object row,
+		byte[] columnToCheck, final byte[] colValToCheck,
+		final boolean checkAndPut, boolean asyncOperation) throws IOException, InterruptedException, 
+                          ExecutionException 
+	{
+		if (logger.isTraceEnabled()) logger.trace("Enter putRow() " + tableName);
+
+	 	final Put put;
+		ByteBuffer bb;
+		short numCols;
+		short colNameLen;
+                int colValueLen;
+		byte[] family = null;
+		byte[] qualifier = null;
+		byte[] colName, colValue;
+
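+		// Layout of the row buffer: numCols (short), then for each column
+		// colNameLen (short), colName bytes, colValueLen (int), colValue bytes.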
+		bb = (ByteBuffer)row;
+		put = new Put(rowID);
+		numCols = bb.getShort();
+		for (short colIndex = 0; colIndex < numCols; colIndex++)
+		{
+			colNameLen = bb.getShort();
+			colName = new byte[colNameLen];
+			bb.get(colName, 0, colNameLen);
+			colValueLen = bb.getInt();	
+			colValue = new byte[colValueLen];
+			bb.get(colValue, 0, colValueLen);
+			put.add(getFamily(colName), getName(colName), colValue); 
+			if (checkAndPut && colIndex == 0) {
+				family = getFamily(colName);
+				qualifier = getName(colName);
+			} 
+		}
+		if (columnToCheck != null && columnToCheck.length > 0) {
+			family = getFamily(columnToCheck);
+			qualifier = getName(columnToCheck);
+		}
+		final byte[] family1 = family;
+		final byte[] qualifier1 = qualifier;
+		if (asyncOperation) {
+			future = executorService.submit(new Callable() {
+				public Object call() throws Exception {
+					boolean res = true;
+
+					if (checkAndPut) {
+		    				if (useTRex && (transID != 0)) 
+							res = table.checkAndPut(transID, rowID, 
+								family1, qualifier1, colValToCheck, put);
+		    				else 
+							res = table.checkAndPut(rowID, 
+								family1, qualifier1, colValToCheck, put);
+					}
+					else {
+		    				if (useTRex && (transID != 0)) 
+							table.put(transID, put);
+		    				else 
+							table.put(put);
+					}
+					return new Boolean(res);
+				}
+			});
+			return true;
+		} else {
+		 	boolean result = true;
+			if (checkAndPut) {
+		    		if (useTRex && (transID != 0)) 
+					result = table.checkAndPut(transID, rowID, 
+						family1, qualifier1, colValToCheck, put);
+		   		else 
+					result = table.checkAndPut(rowID, 
+						family1, qualifier1, colValToCheck, put);
+			}
+			else {
+		    		if (useTRex && (transID != 0)) 
+					table.put(transID, put);
+		    		else 
+					table.put(put);
+			}
+			return result;
+		}	
+	}
+
+	public boolean insertRow(long transID, byte[] rowID, 
+                         Object row, 
+			 long timestamp,
+                         boolean asyncOperation) throws IOException, InterruptedException, ExecutionException {
+		return putRow(transID, rowID, row, null, null, 
+				false, asyncOperation);
+	}
+
+	public boolean putRows(final long transID, short rowIDLen, Object rowIDs, 
+                       Object rows,
+                       long timestamp, boolean autoFlush, boolean asyncOperation)
+			throws IOException, InterruptedException, ExecutionException  {
+
+		if (logger.isTraceEnabled()) logger.trace("Enter putRows() " + tableName);
+
+		Put put;
+		ByteBuffer bbRows, bbRowIDs;
+		short numCols, numRows;
+		short colNameLen;
+                int colValueLen;
+		byte[] colName, colValue, rowID;
+		byte rowIDSuffix;
+                short actRowIDLen;
+		bbRowIDs = (ByteBuffer)rowIDs;
+		bbRows = (ByteBuffer)rows;
+
+		final List<Put> listOfPuts = new ArrayList<Put>();
+		numRows = bbRowIDs.getShort();
+		
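+		// bbRowIDs holds numRows (short), then per row a one-byte suffix flag
+		// and the row key (as in getRows); bbRows holds, per row, numCols (short)
+		// followed by colNameLen/colName/colValueLen/colValue, mirroring putRow().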
+		for (short rowNum = 0; rowNum < numRows; rowNum++) {
+                        rowIDSuffix  = bbRowIDs.get();
+                        if (rowIDSuffix == '1')
+		           actRowIDLen = (short)(rowIDLen+1);
+                        else
+                           actRowIDLen = rowIDLen; 	
+			rowID = new byte[actRowIDLen];
+			bbRowIDs.get(rowID, 0, actRowIDLen);
+			put = new Put(rowID);
+			numCols = bbRows.getShort();
+			for (short colIndex = 0; colIndex < numCols; colIndex++)
+			{
+				colNameLen = bbRows.getShort();
+				colName = new byte[colNameLen];
+				bbRows.get(colName, 0, colNameLen);
+				colValueLen = bbRows.getInt();	
+				colValue = new byte[colValueLen];
+				bbRows.get(colValue, 0, colValueLen);
+				put.add(getFamily(colName), getName(colName), colValue); 
+			}
+			if (writeToWAL)  
+				put.setWriteToWAL(writeToWAL);
+			listOfPuts.add(put);
+		}
+		if (autoFlush == false)
+			table.setAutoFlush(false, true);
+		if (asyncOperation) {
+			future = executorService.submit(new Callable() {
+				public Object call() throws Exception {
+					boolean res = true;
+					if (useTRex && (transID != 0)) 
+						table.put(transID, listOfPuts);
+					else 
+						table.put(listOfPuts);
+					return new Boolean(res);
+				}
+			});
+		}
+		else {
+			if (useTRex && (transID != 0)) 
+				table.put(transID, listOfPuts);
+			else 
+				table.put(listOfPuts);
+		}
+		return true;
+	} 
+
+	public boolean completeAsyncOperation(int timeout, boolean resultArray[]) 
+			throws InterruptedException, ExecutionException
+	{
+		if (timeout == -1) {
+			if (! future.isDone()) 
+				return false;
+		}
+	 	try {			
+			Boolean result = (Boolean)future.get(timeout, TimeUnit.MILLISECONDS);
+                        // Need to enhance to return the result 
+                        // for each Put object
+			for (int i = 0; i < resultArray.length; i++)
+			    resultArray[i] = result.booleanValue();
+			future = null;
+ 		} catch(TimeoutException te) {
+			return false;
+		} 
+		return true;
+	}
+
+	public boolean checkAndInsertRow(long transID, byte[] rowID, 
+                         Object row, 
+			 long timestamp,
+                         boolean asyncOperation) throws IOException, InterruptedException, ExecutionException  {
+		return putRow(transID, rowID, row, null, null, 
+				true, asyncOperation);
+	}
+
+	public boolean checkAndUpdateRow(long transID, byte[] rowID, 
+             Object columns, byte[] columnToCheck, byte[] colValToCheck,
+             long timestamp, boolean asyncOperation) throws IOException, InterruptedException, 
+                                    ExecutionException, Throwable  {
+		return putRow(transID, rowID, columns, columnToCheck, 
+			colValToCheck, 
+				true, asyncOperation);
+	}
+
+        public byte[] coProcAggr(long transID, int aggrType, 
+		byte[] startRowID, 
+              byte[] stopRowID, byte[] colFamily, byte[] colName, 
+              boolean cacheBlocks, int numCacheRows) 
+                          throws IOException, Throwable {
+
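+                    // Currently only a row count is computed (aggrType is not yet used),
+                    // via the (transactional) AggregationClient coprocessor; the count is
+                    // returned as an 8-byte little-endian value.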
+		    Configuration customConf = table.getConfiguration();
+                    long rowCount = 0;
+
+                    if (transID > 0) {
+		      TransactionalAggregationClient aggregationClient = 
+                          new TransactionalAggregationClient(customConf);
+		      Scan scan = new Scan();
+		      scan.addFamily(colFamily);
+		      scan.setCacheBlocks(false);
+		      final ColumnInterpreter<Long, Long, EmptyMsg, LongMsg, LongMsg> ci =
+			new LongColumnInterpreter();
+		      byte[] tname = getTableName().getBytes();
+		      rowCount = aggregationClient.rowCount(transID, 
+                        org.apache.hadoop.hbase.TableName.valueOf(getTableName()),
+                        ci,
+                        scan);
+                    }
+                    else {
+		      AggregationClient aggregationClient = 
+                          new AggregationClient(customConf);
+		      Scan scan = new Scan();
+		      scan.addFamily(colFamily);
+		      scan.setCacheBlocks(false);
+		      final ColumnInterpreter<Long, Long, EmptyMsg, LongMsg, LongMsg> ci =
+			new LongColumnInterpreter();
+		      byte[] tname = getTableName().getBytes();
+		      rowCount = aggregationClient.rowCount( 
+                        org.apache.hadoop.hbase.TableName.valueOf(getTableName()),
+                        ci,
+                        scan);
+                    }
+
+		    coprocAggrResult = new ByteArrayList();
+
+		    byte[] rcBytes = 
+                      ByteBuffer.allocate(8).order(ByteOrder.LITTLE_ENDIAN).putLong(rowCount).array();
+                    return rcBytes; 
+	}
+
+	public boolean flush() throws IOException {
+		if (table != null)
+			table.flushCommits();
+		return true;
+	}
+
+	public boolean release(boolean cleanJniObject) throws IOException {
+
+           boolean retcode = false;
+          // Complete the pending IO
+           if (future != null) {
+              try {
+                 future.get(30, TimeUnit.SECONDS);
+              } catch(TimeoutException e) {
+		  logger.error("Asynchronous Thread is Cancelled (timeout), " + e);
+                  retcode = true;
+                  future.cancel(true); // Interrupt the thread
+              } catch(InterruptedException e) {
+		  logger.error("Asynchronous Thread is Cancelled (interrupt), " + e);
+                  retcode = true;
+                  future.cancel(true); // Interrupt the thread
+              } catch (ExecutionException ee)
+              {
+              }
+              future = null;
+          }
+	  if (table != null)
+	    table.flushCommits();
+	  if (scanner != null) {
+	    scanner.close();
+	    scanner = null;
+	  }
+	  if (snapHelper !=null)
+	  {
+	    snapHelper.release();
+	    snapHelper = null;
+	  }
+	  cleanScan();		
+	  getResultSet = null;
+	  if (cleanJniObject) {
+	    if (jniObject != 0)
+	      cleanup(jniObject);
+            tableName = null;
+	  }
+          scanHelper = null;
+	  jniObject = 0;
+	  return retcode;
+	}
+
+	public boolean close(boolean clearRegionCache, boolean cleanJniObject) throws IOException {
+           if (logger.isTraceEnabled()) logger.trace("Enter close() " + tableName);
+           if (table != null) 
+           {
+              if (clearRegionCache)
+              {
+                 HConnection connection = table.getConnection();
+                 connection.clearRegionCache(tableName.getBytes());
+              }
+              table.close();
+              table = null;
+           }
+           return true;
+	}
+
+	public ByteArrayList getEndKeys() throws IOException {
+	    if (logger.isTraceEnabled()) logger.trace("Enter getEndKeys() " + tableName);
+            ByteArrayList result = new ByteArrayList();
+            if (table == null) {
+                return null;
+            }
+            byte[][] htableResult = table.getEndKeys();
+
+            // transfer the HTable result to ByteArrayList
+            for (int i=0; i<htableResult.length; i++ ) {
+                if (logger.isTraceEnabled()) logger.trace("Inside getEndKeys(), result[i]: " + 
+                             htableResult[i]);
+                if (logger.isTraceEnabled()) logger.trace("Inside getEndKeys(), result[i]: " + 
+                             new String(htableResult[i]));
+                result.add(htableResult[i]);
+            }
+
+            if (logger.isTraceEnabled()) logger.trace("Exit getEndKeys(), result size: " + result.getSize());
+            return result;
+	}
+
+    public ByteArrayList getStartKeys() throws IOException {
+        if (logger.isTraceEnabled()) logger.trace("Enter getStartKeys() " + tableName);
+        ByteArrayList result = new ByteArrayList();
+        if (table == null) {
+            return null;
+        }
+        byte[][] htableResult = table.getStartKeys();
+
+        // transfer the HTable result to ByteArrayList
+        for (int i=0; i<htableResult.length; i++ ) {
+            if (logger.isTraceEnabled()) logger.trace("Inside getStartKeys(), result[i]: " + 
+                         htableResult[i]);
+            if (logger.isTraceEnabled()) logger.trace("Inside getStartKeys(), result[i]: " + 
+                         new String(htableResult[i]));
+            result.add(htableResult[i]);
+        }
+
+        if (logger.isTraceEnabled()) logger.trace("Exit getStartKeys(), result size: " + result.getSize());
+        return result;
+    }
+
+    private void cleanScan()
+    {
+        if (fetchType == GET_ROW || fetchType == BATCH_GET)
+           return;
+        numRowsCached = 1;
+        numColsInScan = 0;
+        kvValLen = null;
+        kvValOffset = null;
+        kvQualLen = null;
+        kvQualOffset = null;
+        kvFamLen = null;
+        kvFamOffset = null;
+        kvTimestamp = null;
+        kvBuffer = null;
+        rowIDs = null;
+        kvsPerRow = null;
+    }
+
+    protected void setJniObject(long inJniObject) {
+       jniObject = inJniObject;
+    }    
+
+    private native int setResultInfo(long jniObject,
+				int[] kvValLen, int[] kvValOffset,
+				int[] kvQualLen, int[] kvQualOffset,
+				int[] kvFamLen, int[] kvFamOffset,
+  				long[] timestamp, 
+				byte[][] kvBuffer, byte[][] rowIDs,
+				int[] kvsPerRow, int numCellsReturned,
+				int rowsReturned);
+
+   private native void cleanup(long jniObject);
+
+   protected native int setJavaObject(long jniObject);
+ 
+   static {
+     executorService = Executors.newCachedThreadPool();
+     System.loadLibrary("executor");
+   }
+}

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/src/main/java/org/trafodion/sql/HiveClient.java
----------------------------------------------------------------------
diff --git a/core/sql/src/main/java/org/trafodion/sql/HiveClient.java b/core/sql/src/main/java/org/trafodion/sql/HiveClient.java
new file mode 100644
index 0000000..3ab6ef8
--- /dev/null
+++ b/core/sql/src/main/java/org/trafodion/sql/HiveClient.java
@@ -0,0 +1,301 @@
+// @@@ START COPYRIGHT @@@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// @@@ END COPYRIGHT @@@
+
+package org.trafodion.sql;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.ArrayList;
+import java.util.List;
+import java.lang.reflect.Field;
+
+import org.apache.log4j.PropertyConfigurator;
+import org.apache.log4j.Logger;
+import org.apache.thrift.TException;
+
+import org.apache.hadoop.util.StringUtils;
+
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.UnknownDBException;
+// These are needed for the DDL_TIME constant. This class is different in Hive 0.10.
+// We use Java reflection instead of importing the class statically. 
+// For Hive 0.9 or lower
+// import org.apache.hadoop.hive.metastore.api.Constants;
+// For Hive 0.10 or higher
+// import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FSDataOutputStream;
+
+import java.sql.SQLException;
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.Statement;
+import java.sql.DriverManager;
+
+
+public class HiveClient {
+    static Logger logger = Logger.getLogger(HiveClient.class.getName());
+    static String ddlTimeConst = null;
+    String lastError;
+    HiveConf hiveConf = null;
+    HiveMetaStoreClient hmsClient  ;
+    FSDataOutputStream fsOut = null;
+
+    public HiveClient() {
+   
+    }
+
+    public String getLastError() {
+        return lastError;
+    }
+
+    void setLastError(String err) {
+        lastError = err;
+    }
+
+    void setupLog4j() {
+        String confFile = System.getenv("MY_SQROOT")
+            + "/conf/log4j.hdfs.config";
+        PropertyConfigurator.configure(confFile);
+    }
+
+    public boolean init(String metastoreURI) 
+              throws MetaException {
+         setupLog4j();
+         if (logger.isDebugEnabled()) logger.debug("HiveClient.init(" + metastoreURI + " " + ") called.");
+         ddlTimeConst = getDDLTimeConstant();
+         hiveConf = new HiveConf();
+	 if (metastoreURI.length() > 0) {
+             hiveConf.set("hive.metastore.local", "false");
+             hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, metastoreURI);
+         }
+         hmsClient = new HiveMetaStoreClient(hiveConf, null);
+         return true;
+    }
+
+    public boolean close() {
+        hmsClient.close();
+        return true;
+    }
+
+    public boolean exists(String schName, String tblName)  
+        throws MetaException, TException, UnknownDBException {
+            if (logger.isDebugEnabled()) logger.debug("HiveClient.exists(" + schName + " , " + tblName + ") called.");
+            boolean result = hmsClient.tableExists(schName, tblName);
+            return result;
+    }
+
+    public String getHiveTableString(String schName, String tblName)
+        throws MetaException, TException {
+        Table table;
+        if (logger.isDebugEnabled()) logger.debug("HiveClient.getHiveTableString(" + schName + " , " + 
+                     tblName + ") called.");
+        try {
+            table = hmsClient.getTable(schName, tblName);
+        }
+        catch (NoSuchObjectException x) {
+            if (logger.isDebugEnabled()) logger.debug("HiveTable not found");
+            return new String("");
+        }
+        if (logger.isDebugEnabled()) logger.debug("HiveTable is " + table.toString());
+        return table.toString() ;
+    }
+
+    public long getRedefTime(String schName, String tblName)
+        throws MetaException, TException, ClassCastException, NullPointerException, NumberFormatException {
+        Table table;
+        if (logger.isDebugEnabled()) logger.debug("HiveClient.getRedefTime(" + schName + " , " + 
+                     tblName + ") called.");
+        try {
+            table = hmsClient.getTable(schName, tblName);
+            if (table == null) {
+                if (logger.isDebugEnabled()) logger.debug("getTable returned null for " + schName + "." + tblName + ".");
+                return 0;
+            }
+        }
+        catch (NoSuchObjectException x) {
+            if (logger.isDebugEnabled()) logger.debug("Hive table no longer exists.");
+            return 0;
+        }
+
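+        // Default to the table's create time; if the metastore recorded a
+        // DDL_TIME parameter, use that as the redefinition time instead.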
+        long redefTime = table.getCreateTime();
+        if (table.getParameters() != null){
+            // those would be used without reflection
+            //String rfTime = table.getParameters().get(Constants.DDL_TIME);
+            //String rfTime = table.getParameters().get(hive_metastoreConstants.DDL_TIME);
+            // determining the constant using reflection instead
+            String rfTime = table.getParameters().get(ddlTimeConst);
+            if (rfTime != null)
+                redefTime = Long.parseLong(rfTime);
+        }
+        if (logger.isDebugEnabled()) logger.debug("RedefTime is " + redefTime);
+        return redefTime ;
+    }
+
+    public Object[] getAllSchemas() throws MetaException {
+        List<String> schemaList = (hmsClient.getAllDatabases());
+        if (schemaList != null)
+           return schemaList.toArray();
+        else
+           return null; 
+    }
+
+    public Object[] getAllTables(String schName) 
+        throws MetaException {
+        List<String> tableList = hmsClient.getAllTables(schName);
+        if (tableList != null)
+           return tableList.toArray();
+        else
+           return null;
+    }
+
+    // Because Hive changed the name of the class containing internal constants
+    // in Hive 0.10, we use Java reflection to get the value of the DDL_TIME constant.
+    public static String getDDLTimeConstant()
+        throws MetaException {
+
+        Class constsClass = null;
+        Object constsFromReflection = null; 
+        Field ddlTimeField = null;
+        Object fieldVal = null;
+
+        // Using the class loader, try to load either class by name.
+        // Note that both classes have a default constructor and both have a static
+        // String field DDL_TIME, so the rest of the code is the same for both.
+        try { 
+            try {
+                constsClass = Class.forName(
+                   // Name in Hive 0.10 and higher
+                   "org.apache.hadoop.hive.metastore.api.hive_metastoreConstants");
+            } catch (ClassNotFoundException e) { 
+                // probably not found because we are using Hive 0.9 or lower
+                constsClass = null;
+            } 
+            if (constsClass == null) {
+                constsClass = Class.forName(
+                    // Name in Hive 0.9 and lower
+                    "org.apache.hadoop.hive.metastore.api.Constants");
+            }
+
+            // Make a new object for this class, using the default constructor
+            constsFromReflection = constsClass.newInstance(); 
+        } catch (InstantiationException e) { 
+            throw new MetaException("Instantiation error for metastore constants class");
+        } catch (IllegalAccessException e) { 
+            throw new MetaException("Illegal access exception");
+        } catch (ClassNotFoundException e) { 
+            throw new MetaException("Could not find Hive Metastore constants class");
+        } 
+
+        // Using Java reflection, get a reference to the DDL_TIME field
+        try {
+            ddlTimeField = constsClass.getField("DDL_TIME");
+        } catch (NoSuchFieldException e) {
+            throw new MetaException("Could not find DDL_TIME constant field");
+        }
+
+        // get the String object that represents the value of this field
+        try {
+            fieldVal = ddlTimeField.get(constsFromReflection);
+        } catch (IllegalAccessException e) {
+            throw new MetaException("Could not get value for DDL_TIME constant field");
+        }
+
+        return fieldVal.toString();
+    }
+
+  ///////////////////   
+  boolean hdfsCreateFile(String fname) throws IOException
+  {
+    HiveConf  config = new HiveConf();
+    if (logger.isDebugEnabled()) logger.debug("HiveClient.hdfsCreateFile() - started" );
+    Path filePath = new Path(fname);
+    FileSystem fs = FileSystem.get(filePath.toUri(),config);
+    fsOut = fs.create(filePath, true);
+    
+    if (logger.isDebugEnabled()) logger.debug("HiveClient.hdfsCreateFile() - file created" );
+
+    return true;
+  }
+  
+  boolean hdfsWrite(byte[] buff, long len) throws Exception
+  {
+
+    if (logger.isDebugEnabled()) logger.debug("HiveClient.hdfsWrite() - started" );
+    try
+    {
+      fsOut.write(buff);
+      fsOut.flush();
+    }
+    catch (Exception e)
+    {
+      if (logger.isDebugEnabled()) logger.debug("HiveClient.hdfsWrite() -- exception: " + e);
+      throw e;
+    }
+    if (logger.isDebugEnabled()) logger.debug("HiveClient.hdfsWrite() - bytes written and flushed:" + len  );
+    
+    return true;
+  }
+  
+  boolean hdfsClose() throws IOException
+  {
+    if (logger.isDebugEnabled()) logger.debug("HiveClient.hdfsClose() - started" );
+    try
+    {
+      fsOut.close();
+    }
+    catch (IOException e)
+    {
+      if (logger.isDebugEnabled()) logger.debug("HiveClient.hdfsClose() - exception:" + e);
+      throw e;
+    }
+    return true;
+  }
+  
+  public void executeHiveSQL(String ddl) throws ClassNotFoundException, SQLException
+  {
+      try
+      {
+          Class.forName("org.apache.hive.jdbc.HiveDriver");
+      }
+ 
+      catch(ClassNotFoundException e) 
+      {
+          throw e;
+      }
+
+      try 
+      {
+          Connection con = DriverManager.getConnection("jdbc:hive2://", "hive", "");
+          Statement stmt = con.createStatement();
+          stmt.execute(ddl);
+      }
+ 
+      catch(SQLException e)
+      {
+	  throw e;
+      }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/src/main/java/org/trafodion/sql/OrcFileReader.java
----------------------------------------------------------------------
diff --git a/core/sql/src/main/java/org/trafodion/sql/OrcFileReader.java b/core/sql/src/main/java/org/trafodion/sql/OrcFileReader.java
new file mode 100644
index 0000000..a7cb22e
--- /dev/null
+++ b/core/sql/src/main/java/org/trafodion/sql/OrcFileReader.java
@@ -0,0 +1,500 @@
+// @@@ START COPYRIGHT @@@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// @@@ END COPYRIGHT @@@
+
+package org.trafodion.sql;
+
+import java.io.IOException;
+import java.util.*;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+
+import org.apache.hadoop.hive.serde2.objectinspector.*;
+import org.apache.hadoop.hive.ql.io.orc.*;
+
+public class OrcFileReader
+{
+
+    Configuration               m_conf;
+    Path                        m_file_path;
+    
+    Reader                      m_reader;
+    List<OrcProto.Type>         m_types;
+    StructObjectInspector       m_oi;
+    List<? extends StructField> m_fields;
+    RecordReader                m_rr;
+    String                      lastError = null;
+    Reader.Options		m_options;
+
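+// Holder for one row handed back to the SQL layer: the encoded row bytes,
+// the used length, the column count, and the row number within the file.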
+public class OrcRowReturnSQL
+{
+    int m_row_length;
+    int m_column_count;
+    long m_row_number;
+    byte[] m_row_ba = new byte[4096];
+}
+
+    OrcRowReturnSQL		rowData;	//TEMP!!
+
+
+    OrcFileReader() {
+	m_conf = new Configuration();
+	rowData = new OrcRowReturnSQL();	//TEMP: was in fetch
+    }
+
+//********************************************************************************
+
+//  ORIGINAL VERSION BEFORE ADDING SUPPORT FOR COLUMN SELECTION
+    public String open(String pv_file_name) throws IOException {
+//    pv_file_name= pv_file_name + "/000000_0";
+
+	m_file_path = new Path(pv_file_name);
+
+	try {
+	    m_reader = OrcFile.createReader(m_file_path, OrcFile.readerOptions(m_conf));
+	} catch (java.io.FileNotFoundException e1) {
+	    return "file not found";
+	}
+	if (m_reader == null)
+	    return "open failed!";
+	m_types = m_reader.getTypes();
+	m_oi = (StructObjectInspector) m_reader.getObjectInspector();
+	m_fields = m_oi.getAllStructFieldRefs();
+
+	try {
+	    m_rr = m_reader.rows();
+	} catch (java.io.IOException e1) {
+	    return e1.getMessage();
+	}
+
+	if (m_rr == null)
+	    return "open:RecordReader is null";
+	return null;
+    }
+
+//********************************************************************************
+/*
+    public String open(String pv_file_name) throws Exception {
+//    pv_file_name= pv_file_name + "/000000_0";
+	m_file_path = new Path(pv_file_name);
+
+		try{
+				m_reader = OrcFile.createReader(m_file_path, OrcFile.readerOptions(m_conf));
+		} catch (java.io.FileNotFoundException e1) {
+						return "file not found";
+		}
+	if (m_reader == null)
+			return "open failed!";
+	m_types = m_reader.getTypes();
+	m_oi = (StructObjectInspector) m_reader.getObjectInspector();
+	m_fields = m_oi.getAllStructFieldRefs();
+	
+//	m_rr = m_reader.rows();		//RESTORE THIS as working code!
+//						boolean[] includes = new boolean[29];
+  						boolean[] includes = new boolean[] 					{true,true,false,false,false,false,false,false,false,false,false,false,
+  											false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,true};
+  						 m_options = new Reader.Options();
+//  						my_options.include(includes);
+//  						System.out.println("Array size: " + includes.length);
+ 					m_rr = m_reader.rowsOptions(m_options.include(includes));
+// 					m_rr = m_reader.rowsOptions(m_options.include(new boolean[] {false,true,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false}));
+//{true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true}));
+
+	return null;
+    }
+*/
+//********************************************************************************
+    
+	public String close()
+	{
+	    m_reader = null;
+	    m_rr = null;
+	    m_file_path = null;
+	    return null;
+	}
+
+
+    public void printFileInfo() throws Exception {
+
+	System.out.println("Reader: " + m_reader);
+
+
+	System.out.println("# Rows: " + m_reader.getNumberOfRows());
+	System.out.println("# Types in the file: " + m_types.size());
+	for (int i=0; i < m_types.size(); i++) {
+	    System.out.println("Type " + i + ": " + m_types.get(i).getKind());
+	}
+
+	System.out.println("Compression: " + m_reader.getCompression());
+	if (m_reader.getCompression() != CompressionKind.NONE) {
+	    System.out.println("Compression size: " + m_reader.getCompressionSize());
+	}
+
+	m_oi = (StructObjectInspector) m_reader.getObjectInspector();
+	
+	System.out.println("object inspector type category: " + m_oi.getCategory());
+	System.out.println("object inspector type name    : " + m_oi.getTypeName());
+
+	System.out.println("Number of columns in the table: " + m_fields.size());
+
+	// Print the type info:
+	for (int i = 0; i < m_fields.size(); i++) {
+	    System.out.println("Column " + i + " name: " + m_fields.get(i).getFieldName());
+	    ObjectInspector lv_foi = m_fields.get(i).getFieldObjectInspector();
+	    System.out.println("Column " + i + " type category: " + lv_foi.getCategory());
+	    System.out.println("Column " + i + " type name: " + lv_foi.getTypeName());
+	}
+
+    }
+
+    public boolean seekToRow(long pv_rowNumber) throws IOException {
+
+	if (m_reader == null) {
+	    return false;
+	}
+
+	if ((pv_rowNumber < 0) ||
+	    (pv_rowNumber >= m_reader.getNumberOfRows())) {
+	    return false;
+	}
+
+	m_rr.seekToRow(pv_rowNumber);
+
+	return true;
+    }
+
+    public String seeknSync(long pv_rowNumber) throws IOException {
+	if (m_reader == null) {
+	    return "Looks like a file has not been opened. Call open() first.";
+	}
+
+	if ((pv_rowNumber < 0) ||
+	    (pv_rowNumber >= m_reader.getNumberOfRows())) {
+	    return "Invalid rownumber: " + pv_rowNumber + " provided.";
+	}
+
+	m_rr.seekToRow(pv_rowNumber);
+
+	return null;
+    }
+
+    public long getNumberOfRows() throws IOException {
+
+	return m_reader.getNumberOfRows();
+
+    }
+
+    public long getPosition() throws IOException {
+
+	return m_rr.getRowNumber();
+
+    }
+
+    // Dumps the content of the file. The columns are '|' separated.
+    public void readFile_String() throws Exception {
+
+	seeknSync(0);
+	OrcStruct lv_row = null;
+	Object lv_field_val = null;
+   	StringBuilder lv_row_string = new StringBuilder(1024);
+	while (m_rr.hasNext()) {
+	    lv_row = (OrcStruct) m_rr.next(lv_row);
+	    lv_row_string.setLength(0);
+	    for (int i = 0; i < m_fields.size(); i++) {
+		lv_field_val = m_oi.getStructFieldData(lv_row, m_fields.get(i));
+		if (lv_field_val != null) {
+		    lv_row_string.append(lv_field_val);
+		}
+		lv_row_string.append('|');
+	    }
+	    System.out.println(lv_row_string);
+	}
+
+    }
+
+
+    // Dumps the contents of the file as ByteBuffer.
+    public void readFile_ByteBuffer() throws Exception {
+
+	OrcStruct lv_row = null;
+	Object lv_field_val = null;
+   	ByteBuffer lv_row_buffer;
+
+	seeknSync(0);
+	while (m_rr.hasNext()) {
+	    byte[] lv_row_ba = new byte[4096];
+	    lv_row_buffer = ByteBuffer.wrap(lv_row_ba);
+	    lv_row = (OrcStruct) m_rr.next(lv_row);
+	    for (int i = 0; i < m_fields.size(); i++) {
+		lv_field_val = m_oi.getStructFieldData(lv_row, m_fields.get(i));
+		if (lv_field_val == null) {
+		    lv_row_buffer.putInt(0);
+		    continue;
+		}
+		String lv_field_val_str = lv_field_val.toString();
+		lv_row_buffer.putInt(lv_field_val_str.length());
+		if (lv_field_val != null) {
+		    lv_row_buffer.put(lv_field_val_str.getBytes());
+		}
+	    }
+	    System.out.println(lv_row_buffer);
+	    //	    System.out.println(new String(lv_row_buffer.array()));
+	}
+    }
+
+    public String getNext_String(char pv_ColSeparator) throws Exception {
+
+	if ( ! m_rr.hasNext()) {
+	    return null;
+	}
+
+	OrcStruct lv_row = null;
+	Object lv_field_val = null;
+   	StringBuilder lv_row_string = new StringBuilder(1024);
+
+	lv_row = (OrcStruct) m_rr.next(lv_row);
+	for (int i = 0; i < m_fields.size(); i++) {
+	    lv_field_val = m_oi.getStructFieldData(lv_row, m_fields.get(i));
+	    if (lv_field_val != null) {
+		lv_row_string.append(lv_field_val);
+	    }
+	    lv_row_string.append(pv_ColSeparator);
+	}
+	
+	return lv_row_string.toString();
+    }
+
+    // returns the next row as a byte array
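+    // (each column is written as a 4-byte length followed by the column value
+    // rendered as a string; a null column is written as length 0)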
+    public byte[] fetchNextRow() throws Exception {
+
+	if ( ! m_rr.hasNext()) {
+	    return null;
+	}
+
+//	OrcStruct lv_row = (OrcStruct) m_rr.next(null);
+ OrcStruct lv_row = (OrcStruct) m_rr.next(null);
+	Object lv_field_val = null;
+   	ByteBuffer lv_row_buffer;
+
+	byte[] lv_row_ba = new byte[4096];
+	lv_row_buffer = ByteBuffer.wrap(lv_row_ba);
+	for (int i = 0; i < m_fields.size(); i++) {
+	    lv_field_val = m_oi.getStructFieldData(lv_row, m_fields.get(i));
+	    if (lv_field_val == null) {
+  		lv_row_buffer.putInt(0);
+		continue;
+	    }
+	    String lv_field_val_str = lv_field_val.toString();
+	    lv_row_buffer.putInt(lv_field_val_str.length());
+	    if (lv_field_val != null) {
+		lv_row_buffer.put(lv_field_val_str.getBytes());
+	    }
+	}
+	return lv_row_buffer.array();
+    }
+    
+    
+//****************************************************************************
+	
+//THIS IS THE ORIGINAL FORM BEFORE ADDING SUPPORT FOR COLUMN SELECTION !!!!
+public OrcRowReturnSQL fetchNextRowObj() throws Exception
+{
+//		int	lv_integerLength = Integer.Bytes;
+		int	lv_integerLength = 4;
+//		OrcRowReturnSQL rowData = new OrcRowReturnSQL();
+	 
+	if ( ! m_rr.hasNext()) {
+	    return null;
+	}
+
+	OrcStruct lv_row = (OrcStruct) m_rr.next(null);
+	Object lv_field_val = null;
+   	ByteBuffer lv_row_buffer;
+
+//	lv_row_buffer.order(ByteOrder.LITTLE_ENDIAN);
+	lv_row_buffer = ByteBuffer.wrap(rowData.m_row_ba);
+	lv_row_buffer.order(ByteOrder.LITTLE_ENDIAN);
+	
+	rowData.m_row_length = 0;
+	rowData.m_column_count = m_fields.size();
+	rowData.m_row_number = m_rr.getRowNumber();
+	
+	for (int i = 0; i < m_fields.size(); i++) {
+	    lv_field_val = m_oi.getStructFieldData(lv_row, m_fields.get(i));
+	    if (lv_field_val == null) {
+  		lv_row_buffer.putInt(0);
+  		rowData.m_row_length = rowData.m_row_length + lv_integerLength;
+		continue;
+	    }
+	    String lv_field_val_str = lv_field_val.toString();
+	    lv_row_buffer.putInt(lv_field_val_str.length());
+  			rowData.m_row_length = rowData.m_row_length + lv_integerLength;
+	    if (lv_field_val != null) {
+		lv_row_buffer.put(lv_field_val_str.getBytes());
+  		rowData.m_row_length = rowData.m_row_length + lv_field_val_str.length();
+	    }
+	}
+    	 
+	 return rowData;
+	
+}
+
+//****************************************************************************
+/*
+public OrcRowReturnSQL fetchNextRowObj() throws Exception
+{
+//		int	lv_integerLength = Integer.Bytes;
+		int	lv_integerLength = 4;
+		boolean[]	lv_include;
+		
+		OrcRowReturnSQL rowData = new OrcRowReturnSQL();
+	 
+	 	if ( ! m_rr.hasNext()) {
+	    return null;
+	}
+
+	OrcStruct lv_row = (OrcStruct) m_rr.next(null);
+	Object lv_field_val = null;
+   	ByteBuffer lv_row_buffer;
+
+//	lv_row_buffer.order(ByteOrder.LITTLE_ENDIAN);
+	lv_row_buffer = ByteBuffer.wrap(rowData.m_row_ba);
+	lv_row_buffer.order(ByteOrder.LITTLE_ENDIAN);
+//	rowData.m_column_count = m_fields.size();
+	rowData.m_column_count = 0;;
+	rowData.m_row_number = m_rr.getRowNumber();
+	lv_include = m_options.getInclude();
+	
+	for (int i = 0; i < m_fields.size(); i++) {
+					if (lv_include[i+1] == false) continue;
+	    lv_field_val = m_oi.getStructFieldData(lv_row, m_fields.get(i));
+	    if (lv_field_val == null) {
+  				lv_row_buffer.putInt(0);
+  				rowData.m_row_length = rowData.m_row_length + lv_integerLength;
+						rowData.m_column_count++;;
+						continue;
+	    }
+	    String lv_field_val_str = lv_field_val.toString();
+	    lv_row_buffer.putInt(lv_field_val_str.length());
+  			rowData.m_row_length = rowData.m_row_length + lv_integerLength;
+	    if (lv_field_val != null) {
+		lv_row_buffer.put(lv_field_val_str.getBytes());
+  		rowData.m_row_length = rowData.m_row_length + lv_field_val_str.length();
+				rowData.m_column_count++;;
+
+	    }
+	}
+    	 
+	 return rowData;
+	
+}
+*/
+//****************************************************************************
+String getLastError() {
+      return lastError;
+  }
+
+//****************************************************************************
+public boolean isEOF() throws Exception
+{ 
+	return ! m_rr.hasNext();
+}  
+//****************************************************************************
+ public String fetchNextRow(char pv_ColSeparator) throws Exception {
+
+	if ( ! m_rr.hasNext()) {
+	    return null;
+	}
+
+	OrcStruct lv_row = null;
+	Object lv_field_val = null;
+   	StringBuilder lv_row_string = new StringBuilder(1024);
+
+	lv_row = (OrcStruct) m_rr.next(lv_row);
+	for (int i = 0; i < m_fields.size(); i++) {
+	    lv_field_val = m_oi.getStructFieldData(lv_row, m_fields.get(i));
+	    if (lv_field_val != null) {
+		lv_row_string.append(lv_field_val);
+	    }
+	    lv_row_string.append(pv_ColSeparator);
+	}
+	
+	return lv_row_string.toString();
+    }
+    
+
+
+    public static void main(String[] args) throws Exception
+    {
+	System.out.println("OrcFile Reader main");
+
+	OrcFileReader lv_this = new OrcFileReader();
+
+	lv_this.open(args[0]);
+
+	lv_this.printFileInfo();
+
+	lv_this.readFile_String();
+
+	lv_this.readFile_ByteBuffer();
+
+	// Gets rows as byte[]  (starts at row# 4)
+	boolean lv_done = false;
+	if (lv_this.seeknSync(4) == null) {
+	    while (! lv_done) {
+		System.out.println("Next row #: " + lv_this.getPosition());
+		byte[] lv_row_bb = lv_this.fetchNextRow();
+		if (lv_row_bb != null) {
+		    System.out.println("First 100 bytes of lv_row_bb: " + new String(lv_row_bb, 0, 100));
+		    System.out.println("Length lv_row_bb: " + lv_row_bb.length);
+		}
+		else {
+		    lv_done = true;
+		}
+	    }
+	}
+
+	// Gets rows as String (starts at row# 10)
+	lv_done = false;
+	String lv_row_string;
+	if (lv_this.seeknSync(10) == null) {
+	    while (! lv_done) {
+		lv_row_string = lv_this.getNext_String('|');
+		if (lv_row_string != null) {
+		    System.out.println(lv_row_string);
+		}
+		else {
+		    lv_done = true;
+		}
+	    }
+	}
+        System.out.println("Shows the change in place");
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/src/main/java/org/trafodion/sql/ResultIterator.java
----------------------------------------------------------------------
diff --git a/core/sql/src/main/java/org/trafodion/sql/ResultIterator.java b/core/sql/src/main/java/org/trafodion/sql/ResultIterator.java
new file mode 100644
index 0000000..fccda24
--- /dev/null
+++ b/core/sql/src/main/java/org/trafodion/sql/ResultIterator.java
@@ -0,0 +1,133 @@
+// @@@ START COPYRIGHT @@@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// @@@ END COPYRIGHT @@@
+
+package org.trafodion.sql;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+
+
+public class ResultIterator {
+	ResultScanner   scanner;
+	Result[]        resultSet;
+	Result          row = null;
+	scanFetchStep   step;
+	List<KeyValue>  kvList;
+	int 			listIndex = 0;
+	int             cellIndex;
+	int				numKVs;
+	boolean         isSingleRow = false;
+	
+	private enum scanFetchStep {
+		SCAN_FETCH_NEXT_ROW,
+		SCAN_FETCH_NEXT_COL,
+		SCAN_FETCH_CLOSE
+	} ;
+
+	public ResultIterator(ResultScanner scanner) {
+		this.scanner = scanner;
+		resultSet = null;
+		step = scanFetchStep.SCAN_FETCH_NEXT_ROW;
+	}
+	
+	public ResultIterator(Result[] results) {
+		this.scanner = null;
+		resultSet = results;
+		step = scanFetchStep.SCAN_FETCH_NEXT_ROW;
+	}
+	
+	public ResultIterator(Result result) {
+		this.scanner = null;
+		resultSet = null;
+		row = result;
+		isSingleRow = true;
+		step = scanFetchStep.SCAN_FETCH_NEXT_ROW;
+	}
+	
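+	// Returns the next KeyValue, advancing the row source (scanner, result
+	// array, or the single cached row) whenever the current row is exhausted;
+	// returns null once all rows have been consumed.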
+	KeyValue nextCell() throws IOException {
+		while (true)
+		{
+			switch (step)
+			{
+				case SCAN_FETCH_NEXT_ROW:
+				{
+					if (!isSingleRow) {
+						if (scanner != null)
+							row = scanner.next();
+						else {
+							if (listIndex == resultSet.length) {
+								step = scanFetchStep.SCAN_FETCH_CLOSE;
+								break;
+							}
+							row = resultSet[listIndex];
+							listIndex++;
+						}
+					}
+
+					if (row == null || row.isEmpty()) {
+						step = scanFetchStep.SCAN_FETCH_CLOSE;
+						break;
+					}
+
+					kvList = row.list();
+					cellIndex = 0;
+					numKVs = kvList.size();
+
+					step = scanFetchStep.SCAN_FETCH_NEXT_COL;
+				}
+				break;
+	
+				case SCAN_FETCH_NEXT_COL:
+				{
+					KeyValue kv = kvList.get(cellIndex);
+					cellIndex++;
+					if (kv == null) {
+						if (isSingleRow)
+							step = scanFetchStep.SCAN_FETCH_CLOSE;
+						else
+							step = scanFetchStep.SCAN_FETCH_NEXT_ROW;
+						break;
+					}
+
+					if (cellIndex == numKVs)
+						if (isSingleRow)
+							step = scanFetchStep.SCAN_FETCH_CLOSE;
+						else
+							step = scanFetchStep.SCAN_FETCH_NEXT_ROW;
+
+					return kv;
+				}
+				
+				case SCAN_FETCH_CLOSE:
+				{
+					return null;
+				}
+	
+			}// switch
+		} // while
+		
+	}
+	
+}
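
A minimal usage sketch for ResultIterator (illustrative; it assumes caller code in the org.trafodion.sql package, since nextCell() is package-private, and an already-open HBase ResultScanner; the helper class name is made up):

    package org.trafodion.sql;

    import java.io.IOException;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.util.Bytes;

    class ResultIteratorSketch {
        static void dumpCells(ResultScanner scanner) throws IOException {
            ResultIterator it = new ResultIterator(scanner);
            KeyValue kv;
            while ((kv = it.nextCell()) != null) {
                // nextCell() hands back one cell at a time and advances to the
                // next row once the current row's KeyValue list is exhausted.
                System.out.println(Bytes.toString(kv.getRow()) + ":" +
                                   Bytes.toString(kv.getQualifier()));
            }
        }
    }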

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/src/main/java/org/trafodion/sql/ResultKeyValueList.java
----------------------------------------------------------------------
diff --git a/core/sql/src/main/java/org/trafodion/sql/ResultKeyValueList.java b/core/sql/src/main/java/org/trafodion/sql/ResultKeyValueList.java
new file mode 100644
index 0000000..6332070
--- /dev/null
+++ b/core/sql/src/main/java/org/trafodion/sql/ResultKeyValueList.java
@@ -0,0 +1,100 @@
+// @@@ START COPYRIGHT @@@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// @@@ END COPYRIGHT @@@
+
+package org.trafodion.sql;
+
+import java.util.List;
+import java.io.*;
+
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Result;
+import java.nio.*;
+
+public class ResultKeyValueList {
+	Result result;
+	List<KeyValue> kvList;
+
+	public ResultKeyValueList(Result result) {
+		super();
+		this.result = result;
+		kvList = result.list();
+	}
+
+	byte[] getRowID() {
+	        if (result == null)
+	                return null;
+	        else
+		        return result.getRow();
+	}
+
+	byte[] getAllKeyValues() {
+        if (kvList == null)
+           return null;
+        int numCols = kvList.size();
+        byte[] rowID = result.getRow();
+        int bufSize = rowID.length;
+        bufSize += (64 * numCols);
+        for (int i=0; i<numCols; i++) {
+          bufSize += kvList.get(i).getLength();
+        }
+        ByteBuffer buf = ByteBuffer.allocate(bufSize);
+        buf.order(ByteOrder.LITTLE_ENDIAN);
+        // move in numCols
+        buf.putInt(numCols);
+        // move in rowID length and rowID
+        buf.putInt(rowID.length);
+        buf.put(rowID);
+        // move in all descriptors
+        for (int i=0; i<numCols; i++) {
+          copyKVs(buf, kvList.get(i));
+        }
+        return buf.array();
+    }
+
+	void copyKVs(ByteBuffer buf, KeyValue kv)
+	{
+		int offset = kv.getOffset();
+		buf.putInt(kv.getLength());
+		buf.putInt(kv.getValueLength());
+		buf.putInt(kv.getValueOffset() - offset);
+		buf.putInt(kv.getQualifierLength());
+		buf.putInt(kv.getQualifierOffset() - offset);
+		buf.putInt(kv.getFamilyLength());
+		buf.putInt(kv.getFamilyOffset() - offset);
+		buf.putLong(kv.getTimestamp());
+		buf.put(kv.getBuffer(), kv.getOffset(), kv.getLength());
+	}
+
+
+	int getSize() {
+	        if (kvList == null)
+	                return 0;
+	        else
+		        return kvList.size();
+	}
+
+	KeyValue getEntry(int i) {
+	        if (kvList == null)
+	                return null;
+	        else
+		        return kvList.get(i);
+	}
+}
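
The buffer returned by getAllKeyValues() is little-endian and is laid out as: the column count, the rowID length and rowID bytes, then one entry per KeyValue holding its total length, value length and offset, qualifier length and offset, family length and offset, timestamp, and the raw KeyValue bytes, with each offset made relative to the start of that KeyValue. A decode sketch of this layout (the reader class below is hypothetical; the layout itself is taken from copyKVs() above):

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;

    class KeyValueBufferSketch {
        static void decode(byte[] serialized) {
            ByteBuffer buf = ByteBuffer.wrap(serialized).order(ByteOrder.LITTLE_ENDIAN);
            int numCols = buf.getInt();
            byte[] rowID = new byte[buf.getInt()];
            buf.get(rowID);
            for (int i = 0; i < numCols; i++) {
                int kvLength     = buf.getInt();   // length of the raw KeyValue bytes
                int valueLength  = buf.getInt();
                int valueOffset  = buf.getInt();   // relative to the start of the KeyValue bytes
                int qualLength   = buf.getInt();
                int qualOffset   = buf.getInt();
                int familyLength = buf.getInt();
                int familyOffset = buf.getInt();
                long timestamp   = buf.getLong();
                byte[] kvBytes   = new byte[kvLength];
                buf.get(kvBytes);                  // the value starts at kvBytes[valueOffset]
            }
        }
    }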

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/src/main/java/org/trafodion/sql/RowToInsert.java
----------------------------------------------------------------------
diff --git a/core/sql/src/main/java/org/trafodion/sql/RowToInsert.java b/core/sql/src/main/java/org/trafodion/sql/RowToInsert.java
new file mode 100644
index 0000000..7bb53c3
--- /dev/null
+++ b/core/sql/src/main/java/org/trafodion/sql/RowToInsert.java
@@ -0,0 +1,44 @@
+// @@@ START COPYRIGHT @@@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// @@@ END COPYRIGHT @@@
+
+package org.trafodion.sql;
+
+import java.util.Vector;
+
+public class RowToInsert extends Vector<RowToInsert.ColToInsert> {
+
+	public class ColToInsert {
+		public byte[] qualName;
+		public byte[] colValue;
+	}
+
+	private static final long serialVersionUID = 5066470006717527862L;
+
+	public void addColumn(byte[] name, byte[] value) {
+		ColToInsert col = new ColToInsert();
+		col.qualName = name;
+		col.colValue = value;
+		add(col);
+	}
+
+}
+
+

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/src/main/java/org/trafodion/sql/RowsToInsert.java
----------------------------------------------------------------------
diff --git a/core/sql/src/main/java/org/trafodion/sql/RowsToInsert.java b/core/sql/src/main/java/org/trafodion/sql/RowsToInsert.java
new file mode 100644
index 0000000..594fe61
--- /dev/null
+++ b/core/sql/src/main/java/org/trafodion/sql/RowsToInsert.java
@@ -0,0 +1,57 @@
+// @@@ START COPYRIGHT @@@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// @@@ END COPYRIGHT @@@
+
+package org.trafodion.sql;
+
+import java.util.Vector;
+
+public class RowsToInsert  extends Vector<RowsToInsert.RowInfo> {
+
+    public class RowInfo {
+	public byte[] rowId;
+	public Vector<RowsToInsert.ColToInsert> columns;
+    }
+
+    public class ColToInsert {
+	public byte[] qualName;
+	public byte[] colValue;
+    }
+
+    private static final long serialVersionUID = 5066470006717527863L;
+
+    public void addRowId(byte[] rowId) {
+	RowInfo rowInfo = new RowInfo();
+	rowInfo.rowId = rowId;
+	rowInfo.columns = new Vector<RowsToInsert.ColToInsert>();
+	rowInfo.columns.clear();
+	add(rowInfo);
+    }
+
+    public void addColumn(byte[] name, byte[] value) {
+	ColToInsert col = new ColToInsert();
+	col.qualName = name;
+	col.colValue = value;
+	if (size() > 0)
+	    get(size()-1).columns.add(col);
+	//	RowInfo.columns.add(col);
+    }
+
+}
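
RowsToInsert accumulates a batch: addRowId() starts a new row and addColumn() attaches a qualifier/value pair to the most recently added row; RowToInsert above is the single-row counterpart. A minimal sketch (the row keys and "cf:q" qualifier spellings are placeholders, and the helper class is made up):

    import org.trafodion.sql.RowsToInsert;

    class RowsToInsertSketch {
        static RowsToInsert twoRows() {
            RowsToInsert rows = new RowsToInsert();
            rows.addRowId("row1".getBytes());
            rows.addColumn("cf:q1".getBytes(), "v1".getBytes());
            rows.addColumn("cf:q2".getBytes(), "v2".getBytes());
            rows.addRowId("row2".getBytes());      // starts a second row
            rows.addColumn("cf:q1".getBytes(), "v3".getBytes());
            return rows;
        }
    }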


[8/9] incubator-trafodion git commit: Merge branch 'master' of github.com:apache/incubator-trafodion into bug/1129

Posted by db...@apache.org.
Merge branch 'master' of github.com:apache/incubator-trafodion into bug/1129

Conflicts:
	core/sqf/sqenvcom.sh


Project: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/commit/d7f1daee
Tree: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/tree/d7f1daee
Diff: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/diff/d7f1daee

Branch: refs/heads/master
Commit: d7f1daee614b8e4c04bd10dd05033c46a61f2d14
Parents: a44823f 469408f
Author: Hans Zeller <ha...@esgyn.com>
Authored: Thu Oct 1 02:21:40 2015 +0000
Committer: Hans Zeller <ha...@esgyn.com>
Committed: Thu Oct 1 02:21:40 2015 +0000

----------------------------------------------------------------------
 .gitignore                                      |   1 +
 Makefile                                        |  25 ++
 core/Makefile                                   |  45 ++-
 core/conn/Makefile                              |   6 +-
 .../unixcli/package/trafodbclnx_install.sh      |   5 -
 core/macros.gmk                                 |   1 +
 core/rest/Makefile                              |   6 +-
 core/rest/genvers                               |  22 +-
 core/rest/src/saveVersion.sh                    |  11 +-
 core/sqf/Makefile                               |  21 +-
 core/sqf/sqenvcom.sh                            |   1 +
 core/sqf/sql/scripts/dcscheck                   | 108 ++++-
 core/sqf/sql/scripts/install_local_hadoop       | 273 ++-----------
 core/sqf/sql/scripts/install_traf_components    | 401 +++++++++++++++++++
 core/sqf/sql/scripts/sqcheck                    |   2 +-
 core/sqf/sql/scripts/sqconfig                   |  24 --
 core/sql/optimizer/NodeMap.cpp                  |   2 +-
 core/sql/regress/hive/EXPECTED009               |  32 +-
 core/sql/sqlcomp/DefaultConstants.h             |   2 +
 core/sql/sqlcomp/nadefaults.cpp                 |   4 +-
 core/sql/ustat/hs_globals.cpp                   |  30 +-
 core/sql/ustat/hs_globals.h                     |   6 +-
 dcs/.gitignore                                  |   1 +
 dcs/Makefile                                    |  52 +++
 dcs/genvers                                     |  24 ++
 dcs/src/saveVersion.sh                          |  15 +-
 env.sh                                          |  24 ++
 wms/src/saveVersion.sh                          |   7 +-
 28 files changed, 790 insertions(+), 361 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/d7f1daee/core/sqf/sqenvcom.sh
----------------------------------------------------------------------
diff --cc core/sqf/sqenvcom.sh
index 2de3983,e0420cc..a9b2d00
--- a/core/sqf/sqenvcom.sh
+++ b/core/sqf/sqenvcom.sh
@@@ -872,6 -872,8 +872,7 @@@ if [[ -n "$SQ_CLASSPATH"   ]]; then SQ_
  SQ_CLASSPATH=${SQ_CLASSPATH}${HBASE_TRXDIR}:\
  ${HBASE_TRXDIR}/${HBASE_TRX_JAR}:\
  $MY_SQROOT/export/lib/trafodion-sql-${TRAFODION_VER}.jar:\
 -$MY_SQROOT/export/lib/trafodion-HBaseAccess-${TRAFODION_VER}.jar:\
+ $MY_SQROOT/export/lib/jdbcT4.jar:\
  $MY_SQROOT/export/lib/jdbcT2.jar
  
  # Check whether the current shell environment changed from a previous execution of this


[2/9] incubator-trafodion git commit: Most of the Trafodion Java source files are built through Maven, using projects DCS, REST, HBase-trx and SQL. A few files remain in the core/sql/executor and core/sql/ustat directories that are built through javac co

Posted by db...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/src/main/java/org/trafodion/sql/SequenceFileReader.java
----------------------------------------------------------------------
diff --git a/core/sql/src/main/java/org/trafodion/sql/SequenceFileReader.java b/core/sql/src/main/java/org/trafodion/sql/SequenceFileReader.java
new file mode 100644
index 0000000..2547af0
--- /dev/null
+++ b/core/sql/src/main/java/org/trafodion/sql/SequenceFileReader.java
@@ -0,0 +1,448 @@
+// @@@ START COPYRIGHT @@@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// @@@ END COPYRIGHT @@@
+
+/**
+ * 
+ */
+package org.trafodion.sql;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Properties;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.util.ReflectionUtils;
+//import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
+//import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+//import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
+//import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption;
+//import org.apache.hadoop.hive.serde2.objectinspector.StructField;
+//import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
+
+
+public class SequenceFileReader {
+
+  Configuration conf = null;           // File system configuration
+  SequenceFile.Reader reader = null;   // The HDFS SequenceFile reader object.
+  Writable key = null;
+  Writable row = null;
+//    LazySimpleSerDe serde = null;
+  boolean isEOF = false;
+  String lastError = null;  
+    
+	/**
+	 * Class Constructor
+	 */
+	SequenceFileReader() {
+    	conf = new Configuration();
+    	conf.set("fs.hdfs.impl","org.apache.hadoop.hdfs.DistributedFileSystem");
+  }
+    
+  String getLastError() {
+      return lastError;
+  }
+    
+	/**
+	 * Initialize the SerDe object. Needed only before calling fetchArrayOfColumns(). 
+	 * @param numColumns The number of columns in the table.
+	 * @param fieldDelim The delimiter between fields.
+	 * @param columns A comma delimited list of column names. 
+	 * @param colTypes A comma delimited list of column types.
+	 * @param nullFormat NULL representation.
+	 */
+//	public void initSerDe(String numColumns, String fieldDelim, String columns, String colTypes, String nullFormat) throws IllegalStateException {
+//		
+//            serde = new LazySimpleSerDe();
+//            Properties tbl = new Properties();
+//            tbl.setProperty("serialization.format", numColumns);
+//            tbl.setProperty("field.delim", fieldDelim);
+//            tbl.setProperty("columns", columns);
+//            tbl.setProperty("columns.types", colTypes);
+//            tbl.setProperty("serialization.null.format", nullFormat);
+//            serde.initialize(conf, tbl);
+//	}
+	
+	/**
+	 * Open the SequenceFile for reading.
+	 * @param path The HDFS path to the file.
+	 */
+	public String open(String path) throws IOException {
+
+        Path filename = new Path(path);
+        	
+        reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(filename));
+	
+        key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
+        row = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
+        
+        return null;
+            
+	}
+	
+	/**
+	 * Get the current position in the file.
+	 * @return The current position or -1 if error.
+	 */
+	public long getPosition() throws IOException {
+
+		lastError = null;
+		if (reader == null) {
+			lastError = "open() was not called first.";
+			return -1;
+		}
+
+		return reader.getPosition();
+	}
+	
+	/**
+	 * Have we reached the end of the file yet?
+	 * @return true if the end of the file has been reached.
+	 */
+	public boolean isEOF() {
+		return isEOF;
+	}
+
+	/**
+	 * Seek to the specified position in the file, and then to the beginning 
+	 * of the record after the next sync mark.
+	 * @param pos Required file position.
+	 * @return null if OK, or error message.
+	 */
+	public String seeknSync(long pos) throws IOException {
+
+		if (reader == null) {
+			return "open() was not called first.";
+		}
+		
+		reader.sync(pos);
+		return null;
+	}
+	
+	/**
+	 * Fetch the next row as an array of columns.
+	 * @return An array of columns.
+	 */
+//	public String[] fetchArrayOfColumns() throws IllegalStateException {
+//		if (reader == null)
+//			throw new IllegalStateException("open() was not called first.");
+//		if (serde == null)
+//			throw new IllegalStateException("initSerDe() was not called first.");
+//		
+//		ArrayList<String> result = new ArrayList<String>();
+//            boolean theresMore = reader.next(key, row);
+//            if (!theresMore)
+//            	return null;
+//            StructObjectInspector soi = (StructObjectInspector) serde.getObjectInspector();
+//            List<? extends StructField> fieldRefs = soi.getAllStructFieldRefs();
+//            Object data = serde.deserialize(row);
+//            
+//            for (StructField fieldRef : fieldRefs) {
+//                ObjectInspector oi = fieldRef.getFieldObjectInspector();
+//                Object obj = soi.getStructFieldData(data, fieldRef);
+//                Object column = convertLazyToJava(obj, oi);
+//                if (column == null)
+//                	result.add(null);
+//                else
+//                	result.add(column.toString());
+//              }
+//    		String[] resultArray = new String[result.size()];
+//    		result.toArray(resultArray);
+//    		return resultArray;
+//	}
+	
+	/**
+	 * Fetch the next row as a single String, that still needs to be parsed.
+	 * @return The next row.
+	 */
+	public String fetchNextRow() throws IOException {
+
+		lastError = null;
+		if (reader == null) {
+			lastError = "open() was not called first.";
+			return null;
+		}
+
+		boolean result = reader.next(key, row);
+		if (result) {
+			return row.toString();
+		}
+		else {
+			return null;
+		}
+	}
+	
+	/**
+	 * @param minSize Minimum size of the result. If the file is compressed, 
+	 * the result may be much larger. The reading starts at the current 
+	 * position in the file, and stops once the limit has been reached.
+	 * @return An array of result rows.
+	 * @throws IllegalStateException
+	 */
+	public String[] fetchArrayOfRows(int minSize) throws IOException {
+
+		lastError = "";
+		if (reader == null) {
+			lastError = "open() was not called first.";
+			return null;
+		}
+
+		ArrayList<String> result = new ArrayList<String>();
+		long initialPos = getPosition();
+		boolean stop = false;
+		do {
+			String newRow = fetchNextRow();
+
+			if (newRow == null && lastError != null)
+				return null;
+
+			boolean reachedEOF = (newRow == null || newRow.isEmpty());
+			if (!reachedEOF)
+				result.add(newRow);
+
+			long bytesRead = getPosition() - initialPos;
+			stop = reachedEOF || (bytesRead > minSize);
+		} while (!stop);
+
+		String[] resultArray = new String[result.size()];
+		result.toArray(resultArray);
+		return resultArray;
+	}
+	
+	/**
+	 * Read a block of data from the file and return it as an array of rows.
+	 * First sync to startOffset, and skip the first row, then keep reading
+	 * Until passing stopOffset and passing the next Sync marker.
+	 * @param startOffset
+	 * @param stopOffset
+	 * @return
+	 * @throws IllegalStateException
+	 * @throws IOException
+	 */
+	public String[] fetchArrayOfRows(int startOffset, int stopOffset)
+                  throws IOException  {
+
+		lastError = "";
+		if (reader == null) {
+			lastError = "open() was not called first.";
+			return null;
+		}
+
+		seeknSync(startOffset);
+
+		ArrayList<String> result = new ArrayList<String>();
+		boolean stop = false;
+		do {
+			long startingPosition = getPosition();
+			String newRow = fetchNextRow();
+
+			if (newRow == null && lastError != null)
+				return null;
+
+			boolean reachedEOF = (newRow == null || newRow.isEmpty());
+
+			boolean reachedSize = (startingPosition > stopOffset);
+			boolean lastSyncSeen = (reachedSize && reader.syncSeen());
+			// Stop reading if there is no more data, or if we have read
+			// enough bytes and have seen the Sync mark.
+			stop = reachedEOF || (reachedSize && lastSyncSeen);
+
+			if (!stop)
+				result.add(newRow);
+
+		} while (!stop);
+
+		String[] resultArray = new String[result.size()];
+		result.toArray(resultArray);
+		return resultArray;
+	}
+	
+	/**
+	 * Fetch the next row from the file.
+	 * @param stopOffset File offset at which to start looking for a sync marker
+	 * @return The next row, or null if we have reached EOF or have passed stopOffset and then
+	 *         the sync marker.
+	 */
+	public String fetchNextRow(long stopOffset) throws IOException {
+
+		lastError = "";
+		if (reader == null) {
+			lastError = "open() was not called first.";
+			return null;
+		}
+
+		long startingPosition = getPosition();
+
+		String newRow = fetchNextRow();
+
+		if (newRow == null && lastError != null)
+			return null;
+
+		if (newRow == null)
+			isEOF = true;
+
+		if (newRow != null && newRow.isEmpty())
+			newRow = null;
+
+		// If we have already read past the stopOffset on a previous row,
+		// and have seen the sync marker, then this row belongs to the next block.
+		if ((startingPosition > stopOffset) && reader.syncSeen())
+			newRow = null;
+
+		return newRow;
+	}
+	
+	/**
+	 * Close the reader.
+	 */
+	public String close() {
+
+		lastError = "";
+		if (reader == null) {
+			lastError = "open() was not called first.";
+			return null;
+		}
+
+		IOUtils.closeStream(reader);
+
+		return null;
+	}
+
+	private boolean ReadnPrint(int start, int end)
+                       throws IOException {
+		System.out.println("Beginning position: " + getPosition());
+		String[] batch;
+		batch = fetchArrayOfRows(start, end);
+		if (batch == null)
+			return false;
+
+		boolean theresMore = (batch.length > 0);
+		for (String newRow : batch)
+			System.out.println(newRow);
+		System.out.println("Ending position: " + getPosition());
+		System.out.println("===> Buffer Split <===");
+		return theresMore;
+	}
+
+	private boolean ReadnPrint2(int start, int end) throws IOException {
+			System.out.println("Read from: " + start + " to: " + end + ".");
+			seeknSync(start);
+			System.out.println("Beginning position: " + getPosition());
+			String newRow = null;
+			do {
+				newRow = fetchNextRow(end);
+				
+				if (newRow != null)
+					System.out.println(newRow);
+			} while (newRow != null); 
+			
+		System.out.println("Ending position: " + getPosition());
+		System.out.println("===> Buffer Split <===");
+		return !isEOF();
+	}
+
+	/**
+	 * @param args
+	 * @throws IOException 
+	 */
+	public static void main(String[] args) throws IOException {
+		
+		SequenceFileReader sfReader = new SequenceFileReader();
+		byte[] fieldDelim = new byte[2];
+		fieldDelim[0] = 1;
+		fieldDelim[1] = 0;
+		//sfReader.initSerDe("19", "\01",
+                //           "p_promo_sk,p_promo_id,p_start_date_sk,p_end_date_sk,p_item_sk,p_cost,p_response_target,p_promo_name,p_channel_dmail,p_channel_email,p_channel_catalog,p_channel_tv,p_channel_radio,p_channel_press,p_channel_event,p_channel_demo,p_channel_details,p_purpose,p_discount_active",
+                //           "int,string,int,int,int,float,int,string,string,string,string,string,string,string,string,string,string,string,string",
+                //          "NULL");
+                          
+		//sfReader.open("hdfs://localhost:9000/user/hive/warehouse/promotion_seq/000000_0");
+		sfReader.seeknSync(300);
+
+		int opType = 4;
+		switch (opType)
+		{
+//		case 1:
+//			boolean theresMoreRows = true;
+//			do {
+//				String[] columns = sfReader.fetchArrayOfColumns();
+//				theresMoreRows = (columns != null);
+//				if (theresMoreRows)
+//				{
+//					for (String col : columns)
+//					{
+//						if (col == null)
+//							System.out.print("<NULL>, ");
+//						else
+//							System.out.print(col + ", ");
+//					}
+//					System.out.println();
+//				}
+//			} while (theresMoreRows); 
+//			break;
+			
+		case 2: // Return row as String
+			String row;
+			do {
+				row = sfReader.fetchNextRow();
+				if (row != null)
+					System.out.println(row);
+			} while (row != null);
+			break;
+			
+		case 3:
+		case 4:
+			int size = 3000;
+			int start = 0;
+			int end = size;
+			boolean theresMore3 = true;
+			
+			while (theresMore3) {
+				if (opType == 3)
+					theresMore3 = sfReader.ReadnPrint(start, end);
+				else
+					theresMore3 = sfReader.ReadnPrint2(start, end);
+				start += size;
+				end += size;				
+			}
+			break;
+
+		}
+		
+		sfReader.close();
+	}
+
+//	private static Object convertLazyToJava(Object o, ObjectInspector oi) {
+//	    Object obj = ObjectInspectorUtils.copyToStandardObject(o, oi, ObjectInspectorCopyOption.JAVA);
+//
+//	    // for now, expose non-primitive as a string
+//	    // TODO: expose non-primitive as a structured object while maintaining JDBC compliance
+//	    if (obj != null && oi.getCategory() != ObjectInspector.Category.PRIMITIVE) {
+//	      obj = obj.toString();
+//	    }
+//
+//	    return obj;
+//	  }
+}
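
A minimal read-loop sketch for SequenceFileReader (illustrative; the constructor is package-private, so this assumes caller code in org.trafodion.sql, and the HDFS path is a placeholder):

    package org.trafodion.sql;

    import java.io.IOException;

    class SequenceFileReaderSketch {
        static void dump(String hdfsPath) throws IOException {
            SequenceFileReader reader = new SequenceFileReader();
            reader.open(hdfsPath);                 // e.g. an hdfs://... path to a sequence file
            String row;
            while ((row = reader.fetchNextRow()) != null)
                System.out.println(row);           // each row is still a delimited string
            reader.close();
        }
    }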

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/src/main/java/org/trafodion/sql/SequenceFileWriter.java
----------------------------------------------------------------------
diff --git a/core/sql/src/main/java/org/trafodion/sql/SequenceFileWriter.java b/core/sql/src/main/java/org/trafodion/sql/SequenceFileWriter.java
new file mode 100644
index 0000000..0950431
--- /dev/null
+++ b/core/sql/src/main/java/org/trafodion/sql/SequenceFileWriter.java
@@ -0,0 +1,467 @@
+// @@@ START COPYRIGHT @@@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// @@@ END COPYRIGHT @@@
+
+/**
+ * 
+ */
+package org.trafodion.sql;
+
+import java.io.IOException;
+import java.io.OutputStream;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.MasterNotRunningException;
+import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.TableSnapshotScanner;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.HFileArchiveUtil;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.ByteWritable;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.compress.CodecPool;
+import org.apache.hadoop.io.compress.CompressionCodec;
+import org.apache.hadoop.io.compress.Compressor;
+import org.apache.hadoop.io.compress.GzipCodec;
+import org.apache.hadoop.io.SequenceFile.CompressionType;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.io.compress.*;
+import org.apache.hadoop.io.compress.zlib.*;
+import org.apache.hadoop.fs.*;
+
+import java.io.*;
+import java.util.List;
+
+import org.apache.hadoop.util.*;
+import org.apache.hadoop.io.*;
+import org.apache.log4j.Logger;
+
+import com.google.common.collect.Lists;
+import com.google.protobuf.ServiceException;
+
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.FsPermission;
+public class SequenceFileWriter {
+
+    static Logger logger = Logger.getLogger(SequenceFileWriter.class.getName());
+    Configuration conf = null;           // File system configuration
+    HBaseAdmin admin = null;
+    
+    SequenceFile.Writer writer = null;
+
+    FSDataOutputStream fsOut = null;
+    OutputStream outStream = null;
+    
+    FileSystem  fs = null;
+    /**
+     * Class Constructor
+     */
+    SequenceFileWriter() throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException, IOException
+    {
+      init("", "");
+      conf.set("fs.hdfs.impl","org.apache.hadoop.hdfs.DistributedFileSystem");
+    }
+    
+	
+    public String open(String path)	{
+      try {
+        Path filename = new Path(path);
+        writer = SequenceFile.createWriter(conf, 
+          	       SequenceFile.Writer.file(filename),
+          	       SequenceFile.Writer.keyClass(BytesWritable.class),
+          	       SequenceFile.Writer.valueClass(Text.class),
+          	       SequenceFile.Writer.compression(CompressionType.NONE));
+        return null;
+      } catch (Exception e) {
+        //e.printStackTrace();
+        return e.getMessage();
+      }	
+    }
+	
+    public String open(String path, int compressionType)	{
+      try {
+        Path filename = new Path(path);
+        
+        CompressionType compType=null;
+        switch (compressionType) {
+          case 0:
+            compType = CompressionType.NONE;
+            break;
+            
+          case 1:
+            compType = CompressionType.RECORD;
+            break;
+            
+          case 2:
+            compType = CompressionType.BLOCK;
+            break;
+          
+          default:
+            return "Wrong argument for compression type.";
+        }
+        
+        writer = SequenceFile.createWriter(conf, 
+          	                               SequenceFile.Writer.file(filename),
+          	                               SequenceFile.Writer.keyClass(BytesWritable.class),
+          	                               SequenceFile.Writer.valueClass(Text.class),
+          	                               SequenceFile.Writer.compression(compType));
+        return null;
+      } catch (Exception e) {
+        //e.printStackTrace();
+        return e.getMessage();
+      }	
+    }
+	
+    public String write(String data) {
+      if (writer == null)
+        return "open() was not called first.";
+
+      try {
+        writer.append(new BytesWritable(), new Text(data.getBytes()));
+        return null;
+      } catch (IOException e) {
+        //e.printStackTrace();
+        return e.getMessage();
+      }
+    }
+	
+    public String close() {
+      if (writer == null)
+        return "open() was not called first.";
+
+      try {
+        writer.close();
+        return null;
+      } catch (Exception e) {
+        //e.printStackTrace();
+        return e.getMessage();
+      }
+    }
+    
+    
+    
+    boolean hdfsCreate(String fname , boolean compress) throws IOException
+    {
+      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsCreate() - started" );
+      Path filePath = null;
+      if (!compress || (compress && fname.endsWith(".gz")))
+        filePath = new Path(fname);
+      else
+        filePath = new Path(fname + ".gz");
+        
+      fs = FileSystem.get(filePath.toUri(),conf);
+      fsOut = fs.create(filePath, true);
+      
+      outStream = fsOut;
+      
+      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsCreate() - file created" );
+      if (compress)
+      {
+        GzipCodec gzipCodec = (GzipCodec) ReflectionUtils.newInstance( GzipCodec.class, conf);
+        Compressor gzipCompressor = CodecPool.getCompressor(gzipCodec);
+        try 
+        {
+          outStream = gzipCodec.createOutputStream(fsOut, gzipCompressor);
+        }
+        catch (IOException e)
+        {
+        if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsCreate() --exception :" + e);
+          throw e;
+        }
+      }
+      
+      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsCreate() - compressed output stream created" );
+      return true;
+    }
+    
+    boolean hdfsWrite(byte[] buff, long len) throws Exception,OutOfMemoryError
+    {
+
+      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsWrite() - started" );
+      try
+      {
+        outStream.write(buff);
+        outStream.flush();
+      }
+      catch (Exception e)
+      {
+        if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsWrite() -- exception: " + e);
+        throw e;
+      }
+      catch (OutOfMemoryError e1)
+      {
+        logger.debug("SequenceFileWriter.hdfsWrite() -- OutOfMemory Error: " + e1);
+        throw e1;
+      }
+      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsWrite() - bytes written and flushed:" + len  );
+      
+      return true;
+    }
+    
+    boolean hdfsClose() throws IOException
+    {
+      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsClose() - started" );
+      try
+      {
+        outStream.close();
+        fsOut.close();
+      }
+      catch (IOException e)
+      {
+        if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsClose() - exception:" + e);
+        throw e;
+      }
+      return true;
+    }
+
+    
+    public boolean hdfsMergeFiles(String srcPathStr, String dstPathStr) throws Exception
+    {
+      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsMergeFiles() - start");
+      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsMergeFiles() - source Path: " + srcPathStr + 
+                                               ", destination File:" + dstPathStr );
+      try 
+      {
+        Path srcPath = new Path(srcPathStr );
+        srcPath = srcPath.makeQualified(srcPath.toUri(), null);
+        FileSystem srcFs = FileSystem.get(srcPath.toUri(),conf);
+  
+        Path dstPath = new Path(dstPathStr);
+        dstPath = dstPath.makeQualified(dstPath.toUri(), null);
+        FileSystem dstFs = FileSystem.get(dstPath.toUri(),conf);
+        
+        if (dstFs.exists(dstPath))
+        {
+          if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsMergeFiles() - destination files exists" );
+          // for this prototype we just delete the file-- will change in next code drops
+          dstFs.delete(dstPath, false);
+           // The caller should already have checked existence of file-- throw exception 
+           //throw new FileAlreadyExistsException(dstPath.toString());
+        }
+        
+        Path tmpSrcPath = new Path(srcPath, "tmp");
+
+        FileSystem.mkdirs(srcFs, tmpSrcPath,srcFs.getFileStatus(srcPath).getPermission());
+        logger.debug("SequenceFileWriter.hdfsMergeFiles() - tmp folder created." );
+        Path[] files = FileUtil.stat2Paths(srcFs.listStatus(srcPath));
+        for (Path f : files)
+        {
+          srcFs.rename(f, tmpSrcPath);
+        }
+        // copyMerge and use false for the delete option since it removes the whole directory
+        if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsMergeFiles() - copyMerge" );
+        FileUtil.copyMerge(srcFs, tmpSrcPath, dstFs, dstPath, false, conf, null);
+        
+        if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsMergeFiles() - delete intermediate files" );
+        srcFs.delete(tmpSrcPath, true);
+      }
+      catch (IOException e)
+      {
+        if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsMergeFiles() --exception:" + e);
+        throw e;
+      }
+      
+      
+      return true;
+    }
+    public boolean hdfsCleanUnloadPath(String uldPathStr
+                         /*, boolean checkExistence, String mergeFileStr*/) throws Exception
+    {
+      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsCleanUnloadPath() - start");
+      logger.debug("SequenceFileWriter.hdfsCleanUnloadPath() - unload Path: " + uldPathStr );
+      
+      try 
+      {
+        Path uldPath = new Path(uldPathStr);
+        uldPath = uldPath.makeQualified(uldPath.toUri(), null);
+        FileSystem srcFs = FileSystem.get(uldPath.toUri(),conf);
+        if (!srcFs.exists(uldPath))
+        {
+          //unload location does not exist. hdfscreate will create it later
+          //nothing to do 
+          logger.debug("SequenceFileWriter.hdfsCleanUnloadPath() -- unload location does not exist." );
+          return true;
+        }
+
+        Path[] files = FileUtil.stat2Paths(srcFs.listStatus(uldPath));
+        logger.debug("SequenceFileWriter.hdfsCleanUnloadPath() - delete files" );
+        for (Path f : files){
+          srcFs.delete(f, false);
+        }
+      }
+      catch (IOException e)
+      {
+        logger.debug("SequenceFileWriter.hdfsCleanUnloadPath() -exception:" + e);
+        throw e;
+      }
+      
+      return true;
+    }
+
+  public boolean hdfsExists(String filePathStr) throws Exception 
+  {
+    logger.debug("SequenceFileWriter.hdfsExists() - start");
+    logger.debug("SequenceFileWriter.hdfsExists() - Path: " + filePathStr);
+
+    try 
+    {
+        //check existence of the merge Path
+       Path filePath = new Path(filePathStr );
+       filePath = filePath.makeQualified(filePath.toUri(), null);
+       FileSystem mergeFs = FileSystem.get(filePath.toUri(),conf);
+       if (mergeFs.exists(filePath))
+       {
+         logger.debug("SequenceFileWriter.hdfsExists() - Path: "
+                      + filePath + " exists");
+         return true;
+       }
+
+    } catch (IOException e) {
+      logger.debug("SequenceFileWriter.hdfsExists() -exception:" + e);
+      throw e;
+    }
+    return false;
+  }
+
+  public boolean hdfsDeletePath(String pathStr) throws Exception
+  {
+    if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsDeletePath() - start - Path: " + pathStr);
+    try 
+    {
+      Path delPath = new Path(pathStr );
+      delPath = delPath.makeQualified(delPath.toUri(), null);
+      FileSystem fs = FileSystem.get(delPath.toUri(),conf);
+      fs.delete(delPath, true);
+    }
+    catch (IOException e)
+    {
+      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsDeletePath() --exception:" + e);
+      throw e;
+    }
+    
+    return true;
+  }
+
+  private boolean init(String zkServers, String zkPort) 
+      throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException, IOException
+  {
+    logger.debug("SequenceFileWriter.init(" + zkServers + ", " + zkPort + ") called.");
+    if (conf != null)		
+       return true;		
+    conf = HBaseConfiguration.create();		
+    if (zkServers.length() > 0)		
+      conf.set("hbase.zookeeper.quorum", zkServers);		
+    if (zkPort.length() > 0)		
+      conf.set("hbase.zookeeper.property.clientPort", zkPort);		
+    HBaseAdmin.checkHBaseAvailable(conf);
+    return true;
+  }
+  
+  public boolean createSnapshot( String tableName, String snapshotName)
+      throws MasterNotRunningException, IOException, SnapshotCreationException, 
+             InterruptedException, ZooKeeperConnectionException, ServiceException, Exception
+  {
+    try 
+    {
+      if (admin == null)
+        admin = new HBaseAdmin(conf);
+      admin.snapshot(snapshotName, tableName);
+      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.createSnapshot() - Snapshot created: " + snapshotName);
+    }
+    catch (Exception e)
+    {
+      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.createSnapshot() - Exception: " + e);
+      throw e;
+    }
+    return true;
+  }
+  public boolean verifySnapshot( String tableName, String snapshotName)
+      throws MasterNotRunningException, IOException, SnapshotCreationException, 
+             InterruptedException, ZooKeeperConnectionException, ServiceException, Exception
+  {
+    try 
+    {
+      if (admin == null)
+        admin = new HBaseAdmin(conf);
+      List<SnapshotDescription>  lstSnaps = admin.listSnapshots();
+
+      for (SnapshotDescription snpd : lstSnaps) 
+      {
+        if (snpd.getName().compareTo(snapshotName) == 0 && 
+            snpd.getTable().compareTo(tableName) == 0)
+        {
+          if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.verifySnapshot() - Snapshot verified: " + snapshotName);
+          return true;
+        }
+      }
+    }
+    catch (Exception e)
+    {
+      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.verifySnapshot() - Exception: " + e);
+      throw e;
+    }
+    return false;
+  }
+ 
+  public boolean deleteSnapshot( String snapshotName)
+      throws MasterNotRunningException, IOException, SnapshotCreationException, 
+             InterruptedException, ZooKeeperConnectionException, ServiceException, Exception
+  {
+    try 
+    {
+      if (admin == null)
+        admin = new HBaseAdmin(conf);
+      admin.deleteSnapshot(snapshotName);
+      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.deleteSnapshot() - Snapshot deleted: " + snapshotName);
+    }
+    catch (Exception e)
+    {
+      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.deleteSnapshot() - Exception: " + e);
+      throw e;
+    }
+
+    return true;
+  }
+
+  public boolean release()  throws IOException
+  {
+    if (admin != null)
+    {
+      admin.close();
+      admin = null;
+    }
+    return true;
+  }
+}
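
A minimal write-path sketch for SequenceFileWriter (illustrative; the constructor is package-private and connects to HBase/ZooKeeper through init(), so this assumes caller code in org.trafodion.sql, a reachable cluster, and a placeholder HDFS path):

    package org.trafodion.sql;

    class SequenceFileWriterSketch {
        static String writeOneRow(String hdfsPath) throws Exception {
            SequenceFileWriter w = new SequenceFileWriter();
            String err = w.open(hdfsPath, 2);      // 2 selects CompressionType.BLOCK, per open() above
            if (err != null)
                return err;
            err = w.write("a|b|c");                // one delimited row per call
            if (err != null)
                return err;
            return w.close();                      // null means success
        }
    }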

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/src/main/java/org/trafodion/sql/StringArrayList.java
----------------------------------------------------------------------
diff --git a/core/sql/src/main/java/org/trafodion/sql/StringArrayList.java b/core/sql/src/main/java/org/trafodion/sql/StringArrayList.java
new file mode 100644
index 0000000..6a2672c
--- /dev/null
+++ b/core/sql/src/main/java/org/trafodion/sql/StringArrayList.java
@@ -0,0 +1,47 @@
+// @@@ START COPYRIGHT @@@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// @@@ END COPYRIGHT @@@
+
+package org.trafodion.sql;
+
+import java.util.ArrayList;
+
+public class StringArrayList extends ArrayList<String> {
+
+	private static final long serialVersionUID = -3557219338406352735L;
+
+	void addElement(String st) {
+	        add(st);
+	}
+
+	String getElement(int i) {
+	    if (size() == 0)
+		return null;
+	    else if (i < size())
+		return get(i);
+	    else
+		return null;
+	}
+
+        int getSize() {
+           return size();
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/src/main/java/org/trafodion/sql/ustat/ChgAutoList.java
----------------------------------------------------------------------
diff --git a/core/sql/src/main/java/org/trafodion/sql/ustat/ChgAutoList.java b/core/sql/src/main/java/org/trafodion/sql/ustat/ChgAutoList.java
new file mode 100644
index 0000000..21a058b
--- /dev/null
+++ b/core/sql/src/main/java/org/trafodion/sql/ustat/ChgAutoList.java
@@ -0,0 +1,426 @@
+// @@@ START COPYRIGHT @@@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// @@@ END COPYRIGHT @@@
+//
+// This is the Java stored procedure for adding and deleting from the USTAT_AUTO_TABLES.
+// Compile this as: javac ChgAutoList.java;  
+//  (Note that the class must be the same name as the file).
+//
+package org.trafodion.sql.ustat;
+
+import java.sql.*;
+import java.io.*;
+
+public class ChgAutoList {
+ 
+  public static void chg(String  operation,     // Input
+                         String  schema,        // Input
+                         String  table,         // Input
+                         String[] result)       // Output
+      throws SQLException
+  {
+    String tableCat  = "NEO";
+    String autoCat   = "MANAGEABILITY";
+    String autoSch   = "HP_USTAT";
+    String autoTable = autoCat + "." + autoSch + ".USTAT_AUTO_TABLES";
+
+    operation = operation.toUpperCase().trim();
+    schema    = schema.trim();
+    table     = table.trim();
+    if (schema.length() > 0)
+      if (schema.charAt(0) != '"') schema = schema.toUpperCase();
+      else                         schema = internalFormat(schema);
+    if (table.length() > 0)    
+      if (table.charAt(0)  != '"') table  = table.toUpperCase();
+      else                         table  = internalFormat(table);
+
+    String intSchInStrLit = schema;
+    String intTblInStrLit = table;
+    intSchInStrLit = "_UCS2'" + intSchInStrLit.replaceAll("'", "''") + "'";
+    intTblInStrLit = "_UCS2'" + intTblInStrLit.replaceAll("'", "''") + "'";
+
+    String extSchName = schema;
+    String extTblName = table;
+    extSchName = "\"" + extSchName.replaceAll("\"", "\"\"") + "\"";
+    extTblName = "\"" + extTblName.replaceAll("\"", "\"\"") + "\"";
+    String extSchDotTbl = extSchName+"."+extTblName;
+
+    String addStr  = "INSERT";
+    String inclStr = "INCLUDE"; // This is a synonym for INSERT.
+    String exclStr = "EXCLUDE";
+    String delStr  = "DELETE";
+    Connection conn   = DriverManager.getConnection("jdbc:default:connection");
+
+    // Check for valid schema and table names.
+    if      (schema.length() > 128) result[0]="Schema name too long. No changes made.";
+    else if (table.length()  > 128) result[0]="Table name too long. No changes made.";
+    else if (( schema.equals("*") && !table.equals("*")) ||
+             (!schema.equals("*") &&  table.equals("*")))
+      result[0]="You must specify '*' for both schema and table. No changes made.";
+    else if (schema.equals("") || table.equals(""))
+      result[0]="\"" + schema + "\".\"" + table + 
+                "\" is an invalid name. No changes made.";
+    else try {
+      if(operation.equals(addStr) || operation.equals(inclStr) || operation.equals(exclStr))
+      {          
+        // Perform INSERT, INCLUDE, and EXCLUDE command.
+        if (!operation.equals(exclStr) && schema.equals("*") && table.equals("*")) 
+        {
+          // Perform INSERT or INCLUDE of all tables ('*'.'*' for schema and table).          
+          try 
+          {
+
+            String os = System.getProperty("os.name").toLowerCase();
+            String sys = "";
+  
+            if ( os.indexOf("linux") >=0 ) {
+              sys = "NSK"; 
+            } 
+   
+            else { // assume NSK
+              // Obtain system name, which is needed for query to get all tables.
+              String shellCmd ="/bin/gtacl -c SYSINFO";
+              Process p = Runtime.getRuntime().exec(shellCmd);
+              BufferedReader stdInput = new BufferedReader(new 
+                                        InputStreamReader(p.getInputStream()));
+              String s;
+              int pos;
+              while ((s = stdInput.readLine()) != null)
+                if ((pos = s.indexOf("System name")) >= 0)
+                {
+                  pos = s.indexOf("\\"); // Find beginning of system name.
+                  sys = s.substring(pos+1);
+                }
+            }
+
+            PreparedStatement findSchemaVersion, insStmt, delStmt, cntStmt;
+            // Obtain a list of all schema versions >= 2300 present on system.
+            String verCmd="SELECT DISTINCT S.SCHEMA_VERSION " +
+                       " FROM HP_SYSTEM_CATALOG.SYSTEM_SCHEMA.CATSYS C, " +
+                       "     HP_SYSTEM_CATALOG.SYSTEM_SCHEMA.SCHEMATA S " +
+                       " WHERE C.CAT_UID=S.CAT_UID AND " +
+                       "      C.CAT_NAME=_UCS2'NEO' AND " +
+                       "      S.SCHEMA_VERSION >= 2300";
+            findSchemaVersion = conn.prepareStatement(verCmd);
+            ResultSet rs = findSchemaVersion.executeQuery();
+
+            String ver, cmd;
+            int autoListCnt=0;
+            // Loop through all schema versions >= 2300:
+            while (rs.next()) // Advance to next row in result set
+            {
+              ver=""+rs.getInt(1); // Get current row (version) from result set.
+
+              String cqdCmd="CONTROL QUERY DEFAULT BLOCK_TO_PREVENT_HALLOWEEN 'ON'";
+              PreparedStatement cqdStmt = conn.prepareStatement(cqdCmd);
+              cqdStmt.executeUpdate();
+
+              // Insert all tables and MVs in NEO catalog that don't already exist in list.
+              cmd="INSERT INTO " + autoTable +
+                " SELECT C.CAT_NAME, S.SCHEMA_NAME, O.OBJECT_NAME, " +
+                "       TIMESTAMP '0001-01-01 00:00:00', " +
+                "       TIMESTAMP '0001-01-01 00:00:00', " +
+                "       0, _UCS2'', _ISO88591'SYSTEM' " +
+                " FROM HP_SYSTEM_CATALOG.SYSTEM_SCHEMA.CATSYS C, " +
+                "     HP_SYSTEM_CATALOG.SYSTEM_SCHEMA.SCHEMATA S, " +
+                "     "+tableCat+".HP_DEFINITION_SCHEMA.OBJECTS O " +
+                " WHERE C.CAT_UID=S.CAT_UID AND " +
+                "      S.SCHEMA_UID=O.SCHEMA_UID AND " +
+                "     (O.OBJECT_TYPE=_ISO88591'BT' OR O.OBJECT_TYPE=_ISO88591'MV') AND " +
+                "      O.OBJECT_NAME_SPACE=_ISO88591'TA' AND " +
+                "      C.CAT_NAME=_UCS2'NEO' AND " +
+                "      S.SCHEMA_NAME<>_UCS2'HP_DEFINITION_SCHEMA' AND " +
+                "      S.SCHEMA_NAME<>_UCS2'PUBLIC_ACCESS_SCHEMA' AND " +
+                "      S.SCHEMA_NAME NOT LIKE _UCS2'HP\\_%' ESCAPE _UCS2'\\' AND " +
+                "      S.SCHEMA_NAME NOT LIKE _UCS2'VOLATILE\\_SCHEMA\\_%' ESCAPE _UCS2'\\' AND " +
+                "      O.OBJECT_NAME<>_UCS2'HISTOGRAMS' AND " +
+                "      O.OBJECT_NAME<>_UCS2'HISTOGRAM_INTERVALS' AND " +
+                "      O.OBJECT_NAME<>_UCS2'HISTOGRAMS_FREQ_VALS' AND " +
+                "      O.OBJECT_NAME<>_UCS2'MVS_TABLE_INFO_UMD' AND " +
+                "      O.OBJECT_NAME<>_UCS2'MVS_UMD' AND " +
+                "      O.OBJECT_NAME<>_UCS2'MVS_USED_UMD' AND " +
+                "      (C.CAT_NAME, S.SCHEMA_NAME, O.OBJECT_NAME) NOT IN " +
+                "        (SELECT CAT_NAME, SCH_NAME, TBL_NAME FROM " + autoTable + ")";
+              insStmt = conn.prepareStatement(cmd);
+              insStmt.executeUpdate();
+
+              // Delete all tables and MVs in list that no longer exist in NEO catalog.
+              cmd="DELETE FROM " + autoTable + " WHERE ADDED_BY<>_ISO88591'EXCLUD' AND " +
+                " (CAT_NAME, SCH_NAME, TBL_NAME) NOT IN " +
+                " (SELECT C.CAT_NAME, S.SCHEMA_NAME, O.OBJECT_NAME " +
+                " FROM HP_SYSTEM_CATALOG.SYSTEM_SCHEMA.CATSYS C, " +
+                "     HP_SYSTEM_CATALOG.SYSTEM_SCHEMA.SCHEMATA S, " +
+                "     "+tableCat+".HP_DEFINITION_SCHEMA.OBJECTS O " +
+                " WHERE C.CAT_UID=S.CAT_UID AND " +
+                "      S.SCHEMA_UID=O.SCHEMA_UID AND " +
+                "     (O.OBJECT_TYPE=_ISO88591'BT' OR O.OBJECT_TYPE=_ISO88591'MV') AND " +
+                "      O.OBJECT_NAME_SPACE=_ISO88591'TA' AND " +
+                "      C.CAT_NAME=_UCS2'NEO' AND " +
+                "      S.SCHEMA_NAME<>_UCS2'HP_DEFINITION_SCHEMA' AND " +
+                "      S.SCHEMA_NAME<>_UCS2'PUBLIC_ACCESS_SCHEMA' AND " +
+                "      S.SCHEMA_NAME NOT LIKE _UCS2'HP\\_%' ESCAPE _UCS2'\\' AND " +
+                "      S.SCHEMA_NAME NOT LIKE _UCS2'VOLATILE\\_SCHEMA\\_%' ESCAPE _UCS2'\\' AND " +
+                "      O.OBJECT_NAME<>_UCS2'HISTOGRAMS' AND " +
+                "      O.OBJECT_NAME<>_UCS2'HISTOGRAM_INTERVALS' AND " +
+                "      O.OBJECT_NAME<>_UCS2'HISTOGRAMS_FREQ_VALS' AND " +
+                "      O.OBJECT_NAME<>_UCS2'MVS_TABLE_INFO_UMD' AND " +
+                "      O.OBJECT_NAME<>_UCS2'MVS_UMD' AND " +
+                "      O.OBJECT_NAME<>_UCS2'MVS_USED_UMD')";
+              delStmt = conn.prepareStatement(cmd);
+              delStmt.executeUpdate();                                                                     
+            }
+            // Get current count of tables that will be automated.
+            cmd="SELECT COUNT(*) FROM " + autoTable + " WHERE ADDED_BY<>_ISO88591'EXCLUD'";
+            cntStmt = conn.prepareStatement(cmd);
+            rs = cntStmt.executeQuery();
+            rs.next(); 
+            autoListCnt = rs.getInt(1);
+            
+            result[0]="INSERTed " + autoListCnt + " table names (all) into list.";
+            rs.close();
+          }
+          catch(IOException err)
+          {
+            // Shell failure message.
+            result[0] = "Unable to " + operation + ".  Error: " + err.getMessage().trim();
+            if (result[0].charAt(result[0].length()-1) == ']') // Remove date/time.
+              result[0]=result[0].substring(0,result[0].length()-21);
+          }   
+        }
+        else if (operation.equals(exclStr) && 
+                 schema.equals("*") && table.equals("*")) 
+          result[0] = "EXCLUDE failed. Specifying '*', '*' not allowed.";
+        else
+        {
+          // User has requested to INSERT, INCLUDE, or EXCLUDE a specific table.
+          String addedBy="USER";
+          String action=operation+"d";
+          if (operation.equals(addStr))  action=operation+"ed";
+          if (operation.equals(exclStr))
+          {
+            addedBy="EXCLUD";
+            // For EXCLUDE, always delete the blank entry created when all entries are deleted.
+            // (See DELETE below.)  In addition, if EXCLUDing, and an entry already exists for
+            // this schema and table with ADDED_BY='SYSTEM', remove so it can be EXCLUDEd.
+            PreparedStatement delStmt1 =
+              conn.prepareStatement("DELETE FROM " + autoTable + " WHERE CAT_NAME=_UCS2''");
+            // Do not check for errors.
+            delStmt1.executeUpdate();
+            PreparedStatement delStmt2 =
+              conn.prepareStatement("DELETE FROM " + autoTable + " WHERE CAT_NAME=_UCS2'NEO' " +
+                                    "AND SCH_NAME=" + intSchInStrLit + " AND TBL_NAME=" + intTblInStrLit +
+                                    " AND ADDED_BY=_ISO88591'SYSTEM'");
+            // Do not check for errors.
+            delStmt2.executeUpdate();
+          }
+          
+          PreparedStatement insStmt =
+            conn.prepareStatement("INSERT INTO " + autoTable + " VALUES (_UCS2'NEO'," +
+                                  " ?, ?, TIMESTAMP '0001-01-01 00:00:00'," +
+                                  " TIMESTAMP '0001-01-01 00:00:00', 0, _UCS2'', _ISO88591'" +
+                                  addedBy + "')");
+          insStmt.setString(1, schema);  // Set first  argument in statement (1st '?').
+          insStmt.setString(2, table);   // Set second argument in statement (2nd '?').
+          if (insStmt.executeUpdate() == 1) 
+            result[0]="Table name "+extSchDotTbl+" " + action +".";
+
+        }
+      }
+      else if(operation.equals(delStr)) 
+      {
+        // Perform DELETE command.
+        if (schema.equals("*") && table.equals("*")) 
+        {
+          // If the user has specified '*'.'*' for schema and table, remove all 
+          // entries in list, then add an empty entry.
+          PreparedStatement delStmt = conn.prepareStatement("DELETE FROM " + autoTable);
+          delStmt.executeUpdate();
+          result[0]="All entries DELETEd.  Automation disabled.";
+          
+          // Add the empty entry, which is needed so that USAS.sh does not later insert all
+          // existing tables.  It would do so if the USTAT_AUTO_TABLES table were empty.
+          PreparedStatement insStmt =
+            conn.prepareStatement("INSERT INTO " + autoTable + 
+								  " VALUES (_UCS2'', _UCS2'', _UCS2'', " +
+                                  " TIMESTAMP '0001-01-01 00:00:00'," +
+                                  " TIMESTAMP '0001-01-01 00:00:00', 0, _UCS2'', _ISO88591'USER')");
+          insStmt.executeUpdate();
+
+          try {
+            // Remove USTAT_AUTOMATION_INTERVAL entry from SYSTEM_DEFAULTS tables.
+
+            String os = System.getProperty("os.name").toLowerCase();
+  
+	    if ( os.indexOf("linux") >=0 ) {
+                PreparedStatement delStmt2 =
+                   conn.prepareStatement(
+        "DELETE FROM HP_SYSTEM_CATALOG.SYSTEM_DEFAULTS_SCHEMA.SYSTEM_DEFAULTS " +
+        "WHERE ATTRIBUTE = 'USTAT_AUTOMATION_INTERVAL'");
+               // Do not check for errors.
+               delStmt2.executeUpdate();
+  
+               // Now remove AUTO_CQDS_SET file from the cluster.
+               String shellCmd;
+
+               String sqroot = System.getenv("MY_SQROOT");
+
+               shellCmd = "rm " + sqroot + "/export/lib/mx_ustat/autodir/USTAT_CQDS_SET";
+               Process p = Runtime.getRuntime().exec(shellCmd);
+
+               shellCmd = "rm " + sqroot + "/export/lib/mx_ustat/autoprev/USTAT_CQDS_SET";
+               p = Runtime.getRuntime().exec(shellCmd);
+
+            } else {
+  
+              // assume NSK
+              // Obtain system name.
+              String sys="";
+              String shellCmd = "/bin/gtacl -c SYSINFO";
+              Process p = Runtime.getRuntime().exec(shellCmd);
+              BufferedReader stdInput = new BufferedReader(new 
+                InputStreamReader(p.getInputStream()));
+              String s;
+              int pos;
+              while ((s = stdInput.readLine()) != null)
+                if ((pos = s.indexOf("System name")) >= 0)
+                {
+                  pos = s.indexOf("\\"); // Find beginning of system name.
+                  sys = s.substring(pos+1);
+                }
+  
+              // Obtain all segment names.  The grep here is really to avoid getting names
+              // of systems that are on expand which are not segments.
+              String sysprefix=sys.substring(0,3).toLowerCase();
+              shellCmd = "ls /E";
+              p = Runtime.getRuntime().exec(shellCmd);
+              stdInput = new BufferedReader(new InputStreamReader(p.getInputStream()));
+              // For each segment, remove USTAT_AUTOMATION_INTERVAL from system defaults table.
+              // (make sure the segment name returned starts with 'sysprefix').
+              while ((s = stdInput.readLine()) != null && s.indexOf(sysprefix) == 0)
+              {
+                PreparedStatement delStmt2 =
+                  conn.prepareStatement("DELETE FROM HP_SYSTEM_CATALOG" + 
+                                        ".SYSTEM_DEFAULTS_SCHEMA.SYSTEM_DEFAULTS " +
+                                        "WHERE ATTRIBUTE = 'USTAT_AUTOMATION_INTERVAL'");
+                // Do not check for errors.
+                delStmt2.executeUpdate();
+              }
+  
+              // Now remove AUTO_CQDS_SET file from primary segment.
+              shellCmd = "rm /E/" + sysprefix + "0101/usr/tandem/mx_ustat/autodir/USTAT_CQDS_SET";
+              p = Runtime.getRuntime().exec(shellCmd);
+              shellCmd = "rm /E/" + sysprefix + "0101/usr/tandem/mx_ustat/autoprev/USTAT_CQDS_SET";
+              p = Runtime.getRuntime().exec(shellCmd);
+            }
+
+          }
+          catch(IOException err)
+          {
+            // Shell failure message.
+            result[0] = "Unable to remove USTAT_AUTOMATION_INTERVAL from SYSTEM_DEFAULTS " + 
+                        "tables.  You must do this manually.";
+            if (result[0].charAt(result[0].length()-1) == ']') // Remove date/time.
+              result[0]=result[0].substring(0,result[0].length()-21);
+          }   
+        }
+        else 
+        {          
+          // User has requested to delete a specific table.
+          // First see if the table is 'EXCLUD' and can be deleted.  Note that deletion
+          // of the last 'USER' added table results in a blank 'USER' entry being added.
+          // This is not done for deletion of 'EXCLUD'ed tables.
+          PreparedStatement delete1 =
+            conn.prepareStatement("DELETE FROM " + autoTable + 
+            " WHERE SCH_NAME = ? AND TBL_NAME = ? AND ADDED_BY=_ISO88591'EXCLUD'");
+            delete1.setString(1, schema);  // Set first  argument in statement (1st '?').
+            delete1.setString(2, table);   // Set second argument in statement (2nd '?').
+          if (delete1.executeUpdate() == 0) 
+          {
+            // Failed to delete (0 rows deleted).  Either the table did not have 
+            // ADDED_BY='EXCLUD' or entry does not exist.  Try to delete for any ADDED_BY.
+            PreparedStatement delete2 =
+              conn.prepareStatement("DELETE FROM " + autoTable +
+              " WHERE SCH_NAME = ? AND TBL_NAME = ?");
+              delete2.setString(1, schema);  // Set first  argument in statement (1st '?').
+              delete2.setString(2, table);   // Set second argument in statement (2nd '?').
+            if (delete2.executeUpdate() == 0) 
+              result[0]="Table name  "+extSchDotTbl+" not found, not DELETEd.";
+            else
+            { 
+              // A 'SYSTEM' or 'USER' table DELETEd.
+              result[0]="Table name "+extSchDotTbl+" DELETEd.";
+
+              // Add the empty entry, if there are no rows with the ADDED_BY field set to
+              // 'USER'.  This keeps USAS.sh from inserting all existing tables later
+              // on.  It would do so if all 'USER' entries from USTAT_AUTO_TABLES table had
+              // been deleted.
+              PreparedStatement FindUserEnteredTables =
+                conn.prepareStatement("SELECT COUNT(*) FROM " + autoTable +
+                " WHERE ADDED_BY = _ISO88591'USER'" + 
+                " FOR READ UNCOMMITTED ACCESS");
+              ResultSet rs = FindUserEnteredTables.executeQuery();
+              rs.next();
+              if (rs.getInt(1) == 0)
+              {
+                PreparedStatement insStmt =
+                  conn.prepareStatement("INSERT INTO " + autoTable + 
+				  " VALUES (_UCS2'', _UCS2'', _UCS2'', " +
+                  " TIMESTAMP '0001-01-01 00:00:00'," +
+                  " TIMESTAMP '0001-01-01 00:00:00', 0, _UCS2'', _ISO88591'USER')");
+                insStmt.executeUpdate();
+              }
+              rs.close();
+            }
+          }
+          // 'EXCLUD' table was successfully DELETEd, set result string.
+          else result[0]="Table name "+extSchDotTbl+" DELETEd.";
+        }
+      }
+      else 
+      {
+        result[0] = operation + " is not a valid operation.";
+      }
+    } 
+    catch(SQLException err)
+    {
+      result[0] = err.getMessage().trim(); // Issue SQL error.
+      if (result[0].charAt(result[0].length()-1) == ']') // Remove date/time.
+        result[0]=result[0].substring(0,result[0].length()-21);
+    } 
+    finally 
+    {
+      conn.close();
+    }
+    if (result[0].length() > 80) result[0]=result[0].substring(0,79);
+  }
+
+  public static String internalFormat(String name)
+  {
+    // Remove enclosing quotes
+    name=name.substring(1, name.length()-1);
+
+    // Change all occurrences of "" to ".
+    int index=-1;
+    while((index=name.indexOf("\"\"", index+1)) != -1)
+      name=name.substring(0,index+1)+name.substring(index+2);  
+
+    return name;
+  }
+}
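
The chg() procedure above never passes delimited identifiers to SQL directly; it keeps an internal form (enclosing quotes stripped, doubled quotes collapsed) for the USTAT_AUTO_TABLES columns and an external form (re-quoted, embedded quotes doubled) for messages. A small stand-alone sketch of that round trip follows; it reuses the internalFormat() logic shown above plus a hypothetical externalFormat() helper that mirrors the inline extSchName/extTblName code, and is illustrative only, not part of this commit:

    // Illustrative sketch of the identifier quoting used by ChgAutoList.chg().
    public class NameFormatDemo {
        // Same logic as ChgAutoList.internalFormat(): strip the enclosing
        // quotes, then collapse each doubled quote ("") to a single quote.
        static String internalFormat(String name) {
            name = name.substring(1, name.length() - 1);
            int index = -1;
            while ((index = name.indexOf("\"\"", index + 1)) != -1)
                name = name.substring(0, index + 1) + name.substring(index + 2);
            return name;
        }

        // Hypothetical helper mirroring the extSchName/extTblName code above:
        // double any embedded quote, then wrap the whole name in quotes.
        static String externalFormat(String name) {
            return "\"" + name.replaceAll("\"", "\"\"") + "\"";
        }

        public static void main(String[] args) {
            String delimited = "\"My\"\"Table\"";          // as typed by the user
            String internal  = internalFormat(delimited);   // My"Table
            System.out.println(internal + " -> " + externalFormat(internal));
        }
    }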

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/src/main/java/org/trafodion/sql/ustat/UstatUtil.java
----------------------------------------------------------------------
diff --git a/core/sql/src/main/java/org/trafodion/sql/ustat/UstatUtil.java b/core/sql/src/main/java/org/trafodion/sql/ustat/UstatUtil.java
new file mode 100644
index 0000000..12f1f4c
--- /dev/null
+++ b/core/sql/src/main/java/org/trafodion/sql/ustat/UstatUtil.java
@@ -0,0 +1,442 @@
+// @@@ START COPYRIGHT @@@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// @@@ END COPYRIGHT @@@
+
+package org.trafodion.sql.ustat;
+
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.OutputStream;
+import java.io.OutputStreamWriter;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.util.Properties;
+import java.util.Enumeration;
+
+
+public class UstatUtil extends Thread
+{
+
+   static BufferedWriter bw=null;
+   static boolean doneflag=false;
+   static StringBuffer outputStr=null;
+   static StringBuffer errStr=null;
+   static boolean nextCommand=false;
+   static boolean errorFlag=false;
+   static boolean statusFlag=false;
+   static Runtime rt=Runtime.getRuntime();
+   static boolean srunStatus=false;
+   final static String scriptIndexFile=".scriptIndex";  // script Index property file which contains the list of scripts that can be executed
+   static String lineSeperator=System.getProperty("line.separator");
+
+
+   public static void runStatsProfile(String arguments,String[] output) throws IOException
+   {
+      Process p;
+      String command=" ";
+
+      String os = System.getProperty("os.name").toLowerCase();
+
+      String cmd_path;
+
+      if ( os.indexOf("linux") >=0 ) {
+         cmd_path = "sh " + System.getenv("MY_SQROOT") + "/export/lib";
+      } else { // assume NSK
+         cmd_path = "/usr/tandem";
+      }
+
+      String cmd= cmd_path + "/mx_ustat/stats_profile ";
+      cmd=cmd+arguments;
+
+      p = rt.exec(cmd);
+      try
+      {
+         execute(command, p, output, false);
+      } catch (IOException ioe)
+      {
+         statusFlag = false;
+      }
+   }
+
+   public static void USASstop() throws IOException
+   {
+      Process p;
+      String command=" ";
+
+      String os = System.getProperty("os.name").toLowerCase();
+      String cmd_path;
+
+      if ( os.indexOf("linux") >=0 ) {
+         cmd_path = "sh " + System.getenv("MY_SQROOT") + "/export/lib";
+      } else { // assume NSK
+         cmd_path = "/usr/tandem";
+      }
+
+      String cmd= cmd_path + "/mx_ustat/StopAutoStats.sh";
+
+      String[] output=cmd.split("\\s+"); // Unused.
+
+      p = rt.exec(cmd);
+      try
+      {
+         execute(command, p, output, false);
+      } catch (IOException ioe)
+      {
+         statusFlag = false;
+      }
+   }
+
+/*
+   public static void  handleSrun(String command, String[] output) throws IOException
+   {
+
+      Properties props=null;
+      props=new Properties();
+      String[] envList={};
+
+      try
+      {
+         props.load(new FileInputStream("/usr/tandem/nvscript/admin/.scriptIndex"));
+      } catch (FileNotFoundException fnfe)
+      {
+         output[0]="Could not find the index file.";
+         return;
+      }
+
+      String[] commandArr=command.split("\\s+");
+      if (props.getProperty(commandArr[0].trim()) == null)
+      {
+         if (commandArr[0] != null && !commandArr[0].trim().equals(""))
+         {
+            output[0]= "Invalid script.";
+         }
+         if (props.size() >0)
+         {
+            output[0]="The valid scripts are:" + lineSeperator + lineSeperator;
+            Enumeration scriptNames=props.propertyNames();
+            while (scriptNames.hasMoreElements())
+            {
+               String scriptName=(String)scriptNames.nextElement();
+               output[0]+=format(scriptName,(String)props.get(scriptName))+ lineSeperator;
+            }
+         }
+         outputStr=null;
+         errStr=null;
+         return;
+      }
+
+      srunStatus = true;
+      Process p=rt.exec("/usr/bin/sh eval "+command,envList,new File("/usr/tandem/nvscript/script"));
+      try
+      {
+         execute(command, p, output, srunStatus);
+      } catch (IOException ioe)
+      {
+         statusFlag = false;
+         outputStr.append("Could not create the sub process"+ioe);
+      }
+   }
+*/
+   public static void execute(String command,Process p, String[] output, boolean cmdStatus) throws IOException
+   {
+
+      outputStr=new StringBuffer();
+      errStr=new StringBuffer();
+      output[0]="";
+      statusFlag = false;
+
+      InputStream is = p.getInputStream();
+      OutputStream os = p.getOutputStream();
+      InputStream es = p.getErrorStream();
+
+      InputStreamReader isr = new InputStreamReader(is);
+      InputStreamReader iser = new InputStreamReader(es);
+      OutputStreamWriter osw = new OutputStreamWriter(os);
+
+      final BufferedReader br=new BufferedReader(isr);
+      final BufferedReader ber=new BufferedReader(iser);
+
+      bw = new BufferedWriter(osw);
+
+      // output thread
+      class OutputThread extends Thread
+      {
+
+         StringBuffer outputBuf=null;
+         OutputThread(StringBuffer outputBuf)
+         {
+            this.outputBuf=outputBuf;
+         }
+
+         public void run()
+         {
+            int i=0;
+            try
+            {
+               while ((i=br.read()) != -1)
+               {
+                  statusFlag = !statusFlag?true:statusFlag;
+                  if (errorFlag)
+                  {
+                     Thread.yield();
+                     errorFlag=false;
+                     try
+                     {
+                        sleep(100);
+                     } catch (InterruptedException ie)
+                     {
+                     }
+                  }
+                  if (nextCommand)
+                  {
+                     br.readLine();
+                     nextCommand=false;
+                  }else
+                  {
+                     outputBuf.append((char)i);
+                  }
+               }
+               doneflag=true;
+            } catch (IOException ote)
+            {
+               System.out.println("Error occurred in output Thread "+ote);
+            }
+         }
+      };
+
+      OutputThread outputt=new OutputThread(outputStr);
+      outputt.start();
+
+      // error thread
+      class ErrorThread extends Thread
+      {
+
+         StringBuffer outputBuf=null;
+         ErrorThread(StringBuffer outputBuf)
+         {
+            this.outputBuf=outputBuf;
+         }
+
+         public void run()
+         {
+            int i=0;
+            try
+            {
+               while ((i=ber.read()) != -1)
+               {
+                  errorFlag=true;
+                  outputBuf.append((char)i);
+               }
+            }catch (IOException ete)
+            {
+               System.out.println(" Error occurred in error thread "+ete);
+            }
+         }
+      };
+
+      ErrorThread errort=new ErrorThread(errStr);
+      errort.start();
+
+      // input thread
+      try
+      {
+         p.waitFor();
+         outputt.join();
+         errort.join();
+         if (!cmdStatus)
+         {
+            if (errStr.length() > 0)
+            {
+               errStr.delete(0, errStr.length());
+	       //     errStr.append("An internal server error has occurred. Please contact support.");
+            }
+         }
+         int count = errStr.indexOf("/sh:");
+         if (count > 0)
+            errStr.delete(0, count+5);
+
+         outputStr.append(errStr);
+      } catch (InterruptedException e)
+      {
+         // TODO Auto-generated catch block
+         //.printStackTrace();
+      }
+      isr=null;
+      iser=null;
+      osw=null;
+      bw=null;
+      outputt=null;
+      errort=null;
+      cmdStatus=false;
+
+      output[0]=outputStr.toString();
+      outputStr=null;
+      errStr=null;
+   }
+/*
+   private static String format(String scriptName,String description)
+   {
+
+      if (scriptName == null)
+      {
+         return null;
+      }
+      StringBuffer sb=null;
+      sb=new StringBuffer(scriptName);
+      while (sb.length() < 12)
+      {
+         sb.append(" ");
+      }
+      if (description != null)
+      {
+         sb.append("-");
+         sb.append(description);
+      }
+      return sb.toString().replaceAll(lineSeperator,lineSeperator + "            ");
+   }
+*/
+/*
+   public static void getTaclInfo(String command,String[] output) throws IOException
+   {
+
+      String[] commandArr=command.split("\\s+");
+      Process p;
+
+      if (commandArr.length == 1 && commandArr[0].equalsIgnoreCase("sutver"))
+      {
+         p = rt.exec("/usr/bin/sh eval  gtacl -c 'sutver'");
+      }
+      else if (commandArr.length == 2 && commandArr[0].equalsIgnoreCase("vproc") && commandArr[1].equalsIgnoreCase("$SYSTEM.ZMXODBC.MXOSRVR"))
+      {
+         p = rt.exec("/usr/bin/sh eval gtacl -c 'vproc $SYSTEM.ZMXODBC.MXOSRVR'");
+      }
+      else
+      {
+         output[0] = handleExceptions(commandArr[0]);
+         return;
+      }
+      try
+      {
+         execute(command, p, output, false);
+      } catch (IOException ioe)
+      {
+         statusFlag = false;
+      }
+   }
+
+   public static void onlineDBdump(String command,String[] output) throws IOException
+   {
+      handleDbaCmd(command, output);
+   }
+
+   public static void handleDbaCmd(String command,String[] output) throws IOException
+   {
+      String[] envList = {};
+      String[] commandArr=command.split("\\s+");
+      Process p = null;
+      String dbaScriptName = null;
+
+      int len = commandArr.length;
+      if (commandArr[0].equalsIgnoreCase("dbonlinedump"))
+         dbaScriptName = "dbonlinedump";
+      else if (commandArr[0].equalsIgnoreCase("updatestats"))
+         dbaScriptName = "updatestats";
+
+      if (dbaScriptName != null)
+      {
+         switch (len)
+         {
+            case 1:
+               p = rt.exec("/usr/bin/sh eval " + dbaScriptName, envList, new File("/usr/tandem/nvscript/dbascripts"));
+               break;
+            case 2:
+               if (commandArr[1].equalsIgnoreCase("INFO"))
+                  p = rt.exec("/usr/bin/sh eval " + dbaScriptName, envList, new File("/usr/tandem/nvscript/dbascripts"));
+
+               else
+                  output[0] = handleExceptions(commandArr[0]);
+               break;
+            case 3:
+               if (commandArr[1].equalsIgnoreCase("AT"))
+                  p = rt.exec("/usr/bin/sh eval " + dbaScriptName + " AT " + commandArr[2], envList, new File("/usr/tandem/nvscript/dbascripts"));
+
+               else
+                  output[0] = handleExceptions(commandArr[0]);
+               break;
+            default:
+               output[0] = handleExceptions(commandArr[0]);
+               return;
+         }
+      }
+      else
+      {
+         output[0] = handleExceptions(commandArr[0]);
+         return;
+      }
+
+      try
+      {
+         execute(command, p, output, false);
+      } catch (IOException ioe)
+      {
+         statusFlag = false;
+      }
+   }
+*/
+
+   public static String handleExceptions(String str)
+   {
+
+      str = "Invalid Command.";
+      return str;
+   }
+
+}
+
+/*
+DROP PROCEDURE NEO.HP_USTAT.STATS_PROFILE;
+CREATE PROCEDURE NEO.HP_USTAT.STATS_PROFILE
+  (
+    IN cmd VARCHAR(4000),
+    OUT response VARCHAR(240)
+  )
+  EXTERNAL NAME 'UstatUtil.runStatsProfile'
+  EXTERNAL PATH '/usr/tandem/mx_ustat'
+  LANGUAGE JAVA
+  PARAMETER STYLE JAVA
+  NO SQL
+  DYNAMIC RESULT SETS 0
+  ;
+DROP PROCEDURE NEO.HP_USTAT.STOP_AUTOMATED_STATS;
+CREATE PROCEDURE NEO.HP_USTAT.STOP_AUTOMATED_STATS
+  ()
+  EXTERNAL NAME 'UstatUtil.USASstop'
+  EXTERNAL PATH '/usr/tandem/mx_ustat'
+  LANGUAGE JAVA
+  PARAMETER STYLE JAVA
+  NO SQL
+  DYNAMIC RESULT SETS 0
+  ;
+
+*/
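
The execute() method above is the core of this class: it launches the child process, then drains stdout and stderr on separate threads before calling waitFor(), so neither pipe buffer can fill up and stall the child. A minimal modern sketch of the same pattern, using java.lang.ProcessBuilder and an executor instead of hand-rolled threads; the command line here is hypothetical and the sketch is not part of this commit:

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.InputStream;
    import java.io.InputStreamReader;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    // Sketch only: run a command and capture stdout/stderr without deadlocking.
    public class DrainDemo {
        static String drain(InputStream in) throws IOException {
            StringBuilder sb = new StringBuilder();
            try (BufferedReader r = new BufferedReader(new InputStreamReader(in))) {
                int c;
                while ((c = r.read()) != -1) sb.append((char) c);
            }
            return sb.toString();
        }

        public static void main(String[] args) throws Exception {
            // Hypothetical command; the SPJs above run stats_profile / StopAutoStats.sh instead.
            Process p = new ProcessBuilder("sh", "-c", "echo hello; echo oops 1>&2").start();

            ExecutorService pool = Executors.newFixedThreadPool(2);
            Future<String> out = pool.submit(() -> drain(p.getInputStream()));
            Future<String> err = pool.submit(() -> drain(p.getErrorStream()));

            int rc = p.waitFor();   // safe: both pipes are being consumed concurrently
            System.out.println("rc=" + rc + " out=" + out.get() + " err=" + err.get());
            pool.shutdown();
        }
    }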

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/ustat/ChgAutoList.java
----------------------------------------------------------------------
diff --git a/core/sql/ustat/ChgAutoList.java b/core/sql/ustat/ChgAutoList.java
deleted file mode 100644
index 61315c9..0000000
--- a/core/sql/ustat/ChgAutoList.java
+++ /dev/null
@@ -1,426 +0,0 @@
-// @@@ START COPYRIGHT @@@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// @@@ END COPYRIGHT @@@
-//
-// This is the Java stored procedure for adding and deleting from the USTAT_AUTO_TABLES.
-// Compile this as: javac ChgAutoList.java;  
-//  (Note that the class must be the same name as the file).
-//
-package com.hp.mx_ustat;
-
-import java.sql.*;
-import java.io.*;
-
-public class ChgAutoList {
- 
-  public static void chg(String  operation,     // Input
-                         String  schema,        // Input
-                         String  table,         // Input
-                         String[] result)       // Output
-      throws SQLException
-  {
-    String tableCat  = "NEO";
-    String autoCat   = "MANAGEABILITY";
-    String autoSch   = "HP_USTAT";
-    String autoTable = autoCat + "." + autoSch + ".USTAT_AUTO_TABLES";
-
-    operation = operation.toUpperCase().trim();
-    schema    = schema.trim();
-    table     = table.trim();
-    if (schema.length() > 0)
-      if (schema.charAt(0) != '"') schema = schema.toUpperCase();
-      else                         schema = internalFormat(schema);
-    if (table.length() > 0)    
-      if (table.charAt(0)  != '"') table  = table.toUpperCase();
-      else                         table  = internalFormat(table);
-
-    String intSchInStrLit = schema;
-    String intTblInStrLit = table;
-    intSchInStrLit = "_UCS2'" + intSchInStrLit.replaceAll("'", "''") + "'";
-    intTblInStrLit = "_UCS2'" + intTblInStrLit.replaceAll("'", "''") + "'";
-
-    String extSchName = schema;
-    String extTblName = table;
-    extSchName = "\"" + extSchName.replaceAll("\"", "\"\"") + "\"";
-    extTblName = "\"" + extTblName.replaceAll("\"", "\"\"") + "\"";
-    String extSchDotTbl = extSchName+"."+extTblName;
-
-    String addStr  = "INSERT";
-    String inclStr = "INCLUDE"; // This is a synonym for INSERT.
-    String exclStr = "EXCLUDE";
-    String delStr  = "DELETE";
-    Connection conn   = DriverManager.getConnection("jdbc:default:connection");
-
-    // Check for valid schema and table names.
-    if      (schema.length() > 128) result[0]="Schema name too long. No changes made.";
-    else if (table.length()  > 128) result[0]="Table name too long. No changes made.";
-    else if (( schema.equals("*") && !table.equals("*")) ||
-             (!schema.equals("*") &&  table.equals("*")))
-      result[0]="You must specify '*' for both schema and table. No changes made.";
-    else if (schema.equals("") || table.equals(""))
-      result[0]="\"" + schema + "\".\"" + table + 
-                "\" is an invalid name. No changes made.";
-    else try {
-      if(operation.equals(addStr) || operation.equals(inclStr) || operation.equals(exclStr))
-      {          
-        // Perform INSERT, INCLUDE, and EXCLUDE command.
-        if (!operation.equals(exclStr) && schema.equals("*") && table.equals("*")) 
-        {
-          // Perform INSERT or INCLUDE of all tables ('*'.'*' for schema and table).          
-          try 
-          {
-
-            String os = System.getProperty("os.name").toLowerCase();
-            String sys = "";
-  
-            if ( os.indexOf("linux") >=0 ) {
-              sys = "NSK"; 
-            } 
-   
-            else { // assume NSK
-              // Obtain system name, which is needed for query to get all tables.
-              String shellCmd ="/bin/gtacl -c SYSINFO";
-              Process p = Runtime.getRuntime().exec(shellCmd);
-              BufferedReader stdInput = new BufferedReader(new 
-                                        InputStreamReader(p.getInputStream()));
-              String s;
-              int pos;
-              while ((s = stdInput.readLine()) != null)
-              if ((pos = s.indexOf("System name")) >= 0)
-              {
-                pos = s.indexOf("\\"); // Find beginning of system name.
-                sys = s.substring(pos+1);
-              }
-            }
-
-            PreparedStatement findSchemaVersion, insStmt, delStmt, cntStmt;
-            // Obtain a list of all schema versions >= 2300 present on system.
-            String verCmd="SELECT DISTINCT S.SCHEMA_VERSION " +
-                       " FROM HP_SYSTEM_CATALOG.SYSTEM_SCHEMA.CATSYS C, " +
-                       "     HP_SYSTEM_CATALOG.SYSTEM_SCHEMA.SCHEMATA S " +
-                       " WHERE C.CAT_UID=S.CAT_UID AND " +
-                       "      C.CAT_NAME=_UCS2'NEO' AND " +
-                       "      S.SCHEMA_VERSION >= 2300";
-            findSchemaVersion = conn.prepareStatement(verCmd);
-            ResultSet rs = findSchemaVersion.executeQuery();
-
-            String ver, cmd;
-            int autoListCnt=0;
-            // Loop through all schema versions >= 2300:
-            while (rs.next()) // Advance to next row in result set
-            {
-              ver=""+rs.getInt(1); // Get current row (version) from result set.
-
-              String cqdCmd="CONTROL QUERY DEFAULT BLOCK_TO_PREVENT_HALLOWEEN 'ON'";
-              PreparedStatement cqdStmt = conn.prepareStatement(cqdCmd);
-              cqdStmt.executeUpdate();
-
-              // Insert all tables and MVs in NEO catalog that don't already exist in list.
-              cmd="INSERT INTO " + autoTable +
-                " SELECT C.CAT_NAME, S.SCHEMA_NAME, O.OBJECT_NAME, " +
-                "       TIMESTAMP '0001-01-01 00:00:00', " +
-                "       TIMESTAMP '0001-01-01 00:00:00', " +
-                "       0, _UCS2'', _ISO88591'SYSTEM' " +
-                " FROM HP_SYSTEM_CATALOG.SYSTEM_SCHEMA.CATSYS C, " +
-                "     HP_SYSTEM_CATALOG.SYSTEM_SCHEMA.SCHEMATA S, " +
-                "     "+tableCat+".HP_DEFINITION_SCHEMA.OBJECTS O " +
-                " WHERE C.CAT_UID=S.CAT_UID AND " +
-                "      S.SCHEMA_UID=O.SCHEMA_UID AND " +
-                "     (O.OBJECT_TYPE=_ISO88591'BT' OR O.OBJECT_TYPE=_ISO88591'MV') AND " +
-                "      O.OBJECT_NAME_SPACE=_ISO88591'TA' AND " +
-                "      C.CAT_NAME=_UCS2'NEO' AND " +
-                "      S.SCHEMA_NAME<>_UCS2'HP_DEFINITION_SCHEMA' AND " +
-                "      S.SCHEMA_NAME<>_UCS2'PUBLIC_ACCESS_SCHEMA' AND " +
-                "      S.SCHEMA_NAME NOT LIKE _UCS2'HP\\_%' ESCAPE _UCS2'\\' AND " +
-                "      S.SCHEMA_NAME NOT LIKE _UCS2'VOLATILE\\_SCHEMA\\_%' ESCAPE _UCS2'\\' AND " +
-                "      O.OBJECT_NAME<>_UCS2'HISTOGRAMS' AND " +
-                "      O.OBJECT_NAME<>_UCS2'HISTOGRAM_INTERVALS' AND " +
-                "      O.OBJECT_NAME<>_UCS2'HISTOGRAMS_FREQ_VALS' AND " +
-                "      O.OBJECT_NAME<>_UCS2'MVS_TABLE_INFO_UMD' AND " +
-                "      O.OBJECT_NAME<>_UCS2'MVS_UMD' AND " +
-                "      O.OBJECT_NAME<>_UCS2'MVS_USED_UMD' AND " +
-                "      (C.CAT_NAME, S.SCHEMA_NAME, O.OBJECT_NAME) NOT IN " +
-                "        (SELECT CAT_NAME, SCH_NAME, TBL_NAME FROM " + autoTable + ")";
-              insStmt = conn.prepareStatement(cmd);
-              insStmt.executeUpdate();
-
-              // Delete all tables and MVs in list that no longer exist in NEO catalog.
-              cmd="DELETE FROM " + autoTable + " WHERE ADDED_BY<>_ISO88591'EXCLUD' AND " +
-                " (CAT_NAME, SCH_NAME, TBL_NAME) NOT IN " +
-                " (SELECT C.CAT_NAME, S.SCHEMA_NAME, O.OBJECT_NAME " +
-                " FROM HP_SYSTEM_CATALOG.SYSTEM_SCHEMA.CATSYS C, " +
-                "     HP_SYSTEM_CATALOG.SYSTEM_SCHEMA.SCHEMATA S, " +
-                "     "+tableCat+".HP_DEFINITION_SCHEMA.OBJECTS O " +
-                " WHERE C.CAT_UID=S.CAT_UID AND " +
-                "      S.SCHEMA_UID=O.SCHEMA_UID AND " +
-                "     (O.OBJECT_TYPE=_ISO88591'BT' OR O.OBJECT_TYPE=_ISO88591'MV') AND " +
-                "      O.OBJECT_NAME_SPACE=_ISO88591'TA' AND " +
-                "      C.CAT_NAME=_UCS2'NEO' AND " +
-                "      S.SCHEMA_NAME<>_UCS2'HP_DEFINITION_SCHEMA' AND " +
-                "      S.SCHEMA_NAME<>_UCS2'PUBLIC_ACCESS_SCHEMA' AND " +
-                "      S.SCHEMA_NAME NOT LIKE _UCS2'HP\\_%' ESCAPE _UCS2'\\' AND " +
-                "      S.SCHEMA_NAME NOT LIKE _UCS2'VOLATILE\\_SCHEMA\\_%' ESCAPE _UCS2'\\' AND " +
-                "      O.OBJECT_NAME<>_UCS2'HISTOGRAMS' AND " +
-                "      O.OBJECT_NAME<>_UCS2'HISTOGRAM_INTERVALS' AND " +
-                "      O.OBJECT_NAME<>_UCS2'HISTOGRAMS_FREQ_VALS' AND " +
-                "      O.OBJECT_NAME<>_UCS2'MVS_TABLE_INFO_UMD' AND " +
-                "      O.OBJECT_NAME<>_UCS2'MVS_UMD' AND " +
-                "      O.OBJECT_NAME<>_UCS2'MVS_USED_UMD')";
-              delStmt = conn.prepareStatement(cmd);
-              delStmt.executeUpdate();                                                                     
-            }
-            // Get current count of tables that will be automated.
-            cmd="SELECT COUNT(*) FROM " + autoTable + " WHERE ADDED_BY<>_ISO88591'EXCLUD'";
-            cntStmt = conn.prepareStatement(cmd);
-            rs = cntStmt.executeQuery();
-            rs.next(); 
-            autoListCnt = rs.getInt(1);
-            
-            result[0]="INSERTed " + autoListCnt + " table names (all) into list.";
-            rs.close();
-          }
-          catch(IOException err)
-          {
-            // Shell failure message.
-            result[0] = "Unable to " + operation + ".  Error: " + err.getMessage().trim();
-            if (result[0].charAt(result[0].length()-1) == ']') // Remove date/time.
-              result[0]=result[0].substring(0,result[0].length()-21);
-          }   
-        }
-        else if (operation.equals(exclStr) && 
-                 schema.equals("*") && table.equals("*")) 
-          result[0] = "EXCLUDE failed. Specifying '*', '*' not allowed.";
-        else
-        {
-          // User has requested to INSERT, INCLUDE, or EXCLUDE a specific table.
-          String addedBy="USER";
-          String action=operation+"d";
-          if (operation.equals(addStr))  action=operation+"ed";
-          if (operation.equals(exclStr))
-          {
-            addedBy="EXCLUD";
-            // For EXCLUDE, always delete the blank entry created when all entries are deleted.
-            // (See DELETE below.)  In addition, if EXCLUDing, and an entry already exists for
-            // this schema and table with ADDED_BY='SYSTEM', remove so it can be EXCLUDEd.
-            PreparedStatement delStmt1 =
-              conn.prepareStatement("DELETE FROM " + autoTable + " WHERE CAT_NAME=_UCS2''");
-            // Do not check for errors.
-            delStmt1.executeUpdate();
-            PreparedStatement delStmt2 =
-              conn.prepareStatement("DELETE FROM " + autoTable + " WHERE CAT_NAME=_UCS2'NEO' " +
-                                    "AND SCH_NAME=" + intSchInStrLit + " AND TBL_NAME=" + intTblInStrLit +
-                                    " AND ADDED_BY=_ISO88591'SYSTEM'");
-            // Do not check for errors.
-            delStmt2.executeUpdate();
-          }
-          
-          PreparedStatement insStmt =
-            conn.prepareStatement("INSERT INTO " + autoTable + " VALUES (_UCS2'NEO'," +
-                                  " ?, ?, TIMESTAMP '0001-01-01 00:00:00'," +
-                                  " TIMESTAMP '0001-01-01 00:00:00', 0, _UCS2'', _ISO88591'" +
-                                  addedBy + "')");
-          insStmt.setString(1, schema);  // Set first  argument in statement (1st '?').
-          insStmt.setString(2, table);   // Set second argument in statement (2nd '?').
-          if (insStmt.executeUpdate() == 1) 
-            result[0]="Table name "+extSchDotTbl+" " + action +".";
-
-        }
-      }
-      else if(operation.equals(delStr)) 
-      {
-        // Perform DELETE command.
-        if (schema.equals("*") && table.equals("*")) 
-        {
-          // If the user has specified '*'.'*' for schema and table, remove all 
-          // entries in list, then add an empty entry.
-          PreparedStatement delStmt = conn.prepareStatement("DELETE FROM " + autoTable);
-          delStmt.executeUpdate();
-          result[0]="All entries DELETEd.  Automation disabled.";
-          
-          // Add the empty entry, which is needed so that USAS.sh does not later insert all
-          // existing tables.  It would do so if the USTAT_AUTO_TABLES table were empty.
-          PreparedStatement insStmt =
-            conn.prepareStatement("INSERT INTO " + autoTable + 
-								  " VALUES (_UCS2'', _UCS2'', _UCS2'', " +
-                                  " TIMESTAMP '0001-01-01 00:00:00'," +
-                                  " TIMESTAMP '0001-01-01 00:00:00', 0, _UCS2'', _ISO88591'USER')");
-          insStmt.executeUpdate();
-
-          try {
-            // Remove USTAT_AUTOMATION_INTERVAL entry from SYSTEM_DEFAULTS tables.
-
-            String os = System.getProperty("os.name").toLowerCase();
-  
-	    if ( os.indexOf("linux") >=0 ) {
-                PreparedStatement delStmt2 =
-                   conn.prepareStatement(
-        "DELETE FROM HP_SYSTEM_CATALOG.SYSTEM_DEFAULTS_SCHEMA.SYSTEM_DEFAULTS " +
-        "WHERE ATTRIBUTE = 'USTAT_AUTOMATION_INTERVAL'");
-               // Do not check for errors.
-               delStmt2.executeUpdate();
-  
-               // Now remove AUTO_CQDS_SET file from the cluster.
-               String shellCmd;
-
-               String sqroot = System.getenv("MY_SQROOT");
-
-               shellCmd = "rm " + sqroot + "/export/lib/mx_ustat/autodir/USTAT_CQDS_SET";
-               Process p = Runtime.getRuntime().exec(shellCmd);
-
-               shellCmd = "rm " + sqroot + "/export/lib/mx_ustat/autoprev/USTAT_CQDS_SET";
-               p = Runtime.getRuntime().exec(shellCmd);
-
-            } else {
-  
-              // assume NSK
-              // Obtain system name.
-              String sys="";
-              String shellCmd = "/bin/gtacl -c SYSINFO";
-              Process p = Runtime.getRuntime().exec(shellCmd);
-              BufferedReader stdInput = new BufferedReader(new 
-                InputStreamReader(p.getInputStream()));
-              String s;
-              int pos;
-              while ((s = stdInput.readLine()) != null)
-                if ((pos = s.indexOf("System name")) >= 0)
-                {
-                  pos = s.indexOf("\\"); // Find beginning of system name.
-                  sys = s.substring(pos+1);
-                }
-  
-              // Obtain all segment names.  The grep here is really to avoid getting names
-              // of systems that are on expand which are not segments.
-              String sysprefix=sys.substring(0,3).toLowerCase();
-              shellCmd = "ls /E";
-              p = Runtime.getRuntime().exec(shellCmd);
-              stdInput = new BufferedReader(new InputStreamReader(p.getInputStream()));
-              // For each segment, remove USTAT_AUTOMATION_INTERVAL from system defaults table.
-              // (make sure the segment name returned starts with 'sysprefix').
-              while ((s = stdInput.readLine()) != null && s.indexOf(sysprefix) == 0)
-              {
-                PreparedStatement delStmt2 =
-                  conn.prepareStatement("DELETE FROM HP_SYSTEM_CATALOG" + 
-                                        ".SYSTEM_DEFAULTS_SCHEMA.SYSTEM_DEFAULTS " +
-                                        "WHERE ATTRIBUTE = 'USTAT_AUTOMATION_INTERVAL'");
-                // Do not check for errors.
-                delStmt2.executeUpdate();
-              }
-  
-              // Now remove AUTO_CQDS_SET file from primary segment.
-              shellCmd = "rm /E/" + sysprefix + "0101/usr/tandem/mx_ustat/autodir/USTAT_CQDS_SET";
-              p = Runtime.getRuntime().exec(shellCmd);
-              shellCmd = "rm /E/" + sysprefix + "0101/usr/tandem/mx_ustat/autoprev/USTAT_CQDS_SET";
-              p = Runtime.getRuntime().exec(shellCmd);
-            }
-
-          }
-          catch(IOException err)
-          {
-            // Shell failure message.
-            result[0] = "Unable to remove USTAT_AUTOMATION_INTERVAL from SYSTEM_DEFAULTS " + 
-                        "tables.  You must do this manually.";
-            if (result[0].charAt(result[0].length()-1) == ']') // Remove date/time.
-              result[0]=result[0].substring(0,result[0].length()-21);
-          }   
-        }
-        else 
-        {          
-          // User has requested to delete a specific table.
-          // First see if the table is 'EXCLUD' and can be deleted.  Note that deletion
-          // of the last 'USER' added table results in a blank 'USER' entry being added.
-          // This is not done for deletion of 'EXCLUD'ed tables.
-          PreparedStatement delete1 =
-            conn.prepareStatement("DELETE FROM " + autoTable + 
-            " WHERE SCH_NAME = ? AND TBL_NAME = ? AND ADDED_BY=_ISO88591'EXCLUD'");
-            delete1.setString(1, schema);  // Set first  argument in statement (1st '?').
-            delete1.setString(2, table);   // Set second argument in statement (2nd '?').
-          if (delete1.executeUpdate() == 0) 
-          {
-            // Failed to delete (0 rows deleted).  Either the table did not have 
-            // ADDED_BY='EXCLUD' or entry does not exist.  Try to delete for any ADDED_BY.
-            PreparedStatement delete2 =
-              conn.prepareStatement("DELETE FROM " + autoTable +
-              " WHERE SCH_NAME = ? AND TBL_NAME = ?");
-              delete2.setString(1, schema);  // Set first  argument in statement (1st '?').
-              delete2.setString(2, table);   // Set second argument in statement (2nd '?').
-            if (delete2.executeUpdate() == 0) 
-              result[0]="Table name  "+extSchDotTbl+" not found, not DELETEd.";
-            else
-            { 
-              // A 'SYSTEM' or 'USER' table DELETEd.
-              result[0]="Table name "+extSchDotTbl+" DELETEd.";
-
-              // Add the empty entry, if there are no rows with the ADDED_BY field set to
-              // 'USER'.  This keeps USAS.sh from inserting all existing tables later
-              // on.  It would do so if all 'USER' entries from USTAT_AUTO_TABLES table had
-              // been deleted.
-              PreparedStatement FindUserEnteredTables =
-                conn.prepareStatement("SELECT COUNT(*) FROM " + autoTable +
-                " WHERE ADDED_BY = _ISO88591'USER'" + 
-                " FOR READ UNCOMMITTED ACCESS");
-              ResultSet rs = FindUserEnteredTables.executeQuery();
-              rs.next();
-              if (rs.getInt(1) == 0)
-              {
-                PreparedStatement insStmt =
-                  conn.prepareStatement("INSERT INTO " + autoTable + 
-				  " VALUES (_UCS2'', _UCS2'', _UCS2'', " +
-                  " TIMESTAMP '0001-01-01 00:00:00'," +
-                  " TIMESTAMP '0001-01-01 00:00:00', 0, _UCS2'', _ISO88591'USER')");
-                insStmt.executeUpdate();
-              }
-              rs.close();
-            }
-          }
-          // 'EXCLUD' table was successfully DELETEd, set result string.
-          else result[0]="Table name "+extSchDotTbl+"\" DELETEd.";
-        }
-      }
-      else 
-      {
-        result[0] = operation + " is not a valid operation.";
-      }
-    } 
-    catch(SQLException err)
-    {
-      result[0] = err.getMessage().trim(); // Issue SQL error.
-      if (result[0].charAt(result[0].length()-1) == ']') // Remove date/time.
-        result[0]=result[0].substring(0,result[0].length()-21);
-    } 
-    finally 
-    {
-      conn.close();
-    }
-    if (result[0].length() > 80) result[0]=result[0].substring(0,79);
-  }
-
-  public static String internalFormat(String name)
-  {
-    // Remove enclosing quotes
-    name=name.substring(1, name.length()-1);
-
-    // Change all occurrences of "" to ".
-    int index=-1;
-    while((index=name.indexOf("\"\"", index+1)) != -1)
-      name=name.substring(0,index+1)+name.substring(index+2);  
-
-    return name;
-  }
-}



[7/9] incubator-trafodion git commit: Most of the Trafodion Java source files are built through Maven, using projects DCS, REST, HBase-trx and SQL. A few files remain in the core/sql/executor and core/sql/ustat directories that are built through javac co

Posted by db...@apache.org.
Most of the Trafodion Java source files are built through Maven, using
projects DCS, REST, HBase-trx and SQL. A few files remain in the
core/sql/executor and core/sql/ustat directories that are built
through javac commands in the Makefile and helper files. Also, these
files don't conform to common naming standards and therefore are a bit
harder to debug with tools like Eclipse.

I would like to do the following:

1. Move these remaining Java source files under core/sql/src,
   where the UDR-related files already are.
2. Include the class files in trafodion-sql-1.2.0.jar, the
   deliverable of the core/sql Maven project.
3. Change the package name to org.trafodion.sql for all these files.
4. Remove the trafodion-HBaseAccess-1.2.0.jar file (content
   will be included in trafodion-sql-1.2.0.jar). This jar file
   used to be built with a Makefile.
5. Make some source code changes to reflect the new package names
   and to avoid calling private method of class
   org.apache.hadoop.hive.ql.io.orc.OrcStruct.
6. Remove Makefile rules for compiling Java files.

Also, this change includes a change of the component name for the
event logger. We use SQL.COMP now, to make analyzing log messages
for Trafodion easier. Venkat asked for this change.
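
For code that uses these classes, the visible part of item 3 is just the package prefix: the executor classes move from org.trafodion.sql.HBaseAccess to org.trafodion.sql, and the ustat classes move from com.hp.mx_ustat to org.trafodion.sql.ustat. An illustrative sketch of a caller before and after the change (not an actual hunk from this commit):

    // Illustrative only; not part of this commit.
    // Old imports, when these files were compiled by javac rules in the Makefile:
    //   import org.trafodion.sql.HBaseAccess.HTableClient;
    //   import com.hp.mx_ustat.ChgAutoList;
    // New imports, with both classes packaged in trafodion-sql-1.2.0.jar:
    import org.trafodion.sql.HTableClient;
    import org.trafodion.sql.ustat.ChgAutoList;

    public class PackageMoveExample {
        public static void main(String[] args) {
            // Resolving the classes is enough to show the new package names work
            // once trafodion-sql-1.2.0.jar is on the classpath (see the sqenvcom.sh hunk below).
            System.out.println(HTableClient.class.getName());
            System.out.println(ChgAutoList.class.getName());
        }
    }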


Project: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/commit/a44823fe
Tree: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/tree/a44823fe
Diff: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/diff/a44823fe

Branch: refs/heads/master
Commit: a44823fe2fd3991bc13434f73692a70b661efd43
Parents: 0a8da29
Author: Hans Zeller <ha...@esgyn.com>
Authored: Thu Oct 1 02:01:45 2015 +0000
Committer: Hans Zeller <ha...@esgyn.com>
Committed: Thu Oct 1 02:01:45 2015 +0000

----------------------------------------------------------------------
 core/sqf/sqenvcom.sh                            |    1 -
 core/sqf/src/seatrans/hbase-trx/Makefile        |    2 +-
 core/sql/executor/ByteArrayList.java            |   54 -
 core/sql/executor/HBaseClient.java              | 1596 ------------------
 core/sql/executor/HBaseClient_JNI.cpp           |   30 +-
 core/sql/executor/HBaseClient_JNI.h             |    2 +-
 core/sql/executor/HBulkLoadClient.java          |  533 ------
 core/sql/executor/HTableClient.h                |   65 -
 core/sql/executor/HTableClient.java             | 1334 ---------------
 core/sql/executor/HiveClient.java               |  301 ----
 core/sql/executor/OrcFileReader.cpp             |    4 +-
 core/sql/executor/OrcFileReader.java            |  518 ------
 core/sql/executor/ResultIterator.java           |  133 --
 core/sql/executor/ResultKeyValueList.java       |  100 --
 core/sql/executor/RowToInsert.java              |   44 -
 core/sql/executor/RowsToInsert.java             |   57 -
 core/sql/executor/SequenceFileReader.cpp        |    4 +-
 core/sql/executor/SequenceFileReader.java       |  448 -----
 core/sql/executor/SequenceFileWriter.java       |  467 -----
 core/sql/executor/StringArrayList.java          |   47 -
 .../executor/org_trafodion_sql_HTableClient.h   |   43 +
 core/sql/nskgmake/Makerules.build               |   13 -
 core/sql/nskgmake/Makerules.linux               |   13 -
 core/sql/nskgmake/Makerules.mk                  |   73 +-
 core/sql/nskgmake/executor/Makefile             |   54 -
 core/sql/nskgmake/ustat/Makefile                |   13 -
 core/sql/pom.xml                                |   49 +
 core/sql/qmscommon/QRLogger.cpp                 |   18 +-
 .../java/org/trafodion/sql/ByteArrayList.java   |   54 +
 .../java/org/trafodion/sql/HBaseClient.java     | 1596 ++++++++++++++++++
 .../java/org/trafodion/sql/HBulkLoadClient.java |  533 ++++++
 .../java/org/trafodion/sql/HTableClient.java    | 1337 +++++++++++++++
 .../main/java/org/trafodion/sql/HiveClient.java |  301 ++++
 .../java/org/trafodion/sql/OrcFileReader.java   |  500 ++++++
 .../java/org/trafodion/sql/ResultIterator.java  |  133 ++
 .../org/trafodion/sql/ResultKeyValueList.java   |  100 ++
 .../java/org/trafodion/sql/RowToInsert.java     |   44 +
 .../java/org/trafodion/sql/RowsToInsert.java    |   57 +
 .../org/trafodion/sql/SequenceFileReader.java   |  448 +++++
 .../org/trafodion/sql/SequenceFileWriter.java   |  467 +++++
 .../java/org/trafodion/sql/StringArrayList.java |   47 +
 .../org/trafodion/sql/ustat/ChgAutoList.java    |  426 +++++
 .../java/org/trafodion/sql/ustat/UstatUtil.java |  442 +++++
 core/sql/ustat/ChgAutoList.java                 |  426 -----
 core/sql/ustat/UstatUtil.java                   |  442 -----
 45 files changed, 6610 insertions(+), 6759 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sqf/sqenvcom.sh
----------------------------------------------------------------------
diff --git a/core/sqf/sqenvcom.sh b/core/sqf/sqenvcom.sh
index df7eb52..2de3983 100644
--- a/core/sqf/sqenvcom.sh
+++ b/core/sqf/sqenvcom.sh
@@ -872,7 +872,6 @@ if [[ -n "$SQ_CLASSPATH"   ]]; then SQ_CLASSPATH="$SQ_CLASSPATH:";   fi
 SQ_CLASSPATH=${SQ_CLASSPATH}${HBASE_TRXDIR}:\
 ${HBASE_TRXDIR}/${HBASE_TRX_JAR}:\
 $MY_SQROOT/export/lib/trafodion-sql-${TRAFODION_VER}.jar:\
-$MY_SQROOT/export/lib/trafodion-HBaseAccess-${TRAFODION_VER}.jar:\
 $MY_SQROOT/export/lib/jdbcT2.jar
 
 # Check whether the current shell environment changed from a previous execution of this

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sqf/src/seatrans/hbase-trx/Makefile
----------------------------------------------------------------------
diff --git a/core/sqf/src/seatrans/hbase-trx/Makefile b/core/sqf/src/seatrans/hbase-trx/Makefile
index fd99bad..7abfa16 100644
--- a/core/sqf/src/seatrans/hbase-trx/Makefile
+++ b/core/sqf/src/seatrans/hbase-trx/Makefile
@@ -36,7 +36,7 @@ all: build_all
 
 jdk_1_7_cdh:
 	$(MAKE) build_chk_cdh
-	set -o pipefail && $(MAVEN) -f pom.xml.cdh package -DskipTests | tee -a build_trx.log
+	set -o pipefail && $(MAVEN) -f pom.xml.cdh package install -DskipTests | tee -a build_trx.log
 	cp -pf target/$(BLD_HBASE_CDH_TRX_JARNAME) $(MY_SQROOT)/export/lib
 	$(RM) $(VFILE)
 

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/executor/ByteArrayList.java
----------------------------------------------------------------------
diff --git a/core/sql/executor/ByteArrayList.java b/core/sql/executor/ByteArrayList.java
deleted file mode 100644
index cb8d6ac..0000000
--- a/core/sql/executor/ByteArrayList.java
+++ /dev/null
@@ -1,54 +0,0 @@
-// @@@ START COPYRIGHT @@@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// @@@ END COPYRIGHT @@@
-
-package org.trafodion.sql.HBaseAccess;
-
-import java.util.ArrayList;
-
-public class ByteArrayList extends ArrayList<byte[]> {
-
-	private static final long serialVersionUID = -3557219337406352735L;
-
-	void addElement(byte[] ba) {
-	        add(ba);
-	}
-
-	byte[] getElement(int i) {
-	    if (size() == 0)
-		return null;
-	    else if (i < size())
-		return get(i);
-	    else
-		return null;
-	}
-
-        int getSize() {
-           return size();
-	}
-
-        int getEntrySize(int i) {
-          return get(i).length;
-        }
-
-        byte[] getEntry(int i) {
-          return get(i);
-        }
-}

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/executor/HBaseClient.java
----------------------------------------------------------------------
diff --git a/core/sql/executor/HBaseClient.java b/core/sql/executor/HBaseClient.java
deleted file mode 100644
index 1bc02f3..0000000
--- a/core/sql/executor/HBaseClient.java
+++ /dev/null
@@ -1,1596 +0,0 @@
-// @@@ START COPYRIGHT @@@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// @@@ END COPYRIGHT @@@
-
-package org.trafodion.sql.HBaseAccess;
-
-import com.google.protobuf.ServiceException;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.NavigableMap;
-import java.util.Map;
-import java.util.Arrays;
-import java.net.URI;
-import java.net.URISyntaxException;
-
-import org.apache.log4j.PropertyConfigurator;
-import org.apache.log4j.Logger;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.transactional.RMInterface;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.HFileArchiveUtil;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.util.PoolMap;
-import org.apache.hadoop.hbase.util.PoolMap.PoolType;
-import org.apache.hadoop.hbase.security.access.AccessController;
-import org.apache.hadoop.hbase.security.access.UserPermission;
-import org.apache.hadoop.hbase.security.access.Permission;
-import org.apache.hadoop.hbase.MasterNotRunningException;
-import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type;
-//import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
-
-import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
-//import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
-import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-import org.apache.hadoop.hbase.regionserver.BloomType; 
-//import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType ;
-import org.apache.hadoop.hbase.regionserver.KeyPrefixRegionSplitPolicy;
-import org.apache.hadoop.hbase.client.Durability;
-import org.trafodion.sql.HBaseAccess.HTableClient;
-import org.apache.hadoop.hbase.ServerLoad;
-import org.apache.hadoop.hbase.RegionLoad;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.ServerName;
-
-import java.util.concurrent.ExecutionException;
-import java.util.Set;
-import java.util.TreeSet;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.permission.AclEntry;
-import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.io.hfile.HFile;
-import org.apache.hadoop.hbase.io.hfile.HFileScanner;
-import org.apache.hadoop.hbase.io.hfile.FixedFileTrailer;
-import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.client.DtmConst;
-import org.apache.commons.codec.binary.Hex;
-
-public class HBaseClient {
-
-    static Logger logger = Logger.getLogger(HBaseClient.class.getName());
-    public static Configuration config = HBaseConfiguration.create();
-    String lastError;
-    RMInterface table = null;
-
-    private PoolMap<String, HTableClient> hTableClientsFree;
-    private PoolMap<String, HTableClient> hTableClientsInUse;
-    // this set of constants MUST be kept in sync with the C++ enum in
-    // ExpHbaseDefs.h
-    public static final int HBASE_NAME = 0;
-    public static final int HBASE_MAX_VERSIONS = 1;
-    public static final int HBASE_MIN_VERSIONS = 2;
-    public static final int HBASE_TTL = 3;
-    public static final int HBASE_BLOCKCACHE = 4;
-    public static final int HBASE_IN_MEMORY = 5;
-    public static final int HBASE_COMPRESSION = 6;
-    public static final int HBASE_BLOOMFILTER = 7;
-    public static final int HBASE_BLOCKSIZE = 8;
-    public static final int HBASE_DATA_BLOCK_ENCODING = 9;
-    public static final int HBASE_CACHE_BLOOMS_ON_WRITE = 10;
-    public static final int HBASE_CACHE_DATA_ON_WRITE = 11;
-    public static final int HBASE_CACHE_INDEXES_ON_WRITE = 12;
-    public static final int HBASE_COMPACT_COMPRESSION = 13;
-    public static final int HBASE_PREFIX_LENGTH_KEY = 14;
-    public static final int HBASE_EVICT_BLOCKS_ON_CLOSE = 15;
-    public static final int HBASE_KEEP_DELETED_CELLS = 16;
-    public static final int HBASE_REPLICATION_SCOPE = 17;
-    public static final int HBASE_MAX_FILESIZE = 18;
-    public static final int HBASE_COMPACT = 19;
-    public static final int HBASE_DURABILITY = 20;
-    public static final int HBASE_MEMSTORE_FLUSH_SIZE = 21;
-    public static final int HBASE_SPLIT_POLICY = 22;
-
-    
-    public HBaseClient() {
-      if (hTableClientsFree == null)
-         hTableClientsFree = new PoolMap<String, HTableClient>
-                 (PoolType.Reusable, Integer.MAX_VALUE);
-      hTableClientsInUse = new PoolMap<String, HTableClient>
-               (PoolType.Reusable, Integer.MAX_VALUE);
-    }
-
-    public String getLastError() {
-        return lastError;
-    }
-
-    void setLastError(String err) {
-        lastError = err;
-    }
-
-    static {
-    	//Some clients of this class, e.g., DcsServer/JdbcT2,
-    	//want to use their own log4j.properties file instead
-    	//of the /conf/log4j.hdfs.config so they can see their
-    	//log events in their own log files or console.
-    	//So, check for an alternate log4j.properties; otherwise
-    	//use the default HBaseClient config.
-    	String confFile = System.getProperty("hbaseclient.log4j.properties");
-    	if(confFile == null) {
-    		System.setProperty("trafodion.hdfs.log", System.getenv("MY_SQROOT") + "/logs/trafodion.hdfs.log");
-    		confFile = System.getenv("MY_SQROOT") + "/conf/log4j.hdfs.config";
-    	}
-    	PropertyConfigurator.configure(confFile);
-    }
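As a small illustration of the hook above (the property name comes from this block; the file path is hypothetical), an embedding process such as DcsServer can point the class at its own log4j configuration before HBaseClient is first loaded:

    // Hypothetical path; must be set before the class is loaded so the static
    // initializer uses it instead of $MY_SQROOT/conf/log4j.hdfs.config.
    System.setProperty("hbaseclient.log4j.properties", "/opt/myapp/conf/log4j.properties");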
-
-    public boolean init(String zkServers, String zkPort) 
-	throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException, IOException
-    {
-        if (logger.isDebugEnabled()) logger.debug("HBaseClient.init(" + zkServers + ", " + zkPort
-                         + ") called.");
-        HBaseAdmin.checkHBaseAvailable(config);
-
-        try {
-            table = new RMInterface();
-        } catch (Exception e) {
-            if (logger.isDebugEnabled()) logger.debug("HBaseClient.init: Error in RMInterface instance creation.");
-        }
-        
-        return true;
-    }
- 
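A rough sketch of the client life cycle, assuming the methods below are used as declared (the ZooKeeper quorum, port, and table name are illustrative, not taken from this file):

    HBaseClient client = new HBaseClient();
    client.init("zkhost1,zkhost2,zkhost3", "2181");   // hypothetical zkServers and zkPort
    HTableClient htc = client.getHTableClient(0, "TRAFODION.SCH.T1", false);  // jniObject 0, no TRX
    // ... reads and writes go through htc ...
    client.releaseHTableClient(htc);
    client.cleanup();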
-    private void  cleanup(PoolMap hTableClientsPool) throws IOException
-    {
-       Collection hTableClients;
-       Iterator<HTableClient> iter;
-       HTableClient htable;
-       boolean clearRegionCache = false;
-       boolean cleanJniObject = true;
-
-       hTableClients = hTableClientsPool.values();
-       iter = hTableClients.iterator();
-       while (iter.hasNext())
-       {
-         htable = iter.next();
-         htable.close(clearRegionCache, cleanJniObject);          
-       }
-       hTableClientsPool.clear();
-    }
-
-    public boolean cleanup() throws IOException {
-       cleanup(hTableClientsInUse);
-       cleanup(hTableClientsFree);
-       return true;
-    }
-
-   public void cleanupCache(Collection hTableClients) throws IOException
-    {
-       Iterator<HTableClient> iter;
-       HTableClient htable;
-       boolean clearRegionCache = true;
-       boolean cleanJniObject = false;
- 
-       iter = hTableClients.iterator();
-       while (iter.hasNext())
-       {
-          htable = iter.next();
-          htable.close(clearRegionCache, cleanJniObject);     
-       }
-    }
-
-    public boolean cleanupCache(String tblName) throws IOException
-    {
-       Collection hTableClients;
-       hTableClients = hTableClientsFree.values(tblName);
-       cleanupCache(hTableClients);  
-       hTableClientsFree.remove(tblName);
-       hTableClients = hTableClientsInUse.values(tblName);
-       cleanupCache(hTableClients);  
-       hTableClientsInUse.remove(tblName);
-       return true;
-    }
-
-    public boolean create(String tblName, Object[]  colFamNameList,
-                          boolean isMVCC) 
-        throws IOException, MasterNotRunningException {
-            if (logger.isDebugEnabled()) logger.debug("HBaseClient.create(" + tblName + ") called, and MVCC is " + isMVCC + ".");
-            cleanupCache(tblName);
-            HTableDescriptor desc = new HTableDescriptor(tblName);
-            for (int i = 0; i < colFamNameList.length ; i++) {
-		String  colFam = (String)colFamNameList[i];
-                HColumnDescriptor colDesc = new HColumnDescriptor(colFam);
-                if (isMVCC)
-                  colDesc.setMaxVersions(DtmConst.MVCC_MAX_VERSION);
-                else
-                  colDesc.setMaxVersions(DtmConst.SSCC_MAX_VERSION);
-                desc.addFamily(colDesc);
-            }
-            HColumnDescriptor metaColDesc = new HColumnDescriptor(DtmConst.TRANSACTION_META_FAMILY);
-            if (isMVCC)
-              metaColDesc.setMaxVersions(DtmConst.MVCC_MAX_DATA_VERSION);
-            else
-              metaColDesc.setMaxVersions(DtmConst.SSCC_MAX_DATA_VERSION);
-            metaColDesc.setInMemory(true);
-            desc.addFamily(metaColDesc);
-            HBaseAdmin admin = new HBaseAdmin(config);
-            admin.createTable(desc);
-            admin.close();
-            return true;
-   } 
-
-   // used for returning two flags from setDescriptors method
-
-   private class ChangeFlags {
-       boolean tableDescriptorChanged;
-       boolean columnDescriptorChanged;
-
-       ChangeFlags() {
-           tableDescriptorChanged = false;
-           columnDescriptorChanged = false;
-       }
-
-       void setTableDescriptorChanged() {
-           tableDescriptorChanged = true;
-       }
-
-       void setColumnDescriptorChanged() {
-           columnDescriptorChanged = true;
-       }
-
-       boolean tableDescriptorChanged() {
-           return tableDescriptorChanged;
-       }
-
-       boolean columnDescriptorChanged() {
-           return columnDescriptorChanged;
-       }
-   }
-
-   private ChangeFlags setDescriptors(Object[] tableOptions,
-                                      HTableDescriptor desc,
-                                      HColumnDescriptor colDesc,
-                                      int defaultVersionsValue) {
-       ChangeFlags returnStatus = new ChangeFlags();
-       String trueStr = "TRUE";
-       for (int i = 0; i < tableOptions.length; i++) {
-           if (i == HBASE_NAME)	
-               continue ;
-           String tableOption = (String)tableOptions[i];
-           if ((i != HBASE_MAX_VERSIONS) && (tableOption.isEmpty()))
-               continue ;
-           switch (i) {
-           case HBASE_MAX_VERSIONS:
-               if (tableOption.isEmpty()) {
-                   if (colDesc.getMaxVersions() != defaultVersionsValue) {
-                       colDesc.setMaxVersions(defaultVersionsValue);
-                       returnStatus.setColumnDescriptorChanged();
-                   }
-               }
-               else {
-                   colDesc.setMaxVersions
-                       (Integer.parseInt(tableOption));
-                   returnStatus.setColumnDescriptorChanged();
-               }
-               break ;
-           case HBASE_MIN_VERSIONS:
-               colDesc.setMinVersions
-                   (Integer.parseInt(tableOption));
-               returnStatus.setColumnDescriptorChanged();
-               break ;
-           case HBASE_TTL:
-               colDesc.setTimeToLive
-                   (Integer.parseInt(tableOption));
-               returnStatus.setColumnDescriptorChanged();
-               break ;
-           case HBASE_BLOCKCACHE:
-               if (tableOption.equalsIgnoreCase(trueStr))
-                   colDesc.setBlockCacheEnabled(true);
-               else
-                   colDesc.setBlockCacheEnabled(false);
-               returnStatus.setColumnDescriptorChanged();
-               break ;
-           case HBASE_IN_MEMORY:
-               if (tableOption.equalsIgnoreCase(trueStr))
-                   colDesc.setInMemory(true);
-               else
-                   colDesc.setInMemory(false);
-               returnStatus.setColumnDescriptorChanged();
-               break ;
-           case HBASE_COMPRESSION:
-               if (tableOption.equalsIgnoreCase("GZ"))
-                   colDesc.setCompressionType(Algorithm.GZ);
-               else if (tableOption.equalsIgnoreCase("LZ4"))
-                   colDesc.setCompressionType(Algorithm.LZ4);
-               else if (tableOption.equalsIgnoreCase("LZO"))
-                   colDesc.setCompressionType(Algorithm.LZO);
-               else if (tableOption.equalsIgnoreCase("NONE"))
-                   colDesc.setCompressionType(Algorithm.NONE);
-               else if (tableOption.equalsIgnoreCase("SNAPPY"))
-                   colDesc.setCompressionType(Algorithm.SNAPPY); 
-               returnStatus.setColumnDescriptorChanged();
-               break ;
-           case HBASE_BLOOMFILTER:
-               if (tableOption.equalsIgnoreCase("NONE"))
-                   colDesc.setBloomFilterType(BloomType.NONE);
-               else if (tableOption.equalsIgnoreCase("ROW"))
-                   colDesc.setBloomFilterType(BloomType.ROW);
-               else if (tableOption.equalsIgnoreCase("ROWCOL"))
-                   colDesc.setBloomFilterType(BloomType.ROWCOL); 
-               returnStatus.setColumnDescriptorChanged();
-               break ;
-           case HBASE_BLOCKSIZE:
-               colDesc.setBlocksize
-                   (Integer.parseInt(tableOption));
-               returnStatus.setColumnDescriptorChanged();
-               break ;
-           case HBASE_DATA_BLOCK_ENCODING:
-               if (tableOption.equalsIgnoreCase("DIFF"))
-                   colDesc.setDataBlockEncoding(DataBlockEncoding.DIFF);
-               else if (tableOption.equalsIgnoreCase("FAST_DIFF"))
-                   colDesc.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
-               else if (tableOption.equalsIgnoreCase("NONE"))
-                   colDesc.setDataBlockEncoding(DataBlockEncoding.NONE);
-               else if (tableOption.equalsIgnoreCase("PREFIX"))
-                   colDesc.setDataBlockEncoding(DataBlockEncoding.PREFIX);
-               else if (tableOption.equalsIgnoreCase("PREFIX_TREE"))
-                   colDesc.setDataBlockEncoding(DataBlockEncoding.PREFIX_TREE);
-               returnStatus.setColumnDescriptorChanged();
-               break ;
-           case HBASE_CACHE_BLOOMS_ON_WRITE:
-               if (tableOption.equalsIgnoreCase(trueStr))
-                   colDesc.setCacheBloomsOnWrite(true);
-               else
-                   colDesc.setCacheBloomsOnWrite(false);
-               returnStatus.setColumnDescriptorChanged();
-               break ;
-           case HBASE_CACHE_DATA_ON_WRITE:
-               if (tableOption.equalsIgnoreCase(trueStr))
-                   colDesc.setCacheDataOnWrite(true);
-               else
-                   colDesc.setCacheDataOnWrite(false);
-               returnStatus.setColumnDescriptorChanged();
-               break ;
-           case HBASE_CACHE_INDEXES_ON_WRITE:
-               if (tableOption.equalsIgnoreCase(trueStr))
-                   colDesc.setCacheIndexesOnWrite(true);
-               else
-                   colDesc.setCacheIndexesOnWrite(false);
-               returnStatus.setColumnDescriptorChanged();
-               break ;
-           case HBASE_COMPACT_COMPRESSION:
-               if (tableOption.equalsIgnoreCase("GZ"))
-                   colDesc.setCompactionCompressionType(Algorithm.GZ);
-               else if (tableOption.equalsIgnoreCase("LZ4"))
-                   colDesc.setCompactionCompressionType(Algorithm.LZ4);
-               else if (tableOption.equalsIgnoreCase("LZO"))
-                   colDesc.setCompactionCompressionType(Algorithm.LZO);
-               else if (tableOption.equalsIgnoreCase("NONE"))
-                   colDesc.setCompactionCompressionType(Algorithm.NONE);
-               else if (tableOption.equalsIgnoreCase("SNAPPY"))
-                   colDesc.setCompactionCompressionType(Algorithm.SNAPPY); 
-               returnStatus.setColumnDescriptorChanged();
-               break ;
-           case HBASE_PREFIX_LENGTH_KEY:
-               desc.setValue(KeyPrefixRegionSplitPolicy.PREFIX_LENGTH_KEY,
-                             tableOption);
-               returnStatus.setTableDescriptorChanged();
-               break ;
-           case HBASE_EVICT_BLOCKS_ON_CLOSE:
-               if (tableOption.equalsIgnoreCase(trueStr))
-                   colDesc.setEvictBlocksOnClose(true);
-               else
-                   colDesc.setEvictBlocksOnClose(false);
-               returnStatus.setColumnDescriptorChanged();
-               break ;
-           case HBASE_KEEP_DELETED_CELLS:
-               if (tableOption.equalsIgnoreCase(trueStr))
-                   colDesc.setKeepDeletedCells(true);
-               else
-                   colDesc.setKeepDeletedCells(false);
-               returnStatus.setColumnDescriptorChanged();
-               break ;
-           case HBASE_REPLICATION_SCOPE:
-               colDesc.setScope
-                   (Integer.parseInt(tableOption));
-               returnStatus.setColumnDescriptorChanged();
-               break ;
-           case HBASE_MAX_FILESIZE:
-               desc.setMaxFileSize
-                   (Long.parseLong(tableOption));
-               returnStatus.setTableDescriptorChanged();
-               break ;
-           case HBASE_COMPACT:
-              if (tableOption.equalsIgnoreCase(trueStr))
-                   desc.setCompactionEnabled(true);
-               else
-                   desc.setCompactionEnabled(false); 
-               returnStatus.setTableDescriptorChanged();
-               break ;
-           case HBASE_DURABILITY:
-               if (tableOption.equalsIgnoreCase("ASYNC_WAL"))
-                   desc.setDurability(Durability.ASYNC_WAL);
-               else if (tableOption.equalsIgnoreCase("FSYNC_WAL"))
-                   desc.setDurability(Durability.FSYNC_WAL);
-               else if (tableOption.equalsIgnoreCase("SKIP_WAL"))
-                   desc.setDurability(Durability.SKIP_WAL);
-               else if (tableOption.equalsIgnoreCase("SYNC_WAL"))
-                   desc.setDurability(Durability.SYNC_WAL);
-               else if (tableOption.equalsIgnoreCase("USE_DEFAULT"))
-                   desc.setDurability(Durability.USE_DEFAULT);
-               returnStatus.setTableDescriptorChanged(); 
-               break ;
-           case HBASE_MEMSTORE_FLUSH_SIZE:
-               desc.setMemStoreFlushSize
-                   (Long.parseLong(tableOption));
-               returnStatus.setTableDescriptorChanged();
-               break ;
-           case HBASE_SPLIT_POLICY:
-               // This method not yet available in earlier versions
-               // desc.setRegionSplitPolicyClassName(tableOption));
-               desc.setValue(desc.SPLIT_POLICY, tableOption);
-               returnStatus.setTableDescriptorChanged();
-               break ;
-           default:
-               break;
-           }
-       }
-
-       return returnStatus;
-   }
-   
-
-   public boolean createk(String tblName, Object[] tableOptions,
-       Object[]  beginEndKeys, long transID, int numSplits, int keyLength,
-       boolean isMVCC)
-       throws IOException, MasterNotRunningException {
-            if (logger.isDebugEnabled()) logger.debug("HBaseClient.createk(" + tblName + ") called.");
-            String trueStr = "TRUE";
-            cleanupCache(tblName);
-            HTableDescriptor desc = new HTableDescriptor(tblName);
-
-            int defaultVersionsValue = 0;
-            if (isMVCC)
-                defaultVersionsValue = DtmConst.MVCC_MAX_VERSION;
-            else
-                defaultVersionsValue = DtmConst.SSCC_MAX_VERSION;
-
-            // column family names are a space-delimited list of names.
-            // extract all family names and add them to the table descriptor.
-            // All other default and specified options remain the same for all families.
-            String colFamsStr = (String)tableOptions[HBASE_NAME];
-            String[] colFamsArr = colFamsStr.split("\\s+"); 
-
-            for (int i = 0; i < colFamsArr.length; i++){            
-                String colFam = colFamsArr[i];
-
-                HColumnDescriptor colDesc = new HColumnDescriptor(colFam);
-
-                // change the descriptors based on the tableOptions; 
-                setDescriptors(tableOptions,desc /*out*/,colDesc /*out*/, defaultVersionsValue);
-                
-                desc.addFamily(colDesc);
-            }
-
-            HColumnDescriptor metaColDesc = new HColumnDescriptor(DtmConst.TRANSACTION_META_FAMILY);
-            if (isMVCC)
-              metaColDesc.setMaxVersions(DtmConst.MVCC_MAX_DATA_VERSION);
-            else
-              metaColDesc.setMaxVersions(DtmConst.SSCC_MAX_DATA_VERSION);
-            metaColDesc.setInMemory(true);
-            desc.addFamily(metaColDesc);
-            HBaseAdmin admin = new HBaseAdmin(config);
-
-            try {
-               if (beginEndKeys != null && beginEndKeys.length > 0)
-               {
-                  byte[][] keys = new byte[beginEndKeys.length][];
-                  for (int i = 0; i < beginEndKeys.length; i++){
-                     keys[i] = (byte[])beginEndKeys[i]; 
-                     if (logger.isDebugEnabled()) logger.debug("HBaseClient.createk key #" + i + " value " + keys[i] + ".");
-                  }
-                  if (transID != 0) {
-                     table.createTable(desc, keys, numSplits, keyLength, transID);
-                     if (logger.isDebugEnabled()) logger.debug("HBaseClient.createk beginEndKeys(" + beginEndKeys + ") called.");
-                  } else {
-                     admin.createTable(desc, keys);
-                  }
-               }
-               else {
-                  if (transID != 0) {
-                     table.createTable(desc, null, numSplits, keyLength, transID);
-                  } else {
-                     admin.createTable(desc);
-                  }
-               }
-            }
-            catch (IOException e)
-            {
-               if (logger.isDebugEnabled()) logger.debug("HbaseClient.createk : createTable error" + e);
-               throw e;
-            }
-            admin.close();
-        return true;
-    }
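A sketch of how a caller might populate the tableOptions array consumed by createk() and setDescriptors(), indexed by the HBASE_* constants defined at the top of this class (the table name and option values are illustrative):

    String[] tableOptions = new String[HBaseClient.HBASE_SPLIT_POLICY + 1];
    java.util.Arrays.fill(tableOptions, "");                   // empty options are skipped by setDescriptors()
    tableOptions[HBaseClient.HBASE_NAME] = "#1";               // space-delimited column family name(s)
    tableOptions[HBaseClient.HBASE_MAX_VERSIONS] = "3";
    tableOptions[HBaseClient.HBASE_COMPRESSION] = "SNAPPY";
    client.createk("TRAFODION.SCH.T1", tableOptions, null /*beginEndKeys*/,
                   0 /*transID*/, 0 /*numSplits*/, 0 /*keyLength*/, true /*isMVCC*/);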
-
-    public boolean registerTruncateOnAbort(String tblName, long transID)
-        throws MasterNotRunningException, IOException {
-
-        try {
-           if(transID != 0) {
-              table.truncateTableOnAbort(tblName, transID);
-           }
-        }
-        catch (IOException e) {
-           if (logger.isDebugEnabled()) logger.debug("HbaseClient.registerTruncateOnAbort error" + e);
-           throw e;
-        }
-        return true;
-    }
-
-    private void waitForCompletion(String tblName,HBaseAdmin admin) 
-        throws IOException {
-        // poll for completion of an asynchronous operation
-        boolean keepPolling = true;
-        while (keepPolling) {
-            // status.getFirst() returns the number of regions yet to be updated
-            // status.getSecond() returns the total number of regions
-            Pair<Integer,Integer> status = admin.getAlterStatus(tblName.getBytes());
-
-            keepPolling = (status.getFirst() > 0) && (status.getSecond() > 0);
-            if (keepPolling) {
-                try {
-                    Thread.sleep(2000);  // sleep two seconds or until interrupted
-                }
-                catch (InterruptedException e) {
-                    // ignore the interruption and keep going
-                }    
-            }
-        }
-    }
-
-    public boolean alter(String tblName, Object[] tableOptions, long transID)
-        throws IOException, MasterNotRunningException {
-
-        if (logger.isDebugEnabled()) logger.debug("HBaseClient.alter(" + tblName + ") called.");
-        cleanupCache(tblName);
-        HBaseAdmin admin = new HBaseAdmin(config);
-        HTableDescriptor htblDesc = admin.getTableDescriptor(tblName.getBytes());       
-        HColumnDescriptor[] families = htblDesc.getColumnFamilies();
-
-        String colFam = (String)tableOptions[HBASE_NAME];
-        if (colFam == null)
-            return true; // must have col fam name
-
-        // if the only option specified is the col fam name and this family doesn't already
-        // exist, then add it.
-        boolean onlyColFamOptionSpecified = true;
-        for (int i = 0; (onlyColFamOptionSpecified && (i < tableOptions.length)); i++) {
-            if (i == HBASE_NAME)	
-                continue ;
-
-            if (((String)tableOptions[i]).length() != 0)
-                {
-                    onlyColFamOptionSpecified = false;
-                }
-        }
-
-        HColumnDescriptor colDesc = htblDesc.getFamily(colFam.getBytes());
-
-        ChangeFlags status = new ChangeFlags();
-        if (onlyColFamOptionSpecified) {
-            if (colDesc == null) {
-                colDesc = new HColumnDescriptor(colFam);
-                
-                htblDesc.addFamily(colDesc);
-                
-                status.setTableDescriptorChanged();
-            } else
-                return true; // col fam already exists
-        }
-        else {
-            if (colDesc == null)
-                return true; // colDesc must exist
-
-            int defaultVersionsValue = colDesc.getMaxVersions(); 
-
-            status = 
-                setDescriptors(tableOptions,htblDesc /*out*/,colDesc /*out*/, defaultVersionsValue);
-        }
-
-        try {
-            if (transID != 0) {
-                // Transactional alter support
-                table.alter(tblName, tableOptions, transID);
-                if (logger.isDebugEnabled()) logger.debug("HBaseClient.alter(" + tblName + ") called with object length: " + java.lang.reflect.Array.getLength(tableOptions));
-            }
-            else {
-                // the modifyTable and modifyColumn operations are asynchronous,
-                // so we have to have additional code to poll for their completion
-                // (I hear that synchronous versions will be available in HBase 1.x)
-                if (status.tableDescriptorChanged()) {
-                    admin.modifyTable(tblName,htblDesc);
-                    waitForCompletion(tblName,admin);
-                }
-                else if (status.columnDescriptorChanged()) {
-                    admin.modifyColumn(tblName,colDesc);                  
-                    waitForCompletion(tblName,admin);
-                }
-                admin.close();
-            }
-        }
-        catch (IOException e) {
-            if (logger.isDebugEnabled()) logger.debug("HBaseClient.alter error: " + e);
-            throw e;
-        }
-
-        cleanupCache(tblName);
-        return true;
-    }
-
-    public boolean drop(String tblName, long transID)
-             throws MasterNotRunningException, IOException {
-        if (logger.isDebugEnabled()) logger.debug("HBaseClient.drop(" + tblName + ") called.");
-        HBaseAdmin admin = new HBaseAdmin(config);
-        //			admin.disableTableAsync(tblName);
-
-        try {
-           if(transID != 0) {
-              table.dropTable(tblName, transID);
-           }
-           else {
-              admin.disableTable(tblName);
-              admin.deleteTable(tblName);
-              admin.close();
-           }
-        }
-        catch (IOException e) {
-           if (logger.isDebugEnabled()) logger.debug("HbaseClient.drop  error" + e);
-           throw e;
-        }
-
-        return cleanupCache(tblName);
-    }
-
-    public boolean dropAll(String pattern) 
-             throws MasterNotRunningException, IOException {
-            if (logger.isDebugEnabled()) logger.debug("HBaseClient.dropAll(" + pattern + ") called.");
-            HBaseAdmin admin = new HBaseAdmin(config);
-
-	    HTableDescriptor[] htdl = admin.listTables(pattern);
-	    if (htdl == null) // no tables match the given pattern.
-		return true;
-
-	    for (HTableDescriptor htd : htdl) {
-		String tblName = htd.getNameAsString();
-
-                // do not drop DTM log files which have the format: TRAFODION._DTM_.*
-                int idx = tblName.indexOf("TRAFODION._DTM_");
-                if (idx == 0)
-                    continue;
-                
-                //                System.out.println(tblName);
-                admin.disableTable(tblName);
-                admin.deleteTable(tblName);
-	    }
- 	    
-            admin.close();
-            return cleanup();
-    }
-
-    public ByteArrayList listAll(String pattern) 
-             throws MasterNotRunningException, IOException {
-            if (logger.isDebugEnabled()) logger.debug("HBaseClient.listAll(" + pattern + ") called.");
-            HBaseAdmin admin = new HBaseAdmin(config);
-
-            ByteArrayList hbaseTables = new ByteArrayList();
-
-	    HTableDescriptor[] htdl = 
-                (pattern.isEmpty() ? admin.listTables() : admin.listTables(pattern));
-
-	    for (HTableDescriptor htd : htdl) {
-		String tblName = htd.getNameAsString();
-
-                //                System.out.println(tblName);
-
-                byte[] b = tblName.getBytes();
-                hbaseTables.add(b);
-	    }
- 	    
-            admin.close();
-            cleanup();
-            
-            return hbaseTables;
-    }
-
-    public boolean copy(String currTblName, String oldTblName)
-	throws MasterNotRunningException, IOException, SnapshotCreationException, InterruptedException {
-            if (logger.isDebugEnabled()) logger.debug("HBaseClient.copy(" + currTblName + oldTblName + ") called.");
-            HBaseAdmin admin = new HBaseAdmin(config);
-	    
-	    String snapshotName = currTblName + "_SNAPSHOT";
-	    
-	    List<SnapshotDescription> l = new ArrayList<SnapshotDescription>(); 
-	    //	    l = admin.listSnapshots(snapshotName);
-	    l = admin.listSnapshots();
-	    if (! l.isEmpty())
-		{
-		    for (SnapshotDescription sd : l) {
-			//			System.out.println("here 1");
-			//			System.out.println(snapshotName);
-			//			System.out.println(sd.getName());
-			if (sd.getName().compareTo(snapshotName) == 0)
-			    {
-				//				System.out.println("here 2");
-				//			    admin.enableTable(snapshotName);
-				//				System.out.println("here 3");
-				admin.deleteSnapshot(snapshotName);
-				//				System.out.println("here 4");
-			    }
-		    }
-		}
-	    //	    System.out.println(snapshotName);
-	    if (! admin.isTableDisabled(currTblName))
-		admin.disableTable(currTblName);
-	    //	    System.out.println("here 5");
-	    admin.snapshot(snapshotName, currTblName);
-	    admin.cloneSnapshot(snapshotName, oldTblName);
-	    admin.deleteSnapshot(snapshotName);
-	    //	    System.out.println("here 6");
-	    admin.enableTable(currTblName);
-            admin.close();
-            return true;
-    }
-
-    public boolean exists(String tblName)  
-           throws MasterNotRunningException, IOException {
-            if (logger.isDebugEnabled()) logger.debug("HBaseClient.exists(" + tblName + ") called.");
-            HBaseAdmin admin = new HBaseAdmin(config);
-            boolean result = admin.tableExists(tblName);
-            admin.close();
-            return result;
-    }
-
-    public HTableClient getHTableClient(long jniObject, String tblName, 
-                  boolean useTRex) throws IOException 
-    {
-       if (logger.isDebugEnabled()) logger.debug("HBaseClient.getHTableClient(" + tblName
-                         + (useTRex ? ", use TRX" : ", no TRX") + ") called.");
-       HTableClient htable = hTableClientsFree.get(tblName);
-       if (htable == null) {
-          htable = new HTableClient();
-          if (htable.init(tblName, useTRex) == false) {
-             if (logger.isDebugEnabled()) logger.debug("  ==> Error in init(), returning empty.");
-             return null;
-          }
-          if (logger.isDebugEnabled()) logger.debug("  ==> Created new object.");
-          hTableClientsInUse.put(htable.getTableName(), htable);
-          htable.setJniObject(jniObject);
-          return htable;
-       } else {
-            if (logger.isDebugEnabled()) logger.debug("  ==> Returning existing object, removing from container.");
-            hTableClientsInUse.put(htable.getTableName(), htable);
-            htable.resetAutoFlush();
-           htable.setJniObject(jniObject);
-            return htable;
-       }
-    }
-
-
-    public void releaseHTableClient(HTableClient htable) 
-                    throws IOException {
-        if (htable == null)
-            return;
-	                
-        if (logger.isDebugEnabled()) logger.debug("HBaseClient.releaseHTableClient(" + htable.getTableName() + ").");
-        boolean cleanJniObject = false;
-        if (htable.release(cleanJniObject))
-        // If the thread is interrupted, then remove the table from cache
-        // because the table connection is retried when the table is used
-        // next time
-
-           cleanupCache(htable.getTableName());
-        else
-        {
-           if (hTableClientsInUse.remove(htable.getTableName(), htable))
-              hTableClientsFree.put(htable.getTableName(), htable);
-           else
-              if (logger.isDebugEnabled()) logger.debug("Table not found in inUse Pool");
-        }
-    }
-
-    public boolean flushAllTables() throws IOException {
-        if (logger.isDebugEnabled()) logger.debug("HBaseClient.flushAllTables() called.");
-       if (hTableClientsInUse.isEmpty()) {
-          return true;
-        }
-        for (HTableClient htable : hTableClientsInUse.values()) {
-		  htable.flush();
-        }
-	return true; 
-    }
-
-    public boolean grant(byte[] user, byte[] tblName,
-                         Object[] actionCodes) throws IOException {
-        if (logger.isDebugEnabled()) logger.debug("HBaseClient.grant(" + new String(user) + ", "
-                     + new String(tblName) + ") called.");
-		byte[] colFamily = null;
-
-		Permission.Action[] assigned = new Permission.Action[actionCodes.length];
-		for (int i = 0 ; i < actionCodes.length; i++) {
-			String actionCode = (String)actionCodes[i];
-			assigned[i] = Permission.Action.valueOf(actionCode);
-		}
-
-	    //HB98
-	    TableName htblName = TableName.valueOf(new String(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME)
-						   ,new String(tblName));
-            UserPermission userPerm = new UserPermission(user, htblName,
-                                                         colFamily, assigned);
-
-            AccessController accessController = new AccessController();
-	    //HB98 The grant() method is very different in HB98 (commenting out for now)
-            //accessController.grant(userPerm);
-        return true;
-    }
-
-   public boolean revoke(byte[] user, byte[] tblName,
-                          Object[] actionCodes) 
-                     throws IOException {
-        if (logger.isDebugEnabled()) logger.debug("HBaseClient.revoke(" + new String(user) + ", "
-                     + new String(tblName) + ") called.");
-        byte[] colFamily = null;
-
-        Permission.Action[] assigned = new Permission.Action[actionCodes.length];
-        for (int i = 0 ; i < actionCodes.length; i++) {
-            String actionCode = (String)actionCodes[i];
-            assigned[i] = Permission.Action.valueOf(actionCode);
-        }
-
-	    //HB98
-	    TableName htblName = TableName.valueOf(new String(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME)
-						   ,new String(tblName));
-            UserPermission userPerm = new UserPermission(user, htblName,
-                                                         colFamily, assigned);
-
-            AccessController accessController = new AccessController();
-	    
-	    //HB98 The revoke() method is very different in HB98 (commenting out for now)
-            //accessController.revoke(userPerm);
-        return true;
-    }
-
-    // Debugging method to display initial set of KeyValues and sequence
-    // of column qualifiers.
-    private void printQualifiers(HFile.Reader reader, int maxKeys) 
-                 throws IOException {
-      String qualifiers = new String();
-      HFileScanner scanner = reader.getScanner(false, false, false);
-      scanner.seekTo();
-      int kvCount = 0;
-      int nonPuts = 0;
-      do {
-        KeyValue kv = scanner.getKeyValue();
-        System.out.println(kv.toString());
-        if (kv.getType() == KeyValue.Type.Put.getCode())
-          qualifiers = qualifiers + kv.getQualifier()[0] + " ";
-        else
-          nonPuts++;
-      } while (++kvCount < maxKeys && scanner.next());
-      System.out.println("First " + kvCount + " column qualifiers: " + qualifiers);
-      if (nonPuts > 0)
-        System.out.println("Encountered " + nonPuts + " non-PUT KeyValue types.");
-    }
-
-    // Estimates the number of rows still in the MemStores of the regions
-    // associated with the passed table name. The number of bytes in the
-    // MemStores is divided by the passed row size in bytes, which is
-    // derived by comparing the row count for an HFile (which in turn is
-    // derived from the number of KeyValues in the file and the number of
-    // columns in the table) to the size of the HFile.
-    private long estimateMemStoreRows(String tblName, int rowSize)
-                 throws MasterNotRunningException, IOException {
-      if (rowSize == 0)
-        return 0;
-
-      HBaseAdmin admin = new HBaseAdmin(config);
-      HTable htbl = new HTable(config, tblName);
-      long totalMemStoreBytes = 0;
-      try {
-        // Get a set of all the regions for the table.
-        Set<HRegionInfo> tableRegionInfos = htbl.getRegionLocations().keySet();
-        Set tableRegions = new TreeSet(Bytes.BYTES_COMPARATOR);
-        for (HRegionInfo regionInfo : tableRegionInfos) {
-          tableRegions.add(regionInfo.getRegionName());
-        }
-     
-        // Get collection of all servers in the cluster.
-        ClusterStatus clusterStatus = admin.getClusterStatus();
-        Collection<ServerName> servers = clusterStatus.getServers();
-        final long bytesPerMeg = 1024L * 1024L;
-     
-        // For each server, look at each region it contains and see if
-        // it is in the set of regions for the table. If so, add its
-        // MemStore size to the running total.
-        for (ServerName serverName : servers) {
-          ServerLoad serverLoad = clusterStatus.getLoad(serverName);
-          for (RegionLoad regionLoad: serverLoad.getRegionsLoad().values()) {
-            byte[] regionId = regionLoad.getName();
-            if (tableRegions.contains(regionId)) {
-              long regionMemStoreBytes = bytesPerMeg * regionLoad.getMemStoreSizeMB();
-              if (logger.isDebugEnabled()) logger.debug("Region " + regionLoad.getNameAsString()
-                           + " has MemStore size " + regionMemStoreBytes);
-              totalMemStoreBytes += regionMemStoreBytes;
-            }
-          }
-        }
-      }
-      finally {
-        admin.close();
-      }
-
-      // Divide the total MemStore size by the size of a single row.
-      if (logger.isDebugEnabled()) logger.debug("Estimating " + (totalMemStoreBytes / rowSize)
-                   + " rows in MemStores of table's regions.");
-      return totalMemStoreBytes / rowSize;
-    }
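For a sense of scale (numbers invented for illustration): if the table's regions together report 64 MB of MemStore data and the derived row size is 200 bytes, the estimate is 64 * 1024 * 1024 / 200 = 335,544 rows, which estimateRowCount() then adds to the HFile-based estimate.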
-
-
-    public float getBlockCacheFraction()
-    {
-        float defCacheFraction = 0.4f;
-        return config.getFloat("hfile.block.cache.size",defCacheFraction);
-    }
-    // Estimates row count for tblName by iterating over the HFiles for
-    // the table, extracting the KeyValue entry count from the file's
-    // trailer block, summing the counts, and dividing by the number of
-    // columns in the table. An adjustment is made for the estimated
-    // number of missing (null) values by sampling the first several
-    // hundred KeyValues to see how many are missing.
-    public boolean estimateRowCount(String tblName, int partialRowSize,
-                                    int numCols, long[] rc)
-                   throws MasterNotRunningException, IOException, ClassNotFoundException, URISyntaxException {
-      if (logger.isDebugEnabled()) logger.debug("HBaseClient.estimateRowCount(" + tblName + ") called.");
-
-      final String REGION_NAME_PATTERN = "[0-9a-f]*";
-      final String HFILE_NAME_PATTERN  = "[0-9a-f]*";
-
-      // To estimate incidence of nulls, read the first 500 rows worth
-      // of KeyValues.
-      final int ROWS_TO_SAMPLE = 500;
-      int putKVsSampled = 0;
-      int nonPutKVsSampled = 0;
-      int nullCount = 0;
-      long totalEntries = 0;   // KeyValues in all HFiles for table
-      long totalSizeBytes = 0; // Size of all HFiles for table 
-      long estimatedTotalPuts = 0;
-      boolean more = true;
-
-      // Access the file system to go directly to the table's HFiles.
-      // Create a reader for the file to access the entry count stored
-      // in the trailer block, and a scanner to iterate over a few
-      // hundred KeyValues to estimate the incidence of nulls.
-      long nano1, nano2;
-      nano1 = System.nanoTime();
-      FileSystem fileSystem = FileSystem.get(config);
-      nano2 = System.nanoTime();
-      if (logger.isDebugEnabled()) logger.debug("FileSystem.get() took " + ((nano2 - nano1) + 500000) / 1000000 + " milliseconds.");
-      CacheConfig cacheConf = new CacheConfig(config);
-      String hbaseRootPath = config.get(HConstants.HBASE_DIR).trim();
-      if (hbaseRootPath.charAt(0) != '/')
-        hbaseRootPath = new URI(hbaseRootPath).getPath();
-      if (logger.isDebugEnabled()) logger.debug("hbaseRootPath = " + hbaseRootPath);
-      FileStatus[] fsArr = fileSystem.globStatus(new Path(
-                               hbaseRootPath + "/data/default/" +
-                               tblName + "/" + REGION_NAME_PATTERN +
-                               "/#1/" + HFILE_NAME_PATTERN));
-      for (FileStatus fs : fsArr) {
-        // Make sure the file name conforms to HFile name pattern.
-        if (!StoreFileInfo.isHFile(fs.getPath())) {
-          if (logger.isDebugEnabled()) logger.debug("Skipped file " + fs.getPath() + " -- not a valid HFile name.");
-          continue;
-        }
-        HFile.Reader reader = HFile.createReader(fileSystem, fs.getPath(), cacheConf, config);
-        try {
-          totalEntries += reader.getEntries();
-          totalSizeBytes += reader.length();
-          //printQualifiers(reader, 100);
-          if (ROWS_TO_SAMPLE > 0 &&
-              totalEntries == reader.getEntries()) {  // first file only
-            // Trafodion column qualifiers are ordinal numbers, which
-            // makes it easy to count missing (null) values. We also count
-            // the non-Put KVs (typically delete-row markers) to estimate
-            // their frequency in the full file set.
-            HFileScanner scanner = reader.getScanner(false, false, false);
-            scanner.seekTo();  //position at beginning of first data block
-            byte currQual = 0;
-            byte nextQual;
-            do {
-              KeyValue kv = scanner.getKeyValue();
-              if (kv.getType() == KeyValue.Type.Put.getCode()) {
-                nextQual = kv.getQualifier()[0];
-                if (nextQual <= currQual)
-                  nullCount += ((numCols - currQual)  // nulls at end of this row
-                              + (nextQual - 1));      // nulls at start of next row
-                else
-                  nullCount += (nextQual - currQual - 1);
-                currQual = nextQual;
-                putKVsSampled++;
-              } else {
-                nonPutKVsSampled++;  // don't count these toward the number
-              }                      //   we want to scan
-            } while ((putKVsSampled + nullCount) < (numCols * ROWS_TO_SAMPLE)
-                     && (more = scanner.next()));
-
-            // If all rows were read, count any nulls at end of last row.
-            if (!more && putKVsSampled > 0)
-              nullCount += (numCols - currQual);
-
-            if (logger.isDebugEnabled()) logger.debug("Sampled " + nullCount + " nulls.");
-          }  // code for first file
-        } finally {
-          reader.close(false);
-        }
-      } // for
-
-      long estimatedEntries = (ROWS_TO_SAMPLE > 0
-                                 ? 0               // get from sample data, below
-                                 : totalEntries);  // no sampling, use stored value
-      if (putKVsSampled > 0) // avoid div by 0 if no Put KVs in sample
-        {
-          estimatedTotalPuts = (putKVsSampled * totalEntries) / 
-                               (putKVsSampled + nonPutKVsSampled);
-          estimatedEntries = ((putKVsSampled + nullCount) * estimatedTotalPuts)
-                                   / putKVsSampled;
-        }
-
-      // Calculate estimate of rows in all HFiles of table.
-      rc[0] = (estimatedEntries + (numCols/2)) / numCols; // round instead of truncate
-
-      // Estimate # of rows in MemStores of all regions of table. Pass
-      // a value to divide the size of the MemStore by. Base this on the
-      // ratio of bytes-to-rows in the HFiles, or the actual row size if
-      // the HFiles were empty.
-      int rowSize;
-      if (rc[0] > 0)
-        rowSize = (int)(totalSizeBytes / rc[0]);
-      else {
-        // From Traf metadata we have calculated and passed in part of the row
-        // size, including size of column qualifiers (col names), which are not
-        // known to HBase.  Add to this the length of the fixed part of the
-        // KeyValue format, times the number of columns.
-        int fixedSizePartOfKV = KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE // key len + value len
-                              + KeyValue.KEY_INFRASTRUCTURE_SIZE;     // rowkey & col family len, timestamp, key type
-        rowSize = partialRowSize   // for all cols: row key + col qualifiers + values
-                      + (fixedSizePartOfKV * numCols);
-
-        // Trafodion tables have a single col family at present, so we only look
-        // at the first family name, and multiply its length times the number of
-        // columns. Even if more than one family is used in the future, presumably
-        // they will all be the same short size.
-        HTable htbl = new HTable(config, tblName);
-        HTableDescriptor htblDesc = htbl.getTableDescriptor();
-        HColumnDescriptor[] families = htblDesc.getColumnFamilies();
-        rowSize += (families[0].getName().length * numCols);
-      }
-
-      // Get the estimate of MemStore rows. Add to total after logging
-      // of individual sums below.
-      long memStoreRows = estimateMemStoreRows(tblName, rowSize);
-
-      if (logger.isDebugEnabled()) logger.debug(tblName + " contains a total of " + totalEntries + " KeyValues in all HFiles.");
-      if (logger.isDebugEnabled()) logger.debug("Based on a sample, it is estimated that " + estimatedTotalPuts +
-                   " of these KeyValues are of type Put.");
-      if (putKVsSampled + nullCount > 0)
-        if (logger.isDebugEnabled()) logger.debug("Sampling indicates a null incidence of " + 
-                     (nullCount * 100)/(putKVsSampled + nullCount) +
-                     " percent.");
-      if (logger.isDebugEnabled()) logger.debug("Estimated number of actual values (including nulls) is " + estimatedEntries);
-      if (logger.isDebugEnabled()) logger.debug("Estimated row count in HFiles = " + estimatedEntries +
-                   " / " + numCols + " (# columns) = " + rc[0]);
-      if (logger.isDebugEnabled()) logger.debug("Estimated row count from MemStores = " + memStoreRows);
-
-      rc[0] += memStoreRows;  // Add memstore estimate to total
-      if (logger.isDebugEnabled()) logger.debug("Total estimated row count for " + tblName + " = " + rc[0]);
-      return true;
-    }
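A worked example of the arithmetic above, with invented sample values: suppose the table has numCols = 5, the HFiles hold totalEntries = 1,000,000 KeyValues, and the sample yields putKVsSampled = 2,400, nonPutKVsSampled = 100, and nullCount = 100. Then estimatedTotalPuts = 2,400 * 1,000,000 / 2,500 = 960,000; estimatedEntries = (2,400 + 100) * 960,000 / 2,400 = 1,000,000; and the HFile row estimate is (1,000,000 + 2) / 5 = 200,000 rows, to which the MemStore estimate is added.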
-
-
-    /**
-    This method returns node names where Hbase Table regions reside
-    **/
-    public boolean getRegionsNodeName(String tblName, String[] nodeNames)
-                   throws IOException
-    {
-      if (logger.isDebugEnabled()) 
-        logger.debug("HBaseClient.getRegionsNodeName(" + tblName + ") called.");
-
-      HRegionInfo regInfo = null;
-
-
-      HTable htbl = new HTable(config, tblName);
-      if (logger.isDebugEnabled())
-         logger.debug("after HTable call in getRegionsNodeName");
-
-      try {
-        NavigableMap<HRegionInfo, ServerName> locations = htbl.getRegionLocations();
-        if (logger.isDebugEnabled())
-           logger.debug("after htable.getRegionLocations call in getRegionsNodeName");
-
-      
-        String hostName;
-        int regCount = 0;
-
-        for (Map.Entry<HRegionInfo, ServerName> entry: locations.entrySet()) {
-          if (logger.isDebugEnabled()) logger.debug("Entered for loop in getRegionsNodeName");
-          regInfo = entry.getKey();
-          hostName = entry.getValue().getHostname();
-          nodeNames[regCount] = hostName;
-          if (logger.isDebugEnabled()) logger.debug("Hostname for region " + regCount + " is " + hostName);
-          regCount++;
-        }
-      } catch (Exception ie) {
-        if (logger.isDebugEnabled())
-          logger.debug("getRegionLocations throws exception " + ie.getMessage());
-        return false;
-      }
-
-      return true;
-    }
-
-
-
-    /**
-    This method returns index levels and block size of Hbase Table.
-    Index level is read from  Hfiles trailer block. Randomly selects one region and iterates through all Hfiles
-    in the chosen region and gets the maximum index level.
-    Block size is read from HColumnDescriptor.
-    **/
-    public boolean getHbaseTableInfo(String tblName, int[] tblInfo)
-                   throws MasterNotRunningException, IOException, ClassNotFoundException, URISyntaxException {
-
-      if (logger.isDebugEnabled()) logger.debug("HBaseClient.getHbaseTableInfo(" + tblName + ") called.");
-      final String REGION_NAME_PATTERN = "[0-9a-f]*";
-      final String HFILE_NAME_PATTERN  = "[0-9a-f]*";
-
-      // initialize 
-      int indexLevel = 0;
-      int currIndLevel = 0;
-      int blockSize = 0;
-      tblInfo[0] = indexLevel;
-      tblInfo[1] = blockSize;
-
-      // get block size
-      HTable htbl = new HTable(config, tblName);
-      HTableDescriptor htblDesc = htbl.getTableDescriptor();
-      HColumnDescriptor[] families = htblDesc.getColumnFamilies();
-      blockSize = families[0].getBlocksize();
-      tblInfo[1] = blockSize;
-
-      // Access the file system to go directly to the table's HFiles.
-      long nano1 = 0, nano2 = 0;
-      if (logger.isDebugEnabled())
-        nano1 = System.nanoTime();
-      FileSystem fileSystem = FileSystem.get(config);
-
-      if (logger.isDebugEnabled()) {
-        nano2 = System.nanoTime();
-        logger.debug("FileSystem.get() took " + ((nano2 - nano1) + 500000) / 1000000 + " milliseconds.");
-      }
-      CacheConfig cacheConf = new CacheConfig(config);
-      String hbaseRootPath = config.get(HConstants.HBASE_DIR).trim();
-      if (hbaseRootPath.charAt(0) != '/')
-        hbaseRootPath = new URI(hbaseRootPath).getPath();
-      if (logger.isDebugEnabled()) logger.debug("hbaseRootPath = " + hbaseRootPath);
-
-      String regDir = hbaseRootPath + "/data/default/" + 
-                      tblName + "/" + REGION_NAME_PATTERN + "/#1";
-      if (logger.isDebugEnabled()) logger.debug("region dir = " + regDir);
-
-      //get random region from the list of regions and look at all Hfiles in that region
-      FileStatus[] regArr;
-      try {
-        regArr = fileSystem.globStatus(new Path(regDir));
-      } catch (IOException ioe) {
-        if (logger.isDebugEnabled()) logger.debug("fs.globStatus on region throws IOException");
-        return false; // return index level = 0; and  block size
-      }
-      
-      // logging
-      if (logger.isDebugEnabled()) {
-        for (int i =0; i < regArr.length; i++) 
-          logger.debug("Region Path is " + regArr[i].getPath());
-      }
-      // get random region from the region array
-      int regInd = 0;
-      regInd = tblName.hashCode() % regArr.length;
-
-      Path regName = regArr[regInd].getPath();
-      // extract the MD5 hash name of the random region from its path, including the colFam name.
-      // we just need part2, which looks something like /c8fe2d575de62d5d5ffc530bda497bca/#1
-      String strRegPath = regName.toString();
-      String parts[] = strRegPath.split(tblName);
-      String part2 = parts[1];
-
-      // now remove the regular expression from the region path.
-      // it would look something like /hbase/data/default/<cat.sch.tab>/[0-9a-f]*/#1
-      int j = regDir.indexOf("/[");
-      String regPrefix = regDir.substring(0,j);
-      if (logger.isDebugEnabled()) logger.debug("Region Path prefix = " + regPrefix);
-      String hfilePath = regPrefix + part2 + "/" + HFILE_NAME_PATTERN;
-      
-      if (logger.isDebugEnabled()) logger.debug("Random = " + regInd + ", region is " + regName);
-      if (logger.isDebugEnabled()) logger.debug("Hfile path = " + hfilePath);
-
-      FileStatus[] fsArr;
-      try {
-        fsArr = fileSystem.globStatus(new Path(hfilePath));
-      } catch (IOException ioe) {
-        if (logger.isDebugEnabled()) logger.debug("fs.globStatus on Hfile throws IOException");
-        return false; // return index level = 0; and  block size
-      }
-
-      if (logger.isDebugEnabled()) {
-        for (int i =0; i < fsArr.length; i++)
-          logger.debug("Hfile Path is " + fsArr[i].getPath());
-      }
-     
-      // no HFiles; return from here
-      if (fsArr.length == 0)
-        return true; // return index level = 0; and  block size
-
-      // get maximum index level going through all Hfiles of randomly chosen region
-      if (logger.isDebugEnabled())
-        nano1 = System.nanoTime();
-      for (FileStatus fs : fsArr) {
-        // Make sure the file name conforms to HFile name pattern.
-        if (!StoreFileInfo.isHFile(fs.getPath())) {
-          if (logger.isDebugEnabled()) logger.debug("Skipped file " + fs.getPath() + " -- not a valid HFile name.");
-          continue;
-        }
-
-        // Create a reader for the file to access the index levels stored
-        // in the trailer block
-        HFile.Reader reader = HFile.createReader(fileSystem, fs.getPath(), cacheConf, config);
-        try {
-          FixedFileTrailer trailer = reader.getTrailer();
-          currIndLevel = trailer.getNumDataIndexLevels();
-          // index levels also include the data block level, which should be excluded.
-          if (currIndLevel > 0)
-            currIndLevel = currIndLevel - 1;
-          if (logger.isDebugEnabled()) 
-            logger.debug("currIndLevel = " + currIndLevel+ ", indexLevel = " + indexLevel);
-          if (currIndLevel > indexLevel)
-            indexLevel = currIndLevel;
-       } finally {
-         reader.close(false);
-       }
-      } // for
-
-      if (logger.isDebugEnabled()) {
-        nano2 = System.nanoTime();
-        logger.debug("get index level took " + ((nano2 - nano1) + 500000) / 1000000 + " milliseconds.");
-      }
-
-      tblInfo[0] = indexLevel;
-      if (logger.isDebugEnabled()) {
-        logger.debug("Index Levels for " + tblName + " = " + tblInfo[0]);
-        logger.debug("Block Size for " + tblName + " = " + tblInfo[1]);
-      }
-      
-      return true;
-    }
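A minimal call sketch (the table name is illustrative): the two output slots hold the index level and the block size, in that order.

    int[] tblInfo = new int[2];
    client.getHbaseTableInfo("TRAFODION.SCH.T1", tblInfo);   // hypothetical table name
    int indexLevels = tblInfo[0];
    int blockSize   = tblInfo[1];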
-
-    void printCell(KeyValue kv) {
-        String rowID = new String(kv.getRow());
-        String colFamily = new String(kv.getFamily());
-        String colName = new String(kv.getQualifier());
-        String colValue = new String(kv.getValue());
-        String row = rowID + ", " + colFamily + ", " + colName + ", "
-            + colValue + ", " + kv.getTimestamp();
-        System.out.println(row);
-    }
-
-    
-  public  HBulkLoadClient getHBulkLoadClient() throws IOException 
-  {
-    if (logger.isDebugEnabled()) logger.debug("HBaseClient.getHBulkLoadClient() called.");
-    HBulkLoadClient hblc = null;
-    try 
-    {
-       hblc = new HBulkLoadClient( config);
-    
-    if (hblc == null)
-      throw new IOException ("hblc is null");
-    }
-    catch (IOException e)
-    {
-      return null;
-    }
-    
-    return hblc;
-    
-  }
-  public void releaseHBulkLoadClient(HBulkLoadClient hblc) 
-      throws IOException 
-  {
-     if (hblc == null)
-       return;
-          
-      if (logger.isDebugEnabled()) logger.debug("HBaseClient.releaseHBulkLoadClient().");
-      hblc.release();
-   }
-  
-  //returns the latest snapshot name for a table. returns null if table has no snapshots
-  //associated with it
-  public String getLatestSnapshot(String tabName) throws IOException
-  {
-    HBaseAdmin admin = new HBaseAdmin(config);
-    List<SnapshotDescription> snapDescs = admin.listSnapshots();
-    long maxTimeStamp = 0;
-    String latestsnpName = null;
-    for (SnapshotDescription snp :snapDescs )
-    {
-      if (snp.getTable().compareTo(tabName) == 0 && 
-          snp.getCreationTime() > maxTimeStamp)
-      {
-        latestsnpName= snp.getName();
-        maxTimeStamp = snp.getCreationTime();
-      }
-      
-    }
-    admin.close();
-    admin = null;
-    return latestsnpName;
-  }
-  public boolean cleanSnpScanTmpLocation(String pathStr) throws Exception
-  {
-    if (logger.isDebugEnabled()) logger.debug("HbaseClient.cleanSnpScanTmpLocation() - start - Path: " + pathStr);
-    try 
-    {
-      Path delPath = new Path(pathStr );
-      delPath = delPath.makeQualified(delPath.toUri(), null);
-      FileSystem fs = FileSystem.get(delPath.toUri(),config);
-      fs.delete(delPath, true);
-    }
-    catch (IOException e)
-    {
-      if (logger.isDebugEnabled()) logger.debug("HbaseClient.cleanSnpScanTmpLocation() --exception:" + e);
-      throw e;
-    }
-    
-    return true;
-  }
-  private boolean updatePermissionForEntries(FileStatus[] entries, String hbaseUser, FileSystem fs) throws IOException 
-  {
-    if (entries == null) {
-      return true;
-    }
-    
-    for (FileStatus child : entries) {
-      Path path = child.getPath();
-      List<AclEntry> lacl = AclEntry.parseAclSpec("user:" + hbaseUser + ":rwx", true) ;
-      try 
-      {
-        fs.modifyAclEntries(path, lacl);
-      }
-      catch (IOException e)
-      {
-        //if failure just log exception and continue
-        if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.updatePermissionForEntries() exception. " + e);
-      }
-      if (child.isDir()) 
-      {
-        FileStatus[] files = FSUtils.listStatus(fs,path);
-        updatePermissionForEntries(files,hbaseUser, fs);
-      } 
-    }
-    return true;
-  }
-  
-  public boolean setArchivePermissions( String tabName) throws IOException,ServiceException
-  {
-    if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.setArchivePermissions() called. ");
-    Path rootDir = FSUtils.getRootDir(config);
-    FileSystem myfs = FileSystem.get(rootDir.toUri(),config);
-    FileStatus fstatus = myfs.getFileStatus(rootDir);
-    String hbaseUser = fstatus.getOwner(); 
-    assert (hbaseUser != null && hbaseUser.length() != 0);
-    Path tabArcPath = HFileArchiveUtil.getTableArchivePath(config,  TableName.valueOf(tabName));
-    if (tabArcPath == null)
-      return true;
-    List<AclEntry> lacl = AclEntry.parseAclSpec("user:" + hbaseUser + ":rwx", true) ;
-    try
-    {
-      myfs.modifyAclEntries(tabArcPath, lacl);
-    }
-    catch (IOException e)
-    {
-      //if failure just log exception and continue
-      if (logger.isTraceEnabled()) logger.trace("[Snapshot Scan] SnapshotScanHelper.setArchivePermissions() exception. " + e);
-    }
-    FileStatus[] files = FSUtils.listStatus(myfs,tabArcPath);
-    updatePermissionForEntries(files,  hbaseUser, myfs); 
-    return true;
-  }
-
-  public int startGet(long jniObject, String tblName, boolean useTRex, long transID, byte[] rowID,
-                        Object[] columns, long timestamp)
-                        throws IOException {
-      HTableClient htc = getHTableClient(jniObject, tblName, useTRex);
-      return htc.startGet(transID, rowID, columns, timestamp);
-  }
-
-  public int startGet(long jniObject, String tblName, boolean useTRex, long transID, Object[] rowIDs,
-                        Object[] columns, long timestamp)
-                        throws IOException {
-      HTableClient htc = getHTableClient(jniObject, tblName, useTRex);
-      return htc.startGet(transID, rowIDs, columns, timestamp);
-  }
-
-  public int startGet(long jniObject, String tblName, boolean useTRex, long transID, short rowIDLen, Object rowIDs,
-                        Object[] columns)
-                        throws IOException {
-      HTableClient htc = getHTableClient(jniObject, tblName, useTRex);
-      return htc.getRows(transID, rowIDLen, rowIDs, columns);
-  }
-
-  public boolean insertRow(long jniObject, String tblName, boolean useTRex, long transID, byte[] rowID,
-                         Object row,
-                         long timestamp,
-                         boolean checkAndPut,
-                         boolean asyncOperation) throws IOException, InterruptedException, ExecutionException {
-
-      HTableClient htc = getHTableClient(jniObject, tblName, useTRex);
-      boolean ret = htc.putRow(transID, rowID, row, null, null,
-                                checkAndPut, asyncOperation);
-      if (asyncOperation == true)
-         htc.setJavaObject(jniObject);
-      else
-         releaseHTableClient(htc);
-      return ret;
-  }
-
-  public boolean checkAndUpdateRow(long jniObject, String tblName, boolean useTRex, long transID, byte[] rowID,
-                         Object columnsToUpdate,
-                         byte[] columnToCheck, byte[] columnValToCheck,
-                         long timestamp,
-                         boolean asyncOperation) throws IOException, InterruptedException, ExecutionException {
-      boolean checkAndPut = true;
-      HTableClient htc = getHTableClient(jniObject, tblName, useTRex);
-      boolean ret = htc.putRow(transID, rowID, columnsToUpdate, columnToCheck, columnValToCheck,
-                                checkAndPut, asyncOperation);
-      if (asyncOperation == true)
-         htc.setJavaObject(jniObject);
-      else
-         releaseHTableClient(htc);
-      return ret;
-  }
-
-  public boolean insertRows(long jniObject, String tblName, boolean useTRex, long transID, 
-			 short rowIDLen,
-                         Object rowIDs,
-                         Object rows,
-                         long timestamp,
-                         boolean autoFlush,
-                         boolean asyncOperation) throws IOException, InterruptedException, ExecutionException {
-      HTableClient htc = getHTableClient(jniObject, tblName, useTRex);
-      boolean ret = htc.putRows(transID, rowIDLen, rowIDs, rows, timestamp, autoFlush, asyncOperation);
-      if (asyncOperation == true)
-         htc.setJavaObject(jniObject);
-      else
-         releaseHTableClient(htc);
-      return ret;
-  }
-
-  public boolean deleteRow(long jniObject, String tblName, boolean useTRex, long transID, 
-                                 byte[] rowID,
-                                 Object[] columns,
-                                 long timestamp, boolean asyncOperation) throws IOException {
-      HTableClient htc = getHTableClient(jniObject, tblName, useTRex);
-      boolean ret = htc.deleteRow(transID, rowID, columns, timestamp, asyncOperation);
-      if (asyncOperation == true)
-         htc.setJavaObject(jniObject);
-      else
-         releaseHTableClient(htc);
-      return ret;
-  }
-
-  public boolean deleteRows(long jniObject, String tblName, boolean useTRex, long transID, short rowIDLen, Object rowIDs,
-                      long timestamp, 
-                      boolean asyncOperation) throws IOException, InterruptedException, ExecutionException {
-      HTableClient htc = getHTableClient(jniObject, tblName, useTRex);
-      boolean ret = htc.deleteRows(transID, rowIDLen, rowIDs, timestamp, asyncOperation);
-      if (asyncOperation == true)
-         htc.setJavaObject(jniObject);
-      else
-         releaseHTableClient(htc);
-      return ret;
-  }
-
-  public boolean checkAndDeleteRow(long jniObject, String tblName, boolean useTRex, long transID, 
-                                 byte[] rowID,
-                                 byte[] columnToCheck, byte[] colValToCheck,
-                                 long timestamp, boolean asyncOperation) throws IOException {
-      HTableClient htc = getHTableClient(jniObject, tblName, useTRex);
-      boolean ret = htc.checkAndDeleteRow(transID, rowID, columnToCheck, colValToCheck, timestamp);
-      if (asyncOperation == true)
-         htc.setJavaObject(jniObject);
-      else
-         releaseHTableClient(htc);
-      return ret;
-  }
-
-  public boolean  createCounterTable(String tabName,  String famName) throws IOException, MasterNotRunningException
-  {
-    if (logger.isDebugEnabled()) logger.debug("HBaseClient.createCounterTable() - start");
-    HBaseAdmin admin = new HBaseAdmin(config);
-    TableName tn =  TableName.valueOf (tabName);
-    if (admin.tableExists(tabName)) {
-        admin.close();
-        return true;
-    }
-    HTableDescriptor desc = new HTableDescriptor(tn);
-    HColumnDescriptor colDesc = new HColumnDescriptor(famName);
-    // A counter table is non-DTM-transactional.
-    // Use the default maximum versions for MVCC.
-    colDesc.setMaxVersions(DtmConst.MVCC_MAX_VERSION);
-    desc.addFamily(colDesc);
-    admin.createTable(desc);
-    admin.close();
-    if (logger.isDebugEnabled()) logger.debug("HBaseClient.createCounterTable() - end");
-    return true;
-  }
-
-  public long incrCounter(String tabName, String rowId, String famName, String qualName, long incrVal) throws Exception
-  {
-    if (logger.isDebugEnabled()) logger.debug("HBaseClient.incrCounter() - start");
-
-    HTable myHTable = new HTable(config, tabName);
-    long count = myHTable.incrementColumnValue(Bytes.toBytes(rowId), Bytes.toBytes(famName), Bytes.toBytes(qualName), incrVal);
-    myHTable.close();
-    return count;
-  }
-
-}
-    
-
-
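
The counter helpers removed above wrap HBase's server-side atomic increment: createCounterTable() provisions a plain, non-DTM-transactional table and incrCounter() issues a single incrementColumnValue() call against it. For reference, a minimal standalone sketch of that same pattern follows; the table, row and column names are illustrative assumptions, not taken from the patch, and it uses the same pre-1.0 HTable client handle the file above uses.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CounterSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Hypothetical counter table; in the code above it is created with
            // default MVCC max versions and no transactional coprocessors.
            HTable table = new HTable(conf, "SAMPLE_COUNTERS");
            try {
                // Atomic read-modify-write on the region server; returns the
                // post-increment value, as incrCounter() does.
                long next = table.incrementColumnValue(Bytes.toBytes("row1"),
                                                       Bytes.toBytes("cf"),
                                                       Bytes.toBytes("q"),
                                                       1L);
                System.out.println("counter is now " + next);
            } finally {
                table.close();
            }
        }
    }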

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/executor/HBaseClient_JNI.cpp
----------------------------------------------------------------------
diff --git a/core/sql/executor/HBaseClient_JNI.cpp b/core/sql/executor/HBaseClient_JNI.cpp
index 5135c84..fbe5056 100644
--- a/core/sql/executor/HBaseClient_JNI.cpp
+++ b/core/sql/executor/HBaseClient_JNI.cpp
@@ -69,7 +69,7 @@ ByteArrayList::~ByteArrayList()
 //////////////////////////////////////////////////////////////////////////////
 BAL_RetCode ByteArrayList::init()
 {
-  static char className[]="org/trafodion/sql/HBaseAccess/ByteArrayList";
+  static char className[]="org/trafodion/sql/ByteArrayList";
   BAL_RetCode rc;
   
   if (isInitialized())
@@ -405,7 +405,7 @@ HBaseClient_JNI::~HBaseClient_JNI()
 //////////////////////////////////////////////////////////////////////////////
 HBC_RetCode HBaseClient_JNI::init()
 {
-  static char className[]="org/trafodion/sql/HBaseAccess/HBaseClient";
+  static char className[]="org/trafodion/sql/HBaseClient";
   HBC_RetCode rc;
   
   if (isInitialized())
@@ -432,9 +432,9 @@ HBC_RetCode HBaseClient_JNI::init()
     JavaMethods_[JM_CLEANUP    ].jm_name      = "cleanup";
     JavaMethods_[JM_CLEANUP    ].jm_signature = "()Z";
     JavaMethods_[JM_GET_HTC    ].jm_name      = "getHTableClient";
-    JavaMethods_[JM_GET_HTC    ].jm_signature = "(JLjava/lang/String;Z)Lorg/trafodion/sql/HBaseAccess/HTableClient;";
+    JavaMethods_[JM_GET_HTC    ].jm_signature = "(JLjava/lang/String;Z)Lorg/trafodion/sql/HTableClient;";
     JavaMethods_[JM_REL_HTC    ].jm_name      = "releaseHTableClient";
-    JavaMethods_[JM_REL_HTC    ].jm_signature = "(Lorg/trafodion/sql/HBaseAccess/HTableClient;)V";
+    JavaMethods_[JM_REL_HTC    ].jm_signature = "(Lorg/trafodion/sql/HTableClient;)V";
     JavaMethods_[JM_CREATE     ].jm_name      = "create";
     JavaMethods_[JM_CREATE     ].jm_signature = "(Ljava/lang/String;[Ljava/lang/Object;Z)Z";
     JavaMethods_[JM_CREATEK    ].jm_name      = "createk";
@@ -448,7 +448,7 @@ HBC_RetCode HBaseClient_JNI::init()
     JavaMethods_[JM_DROP_ALL       ].jm_name      = "dropAll";
     JavaMethods_[JM_DROP_ALL       ].jm_signature = "(Ljava/lang/String;)Z";
     JavaMethods_[JM_LIST_ALL       ].jm_name      = "listAll";
-    JavaMethods_[JM_LIST_ALL       ].jm_signature = "(Ljava/lang/String;)Lorg/trafodion/sql/HBaseAccess/ByteArrayList;";
+    JavaMethods_[JM_LIST_ALL       ].jm_signature = "(Ljava/lang/String;)Lorg/trafodion/sql/ByteArrayList;";
     JavaMethods_[JM_COPY       ].jm_name      = "copy";
     JavaMethods_[JM_COPY       ].jm_signature = "(Ljava/lang/String;Ljava/lang/String;)Z";
     JavaMethods_[JM_EXISTS     ].jm_name      = "exists";
@@ -460,11 +460,11 @@ HBC_RetCode HBaseClient_JNI::init()
     JavaMethods_[JM_FLUSHALL   ].jm_name      = "flushAllTables";
     JavaMethods_[JM_FLUSHALL   ].jm_signature = "()Z";
     JavaMethods_[JM_GET_HBLC   ].jm_name      = "getHBulkLoadClient";
-    JavaMethods_[JM_GET_HBLC   ].jm_signature = "()Lorg/trafodion/sql/HBaseAccess/HBulkLoadClient;";
+    JavaMethods_[JM_GET_HBLC   ].jm_signature = "()Lorg/trafodion/sql/HBulkLoadClient;";
     JavaMethods_[JM_EST_RC     ].jm_name      = "estimateRowCount";
     JavaMethods_[JM_EST_RC     ].jm_signature = "(Ljava/lang/String;II[J)Z";
     JavaMethods_[JM_REL_HBLC   ].jm_name      = "releaseHBulkLoadClient";
-    JavaMethods_[JM_REL_HBLC   ].jm_signature = "(Lorg/trafodion/sql/HBaseAccess/HBulkLoadClient;)V";
+    JavaMethods_[JM_REL_HBLC   ].jm_signature = "(Lorg/trafodion/sql/HBulkLoadClient;)V";
     JavaMethods_[JM_GET_CAC_FRC].jm_name      = "getBlockCacheFraction";
     JavaMethods_[JM_GET_CAC_FRC].jm_signature = "()F";
     JavaMethods_[JM_GET_LATEST_SNP].jm_name      = "getLatestSnapshot";
@@ -1884,7 +1884,7 @@ static const char* const hblcErrorEnumStr[] = ///need to update content
 };
 HBLC_RetCode HBulkLoadClient_JNI::init()
 {
-  static char className[]="org/trafodion/sql/HBaseAccess/HBulkLoadClient";
+  static char className[]="org/trafodion/sql/HBulkLoadClient";
   HBLC_RetCode rc;
 
   if (isInitialized())
@@ -3545,7 +3545,7 @@ HTableClient_JNI::~HTableClient_JNI()
 //////////////////////////////////////////////////////////////////////////////
 HTC_RetCode HTableClient_JNI::init()
 {
-  static char className[]="org/trafodion/sql/HBaseAccess/HTableClient";
+  static char className[]="org/trafodion/sql/HTableClient";
   HTC_RetCode rc;
   
   if (isInitialized())
@@ -3578,7 +3578,7 @@ HTC_RetCode HTableClient_JNI::init()
     JavaMethods_[JM_GET_HTNAME ].jm_name      = "getTableName";
     JavaMethods_[JM_GET_HTNAME ].jm_signature = "()Ljava/lang/String;";
     JavaMethods_[JM_GETENDKEYS ].jm_name      = "getEndKeys";
-    JavaMethods_[JM_GETENDKEYS ].jm_signature = "()Lorg/trafodion/sql/HBaseAccess/ByteArrayList;";
+    JavaMethods_[JM_GETENDKEYS ].jm_signature = "()Lorg/trafodion/sql/ByteArrayList;";
     JavaMethods_[JM_FLUSHT     ].jm_name      = "flush";
     JavaMethods_[JM_FLUSHT     ].jm_signature = "()Z";
     JavaMethods_[JM_SET_WB_SIZE ].jm_name      = "setWriteBufferSize";
@@ -3590,7 +3590,7 @@ HTC_RetCode HTableClient_JNI::init()
     JavaMethods_[JM_COMPLETE_PUT ].jm_name      = "completeAsyncOperation";
     JavaMethods_[JM_COMPLETE_PUT ].jm_signature = "(I[Z)Z";
     JavaMethods_[JM_GETBEGINKEYS ].jm_name      = "getStartKeys";
-    JavaMethods_[JM_GETBEGINKEYS ].jm_signature = "()Lorg/trafodion/sql/HBaseAccess/ByteArrayList;";
+    JavaMethods_[JM_GETBEGINKEYS ].jm_signature = "()Lorg/trafodion/sql/ByteArrayList;";
    
     rc = (HTC_RetCode)JavaObjectInterface::init(className, javaClass_, JavaMethods_, (Int32)JM_LAST, javaMethodsInitialized_);
     javaMethodsInitialized_ = TRUE;
@@ -4277,7 +4277,7 @@ HiveClient_JNI::~HiveClient_JNI()
 //////////////////////////////////////////////////////////////////////////////
 HVC_RetCode HiveClient_JNI::init()
 {
-  static char className[]="org/trafodion/sql/HBaseAccess/HiveClient";
+  static char className[]="org/trafodion/sql/HiveClient";
   HVC_RetCode rc;
   
   if (isInitialized())
@@ -4752,7 +4752,7 @@ void HiveClient_JNI::logIt(const char* str)
 extern "C" {
 #endif
 
-JNIEXPORT jint JNICALL Java_org_trafodion_sql_HBaseAccess_HTableClient_setResultInfo
+JNIEXPORT jint JNICALL Java_org_trafodion_sql_HTableClient_setResultInfo
   (JNIEnv *jenv, jobject jobj, jlong jniObject, 
 	jintArray jKvValLen, jintArray jKvValOffset, 
         jintArray jKvQualLen, jintArray jKvQualOffset,
@@ -4771,7 +4771,7 @@ JNIEXPORT jint JNICALL Java_org_trafodion_sql_HBaseAccess_HTableClient_setResult
    return 0;
 }
 
-JNIEXPORT jint JNICALL Java_org_trafodion_sql_HBaseAccess_HTableClient_setJavaObject
+JNIEXPORT jint JNICALL Java_org_trafodion_sql_HTableClient_setJavaObject
   (JNIEnv *jenv, jobject jobj, jlong jniObject)
 {
    HTableClient_JNI *htc = (HTableClient_JNI *)jniObject;
@@ -4779,7 +4779,7 @@ JNIEXPORT jint JNICALL Java_org_trafodion_sql_HBaseAccess_HTableClient_setJavaOb
    return 0;
 }
 
-JNIEXPORT void JNICALL Java_org_trafodion_sql_HBaseAccess_HTableClient_cleanup
+JNIEXPORT void JNICALL Java_org_trafodion_sql_HTableClient_cleanup
   (JNIEnv *jenv, jobject jobj, jlong jniObject)
 {
    HTableClient_JNI *htc = (HTableClient_JNI *)jniObject;
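
The renames in this file follow mechanically from moving the Java classes out of the org.trafodion.sql.HBaseAccess package into org.trafodion.sql: JNI builds the exported C symbol from the fully qualified Java name, and reference types inside the jm_signature descriptor strings carry the package path as well (for example "(JLjava/lang/String;Z)Lorg/trafodion/sql/HTableClient;" reads as long, String, boolean returning HTableClient). A minimal sketch of the correspondence, with the Java-side declaration inferred from the C prototype above (modifiers are an assumption):

    // Sketch only: inferred from
    // Java_org_trafodion_sql_HTableClient_setJavaObject(JNIEnv*, jobject, jlong).
    package org.trafodion.sql;

    public class HTableClient {
        // The JVM resolves this to the symbol
        //   Java_org_trafodion_sql_HTableClient_setJavaObject;
        // before the package move it resolved to
        //   Java_org_trafodion_sql_HBaseAccess_HTableClient_setJavaObject,
        // which is why every JNIEXPORT definition above is renamed.
        native int setJavaObject(long jniObject);
    }

The same rule gives the javah-generated header its name, org_trafodion_sql_HTableClient.h, which is why the #include in HBaseClient_JNI.h changes in the next file.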

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/executor/HBaseClient_JNI.h
----------------------------------------------------------------------
diff --git a/core/sql/executor/HBaseClient_JNI.h b/core/sql/executor/HBaseClient_JNI.h
index 6941069..e4129d1 100644
--- a/core/sql/executor/HBaseClient_JNI.h
+++ b/core/sql/executor/HBaseClient_JNI.h
@@ -34,7 +34,7 @@
 #include "Hbase_types.h"
 #include "ExpHbaseDefs.h"
 #include "NAMemory.h"
-#include "HTableClient.h"
+#include "org_trafodion_sql_HTableClient.h"
 
 // forward declare
 class ExHbaseAccessStats;