Posted to commits@trafodion.apache.org by db...@apache.org on 2015/10/02 18:16:31 UTC

[2/9] incubator-trafodion git commit: Most of the Trafodion Java source files are built through Maven, using projects DCS, REST, HBase-trx and SQL. A few files remain in the core/sql/executor and core/sql/ustat directories that are built through javac co

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/src/main/java/org/trafodion/sql/SequenceFileReader.java
----------------------------------------------------------------------
diff --git a/core/sql/src/main/java/org/trafodion/sql/SequenceFileReader.java b/core/sql/src/main/java/org/trafodion/sql/SequenceFileReader.java
new file mode 100644
index 0000000..2547af0
--- /dev/null
+++ b/core/sql/src/main/java/org/trafodion/sql/SequenceFileReader.java
@@ -0,0 +1,448 @@
+// @@@ START COPYRIGHT @@@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// @@@ END COPYRIGHT @@@
+
+/**
+ * 
+ */
+package org.trafodion.sql;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Properties;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.util.ReflectionUtils;
+//import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
+//import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+//import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
+//import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption;
+//import org.apache.hadoop.hive.serde2.objectinspector.StructField;
+//import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
+
+
+public class SequenceFileReader {
+
+  Configuration conf = null;           // File system configuration
+  SequenceFile.Reader reader = null;   // The HDFS SequenceFile reader object.
+  Writable key = null;
+  Writable row = null;
+//    LazySimpleSerDe serde = null;
+  boolean isEOF = false;
+  String lastError = null;  
+    
+	/**
+	 * Class Constructor
+	 */
+	SequenceFileReader() {
+		conf = new Configuration();
+		conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
+	}
+    
+  String getLastError() {
+      return lastError;
+  }
+    
+	/**
+	 * Initialize the SerDe object. Needed only before calling fetchArrayOfColumns(). 
+	 * @param numColumns The number of columns in the table.
+	 * @param fieldDelim The delimiter between fields.
+	 * @param columns A comma delimited list of column names. 
+	 * @param colTypes A comma delimited list of column types.
+	 * @param nullFormat NULL representation.
+	 */
+//	public void initSerDe(String numColumns, String fieldDelim, String columns, String colTypes, String nullFormat) throws IllegalStateException {
+//		
+//            serde = new LazySimpleSerDe();
+//            Properties tbl = new Properties();
+//            tbl.setProperty("serialization.format", numColumns);
+//            tbl.setProperty("field.delim", fieldDelim);
+//            tbl.setProperty("columns", columns);
+//            tbl.setProperty("columns.types", colTypes);
+//            tbl.setProperty("serialization.null.format", colTypes);
+//            serde.initialize(conf, tbl);
+//	}
+	
+	/**
+	 * Open the SequenceFile for reading.
+	 * @param path The HDFS path to the file.
+	 */
+	public String open(String path) throws IOException {
+
+		Path filename = new Path(path);
+
+		reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(filename));
+
+		key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
+		row = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
+
+		return null;
+	}
+	
+	/**
+	 * Get the current position in the file.
+	 * @return The current position or -1 if error.
+	 */
+	public long getPosition() throws IOException {
+
+		lastError = null;
+		if (reader == null) {
+			lastError = "open() was not called first.";
+			return -1;
+		}
+
+		return reader.getPosition();
+	}	
+	
+	/**
+	 * Have we reached the end of the file yet?
+	 * @return true if EOF has been reached.
+	 */
+	public boolean isEOF() {
+		return isEOF;
+	}
+
+	/**
+	 * Seek to the specified position in the file, and then to the beginning 
+	 * of the record after the next sync mark.
+	 * @param pos Required file position.
+	 * @return null if OK, or error message.
+	 */
+	public String seeknSync(long pos) throws IOException {
+
+		if (reader == null) {
+			return "open() was not called first.";
+		}
+		
+		reader.sync(pos);
+		return null;
+	}
+	
+	/**
+	 * Fetch the next row as an array of columns.
+	 * @return An array of columns.
+	 */
+//	public String[] fetchArrayOfColumns() throws IllegalStateException {
+//		if (reader == null)
+//			throw new IllegalStateException("open() was not called first.");
+//		if (serde == null)
+//			throw new IllegalStateException("initSerDe() was not called first.");
+//		
+//		ArrayList<String> result = new ArrayList<String>();
+//            boolean theresMore = reader.next(key, row);
+//            if (!theresMore)
+//            	return null;
+//            StructObjectInspector soi = (StructObjectInspector) serde.getObjectInspector();
+//            List<? extends StructField> fieldRefs = soi.getAllStructFieldRefs();
+//            Object data = serde.deserialize(row);
+//            
+//            for (StructField fieldRef : fieldRefs) {
+//                ObjectInspector oi = fieldRef.getFieldObjectInspector();
+//                Object obj = soi.getStructFieldData(data, fieldRef);
+//                Object column = convertLazyToJava(obj, oi);
+//                if (column == null)
+//                	result.add(null);
+//                else
+//                	result.add(column.toString());
+//              }
+//    		String[] resultArray = new String[result.size()];
+//    		result.toArray(resultArray);
+//    		return resultArray;
+//	}
+	
+	/**
+	 * Fetch the next row as a single String that still needs to be parsed.
+	 * @return The next row.
+	 */
+	public String fetchNextRow() throws IOException {
+
+		lastError = null;
+		if (reader == null) {
+			lastError = "open() was not called first.";
+			return null;
+		}
+
+		if (reader.next(key, row))
+			return row.toString();
+		else
+			return null;
+	}
+	
+	/**
+	 * Fetch a batch of rows totaling at least minSize bytes.
+	 * @param minSize Minimum size of the result. If the file is compressed,
+	 * the result may be much larger. The reading starts at the current
+	 * position in the file and stops once the limit has been reached.
+	 * @return An array of result rows.
+	 * @throws IOException
+	 */
+	public String[] fetchArrayOfRows(int minSize) throws IOException {
+
+    lastError = "";		
+		if (reader == null) {		
+			lastError = "open() was not called first.";
+			return null;
+		}
+		
+		ArrayList<String> result = new ArrayList<String>();
+		long initialPos = getPosition();
+		boolean stop = false;
+		do {
+			String newRow = fetchNextRow();
+			
+			if (newRow==null && lastError!=null)
+			  return null;
+			  
+			boolean reachedEOF = (newRow == null || newRow == "");
+			if (!reachedEOF)
+				result.add(newRow);
+			
+			long bytesRead = getPosition() - initialPos;
+			stop = reachedEOF || (bytesRead > minSize);
+		} while (!stop);
+		
+		String[] resultArray = new String[result.size()];
+		result.toArray(resultArray);
+		return resultArray;
+	}
+	
+	/**
+	 * Read a block of data from the file and return it as an array of rows.
+	 * First sync to startOffset and skip the first row, then keep reading
+	 * until passing stopOffset and the next sync marker.
+	 * @param startOffset
+	 * @param stopOffset
+	 * @return
+	 * @throws IllegalStateException
+	 * @throws IOException
+	 */
+	public String[] fetchArrayOfRows(int startOffset, int stopOffset)
+                  throws IOException  {
+
+    lastError = "";		
+		if (reader == null) {		
+			lastError = "open() was not called first.";
+			return null;
+		}
+		
+		seeknSync(startOffset);
+		
+		ArrayList<String> result = new ArrayList<String>();
+		boolean stop = false;
+		do {
+			long startingPosition = getPosition();
+			String newRow = fetchNextRow();
+
+			if (newRow==null && lastError!=null)
+			  return null;
+			  
+			boolean reachedEOF = (newRow == null || newRow == "");
+			
+			boolean reachedSize = (startingPosition > stopOffset);
+			boolean lastSyncSeen = (reachedSize && reader.syncSeen());
+			// Stop reading if there is no more data, or if we have read 
+			// enough bytes and have seen the Sync mark.
+			stop = reachedEOF || (reachedSize && lastSyncSeen);
+			
+			if (!stop)
+				result.add(newRow);
+			
+		} while (!stop);
+		
+		String[] resultArray = new String[result.size()];
+		result.toArray(resultArray);
+		return resultArray;
+	}
+	
+	/**
+	 * Fetch the next row from the file.
+	 * @param stopOffset File offset at which to start looking for a sync marker
+	 * @return The next row, or null if we have reached EOF or have passed stopOffset and then
+	 *         the sync marker.
+	 */
+	public String fetchNextRow(long stopOffset) throws IOException {
+
+    lastError = "";		
+		if (reader == null) {		
+			lastError = "open() was not called first.";
+			return null;
+		}
+
+		long startingPosition = getPosition();
+		
+		String newRow = fetchNextRow();
+		
+    if (newRow==null && lastError!=null)
+	    return null;
+
+		if (newRow == null)
+			isEOF = true;
+		
+		if (newRow == "")
+			newRow = null;
+		
+		// If we have already read past the stopOffset on a previous row, 
+		// and have seen the sync marker, then this row belongs to the next block.
+		if ((startingPosition > stopOffset) && reader.syncSeen())
+			newRow = null;
+		
+		return newRow;
+	}
+	
+	/**
+	 * Close the reader.
+	 */
+	public String close() {
+
+    lastError = "";		
+		if (reader == null) {		
+			lastError = "open() was not called first.";
+			return null;
+		}
+
+      IOUtils.closeStream(reader);            
+    
+    return null;
+	}
+
+	private boolean ReadnPrint(int start, int end) 
+                       throws IOException {
+		System.out.println("Beginning position: " + getPosition());
+		String[] batch = fetchArrayOfRows(start, end);
+		if (batch == null)
+			return false;
+
+		boolean theresMore = (batch.length > 0);
+		for (String newRow : batch)
+			System.out.println(newRow);
+		System.out.println("Ending position: " + getPosition());
+		System.out.println("===> Buffer Split <===");
+		return theresMore;
+	}
+
+	private boolean ReadnPrint2(int start, int end) throws IOException {
+			System.out.println("Read from: " + start + " to: " + end + ".");
+			seeknSync(start);
+			System.out.println("Beginning position: " + getPosition());
+			String newRow = null;
+			do {
+				newRow = fetchNextRow(end);
+				
+				if (newRow != null)
+					System.out.println(newRow);
+			} while (newRow != null); 
+			
+		System.out.println("Ending position: " + getPosition());
+		System.out.println("===> Buffer Split <===");
+		return !isEOF();
+	}
+
+	/**
+	 * @param args
+	 * @throws IOException 
+	 */
+	public static void main(String[] args) throws IOException {
+		
+		SequenceFileReader sfReader = new SequenceFileReader();
+		byte[] fieldDelim = new byte[2];
+		fieldDelim[0] = 1;
+		fieldDelim[1] = 0;
+		//sfReader.initSerDe("19", "\01",
+                //           "p_promo_sk,p_promo_id,p_start_date_sk,p_end_date_sk,p_item_sk,p_cost,p_response_target,p_promo_name,p_channel_dmail,p_channel_email,p_channel_catalog,p_channel_tv,p_channel_radio,p_channel_press,p_channel_event,p_channel_demo,p_channel_details,p_purpose,p_discount_active",
+                //           "int,string,int,int,int,float,int,string,string,string,string,string,string,string,string,string,string,string,string",
+                //          "NULL");
+                          
+		//sfReader.open("hdfs://localhost:9000/user/hive/warehouse/promotion_seq/000000_0");
+		sfReader.seeknSync(300);
+
+		int opType = 4;
+		switch (opType)
+		{
+//		case 1:
+//			boolean theresMoreRows = true;
+//			do {
+//				String[] columns = sfReader.fetchArrayOfColumns();
+//				theresMoreRows = (columns != null);
+//				if (theresMoreRows)
+//				{
+//					for (String col : columns)
+//					{
+//						if (col == null)
+//							System.out.print("<NULL>, ");
+//						else
+//							System.out.print(col + ", ");
+//					}
+//					System.out.println();
+//				}
+//			} while (theresMoreRows); 
+//			break;
+			
+		case 2: // Return row as String
+			String row;
+			do {
+				row = sfReader.fetchNextRow();
+				if (row != null)
+					System.out.println(row);
+			} while (row != null);
+			break;
+			
+		case 3:
+		case 4:
+			int size = 3000;
+			int start = 0;
+			int end = size;
+			boolean theresMore3 = true;
+			
+			while (theresMore3) {
+				if (opType == 3)
+					theresMore3 = sfReader.ReadnPrint(start, end);
+				else
+					theresMore3 = sfReader.ReadnPrint2(start, end);
+				start += size;
+				end += size;				
+			}
+			break;
+
+		}
+		
+		sfReader.close();
+	}
+
+//	private static Object convertLazyToJava(Object o, ObjectInspector oi) {
+//	    Object obj = ObjectInspectorUtils.copyToStandardObject(o, oi, ObjectInspectorCopyOption.JAVA);
+//
+//	    // for now, expose non-primitive as a string
+//	    // TODO: expose non-primitive as a structured object while maintaining JDBC compliance
+//	    if (obj != null && oi.getCategory() != ObjectInspector.Category.PRIMITIVE) {
+//	      obj = obj.toString();
+//	    }
+//
+//	    return obj;
+//	  }
+}
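
A minimal usage sketch for the reader above (hypothetical path and offsets;
the constructor is package-private, so the caller is assumed to live in
org.trafodion.sql, and the read methods throw IOException):

    SequenceFileReader reader = new SequenceFileReader();
    String err = reader.open("hdfs://host:8020/tmp/demo.seq");  // hypothetical path
    if (err == null) {
        reader.seeknSync(0);                // advance past the next sync mark
        String row;
        while ((row = reader.fetchNextRow(3000)) != null)  // one ~3000-byte block
            System.out.println(row);
        reader.close();
    }

Because fetchNextRow(stopOffset) only returns null after passing stopOffset
and a sync mark, adjacent scanners given ranges such as (0, 3000) and
(3000, 6000) should partition the rows without overlap, which is what main()
above exercises through ReadnPrint2().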

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/src/main/java/org/trafodion/sql/SequenceFileWriter.java
----------------------------------------------------------------------
diff --git a/core/sql/src/main/java/org/trafodion/sql/SequenceFileWriter.java b/core/sql/src/main/java/org/trafodion/sql/SequenceFileWriter.java
new file mode 100644
index 0000000..0950431
--- /dev/null
+++ b/core/sql/src/main/java/org/trafodion/sql/SequenceFileWriter.java
@@ -0,0 +1,467 @@
+// @@@ START COPYRIGHT @@@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// @@@ END COPYRIGHT @@@
+
+/**
+ * 
+ */
+package org.trafodion.sql;
+
+import java.io.IOException;
+import java.io.OutputStream;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.MasterNotRunningException;
+import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.TableSnapshotScanner;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.HFileArchiveUtil;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.ByteWritable;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.compress.CodecPool;
+import org.apache.hadoop.io.compress.CompressionCodec;
+import org.apache.hadoop.io.compress.Compressor;
+import org.apache.hadoop.io.compress.GzipCodec;
+import org.apache.hadoop.io.SequenceFile.CompressionType;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.io.compress.*;
+import org.apache.hadoop.io.compress.zlib.*;
+import org.apache.hadoop.fs.*;
+
+import java.io.*;
+import java.util.List;
+
+import org.apache.hadoop.util.*;
+import org.apache.hadoop.io.*;
+import org.apache.log4j.Logger;
+
+import com.google.common.collect.Lists;
+import com.google.protobuf.ServiceException;
+
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.FsPermission;
+
+public class SequenceFileWriter {
+
+    static Logger logger = Logger.getLogger(SequenceFileWriter.class.getName());
+    Configuration conf = null;           // File system configuration
+    HBaseAdmin admin = null;
+    
+    SequenceFile.Writer writer = null;
+
+    FSDataOutputStream fsOut = null;
+    OutputStream outStream = null;
+    
+    FileSystem  fs = null;
+    /**
+     * Class Constructor
+     */
+    SequenceFileWriter() throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException, IOException
+    {
+      init("", "");
+      conf.set("fs.hdfs.impl","org.apache.hadoop.hdfs.DistributedFileSystem");
+    }
+    
+	
+    public String open(String path)	{
+      try {
+        Path filename = new Path(path);
+        writer = SequenceFile.createWriter(conf, 
+          	       SequenceFile.Writer.file(filename),
+          	       // Key/value classes must match the (BytesWritable, Text)
+          	       // pairs appended by write() below.
+          	       SequenceFile.Writer.keyClass(BytesWritable.class),
+          	       SequenceFile.Writer.valueClass(Text.class),
+          	       SequenceFile.Writer.compression(CompressionType.NONE));
+        return null;
+      } catch (Exception e) {
+        //e.printStackTrace();
+        return e.getMessage();
+      }	
+    }
+	
+    public String open(String path, int compressionType)	{
+      try {
+        Path filename = new Path(path);
+        
+        CompressionType compType=null;
+        switch (compressionType) {
+          case 0:
+            compType = CompressionType.NONE;
+            break;
+            
+          case 1:
+            compType = CompressionType.RECORD;
+            break;
+            
+          case 2:
+            compType = CompressionType.BLOCK;
+            break;
+          
+          default:
+            return "Wrong argument for compression type.";
+        }
+        
+        writer = SequenceFile.createWriter(conf, 
+          	                               SequenceFile.Writer.file(filename),
+          	                               SequenceFile.Writer.keyClass(BytesWritable.class),
+          	                               SequenceFile.Writer.valueClass(Text.class),
+          	                               SequenceFile.Writer.compression(compType));
+        return null;
+      } catch (Exception e) {
+        //e.printStackTrace();
+        return e.getMessage();
+      }	
+    }
+	
+    public String write(String data) {
+      if (writer == null)
+        return "open() was not called first.";
+
+      try {
+        writer.append(new BytesWritable(), new Text(data.getBytes()));
+        return null;
+      } catch (IOException e) {
+        //e.printStackTrace();
+        return e.getMessage();
+      }
+    }
+	
+    public String close() {
+      if (writer == null)
+        return "open() was not called first.";
+
+      try {
+        writer.close();
+        return null;
+      } catch (Exception e) {
+        //e.printStackTrace();
+        return e.getMessage();
+      }
+    }
+    
+    
+    
+    boolean hdfsCreate(String fname , boolean compress) throws IOException
+    {
+      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsCreate() - started" );
+      Path filePath = null;
+      if (!compress || (compress && fname.endsWith(".gz")))
+        filePath = new Path(fname);
+      else
+        filePath = new Path(fname + ".gz");
+        
+      fs = FileSystem.get(filePath.toUri(),conf);
+      fsOut = fs.create(filePath, true);
+      
+      outStream = fsOut;
+      
+      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsCreate() - file created" );
+      if (compress)
+      {
+        GzipCodec gzipCodec = (GzipCodec) ReflectionUtils.newInstance( GzipCodec.class, conf);
+        Compressor gzipCompressor = CodecPool.getCompressor(gzipCodec);
+        try 
+        {
+          outStream = gzipCodec.createOutputStream(fsOut, gzipCompressor);
+        }
+        catch (IOException e)
+        {
+        if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsCreate() --exception :" + e);
+          throw e;
+        }
+      }
+      
+      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsCreate() - compressed output stream created" );
+      return true;
+    }
+    
+    boolean hdfsWrite(byte[] buff, long len) throws Exception,OutOfMemoryError
+    {
+
+      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsWrite() - started" );
+      try
+      {
+        outStream.write(buff);
+        outStream.flush();
+      }
+      catch (Exception e)
+      {
+        if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsWrite() -- exception: " + e);
+        throw e;
+      }
+      catch (OutOfMemoryError e1)
+      {
+        logger.debug("SequenceFileWriter.hdfsWrite() -- OutOfMemory Error: " + e1);
+        throw e1;
+      }
+      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsWrite() - bytes written and flushed:" + len  );
+      
+      return true;
+    }
+    
+    boolean hdfsClose() throws IOException
+    {
+      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsClose() - started" );
+      try
+      {
+        outStream.close();
+        fsOut.close();
+      }
+      catch (IOException e)
+      {
+        if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsClose() - exception:" + e);
+        throw e;
+      }
+      return true;
+    }
+
+    
+    public boolean hdfsMergeFiles(String srcPathStr, String dstPathStr) throws Exception
+    {
+      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsMergeFiles() - start");
+      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsMergeFiles() - source Path: " + srcPathStr + 
+                                               ", destination File:" + dstPathStr );
+      try 
+      {
+        Path srcPath = new Path(srcPathStr );
+        srcPath = srcPath.makeQualified(srcPath.toUri(), null);
+        FileSystem srcFs = FileSystem.get(srcPath.toUri(),conf);
+  
+        Path dstPath = new Path(dstPathStr);
+        dstPath = dstPath.makeQualified(dstPath.toUri(), null);
+        FileSystem dstFs = FileSystem.get(dstPath.toUri(),conf);
+        
+        if (dstFs.exists(dstPath))
+        {
+          if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsMergeFiles() - destination files exists" );
+          // for this prototype we just delete the file-- will change in next code drops
+          dstFs.delete(dstPath, false);
+           // The caller should already have checked existence of file-- throw exception 
+           //throw new FileAlreadyExistsException(dstPath.toString());
+        }
+        
+        Path tmpSrcPath = new Path(srcPath, "tmp");
+
+        FileSystem.mkdirs(srcFs, tmpSrcPath,srcFs.getFileStatus(srcPath).getPermission());
+        logger.debug("SequenceFileWriter.hdfsMergeFiles() - tmp folder created." );
+        Path[] files = FileUtil.stat2Paths(srcFs.listStatus(srcPath));
+        for (Path f : files)
+        {
+          srcFs.rename(f, tmpSrcPath);
+        }
+        // copyMerge and use false for the delete option since it removes the whole directory
+        if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsMergeFiles() - copyMerge" );
+        FileUtil.copyMerge(srcFs, tmpSrcPath, dstFs, dstPath, false, conf, null);
+        
+        if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsMergeFiles() - delete intermediate files" );
+        srcFs.delete(tmpSrcPath, true);
+      }
+      catch (IOException e)
+      {
+        if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsMergeFiles() --exception:" + e);
+        throw e;
+      }
+      
+      
+      return true;
+    }
+    public boolean hdfsCleanUnloadPath(String uldPathStr
+                         /*, boolean checkExistence, String mergeFileStr*/) throws Exception
+    {
+      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsCleanUnloadPath() - start");
+      logger.debug("SequenceFileWriter.hdfsCleanUnloadPath() - unload Path: " + uldPathStr );
+      
+      try 
+      {
+        Path uldPath = new Path(uldPathStr);
+        uldPath = uldPath.makeQualified(uldPath.toUri(), null);
+        FileSystem srcFs = FileSystem.get(uldPath.toUri(), conf);
+        if (!srcFs.exists(uldPath))
+        {
+          // Unload location does not exist; hdfsCreate() will create it later.
+          // Nothing to do.
+          logger.debug("SequenceFileWriter.hdfsCleanUnloadPath() -- unload location does not exist." );
+          return true;
+        }
+
+        Path[] files = FileUtil.stat2Paths(srcFs.listStatus(uldPath));
+        logger.debug("SequenceFileWriter.hdfsCleanUnloadPath() - delete files" );
+        for (Path f : files) {
+          srcFs.delete(f, false);
+        }
+      }
+      catch (IOException e)
+      {
+        logger.debug("SequenceFileWriter.hdfsCleanUnloadPath() -exception:" + e);
+        throw e;
+      }
+      
+      return true;
+    }
+
+  public boolean hdfsExists(String filePathStr) throws Exception 
+  {
+    logger.debug("SequenceFileWriter.hdfsExists() - start");
+    logger.debug("SequenceFileWriter.hdfsExists() - Path: " + filePathStr);
+
+    try 
+    {
+      // Check existence of the merge path.
+      Path filePath = new Path(filePathStr);
+      filePath = filePath.makeQualified(filePath.toUri(), null);
+      FileSystem mergeFs = FileSystem.get(filePath.toUri(), conf);
+      if (mergeFs.exists(filePath))
+      {
+        logger.debug("SequenceFileWriter.hdfsExists() - Path: " + filePath + " exists");
+        return true;
+      }
+
+    } catch (IOException e) {
+      logger.debug("SequenceFileWriter.hdfsExists() -exception:" + e);
+      throw e;
+    }
+    return false;
+  }
+
+  public boolean hdfsDeletePath(String pathStr) throws Exception
+  {
+    if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsDeletePath() - start - Path: " + pathStr);
+    try 
+    {
+      Path delPath = new Path(pathStr );
+      delPath = delPath.makeQualified(delPath.toUri(), null);
+      FileSystem fs = FileSystem.get(delPath.toUri(),conf);
+      fs.delete(delPath, true);
+    }
+    catch (IOException e)
+    {
+      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsDeletePath() --exception:" + e);
+      throw e;
+    }
+    
+    return true;
+  }
+
+  private boolean init(String zkServers, String zkPort) 
+      throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException, IOException
+  {
+    logger.debug("SequenceFileWriter.init(" + zkServers + ", " + zkPort + ") called.");
+    if (conf != null)		
+       return true;		
+    conf = HBaseConfiguration.create();		
+    if (zkServers.length() > 0)		
+      conf.set("hbase.zookeeper.quorum", zkServers);		
+    if (zkPort.length() > 0)		
+      conf.set("hbase.zookeeper.property.clientPort", zkPort);		
+    HBaseAdmin.checkHBaseAvailable(conf);
+    return true;
+  }
+  
+  public boolean createSnapshot( String tableName, String snapshotName)
+      throws MasterNotRunningException, IOException, SnapshotCreationException, 
+             InterruptedException, ZooKeeperConnectionException, ServiceException, Exception
+  {
+    try 
+    {
+      if (admin == null)
+        admin = new HBaseAdmin(conf);
+      admin.snapshot(snapshotName, tableName);
+      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.createSnapshot() - Snapshot created: " + snapshotName);
+    }
+    catch (Exception e)
+    {
+      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.createSnapshot() - Exception: " + e);
+      throw e;
+    }
+    return true;
+  }
+  public boolean verifySnapshot( String tableName, String snapshotName)
+      throws MasterNotRunningException, IOException, SnapshotCreationException, 
+             InterruptedException, ZooKeeperConnectionException, ServiceException, Exception
+  {
+    try 
+    {
+      if (admin == null)
+        admin = new HBaseAdmin(conf);
+      List<SnapshotDescription>  lstSnaps = admin.listSnapshots();
+
+      for (SnapshotDescription snpd : lstSnaps) 
+      {
+        if (snpd.getName().compareTo(snapshotName) == 0 && 
+            snpd.getTable().compareTo(tableName) == 0)
+        {
+          if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.verifySnapshot() - Snapshot verified: " + snapshotName);
+          return true;
+        }
+      }
+    }
+    catch (Exception e)
+    {
+      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.verifySnapshot() - Exception: " + e);
+      throw e;
+    }
+    return false;
+  }
+ 
+  public boolean deleteSnapshot( String snapshotName)
+      throws MasterNotRunningException, IOException, SnapshotCreationException, 
+             InterruptedException, ZooKeeperConnectionException, ServiceException, Exception
+  {
+    try 
+    {
+      if (admin == null)
+        admin = new HBaseAdmin(conf);
+      admin.deleteSnapshot(snapshotName);
+      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.deleteSnapshot() - Snapshot deleted: " + snapshotName);
+    }
+    catch (Exception e)
+    {
+      if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.deleteSnapshot() - Exception: " + e);
+      throw e;
+    }
+
+    return true;
+  }
+
+  public boolean release()  throws IOException
+  {
+    if (admin != null)
+    {
+      admin.close();
+      admin = null;
+    }
+    return true;
+  }
+}
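
A minimal usage sketch for the writer above (hypothetical path; the
constructor calls init(), which pings HBase via checkHBaseAvailable(), so the
sketch only runs against a live cluster, the constructor and release() throw
checked exceptions the enclosing method must declare, and the other methods
report errors by returning a message string rather than throwing):

    SequenceFileWriter w = new SequenceFileWriter();
    String err = w.open("hdfs://host:8020/tmp/demo.seq", 2);  // 2 = BLOCK compression
    if (err == null)
        err = w.write("col1|col2|col3");                      // one delimited row
    if (err == null)
        err = w.close();
    if (err != null)
        System.err.println(err);
    w.release();   // closes the cached HBaseAdmin handle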

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/src/main/java/org/trafodion/sql/StringArrayList.java
----------------------------------------------------------------------
diff --git a/core/sql/src/main/java/org/trafodion/sql/StringArrayList.java b/core/sql/src/main/java/org/trafodion/sql/StringArrayList.java
new file mode 100644
index 0000000..6a2672c
--- /dev/null
+++ b/core/sql/src/main/java/org/trafodion/sql/StringArrayList.java
@@ -0,0 +1,47 @@
+// @@@ START COPYRIGHT @@@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// @@@ END COPYRIGHT @@@
+
+package org.trafodion.sql;
+
+import java.util.ArrayList;
+
+public class StringArrayList extends ArrayList<String> {
+
+	private static final long serialVersionUID = -3557219338406352735L;
+
+	void addElement(String st) {
+		add(st);
+	}
+
+	String getElement(int i) {
+		if (i >= 0 && i < size())
+			return get(i);
+		else
+			return null;
+	}
+
+	int getSize() {
+		return size();
+	}
+
+}
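
The class is a thin ArrayList<String> wrapper exposing addElement, getElement,
and getSize, with getElement returning null for an out-of-range index instead
of throwing (presumably to simplify non-Java callers such as JNI code; that
purpose is an assumption, as the class itself does not say). For example:

    StringArrayList list = new StringArrayList();
    list.addElement("first");
    list.getElement(0);   // "first"
    list.getElement(5);   // null rather than IndexOutOfBoundsException
    list.getSize();       // 1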

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/src/main/java/org/trafodion/sql/ustat/ChgAutoList.java
----------------------------------------------------------------------
diff --git a/core/sql/src/main/java/org/trafodion/sql/ustat/ChgAutoList.java b/core/sql/src/main/java/org/trafodion/sql/ustat/ChgAutoList.java
new file mode 100644
index 0000000..21a058b
--- /dev/null
+++ b/core/sql/src/main/java/org/trafodion/sql/ustat/ChgAutoList.java
@@ -0,0 +1,426 @@
+// @@@ START COPYRIGHT @@@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// @@@ END COPYRIGHT @@@
+//
+// This is the Java stored procedure for adding and deleting from the USTAT_AUTO_TABLES.
+// Compile this as: javac ChgAutoList.java
+// (Note that the class name must match the file name.)
+//
+package org.trafodion.sql.ustat;
+
+import java.sql.*;
+import java.io.*;
+
+public class ChgAutoList {
+ 
+  public static void chg(String  operation,     // Input
+                         String  schema,        // Input
+                         String  table,         // Input
+                         String[] result)       // Output
+      throws SQLException
+  {
+    String tableCat  = "NEO";
+    String autoCat   = "MANAGEABILITY";
+    String autoSch   = "HP_USTAT";
+    String autoTable = autoCat + "." + autoSch + ".USTAT_AUTO_TABLES";
+
+    operation = operation.toUpperCase().trim();
+    schema    = schema.trim();
+    table     = table.trim();
+    if (schema.length() > 0)
+      if (schema.charAt(0) != '"') schema = schema.toUpperCase();
+      else                         schema = internalFormat(schema);
+    if (table.length() > 0)    
+      if (table.charAt(0)  != '"') table  = table.toUpperCase();
+      else                         table  = internalFormat(table);
+
+    String intSchInStrLit = schema;
+    String intTblInStrLit = table;
+    intSchInStrLit = "_UCS2'" + intSchInStrLit.replaceAll("'", "''") + "'";
+    intTblInStrLit = "_UCS2'" + intTblInStrLit.replaceAll("'", "''") + "'";
+
+    String extSchName = schema;
+    String extTblName = table;
+    extSchName = "\"" + extSchName.replaceAll("\"", "\"\"") + "\"";
+    extTblName = "\"" + extTblName.replaceAll("\"", "\"\"") + "\"";
+    String extSchDotTbl = extSchName+"."+extTblName;
+
+    String addStr  = "INSERT";
+    String inclStr = "INCLUDE"; // This is a synonym for INSERT.
+    String exclStr = "EXCLUDE";
+    String delStr  = "DELETE";
+    Connection conn   = DriverManager.getConnection("jdbc:default:connection");
+
+    // Check for valid schema and table names.
+    if      (schema.length() > 128) result[0]="Schema name too long. No changes made.";
+    else if (table.length()  > 128) result[0]="Table name too long. No changes made.";
+    else if (( schema.equals("*") && !table.equals("*")) ||
+             (!schema.equals("*") &&  table.equals("*")))
+      result[0]="You must specify '*' for both schema and table. No changes made.";
+    else if (schema.equals("") || table.equals(""))
+      result[0]="\"" + schema + "\".\"" + table + 
+                "\" is an invalid name. No changes made.";
+    else try {
+      if(operation.equals(addStr) || operation.equals(inclStr) || operation.equals(exclStr))
+      {          
+        // Perform INSERT, INCLUDE, and EXCLUDE command.
+        if (!operation.equals(exclStr) && schema.equals("*") && table.equals("*")) 
+        {
+          // Perform INSERT or INCLUDE of all tables ('*'.'*' for schema and table).          
+          try 
+          {
+
+            String os = System.getProperty("os.name").toLowerCase();
+            String sys = "";
+  
+            if ( os.indexOf("linux") >=0 ) {
+              sys = "NSK"; 
+            } 
+   
+            else { // assume NSK
+              // Obtain system name, which is needed for query to get all tables.
+              String shellCmd ="/bin/gtacl -c SYSINFO";
+              Process p = Runtime.getRuntime().exec(shellCmd);
+              BufferedReader stdInput = new BufferedReader(new 
+                                        InputStreamReader(p.getInputStream()));
+              String s;
+              int pos;
+              while ((s = stdInput.readLine()) != null)
+                if ((pos = s.indexOf("System name")) >= 0)
+                {
+                  pos = s.indexOf("\\"); // Find beginning of system name.
+                  sys = s.substring(pos+1);
+                }
+            }
+
+            PreparedStatement findSchemaVersion, insStmt, delStmt, cntStmt;
+            // Obtain a list of all schema versions >= 2300 present on system.
+            String verCmd="SELECT DISTINCT S.SCHEMA_VERSION " +
+                       " FROM HP_SYSTEM_CATALOG.SYSTEM_SCHEMA.CATSYS C, " +
+                       "     HP_SYSTEM_CATALOG.SYSTEM_SCHEMA.SCHEMATA S " +
+                       " WHERE C.CAT_UID=S.CAT_UID AND " +
+                       "      C.CAT_NAME=_UCS2'NEO' AND " +
+                       "      S.SCHEMA_VERSION >= 2300";
+            findSchemaVersion = conn.prepareStatement(verCmd);
+            ResultSet rs = findSchemaVersion.executeQuery();
+
+            String ver, cmd;
+            int autoListCnt=0;
+            // Loop through all schema versions >= 2300:
+            while (rs.next()) // Advance to next row in result set
+            {
+              ver=""+rs.getInt(1); // Get current row (version) from result set.
+
+              String cqdCmd="CONTROL QUERY DEFAULT BLOCK_TO_PREVENT_HALLOWEEN 'ON'";
+              PreparedStatement cqdStmt = conn.prepareStatement(cqdCmd);
+              cqdStmt.executeUpdate();
+
+              // Insert all tables and MVs in NEO catalog that don't already exist in list.
+              cmd="INSERT INTO " + autoTable +
+                " SELECT C.CAT_NAME, S.SCHEMA_NAME, O.OBJECT_NAME, " +
+                "       TIMESTAMP '0001-01-01 00:00:00', " +
+                "       TIMESTAMP '0001-01-01 00:00:00', " +
+                "       0, _UCS2'', _ISO88591'SYSTEM' " +
+                " FROM HP_SYSTEM_CATALOG.SYSTEM_SCHEMA.CATSYS C, " +
+                "     HP_SYSTEM_CATALOG.SYSTEM_SCHEMA.SCHEMATA S, " +
+                "     "+tableCat+".HP_DEFINITION_SCHEMA.OBJECTS O " +
+                " WHERE C.CAT_UID=S.CAT_UID AND " +
+                "      S.SCHEMA_UID=O.SCHEMA_UID AND " +
+                "     (O.OBJECT_TYPE=_ISO88591'BT' OR O.OBJECT_TYPE=_ISO88591'MV') AND " +
+                "      O.OBJECT_NAME_SPACE=_ISO88591'TA' AND " +
+                "      C.CAT_NAME=_UCS2'NEO' AND " +
+                "      S.SCHEMA_NAME<>_UCS2'HP_DEFINITION_SCHEMA' AND " +
+                "      S.SCHEMA_NAME<>_UCS2'PUBLIC_ACCESS_SCHEMA' AND " +
+                "      S.SCHEMA_NAME NOT LIKE _UCS2'HP\\_%' ESCAPE _UCS2'\\' AND " +
+                "      S.SCHEMA_NAME NOT LIKE _UCS2'VOLATILE\\_SCHEMA\\_%' ESCAPE _UCS2'\\' AND " +
+                "      O.OBJECT_NAME<>_UCS2'HISTOGRAMS' AND " +
+                "      O.OBJECT_NAME<>_UCS2'HISTOGRAM_INTERVALS' AND " +
+                "      O.OBJECT_NAME<>_UCS2'HISTOGRAMS_FREQ_VALS' AND " +
+                "      O.OBJECT_NAME<>_UCS2'MVS_TABLE_INFO_UMD' AND " +
+                "      O.OBJECT_NAME<>_UCS2'MVS_UMD' AND " +
+                "      O.OBJECT_NAME<>_UCS2'MVS_USED_UMD' AND " +
+                "      (C.CAT_NAME, S.SCHEMA_NAME, O.OBJECT_NAME) NOT IN " +
+                "        (SELECT CAT_NAME, SCH_NAME, TBL_NAME FROM " + autoTable + ")";
+              insStmt = conn.prepareStatement(cmd);
+              insStmt.executeUpdate();
+
+              // Delete all tables and MVs in list that no longer exist in NEO catalog.
+              cmd="DELETE FROM " + autoTable + " WHERE ADDED_BY<>_ISO88591'EXCLUD' AND " +
+                " (CAT_NAME, SCH_NAME, TBL_NAME) NOT IN " +
+                " (SELECT C.CAT_NAME, S.SCHEMA_NAME, O.OBJECT_NAME " +
+                " FROM HP_SYSTEM_CATALOG.SYSTEM_SCHEMA.CATSYS C, " +
+                "     HP_SYSTEM_CATALOG.SYSTEM_SCHEMA.SCHEMATA S, " +
+                "     "+tableCat+".HP_DEFINITION_SCHEMA.OBJECTS O " +
+                " WHERE C.CAT_UID=S.CAT_UID AND " +
+                "      S.SCHEMA_UID=O.SCHEMA_UID AND " +
+                "     (O.OBJECT_TYPE=_ISO88591'BT' OR O.OBJECT_TYPE=_ISO88591'MV') AND " +
+                "      O.OBJECT_NAME_SPACE=_ISO88591'TA' AND " +
+                "      C.CAT_NAME=_UCS2'NEO' AND " +
+                "      S.SCHEMA_NAME<>_UCS2'HP_DEFINITION_SCHEMA' AND " +
+                "      S.SCHEMA_NAME<>_UCS2'PUBLIC_ACCESS_SCHEMA' AND " +
+                "      S.SCHEMA_NAME NOT LIKE _UCS2'HP\\_%' ESCAPE _UCS2'\\' AND " +
+                "      S.SCHEMA_NAME NOT LIKE _UCS2'VOLATILE\\_SCHEMA\\_%' ESCAPE _UCS2'\\' AND " +
+                "      O.OBJECT_NAME<>_UCS2'HISTOGRAMS' AND " +
+                "      O.OBJECT_NAME<>_UCS2'HISTOGRAM_INTERVALS' AND " +
+                "      O.OBJECT_NAME<>_UCS2'HISTOGRAMS_FREQ_VALS' AND " +
+                "      O.OBJECT_NAME<>_UCS2'MVS_TABLE_INFO_UMD' AND " +
+                "      O.OBJECT_NAME<>_UCS2'MVS_UMD' AND " +
+                "      O.OBJECT_NAME<>_UCS2'MVS_USED_UMD')";
+              delStmt = conn.prepareStatement(cmd);
+              delStmt.executeUpdate();                                                                     
+            }
+            // Get current count of tables that will be automated.
+            cmd="SELECT COUNT(*) FROM " + autoTable + " WHERE ADDED_BY<>_ISO88591'EXCLUD'";
+            cntStmt = conn.prepareStatement(cmd);
+            rs = cntStmt.executeQuery();
+            rs.next(); 
+            autoListCnt = rs.getInt(1);
+            
+            result[0]="INSERTed " + autoListCnt + " table names (all) into list.";
+            rs.close();
+          }
+          catch(IOException err)
+          {
+            // Shell failure message.
+            result[0] = "Unable to " + operation + ".  Error: " + err.getMessage().trim();
+            if (result[0].charAt(result[0].length()-1) == ']') // Remove date/time.
+              result[0]=result[0].substring(0,result[0].length()-21);
+          }   
+        }
+        else if (operation.equals(exclStr) && 
+                 schema.equals("*") && table.equals("*")) 
+          result[0] = "EXCLUDE failed. Specifying '*', '*' not allowed.";
+        else
+        {
+          // User has requested to INSERT, INCLUDE, or EXCLUDE a specific table.
+          String addedBy="USER";
+          String action=operation+"d";
+          if (operation.equals(addStr))  action=operation+"ed";
+          if (operation.equals(exclStr))
+          {
+            addedBy="EXCLUD";
+            // For EXCLUDE, always delete the blank entry created when all entries are deleted.
+            // (See DELETE below.)  In addition, if EXCLUDing, and an entry already exists for
+            // this schema and table with ADDED_BY='SYSTEM', remove so it can be EXCLUDEd.
+            PreparedStatement delStmt1 =
+              conn.prepareStatement("DELETE FROM " + autoTable + " WHERE CAT_NAME=_UCS2''");
+            // Do not check for errors.
+            delStmt1.executeUpdate();
+            PreparedStatement delStmt2 =
+              conn.prepareStatement("DELETE FROM " + autoTable + " WHERE CAT_NAME=_UCS2'NEO' " +
+                                    "AND SCH_NAME=" + intSchInStrLit + " AND TBL_NAME=" + intTblInStrLit +
+                                    " AND ADDED_BY=_ISO88591'SYSTEM'");
+            // Do not check for errors.
+            delStmt2.executeUpdate();
+          }
+          
+          PreparedStatement insStmt =
+            conn.prepareStatement("INSERT INTO " + autoTable + " VALUES (_UCS2'NEO'," +
+                                  " ?, ?, TIMESTAMP '0001-01-01 00:00:00'," +
+                                  " TIMESTAMP '0001-01-01 00:00:00', 0, _UCS2'', _ISO88591'" +
+                                  addedBy + "')");
+          insStmt.setString(1, schema);  // Set first  argument in statement (1st '?').
+          insStmt.setString(2, table);   // Set second argument in statement (2nd '?').
+          if (insStmt.executeUpdate() == 1) 
+            result[0]="Table name "+extSchDotTbl+" " + action +".";
+
+        }
+      }
+      else if(operation.equals(delStr)) 
+      {
+        // Perform DELETE command.
+        if (schema.equals("*") && table.equals("*")) 
+        {
+          // If the user has specified '*'.'*' for schema and table, remove all 
+          // entries in list, then add an empty entry.
+          PreparedStatement delStmt = conn.prepareStatement("DELETE FROM " + autoTable);
+          delStmt.executeUpdate();
+          result[0]="All entries DELETEd.  Automation disabled.";
+          
+          // Add the empty entry, which is needed so that USAS.sh does not later insert all
+          // existing tables.  It would do so if the USTAT_AUTO_TABLES table were empty.
+          PreparedStatement insStmt =
+            conn.prepareStatement("INSERT INTO " + autoTable + 
+								  " VALUES (_UCS2'', _UCS2'', _UCS2'', " +
+                                  " TIMESTAMP '0001-01-01 00:00:00'," +
+                                  " TIMESTAMP '0001-01-01 00:00:00', 0, _UCS2'', _ISO88591'USER')");
+          insStmt.executeUpdate();
+
+          try {
+            // Remove USTAT_AUTOMATION_INTERVAL entry from SYSTEM_DEFAULTS tables.
+
+            String os = System.getProperty("os.name").toLowerCase();
+  
+	    if ( os.indexOf("linux") >=0 ) {
+                PreparedStatement delStmt2 =
+                   conn.prepareStatement(
+        "DELETE FROM HP_SYSTEM_CATALOG.SYSTEM_DEFAULTS_SCHEMA.SYSTEM_DEFAULTS " +
+        "WHERE ATTRIBUTE = 'USTAT_AUTOMATION_INTERVAL'");
+               // Do not check for errors.
+               delStmt2.executeUpdate();
+  
+               // Now remove AUTO_CQDS_SET file from the cluster.
+               String shellCmd;
+
+               String sqroot = System.getenv("MY_SQROOT");
+
+               shellCmd = "rm " + sqroot + "/export/lib/mx_ustat/autodir/USTAT_CQDS_SET";
+               Process p = Runtime.getRuntime().exec(shellCmd);
+
+               shellCmd = "rm " + sqroot + "/export/lib/mx_ustat/autoprev/USTAT_CQDS_SET";
+               p = Runtime.getRuntime().exec(shellCmd);
+
+            } else {
+  
+              // assume NSK
+              // Obtain system name.
+              String sys="";
+              String shellCmd = "/bin/gtacl -c SYSINFO";
+              Process p = Runtime.getRuntime().exec(shellCmd);
+              BufferedReader stdInput = new BufferedReader(new 
+                InputStreamReader(p.getInputStream()));
+              String s;
+              int pos;
+              while ((s = stdInput.readLine()) != null)
+                if ((pos = s.indexOf("System name")) >= 0)
+                {
+                  pos = s.indexOf("\\"); // Find beginning of system name.
+                  sys = s.substring(pos+1);
+                }
+  
+              // Obtain all segment names.  The grep here is really to avoid getting names
+              // of systems that are on expand which are not segments.
+              String sysprefix=sys.substring(0,3).toLowerCase();
+              shellCmd = "ls /E";
+              p = Runtime.getRuntime().exec(shellCmd);
+              stdInput = new BufferedReader(new InputStreamReader(p.getInputStream()));
+              // For each segment, remove USTAT_AUTOMATION_INTERVAL from system defaults table.
+              // (make sure the segment name returned starts with 'sysprefix').
+              while ((s = stdInput.readLine()) != null && s.indexOf(sysprefix) == 0)
+              {
+                PreparedStatement delStmt2 =
+                  conn.prepareStatement("DELETE FROM HP_SYSTEM_CATALOG" + 
+                                        ".SYSTEM_DEFAULTS_SCHEMA.SYSTEM_DEFAULTS " +
+                                        "WHERE ATTRIBUTE = 'USTAT_AUTOMATION_INTERVAL'");
+                // Do not check for errors.
+                delStmt2.executeUpdate();
+              }
+  
+              // Now remove AUTO_CQDS_SET file from primary segment.
+              shellCmd = "rm /E/" + sysprefix + "0101/usr/tandem/mx_ustat/autodir/USTAT_CQDS_SET";
+              p = Runtime.getRuntime().exec(shellCmd);
+              shellCmd = "rm /E/" + sysprefix + "0101/usr/tandem/mx_ustat/autoprev/USTAT_CQDS_SET";
+              p = Runtime.getRuntime().exec(shellCmd);
+            }
+
+          }
+          catch(IOException err)
+          {
+            // Shell failure message.
+            result[0] = "Unable to remove USTAT_AUTOMATION_INTERVAL from SYSTEM_DEFAULTS " + 
+                        "tables.  You must do this manually.";
+            if (result[0].charAt(result[0].length()-1) == ']') // Remove date/time.
+              result[0]=result[0].substring(0,result[0].length()-21);
+          }   
+        }
+        else 
+        {          
+          // User has requested to delete a specific table.
+          // First see if the table is 'EXCLUD' and can be deleted.  Note that deletion
+          // of the last 'USER' added table results in a blank 'USER' entry being added.
+          // This is not done for deletion of 'EXCLUD'ed tables.
+          PreparedStatement delete1 =
+            conn.prepareStatement("DELETE FROM " + autoTable + 
+            " WHERE SCH_NAME = ? AND TBL_NAME = ? AND ADDED_BY=_ISO88591'EXCLUD'");
+            delete1.setString(1, schema);  // Set first  argument in statement (1st '?').
+            delete1.setString(2, table);   // Set second argument in statement (2nd '?').
+          if (delete1.executeUpdate() == 0) 
+          {
+            // Failed to delete (0 rows deleted).  Either the table did not have 
+            // ADDED_BY='EXCLUD' or entry does not exist.  Try to delete for any ADDED_BY.
+            PreparedStatement delete2 =
+              conn.prepareStatement("DELETE FROM " + autoTable +
+              " WHERE SCH_NAME = ? AND TBL_NAME = ?");
+              delete2.setString(1, schema);  // Set first  argument in statement (1st '?').
+              delete2.setString(2, table);   // Set second argument in statement (2nd '?').
+            if (delete2.executeUpdate() == 0) 
+              result[0]="Table name  "+extSchDotTbl+" not found, not DELETEd.";
+            else
+            { 
+              // A 'SYSTEM' or 'USER' table DELETEd.
+              result[0]="Table name "+extSchDotTbl+" DELETEd.";
+
+              // Add the empty entry, if there are no rows with the ADDED_BY field set to
+              // 'USER'.  This keeps USAS.sh from inserting all existing tables later
+              // on.  It would do so if all 'USER' entries from USTAT_AUTO_TABLES table had
+              // been deleted.
+              PreparedStatement FindUserEnteredTables =
+                conn.prepareStatement("SELECT COUNT(*) FROM " + autoTable +
+                " WHERE ADDED_BY = _ISO88591'USER'" + 
+                " FOR READ UNCOMMITTED ACCESS");
+              ResultSet rs = FindUserEnteredTables.executeQuery();
+              rs.next();
+              if (rs.getInt(1) == 0)
+              {
+                PreparedStatement insStmt =
+                  conn.prepareStatement("INSERT INTO " + autoTable + 
+				  " VALUES (_UCS2'', _UCS2'', _UCS2'', " +
+                  " TIMESTAMP '0001-01-01 00:00:00'," +
+                  " TIMESTAMP '0001-01-01 00:00:00', 0, _UCS2'', _ISO88591'USER')");
+                insStmt.executeUpdate();
+              }
+              rs.close();
+            }
+          }
+          // 'EXCLUD' table was successfully DELETEd, set result string.
+          else result[0]="Table name "+extSchDotTbl+"\" DELETEd.";
+        }
+      }
+      else 
+      {
+        result[0] = operation + " is not a valid operation.";
+      }
+    } 
+    catch(SQLException err)
+    {
+      result[0] = err.getMessage().trim(); // Issue SQL error.
+      if (result[0].charAt(result[0].length()-1) == ']') // Remove date/time.
+        result[0]=result[0].substring(0,result[0].length()-21);
+    } 
+    finally 
+    {
+      conn.close();
+    }
+    if (result[0].length() > 80) result[0]=result[0].substring(0,79);
+  }
+
+  public static String internalFormat(String name)
+  {
+    // Remove enclosing quotes
+    name=name.substring(1, name.length()-1);
+
+    // Change all occurrences of "" to ".
+    int index=-1;
+    while((index=name.indexOf("\"\"", index+1)) != -1)
+      name=name.substring(0,index+1)+name.substring(index+2);  
+
+    return name;
+  }
+}
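
A sketch of the call signature only (schema and table names are hypothetical;
in practice chg() runs as a stored procedure inside the engine, which is where
the "jdbc:default:connection" URL it opens resolves):

    String[] result = new String[1];
    ChgAutoList.chg("INSERT", "MYSCHEMA", "MYTABLE", result);
    System.out.println(result[0]);  // e.g. "Table name \"MYSCHEMA\".\"MYTABLE\" INSERTed."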

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/src/main/java/org/trafodion/sql/ustat/UstatUtil.java
----------------------------------------------------------------------
diff --git a/core/sql/src/main/java/org/trafodion/sql/ustat/UstatUtil.java b/core/sql/src/main/java/org/trafodion/sql/ustat/UstatUtil.java
new file mode 100644
index 0000000..12f1f4c
--- /dev/null
+++ b/core/sql/src/main/java/org/trafodion/sql/ustat/UstatUtil.java
@@ -0,0 +1,442 @@
+// @@@ START COPYRIGHT @@@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// @@@ END COPYRIGHT @@@
+
+package org.trafodion.sql.ustat;
+
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.OutputStream;
+import java.io.OutputStreamWriter;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.util.Properties;
+import java.util.Enumeration;
+
+
+public class UstatUtil extends Thread
+{
+
+   static BufferedWriter bw=null;
+   static boolean doneflag=false;
+   static StringBuffer outputStr=null;
+   static StringBuffer errStr=null;
+   static boolean nextCommand=false;
+   static boolean errorFlag=false;
+   static boolean statusFlag=false;
+   static Runtime rt=Runtime.getRuntime();
+   static boolean srunStatus=false;
+   final static String scriptIndexFile=".scriptIndex";  // Script index property file listing the scripts that may be executed.
+   static String lineSeparator=System.getProperty("line.separator");
+
+
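+   /**
+    * Runs the mx_ustat/stats_profile script with the given arguments and
+    * places the collected output in output[0].
+    */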
+   public static void runStatsProfile(String arguments,String[] output) throws IOException
+   {
+      Process p;
+      String command=" ";
+
+      String os = System.getProperty("os.name").toLowerCase();
+
+      String cmd_path;
+
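+      // On Linux the script is run through "sh" (the "sh " prefix becomes the
+      // first token passed to Runtime.exec); on NSK it is invoked directly.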
+      if ( os.indexOf("linux") >=0 ) {
+         cmd_path = "sh " + System.getenv("MY_SQROOT") + "/export/lib";
+      } else { // assume NSK
+         cmd_path = "/usr/tandem";
+      }
+
+      String cmd= cmd_path + "/mx_ustat/stats_profile ";
+      cmd=cmd+arguments;
+
+      p = rt.exec(cmd);
+      try
+      {
+         execute(command, p, output, false);
+      } catch (IOException ioe)
+      {
+         statusFlag = false;
+      }
+   }
+
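+   /**
+    * Runs mx_ustat/StopAutoStats.sh to stop automated statistics collection.
+    */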
+   public static void USASstop() throws IOException
+   {
+      Process p;
+      String command=" ";
+
+      String os = System.getProperty("os.name").toLowerCase();
+      String cmd_path;
+
+      if ( os.indexOf("linux") >=0 ) {
+         cmd_path = "sh " + System.getenv("MY_SQROOT") + "/export/lib";
+      } else { // assume NSK
+         cmd_path = "/usr/tandem";
+      }
+
+      String cmd= cmd_path + "/mx_ustat/StopAutoStats.sh";
+
+      String[] output=cmd.split("\\s+"); // Initial tokens unused; execute() overwrites output[0].
+
+      p = rt.exec(cmd);
+      try
+      {
+         execute(command, p, output, false);
+      } catch (IOException ioe)
+      {
+         statusFlag = false;
+      }
+   }
+
+/*
+   public static void  handleSrun(String command, String[] output) throws IOException
+   {
+
+      Properties props=null;
+      props=new Properties();
+      String[] envList={};
+
+      try
+      {
+         props.load(new FileInputStream("/usr/tandem/nvscript/admin/.scriptIndex"));
+      } catch (FileNotFoundException fnfe)
+      {
+         output[0]="Could not find the index file.";
+         return;
+      }
+
+      String[] commandArr=command.split("\\s+");
+      if (props.getProperty(commandArr[0].trim()) == null)
+      {
+         if (commandArr[0] != null && !commandArr[0].trim().equals(""))
+         {
+            output[0]= "Invalid script.";
+         }
+         if (props.size() >0)
+         {
+            output[0]="The valid scripts are:" + lineSeperator + lineSeperator;
+            Enumeration scriptNames=props.propertyNames();
+            while (scriptNames.hasMoreElements())
+            {
+               String scriptName=(String)scriptNames.nextElement();
+               output[0]+=format(scriptName,(String)props.get(scriptName))+ lineSeparator;
+            }
+         }
+         outputStr=null;
+         errStr=null;
+         return;
+      }
+
+      srunStatus = true;
+      Process p=rt.exec("/usr/bin/sh eval "+command,envList,new File("/usr/tandem/nvscript/script"));
+      try
+      {
+         execute(command, p, output, srunStatus);
+      } catch (IOException ioe)
+      {
+         statusFlag = false;
+         outputStr.append("Could not create the sub process"+ioe);
+      }
+   }
+*/
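+   /**
+    * Runs the already-started process p, draining stdout and stderr on
+    * separate threads, and returns the combined text in output[0].
+    * @param command   the original command string (kept for context; unused here)
+    * @param p         the process to monitor
+    * @param output    output[0] receives the collected stdout, plus stderr if kept
+    * @param cmdStatus true to keep stderr text (script mode); false to discard it
+    */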
+   public static void execute(String command,Process p, String[] output, boolean cmdStatus) throws IOException
+   {
+
+      outputStr=new StringBuffer();
+      errStr=new StringBuffer();
+      output[0]="";
+      statusFlag = false;
+
+      InputStream is = p.getInputStream();
+      OutputStream os = p.getOutputStream();
+      InputStream es = p.getErrorStream();
+
+      InputStreamReader isr = new InputStreamReader(is);
+      InputStreamReader iser = new InputStreamReader(es);
+      OutputStreamWriter osw = new OutputStreamWriter(os);
+
+      final BufferedReader br=new BufferedReader(isr);
+      final BufferedReader ber=new BufferedReader(iser);
+
+      bw = new BufferedWriter(osw);
+
+      // Reader thread that drains the process's stdout into outputBuf.
+      class OutputThread extends Thread
+      {
+
+         StringBuffer outputBuf=null;
+         OutputThread(StringBuffer outputBuf)
+         {
+            this.outputBuf=outputBuf;
+         }
+
+         public void run()
+         {
+            int i=0;
+            try
+            {
+               while ((i=br.read()) != -1)
+               {
+                  statusFlag = true;   // Some output has been received.
+                  if (errorFlag)
+                  {
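+                     // Let the error thread drain stderr before buffering more output.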
+                     Thread.yield();
+                     errorFlag=false;
+                     try
+                     {
+                        sleep(100);
+                     } catch (InterruptedException ie)
+                     {
+                     }
+                  }
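+                  // When nextCommand is set, the rest of the current line is
+                  // discarded (no active code path sets it to true).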
+                  if (nextCommand)
+                  {
+                     br.readLine();
+                     nextCommand=false;
+                  }else
+                  {
+                     outputBuf.append((char)i);
+                  }
+               }
+               doneflag=true;
+            } catch (IOException ote)
+            {
+               System.out.println("Error occurred in output Thread "+ote);
+            }
+         }
+      };
+
+      OutputThread outputt=new OutputThread(outputStr);
+      outputt.start();
+
+      // Reader thread that drains the process's stderr into outputBuf.
+      class ErrorThread extends Thread
+      {
+
+         StringBuffer outputBuf=null;
+         ErrorThread(StringBuffer outputBuf)
+         {
+            this.outputBuf=outputBuf;
+         }
+
+         public void run()
+         {
+            int i=0;
+            try
+            {
+               while ((i=ber.read()) != -1)
+               {
+                  errorFlag=true;
+                  outputBuf.append((char)i);
+               }
+            }catch (IOException ete)
+            {
+               System.out.println(" Error occurred in error thread "+ete);
+            }
+         }
+      };
+
+      ErrorThread errort=new ErrorThread(errStr);
+      errort.start();
+
+      // Wait for the process to exit and for both reader threads to finish.
+      try
+      {
+         p.waitFor();
+         outputt.join();
+         errort.join();
+         if (!cmdStatus)
+         {
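+            // Non-script commands: discard any captured stderr text.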
+            if (errStr.length() > 0)
+            {
+               errStr.delete(0, errStr.length());
+	       //     errStr.append("An internal server error has occurred. Please contact support.");
+            }
+         }
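+         // Strip everything through a leading "/sh: " marker from shell error messages.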
+         int count = errStr.indexOf("/sh:");
+         if (count > 0)
+            errStr.delete(0, count+5);
+
+         outputStr.append(errStr);
+      } catch (InterruptedException e)
+      {
+         // Interrupted while waiting; return whatever output was collected.
+      }
+      isr=null;
+      iser=null;
+      osw=null;
+      bw=null;
+      outputt=null;
+      errort=null;
+      cmdStatus=false;
+
+      output[0]=outputStr.toString();
+      outputStr=null;
+      errStr=null;
+   }
+/*
+   private static String format(String scriptName,String description)
+   {
+
+      if (scriptName == null)
+      {
+         return null;
+      }
+      StringBuffer sb=null;
+      sb=new StringBuffer(scriptName);
+      while (sb.length() < 12)
+      {
+         sb.append(" ");
+      }
+      if (description != null)
+      {
+         sb.append("-");
+         sb.append(description);
+      }
+      return sb.toString().replaceAll(lineSeparator,lineSeparator + "            ");
+   }
+*/
+/*
+   public static void getTaclInfo(String command,String[] output) throws IOException
+   {
+
+      String[] commandArr=command.split("\\s+");
+      Process p;
+
+      if (commandArr.length == 1 && commandArr[0].equalsIgnoreCase("sutver"))
+      {
+         p = rt.exec("/usr/bin/sh eval  gtacl -c 'sutver'");
+      }
+      else if (commandArr.length == 2 && commandArr[0].equalsIgnoreCase("vproc") && commandArr[1].equalsIgnoreCase("$SYSTEM.ZMXODBC.MXOSRVR"))
+      {
+         p = rt.exec("/usr/bin/sh eval gtacl -c 'vproc $SYSTEM.ZMXODBC.MXOSRVR'");
+      }
+      else
+      {
+         output[0] = handleExceptions(commandArr[0]);
+         return;
+      }
+      try
+      {
+         execute(command, p, output, false);
+      } catch (IOException ioe)
+      {
+         statusFlag = false;
+      }
+   }
+
+   public static void onlineDBdump(String command,String[] output) throws IOException
+   {
+      handleDbaCmd(command, output);
+   }
+
+   public static void handleDbaCmd(String command,String[] output) throws IOException
+   {
+      String[] envList = {};
+      String[] commandArr=command.split("\\s+");
+      Process p = null;
+      String dbaScriptName = null;
+
+      int len = commandArr.length;
+      if (commandArr[0].equalsIgnoreCase("dbonlinedump"))
+         dbaScriptName = "dbonlinedump";
+      else if (commandArr[0].equalsIgnoreCase("updatestats"))
+         dbaScriptName = "updatestats";
+
+      if (dbaScriptName != null)
+      {
+         switch (len)
+         {
+            case 1:
+               p = rt.exec("/usr/bin/sh eval " + dbaScriptName, envList, new File("/usr/tandem/nvscript/dbascripts"));
+               break;
+            case 2:
+               if (commandArr[1].equalsIgnoreCase("INFO"))
+                  p = rt.exec("/usr/bin/sh eval " + dbaScriptName, envList, new File("/usr/tandem/nvscript/dbascripts"));
+
+               else
+                  output[0] = handleExceptions(commandArr[0]);
+               break;
+            case 3:
+               if (commandArr[1].equalsIgnoreCase("AT"))
+                  p = rt.exec("/usr/bin/sh eval " + dbaScriptName + " AT " + commandArr[2], envList, new File("/usr/tandem/nvscript/dbascripts"));
+
+               else
+                  output[0] = handleExceptions(commandArr[0]);
+               break;
+            default:
+               output[0] = handleExceptions(commandArr[0]);
+               return;
+         }
+      }
+      else
+      {
+         output[0] = handleExceptions(commandArr[0]);
+         return;
+      }
+
+      try
+      {
+         execute(command, p, output, false);
+      } catch (IOException ioe)
+      {
+         statusFlag = false;
+      }
+   }
+*/
+
+   public static String handleExceptions(String str)
+   {
+      // The offending command text is ignored; always return a generic message.
+      return "Invalid Command.";
+   }
+
+}
+
+/*
+DROP PROCEDURE NEO.HP_USTAT.STATS_PROFILE;
+CREATE PROCEDURE NEO.HP_USTAT.STATS_PROFILE
+  (
+    IN cmd VARCHAR(4000),
+    OUT response VARCHAR(240)
+  )
+  EXTERNAL NAME 'UstatUtil.runStatsProfile'
+  EXTERNAL PATH '/usr/tandem/mx_ustat'
+  LANGUAGE JAVA
+  PARAMETER STYLE JAVA
+  NO SQL
+  DYNAMIC RESULT SETS 0
+  ;
+DROP PROCEDURE NEO.HP_USTAT.STOP_AUTOMATED_STATS;
+CREATE PROCEDURE NEO.HP_USTAT.STOP_AUTOMATED_STATS
+  ()
+  EXTERNAL NAME 'UstatUtil.USASstop'
+  EXTERNAL PATH '/usr/tandem/mx_ustat'
+  LANGUAGE JAVA
+  PARAMETER STYLE JAVA
+  NO SQL
+  DYNAMIC RESULT SETS 0
+  ;
+
+*/

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/ustat/ChgAutoList.java
----------------------------------------------------------------------
diff --git a/core/sql/ustat/ChgAutoList.java b/core/sql/ustat/ChgAutoList.java
deleted file mode 100644
index 61315c9..0000000
--- a/core/sql/ustat/ChgAutoList.java
+++ /dev/null
@@ -1,426 +0,0 @@
-// @@@ START COPYRIGHT @@@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// @@@ END COPYRIGHT @@@
-//
-// This is the Java stored procedure for adding and deleting from the USTAT_AUTO_TABLES.
-// Compile this as: javac ChgAutoList.java;  
-//  (Note that the class must be the same name as the file).
-//
-package com.hp.mx_ustat;
-
-import java.sql.*;
-import java.io.*;
-
-public class ChgAutoList {
- 
-  public static void chg(String  operation,     // Input
-                         String  schema,        // Input
-                         String  table,         // Input
-                         String[] result)       // Output
-      throws SQLException
-  {
-    String tableCat  = "NEO";
-    String autoCat   = "MANAGEABILITY";
-    String autoSch   = "HP_USTAT";
-    String autoTable = autoCat + "." + autoSch + ".USTAT_AUTO_TABLES";
-
-    operation = operation.toUpperCase().trim();
-    schema    = schema.trim();
-    table     = table.trim();
-    if (schema.length() > 0)
-      if (schema.charAt(0) != '"') schema = schema.toUpperCase();
-      else                         schema = internalFormat(schema);
-    if (table.length() > 0)    
-      if (table.charAt(0)  != '"') table  = table.toUpperCase();
-      else                         table  = internalFormat(table);
-
-    String intSchInStrLit = schema;
-    String intTblInStrLit = table;
-    intSchInStrLit = "_UCS2'" + intSchInStrLit.replaceAll("'", "''") + "'";
-    intTblInStrLit = "_UCS2'" + intTblInStrLit.replaceAll("'", "''") + "'";
-
-    String extSchName = schema;
-    String extTblName = table;
-    extSchName = "\"" + extSchName.replaceAll("\"", "\"\"") + "\"";
-    extTblName = "\"" + extTblName.replaceAll("\"", "\"\"") + "\"";
-    String extSchDotTbl = extSchName+"."+extTblName;
-
-    String addStr  = "INSERT";
-    String inclStr = "INCLUDE"; // This is a synonym for INSERT.
-    String exclStr = "EXCLUDE";
-    String delStr  = "DELETE";
-    Connection conn   = DriverManager.getConnection("jdbc:default:connection");
-
-    // Check for valid schema and table names.
-    if      (schema.length() > 128) result[0]="Schema name too long. No changes made.";
-    else if (table.length()  > 128) result[0]="Table name too long. No changes made.";
-    else if (( schema.equals("*") && !table.equals("*")) ||
-             (!schema.equals("*") &&  table.equals("*")))
-      result[0]="You must specify '*' for both schema and table. No changes made.";
-    else if (schema.equals("") || table.equals(""))
-      result[0]="\"" + schema + "\".\"" + table + 
-                "\" is an invalid name. No changes made.";
-    else try {
-      if(operation.equals(addStr) || operation.equals(inclStr) || operation.equals(exclStr))
-      {          
-        // Perform INSERT, INCLUDE, and EXCLUDE command.
-        if (!operation.equals(exclStr) && schema.equals("*") && table.equals("*")) 
-        {
-          // Perform INSERT or INCLUDE of all tables ('*'.'*' for schema and table).          
-          try 
-          {
-
-            String os = System.getProperty("os.name").toLowerCase();
-            String sys = "";
-  
-            if ( os.indexOf("linux") >=0 ) {
-              sys = "NSK"; 
-            } 
-   
-            else { // assume NSK
-              // Obtain system name, which is needed for query to get all tables.
-              String shellCmd ="/bin/gtacl -c SYSINFO";
-              Process p = Runtime.getRuntime().exec(shellCmd);
-              BufferedReader stdInput = new BufferedReader(new 
-                                        InputStreamReader(p.getInputStream()));
-              String s;
-              int pos;
-              while ((s = stdInput.readLine()) != null)
-              if ((pos = s.indexOf("System name")) >= 0)
-              {
-                pos = s.indexOf("\\"); // Find beginning of system name.
-                sys = s.substring(pos+1);
-              }
-            }
-
-            PreparedStatement findSchemaVersion, insStmt, delStmt, cntStmt;
-            // Obtain a list of all schema versions >= 2300 present on system.
-            String verCmd="SELECT DISTINCT S.SCHEMA_VERSION " +
-                       " FROM HP_SYSTEM_CATALOG.SYSTEM_SCHEMA.CATSYS C, " +
-                       "     HP_SYSTEM_CATALOG.SYSTEM_SCHEMA.SCHEMATA S " +
-                       " WHERE C.CAT_UID=S.CAT_UID AND " +
-                       "      C.CAT_NAME=_UCS2'NEO' AND " +
-                       "      S.SCHEMA_VERSION >= 2300";
-            findSchemaVersion = conn.prepareStatement(verCmd);
-            ResultSet rs = findSchemaVersion.executeQuery();
-
-            String ver, cmd;
-            int autoListCnt=0;
-            // Loop through all schema versions >= 2300:
-            while (rs.next()) // Advance to next row in result set
-            {
-              ver=""+rs.getInt(1); // Get current row (version) from result set.
-
-              String cqdCmd="CONTROL QUERY DEFAULT BLOCK_TO_PREVENT_HALLOWEEN 'ON'";
-              PreparedStatement cqdStmt = conn.prepareStatement(cqdCmd);
-              cqdStmt.executeUpdate();
-
-              // Insert all tables and MVs in NEO catalog that don't already exist in list.
-              cmd="INSERT INTO " + autoTable +
-                " SELECT C.CAT_NAME, S.SCHEMA_NAME, O.OBJECT_NAME, " +
-                "       TIMESTAMP '0001-01-01 00:00:00', " +
-                "       TIMESTAMP '0001-01-01 00:00:00', " +
-                "       0, _UCS2'', _ISO88591'SYSTEM' " +
-                " FROM HP_SYSTEM_CATALOG.SYSTEM_SCHEMA.CATSYS C, " +
-                "     HP_SYSTEM_CATALOG.SYSTEM_SCHEMA.SCHEMATA S, " +
-                "     "+tableCat+".HP_DEFINITION_SCHEMA.OBJECTS O " +
-                " WHERE C.CAT_UID=S.CAT_UID AND " +
-                "      S.SCHEMA_UID=O.SCHEMA_UID AND " +
-                "     (O.OBJECT_TYPE=_ISO88591'BT' OR O.OBJECT_TYPE=_ISO88591'MV') AND " +
-                "      O.OBJECT_NAME_SPACE=_ISO88591'TA' AND " +
-                "      C.CAT_NAME=_UCS2'NEO' AND " +
-                "      S.SCHEMA_NAME<>_UCS2'HP_DEFINITION_SCHEMA' AND " +
-                "      S.SCHEMA_NAME<>_UCS2'PUBLIC_ACCESS_SCHEMA' AND " +
-                "      S.SCHEMA_NAME NOT LIKE _UCS2'HP\\_%' ESCAPE _UCS2'\\' AND " +
-                "      S.SCHEMA_NAME NOT LIKE _UCS2'VOLATILE\\_SCHEMA\\_%' ESCAPE _UCS2'\\' AND " +
-                "      O.OBJECT_NAME<>_UCS2'HISTOGRAMS' AND " +
-                "      O.OBJECT_NAME<>_UCS2'HISTOGRAM_INTERVALS' AND " +
-                "      O.OBJECT_NAME<>_UCS2'HISTOGRAMS_FREQ_VALS' AND " +
-                "      O.OBJECT_NAME<>_UCS2'MVS_TABLE_INFO_UMD' AND " +
-                "      O.OBJECT_NAME<>_UCS2'MVS_UMD' AND " +
-                "      O.OBJECT_NAME<>_UCS2'MVS_USED_UMD' AND " +
-                "      (C.CAT_NAME, S.SCHEMA_NAME, O.OBJECT_NAME) NOT IN " +
-                "        (SELECT CAT_NAME, SCH_NAME, TBL_NAME FROM " + autoTable + ")";
-              insStmt = conn.prepareStatement(cmd);
-              insStmt.executeUpdate();
-
-              // Delete all tables and MVs in list that no longer exist in NEO catalog.
-              cmd="DELETE FROM " + autoTable + " WHERE ADDED_BY<>_ISO88591'EXCLUD' AND " +
-                " (CAT_NAME, SCH_NAME, TBL_NAME) NOT IN " +
-                " (SELECT C.CAT_NAME, S.SCHEMA_NAME, O.OBJECT_NAME " +
-                " FROM HP_SYSTEM_CATALOG.SYSTEM_SCHEMA.CATSYS C, " +
-                "     HP_SYSTEM_CATALOG.SYSTEM_SCHEMA.SCHEMATA S, " +
-                "     "+tableCat+".HP_DEFINITION_SCHEMA.OBJECTS O " +
-                " WHERE C.CAT_UID=S.CAT_UID AND " +
-                "      S.SCHEMA_UID=O.SCHEMA_UID AND " +
-                "     (O.OBJECT_TYPE=_ISO88591'BT' OR O.OBJECT_TYPE=_ISO88591'MV') AND " +
-                "      O.OBJECT_NAME_SPACE=_ISO88591'TA' AND " +
-                "      C.CAT_NAME=_UCS2'NEO' AND " +
-                "      S.SCHEMA_NAME<>_UCS2'HP_DEFINITION_SCHEMA' AND " +
-                "      S.SCHEMA_NAME<>_UCS2'PUBLIC_ACCESS_SCHEMA' AND " +
-                "      S.SCHEMA_NAME NOT LIKE _UCS2'HP\\_%' ESCAPE _UCS2'\\' AND " +
-                "      S.SCHEMA_NAME NOT LIKE _UCS2'VOLATILE\\_SCHEMA\\_%' ESCAPE _UCS2'\\' AND " +
-                "      O.OBJECT_NAME<>_UCS2'HISTOGRAMS' AND " +
-                "      O.OBJECT_NAME<>_UCS2'HISTOGRAM_INTERVALS' AND " +
-                "      O.OBJECT_NAME<>_UCS2'HISTOGRAMS_FREQ_VALS' AND " +
-                "      O.OBJECT_NAME<>_UCS2'MVS_TABLE_INFO_UMD' AND " +
-                "      O.OBJECT_NAME<>_UCS2'MVS_UMD' AND " +
-                "      O.OBJECT_NAME<>_UCS2'MVS_USED_UMD')";
-              delStmt = conn.prepareStatement(cmd);
-              delStmt.executeUpdate();                                                                     
-            }
-            // Get current count of tables that will be automated.
-            cmd="SELECT COUNT(*) FROM " + autoTable + " WHERE ADDED_BY<>_ISO88591'EXCLUD'";
-            cntStmt = conn.prepareStatement(cmd);
-            rs = cntStmt.executeQuery();
-            rs.next(); 
-            autoListCnt = rs.getInt(1);
-            
-            result[0]="INSERTed " + autoListCnt + " table names (all) into list.";
-            rs.close();
-          }
-          catch(IOException err)
-          {
-            // Shell failure message.
-            result[0] = "Unable to " + operation + ".  Error: " + err.getMessage().trim();
-            if (result[0].charAt(result[0].length()-1) == ']') // Remove date/time.
-              result[0]=result[0].substring(0,result[0].length()-21);
-          }   
-        }
-        else if (operation.equals(exclStr) && 
-                 schema.equals("*") && table.equals("*")) 
-          result[0] = "EXCLUDE failed. Specifying '*', '*' not allowed.";
-        else
-        {
-          // User has requested to INSERT, INCLUDE, or EXCLUDE a specific table.
-          String addedBy="USER";
-          String action=operation+"d";
-          if (operation.equals(addStr))  action=operation+"ed";
-          if (operation.equals(exclStr))
-          {
-            addedBy="EXCLUD";
-            // For EXCLUDE, always delete the blank entry created when all entries are deleted.
-            // (See DELETE below.)  In addition, if EXCLUDing, and an entry already exists for
-            // this schema and table with ADDED_BY='SYSTEM', remove so it can be EXCLUDEd.
-            PreparedStatement delStmt1 =
-              conn.prepareStatement("DELETE FROM " + autoTable + " WHERE CAT_NAME=_UCS2''");
-            // Do not check for errors.
-            delStmt1.executeUpdate();
-            PreparedStatement delStmt2 =
-              conn.prepareStatement("DELETE FROM " + autoTable + " WHERE CAT_NAME=_UCS2'NEO' " +
-                                    "AND SCH_NAME=" + intSchInStrLit + " AND TBL_NAME=" + intTblInStrLit +
-                                    " AND ADDED_BY=_ISO88591'SYSTEM'");
-            // Do not check for errors.
-            delStmt2.executeUpdate();
-          }
-          
-          PreparedStatement insStmt =
-            conn.prepareStatement("INSERT INTO " + autoTable + " VALUES (_UCS2'NEO'," +
-                                  " ?, ?, TIMESTAMP '0001-01-01 00:00:00'," +
-                                  " TIMESTAMP '0001-01-01 00:00:00', 0, _UCS2'', _ISO88591'" +
-                                  addedBy + "')");
-          insStmt.setString(1, schema);  // Set first  argument in statement (1st '?').
-          insStmt.setString(2, table);   // Set second argument in statement (2nd '?').
-          if (insStmt.executeUpdate() == 1) 
-            result[0]="Table name "+extSchDotTbl+" " + action +".";
-
-        }
-      }
-      else if(operation.equals(delStr)) 
-      {
-        // Perform DELETE command.
-        if (schema.equals("*") && table.equals("*")) 
-        {
-          // If the user has specified '*'.'*' for schema and table, remove all 
-          // entries in list, then add an empty entry.
-          PreparedStatement delStmt = conn.prepareStatement("DELETE FROM " + autoTable);
-          delStmt.executeUpdate();
-          result[0]="All entries DELETEd.  Automation disabled.";
-          
-          // Add the empty entry, which is needed so that USAS.sh does not later insert all
-          // existing tables.  It would do so if the USTAT_AUTO_TABLES table were empty.
-          PreparedStatement insStmt =
-            conn.prepareStatement("INSERT INTO " + autoTable + 
-								  " VALUES (_UCS2'', _UCS2'', _UCS2'', " +
-                                  " TIMESTAMP '0001-01-01 00:00:00'," +
-                                  " TIMESTAMP '0001-01-01 00:00:00', 0, _UCS2'', _ISO88591'USER')");
-          insStmt.executeUpdate();
-
-          try {
-            // Remove USTAT_AUTOMATION_INTERVAL entry from SYSTEM_DEFAULTS tables.
-
-            String os = System.getProperty("os.name").toLowerCase();
-  
-	    if ( os.indexOf("linux") >=0 ) {
-                PreparedStatement delStmt2 =
-                   conn.prepareStatement(
-        "DELETE FROM HP_SYSTEM_CATALOG.SYSTEM_DEFAULTS_SCHEMA.SYSTEM_DEFAULTS " +
-        "WHERE ATTRIBUTE = 'USTAT_AUTOMATION_INTERVAL'");
-               // Do not check for errors.
-               delStmt2.executeUpdate();
-  
-               // Now remove AUTO_CQDS_SET file from the cluster.
-               String shellCmd;
-
-               String sqroot = System.getenv("MY_SQROOT");
-
-               shellCmd = "rm " + sqroot + "/export/lib/mx_ustat/autodir/USTAT_CQDS_SET";
-               Process p = Runtime.getRuntime().exec(shellCmd);
-
-               shellCmd = "rm " + sqroot + "/export/lib/mx_ustat/autoprev/USTAT_CQDS_SET";
-               p = Runtime.getRuntime().exec(shellCmd);
-
-            } else {
-  
-              // assume NSK
-              // Obtain system name.
-              String sys="";
-              String shellCmd = "/bin/gtacl -c SYSINFO";
-              Process p = Runtime.getRuntime().exec(shellCmd);
-              BufferedReader stdInput = new BufferedReader(new 
-                InputStreamReader(p.getInputStream()));
-              String s;
-              int pos;
-              while ((s = stdInput.readLine()) != null)
-                if ((pos = s.indexOf("System name")) >= 0)
-                {
-                  pos = s.indexOf("\\"); // Find beginning of system name.
-                  sys = s.substring(pos+1);
-                }
-  
-              // Obtain all segment names.  The grep here is really to avoid getting names
-              // of systems that are on expand which are not segments.
-              String sysprefix=sys.substring(0,3).toLowerCase();
-              shellCmd = "ls /E";
-              p = Runtime.getRuntime().exec(shellCmd);
-              stdInput = new BufferedReader(new InputStreamReader(p.getInputStream()));
-              // For each segment, remove USTAT_AUTOMATION_INTERVAL from system defaults table.
-              // (make sure the segment name returned starts with 'sysprefix').
-              while ((s = stdInput.readLine()) != null && s.indexOf(sysprefix) == 0)
-              {
-                PreparedStatement delStmt2 =
-                  conn.prepareStatement("DELETE FROM HP_SYSTEM_CATALOG" + 
-                                        ".SYSTEM_DEFAULTS_SCHEMA.SYSTEM_DEFAULTS " +
-                                        "WHERE ATTRIBUTE = 'USTAT_AUTOMATION_INTERVAL'");
-                // Do not check for errors.
-                delStmt2.executeUpdate();
-              }
-  
-              // Now remove AUTO_CQDS_SET file from primary segment.
-              shellCmd = "rm /E/" + sysprefix + "0101/usr/tandem/mx_ustat/autodir/USTAT_CQDS_SET";
-              p = Runtime.getRuntime().exec(shellCmd);
-              shellCmd = "rm /E/" + sysprefix + "0101/usr/tandem/mx_ustat/autoprev/USTAT_CQDS_SET";
-              p = Runtime.getRuntime().exec(shellCmd);
-            }
-
-          }
-          catch(IOException err)
-          {
-            // Shell failure message.
-            result[0] = "Unable to remove USTAT_AUTOMATION_INTERVAL from SYSTEM_DEFAULTS " + 
-                        "tables.  You must do this manually.";
-            if (result[0].charAt(result[0].length()-1) == ']') // Remove date/time.
-              result[0]=result[0].substring(0,result[0].length()-21);
-          }   
-        }
-        else 
-        {          
-          // User has requested to delete a specific table.
-          // First see if the table is 'EXCLUD' and can be deleted.  Note that deletion
-          // of the last 'USER' added table results in a blank 'USER' entry being added.
-          // This is not done for deletion of 'EXCLUD'ed tables.
-          PreparedStatement delete1 =
-            conn.prepareStatement("DELETE FROM " + autoTable + 
-            " WHERE SCH_NAME = ? AND TBL_NAME = ? AND ADDED_BY=_ISO88591'EXCLUD'");
-            delete1.setString(1, schema);  // Set first  argument in statement (1st '?').
-            delete1.setString(2, table);   // Set second argument in statement (2nd '?').
-          if (delete1.executeUpdate() == 0) 
-          {
-            // Failed to delete (0 rows deleted).  Either the table did not have 
-            // ADDED_BY='EXCLUD' or entry does not exist.  Try to delete for any ADDED_BY.
-            PreparedStatement delete2 =
-              conn.prepareStatement("DELETE FROM " + autoTable +
-              " WHERE SCH_NAME = ? AND TBL_NAME = ?");
-              delete2.setString(1, schema);  // Set first  argument in statement (1st '?').
-              delete2.setString(2, table);   // Set second argument in statement (2nd '?').
-            if (delete2.executeUpdate() == 0) 
-              result[0]="Table name  "+extSchDotTbl+" not found, not DELETEd.";
-            else
-            { 
-              // A 'SYSTEM' or 'USER' table DELETEd.
-              result[0]="Table name "+extSchDotTbl+" DELETEd.";
-
-              // Add the empty entry, if there are no rows with the ADDED_BY field set to
-              // 'USER'.  This keeps USAS.sh from inserting all existing tables later
-              // on.  It would do so if all 'USER' entries from USTAT_AUTO_TABLES table had
-              // been deleted.
-              PreparedStatement FindUserEnteredTables =
-                conn.prepareStatement("SELECT COUNT(*) FROM " + autoTable +
-                " WHERE ADDED_BY = _ISO88591'USER'" + 
-                " FOR READ UNCOMMITTED ACCESS");
-              ResultSet rs = FindUserEnteredTables.executeQuery();
-              rs.next();
-              if (rs.getInt(1) == 0)
-              {
-                PreparedStatement insStmt =
-                  conn.prepareStatement("INSERT INTO " + autoTable + 
-				  " VALUES (_UCS2'', _UCS2'', _UCS2'', " +
-                  " TIMESTAMP '0001-01-01 00:00:00'," +
-                  " TIMESTAMP '0001-01-01 00:00:00', 0, _UCS2'', _ISO88591'USER')");
-                insStmt.executeUpdate();
-              }
-              rs.close();
-            }
-          }
-          // 'EXCLUD' table was successfully DELETEd, set result string.
-          else result[0]="Table name "+extSchDotTbl+"\" DELETEd.";
-        }
-      }
-      else 
-      {
-        result[0] = operation + " is not a valid operation.";
-      }
-    } 
-    catch(SQLException err)
-    {
-      result[0] = err.getMessage().trim(); // Issue SQL error.
-      if (result[0].charAt(result[0].length()-1) == ']') // Remove date/time.
-        result[0]=result[0].substring(0,result[0].length()-21);
-    } 
-    finally 
-    {
-      conn.close();
-    }
-    if (result[0].length() > 80) result[0]=result[0].substring(0,79);
-  }
-
-  public static String internalFormat(String name)
-  {
-    // Remove enclosing quotes
-    name=name.substring(1, name.length()-1);
-
-    // Change all occurrences of "" to ".
-    int index=-1;
-    while((index=name.indexOf("\"\"", index+1)) != -1)
-      name=name.substring(0,index+1)+name.substring(index+2);  
-
-    return name;
-  }
-}