Posted to commits@trafodion.apache.org by db...@apache.org on 2015/10/02 18:16:34 UTC
[5/9] incubator-trafodion git commit: Most of the Trafodion Java source files are built through Maven, using projects DCS, REST, HBase-trx and SQL. A few files remain in the core/sql/executor and core/sql/ustat directories that are built through javac co
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/executor/OrcFileReader.java
----------------------------------------------------------------------
diff --git a/core/sql/executor/OrcFileReader.java b/core/sql/executor/OrcFileReader.java
deleted file mode 100644
index 6e5eb75..0000000
--- a/core/sql/executor/OrcFileReader.java
+++ /dev/null
@@ -1,518 +0,0 @@
-// @@@ START COPYRIGHT @@@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// @@@ END COPYRIGHT @@@
-
-package org.apache.hadoop.hive.ql.io.orc;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.FileReader;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.*;
-import java.nio.ByteBuffer;
-import java.nio.ByteOrder;
-import java.lang.Integer;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-
-import org.apache.hadoop.hive.conf.*;
-import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
-import org.apache.hadoop.hive.serde2.objectinspector.*;
-
-import org.apache.hadoop.io.IntWritable;
-import org.apache.hadoop.io.Text;
-
-import org.apache.hive.common.util.HiveTestUtils;
-
-import static org.junit.Assert.assertEquals;
-import org.junit.Before;
-import org.junit.Test;
-import static org.junit.Assert.assertNull;
-
-public class OrcFileReader
-{
-
- Configuration m_conf;
- Path m_file_path;
-
- Reader m_reader;
- List<OrcProto.Type> m_types;
- StructObjectInspector m_oi;
- List<? extends StructField> m_fields;
- RecordReader m_rr;
- String lastError = null;
- Reader.Options m_options;
-
-public class OrcRowReturnSQL
-{
- int m_row_length;
- int m_column_count;
- long m_row_number;
- byte[] m_row_ba = new byte[4096];
-}
-
- OrcRowReturnSQL rowData; //TEMP!!
-
-
- OrcFileReader() {
- m_conf = new Configuration();
- rowData = new OrcRowReturnSQL(); //TEMP: was in fetch
- }
-
-//********************************************************************************
-
-// ORIGINAL VERSION BEFORE ADDING SUPPORT FOR COLUMN SELECTION
- public String open(String pv_file_name) throws IOException {
-// pv_file_name= pv_file_name + "/000000_0";
-
- m_file_path = new Path(pv_file_name);
-
- try{
- m_reader = OrcFile.createReader(m_file_path, OrcFile.readerOptions(m_conf));
- } catch (java.io.FileNotFoundException e1) {
- return "file not found";
- }
- if (m_reader == null)
- return "open failed!";
- m_types = m_reader.getTypes();
- m_oi = (StructObjectInspector) m_reader.getObjectInspector();
- m_fields = m_oi.getAllStructFieldRefs();
-
- try{
- m_rr = m_reader.rows();
- } catch (java.io.IOException e1) {
- return (e1.getMessage());
- }
-
- if (m_rr == null)
- return "open:RecordReader is null";
- return null;
- }
-
-//********************************************************************************
-/*
- public String open(String pv_file_name) throws Exception {
-// pv_file_name= pv_file_name + "/000000_0";
- m_file_path = new Path(pv_file_name);
-
- try{
- m_reader = OrcFile.createReader(m_file_path, OrcFile.readerOptions(m_conf));
- } catch (java.io.FileNotFoundException e1) {
- return "file not found";
- }
- if (m_reader == null)
- return "open failed!";
- m_types = m_reader.getTypes();
- m_oi = (StructObjectInspector) m_reader.getObjectInspector();
- m_fields = m_oi.getAllStructFieldRefs();
-
-// m_rr = m_reader.rows(); //RESTORE THIS as working code!
-// boolean[] includes = new boolean[29];
- boolean[] includes = new boolean[] {true,true,false,false,false,false,false,false,false,false,false,false,
- false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,true};
- m_options = new Reader.Options();
-// my_options.include(includes);
-// System.out.println("Array size: " + includes.length);
- m_rr = m_reader.rowsOptions(m_options.include(includes));
-// m_rr = m_reader.rowsOptions(m_options.include(new boolean[] {false,true,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false}));
-//{true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true}));
-
- return null;
- }
-*/
-//********************************************************************************
-
- public String close()
- {
- m_reader = null;
- m_rr = null;
- m_file_path = null;
- return null;
- }
-
-
- public void printFileInfo() throws Exception {
-
- System.out.println("Reader: " + m_reader);
-
-
- System.out.println("# Rows: " + m_reader.getNumberOfRows());
- System.out.println("# Types in the file: " + m_types.size());
- for (int i=0; i < m_types.size(); i++) {
- System.out.println("Type " + i + ": " + m_types.get(i).getKind());
- }
-
- System.out.println("Compression: " + m_reader.getCompression());
- if (m_reader.getCompression() != CompressionKind.NONE) {
- System.out.println("Compression size: " + m_reader.getCompressionSize());
- }
-
- m_oi = (StructObjectInspector) m_reader.getObjectInspector();
-
- System.out.println("object inspector type category: " + m_oi.getCategory());
- System.out.println("object inspector type name : " + m_oi.getTypeName());
-
- System.out.println("Number of columns in the table: " + m_fields.size());
-
- // Print the type info:
- for (int i = 0; i < m_fields.size(); i++) {
- System.out.println("Column " + i + " name: " + m_fields.get(i).getFieldName());
- ObjectInspector lv_foi = m_fields.get(i).getFieldObjectInspector();
- System.out.println("Column " + i + " type category: " + lv_foi.getCategory());
- System.out.println("Column " + i + " type name: " + lv_foi.getTypeName());
- }
-
- }
-
- public boolean seekToRow(long pv_rowNumber) throws IOException {
-
- if (m_reader == null) {
- return false;
- }
-
- if ((pv_rowNumber < 0) ||
- (pv_rowNumber >= m_reader.getNumberOfRows())) {
- return false;
- }
-
- m_rr.seekToRow(pv_rowNumber);
-
- return true;
- }
-
- public String seeknSync(long pv_rowNumber) throws IOException {
- if (m_reader == null) {
- return "Looks like a file has not been opened. Call open() first.";
- }
-
- if ((pv_rowNumber < 0) ||
- (pv_rowNumber >= m_reader.getNumberOfRows())) {
- return "Invalid rownumber: " + pv_rowNumber + " provided.";
- }
-
- m_rr.seekToRow(pv_rowNumber);
-
- return null;
- }
-
- public long getNumberOfRows() throws IOException {
-
- return m_reader.getNumberOfRows();
-
- }
-
- public long getPosition() throws IOException {
-
- return m_rr.getRowNumber();
-
- }
-
- // Dumps the content of the file. The columns are '|' separated.
- public void readFile_String() throws Exception {
-
- seeknSync(0);
- OrcStruct lv_row = null;
- Object lv_field_val = null;
- StringBuilder lv_row_string = new StringBuilder(1024);
- while (m_rr.hasNext()) {
- lv_row = (OrcStruct) m_rr.next(lv_row);
- lv_row_string.setLength(0);
- for (int i = 0; i < m_fields.size(); i++) {
- lv_field_val = lv_row.getFieldValue(i);
- if (lv_field_val != null) {
- lv_row_string.append(lv_field_val);
- }
- lv_row_string.append('|');
- }
- System.out.println(lv_row_string);
- }
-
- }
-
-
- // Dumps the contents of the file as ByteBuffer.
- public void readFile_ByteBuffer() throws Exception {
-
- OrcStruct lv_row = null;
- Object lv_field_val = null;
- ByteBuffer lv_row_buffer;
-
- seeknSync(0);
- while (m_rr.hasNext()) {
- byte[] lv_row_ba = new byte[4096];
- lv_row_buffer = ByteBuffer.wrap(lv_row_ba);
- lv_row = (OrcStruct) m_rr.next(lv_row);
- for (int i = 0; i < m_fields.size(); i++) {
- lv_field_val = lv_row.getFieldValue(i);
- if (lv_field_val == null) {
- lv_row_buffer.putInt(0);
- continue;
- }
- String lv_field_val_str = lv_field_val.toString();
- lv_row_buffer.putInt(lv_field_val_str.length());
- if (lv_field_val != null) {
- lv_row_buffer.put(lv_field_val_str.getBytes());
- }
- }
- System.out.println(lv_row_buffer);
- // System.out.println(new String(lv_row_buffer.array()));
- }
- }
-
- public String getNext_String(char pv_ColSeparator) throws Exception {
-
- if ( ! m_rr.hasNext()) {
- return null;
- }
-
- OrcStruct lv_row = null;
- Object lv_field_val = null;
- StringBuilder lv_row_string = new StringBuilder(1024);
-
- lv_row = (OrcStruct) m_rr.next(lv_row);
- for (int i = 0; i < m_fields.size(); i++) {
- lv_field_val = lv_row.getFieldValue(i);
- if (lv_field_val != null) {
- lv_row_string.append(lv_field_val);
- }
- lv_row_string.append(pv_ColSeparator);
- }
-
- return lv_row_string.toString();
- }
-
- // returns the next row as a byte array
- public byte[] fetchNextRow() throws Exception {
-
- if ( ! m_rr.hasNext()) {
- return null;
- }
-
-// OrcStruct lv_row = (OrcStruct) m_rr.next(null);
- OrcStruct lv_row = (OrcStruct) m_rr.next(null);
- Object lv_field_val = null;
- ByteBuffer lv_row_buffer;
-
- byte[] lv_row_ba = new byte[4096];
- lv_row_buffer = ByteBuffer.wrap(lv_row_ba);
- for (int i = 0; i < m_fields.size(); i++) {
- lv_field_val = lv_row.getFieldValue(i);
- if (lv_field_val == null) {
- lv_row_buffer.putInt(0);
- continue;
- }
- String lv_field_val_str = lv_field_val.toString();
- lv_row_buffer.putInt(lv_field_val_str.length());
- if (lv_field_val != null) {
- lv_row_buffer.put(lv_field_val_str.getBytes());
- }
- }
- return lv_row_buffer.array();
- }
-
-
-//****************************************************************************
-
-//THIS IS THE ORIGINAL FORM BEFORE ADDING SUPPORT FOR COLUMN SELECTION !!!!
-public OrcRowReturnSQL fetchNextRowObj() throws Exception
-{
-// int lv_integerLength = Integer.Bytes;
- int lv_integerLength = 4;
-// OrcRowReturnSQL rowData = new OrcRowReturnSQL();
-
- if ( ! m_rr.hasNext()) {
- return null;
- }
-
- OrcStruct lv_row = (OrcStruct) m_rr.next(null);
- Object lv_field_val = null;
- ByteBuffer lv_row_buffer;
-
-// lv_row_buffer.order(ByteOrder.LITTLE_ENDIAN);
- lv_row_buffer = ByteBuffer.wrap(rowData.m_row_ba);
- lv_row_buffer.order(ByteOrder.LITTLE_ENDIAN);
-
- rowData.m_row_length = 0;
- rowData.m_column_count = m_fields.size();
- rowData.m_row_number = m_rr.getRowNumber();
-
- for (int i = 0; i < m_fields.size(); i++) {
- lv_field_val = lv_row.getFieldValue(i);
- if (lv_field_val == null) {
- lv_row_buffer.putInt(0);
- rowData.m_row_length = rowData.m_row_length + lv_integerLength;
- continue;
- }
- String lv_field_val_str = lv_field_val.toString();
- lv_row_buffer.putInt(lv_field_val_str.length());
- rowData.m_row_length = rowData.m_row_length + lv_integerLength;
- if (lv_field_val != null) {
- lv_row_buffer.put(lv_field_val_str.getBytes());
- rowData.m_row_length = rowData.m_row_length + lv_field_val_str.length();
- }
- }
-
- return rowData;
-
-}
-
-//****************************************************************************
-/*
-public OrcRowReturnSQL fetchNextRowObj() throws Exception
-{
-// int lv_integerLength = Integer.Bytes;
- int lv_integerLength = 4;
- boolean[] lv_include;
-
- OrcRowReturnSQL rowData = new OrcRowReturnSQL();
-
- if ( ! m_rr.hasNext()) {
- return null;
- }
-
- OrcStruct lv_row = (OrcStruct) m_rr.next(null);
- Object lv_field_val = null;
- ByteBuffer lv_row_buffer;
-
-// lv_row_buffer.order(ByteOrder.LITTLE_ENDIAN);
- lv_row_buffer = ByteBuffer.wrap(rowData.m_row_ba);
- lv_row_buffer.order(ByteOrder.LITTLE_ENDIAN);
-// rowData.m_column_count = m_fields.size();
- rowData.m_column_count = 0;
- rowData.m_row_number = m_rr.getRowNumber();
- lv_include = m_options.getInclude();
-
- for (int i = 0; i < m_fields.size(); i++) {
- if (lv_include[i+1] == false) continue;
- lv_field_val = lv_row.getFieldValue(i);
- if (lv_field_val == null) {
- lv_row_buffer.putInt(0);
- rowData.m_row_length = rowData.m_row_length + lv_integerLength;
- rowData.m_column_count++;
- continue;
- }
- String lv_field_val_str = lv_field_val.toString();
- lv_row_buffer.putInt(lv_field_val_str.length());
- rowData.m_row_length = rowData.m_row_length + lv_integerLength;
- if (lv_field_val != null) {
- lv_row_buffer.put(lv_field_val_str.getBytes());
- rowData.m_row_length = rowData.m_row_length + lv_field_val_str.length();
- rowData.m_column_count++;
-
- }
- }
-
- return rowData;
-
-}
-*/
-//****************************************************************************
-String getLastError() {
- return lastError;
- }
-
-//****************************************************************************
-public boolean isEOF() throws Exception
-{
- if (m_rr.hasNext())
- {
- return false;
- }
- else
- {
- return true;
- }
-}
-//****************************************************************************
- public String fetchNextRow(char pv_ColSeparator) throws Exception {
-
- if ( ! m_rr.hasNext()) {
- return null;
- }
-
- OrcStruct lv_row = null;
- Object lv_field_val = null;
- StringBuilder lv_row_string = new StringBuilder(1024);
-
- lv_row = (OrcStruct) m_rr.next(lv_row);
- for (int i = 0; i < m_fields.size(); i++) {
- lv_field_val = lv_row.getFieldValue(i);
- if (lv_field_val != null) {
- lv_row_string.append(lv_field_val);
- }
- lv_row_string.append(pv_ColSeparator);
- }
-
- return lv_row_string.toString();
- }
-
-
-
- public static void main(String[] args) throws Exception
- {
- System.out.println("OrcFile Reader main");
-
- OrcFileReader lv_this = new OrcFileReader();
-
- lv_this.open(args[0]);
-
- lv_this.printFileInfo();
-
- lv_this.readFile_String();
-
- lv_this.readFile_ByteBuffer();
-
- // Gets rows as byte[] (starts at row# 4)
- boolean lv_done = false;
- if (lv_this.seeknSync(4) == null) {
- while (! lv_done) {
- System.out.println("Next row #: " + lv_this.getPosition());
- byte[] lv_row_bb = lv_this.fetchNextRow();
- if (lv_row_bb != null) {
- System.out.println("First 100 bytes of lv_row_bb: " + new String(lv_row_bb, 0, 100));
- System.out.println("Length lv_row_bb: " + lv_row_bb.length);
- }
- else {
- lv_done = true;
- }
- }
- }
-
- // Gets rows as String (starts at row# 10)
- lv_done = false;
- String lv_row_string;
- if (lv_this.seeknSync(10) == null) {
- while (! lv_done) {
- lv_row_string = lv_this.getNext_String('|');
- if (lv_row_string != null) {
- System.out.println(lv_row_string);
- }
- else {
- lv_done = true;
- }
- }
- }
-System.out.println("Shows the change in place");
- }
-}
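
A note on the file just removed: OrcFileReader is declared in the org.apache.hadoop.hive.ql.io.orc package, apparently so it can reach the package-private OrcStruct.getFieldValue(). The same dump loop can be written against public API only by going through the StructObjectInspector. A minimal sketch, assuming a Hive-era ORC jar on the classpath (the class name and structure below are illustrative, not Trafodion code):

    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.ql.io.orc.OrcFile;
    import org.apache.hadoop.hive.ql.io.orc.Reader;
    import org.apache.hadoop.hive.ql.io.orc.RecordReader;
    import org.apache.hadoop.hive.serde2.objectinspector.StructField;
    import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;

    public class OrcDumpSketch {
        public static void main(String[] args) throws Exception {
            Reader reader = OrcFile.createReader(new Path(args[0]),
                                                 OrcFile.readerOptions(new Configuration()));
            StructObjectInspector oi = (StructObjectInspector) reader.getObjectInspector();
            List<? extends StructField> fields = oi.getAllStructFieldRefs();
            RecordReader rows = reader.rows();
            Object row = null;
            while (rows.hasNext()) {
                row = rows.next(row);                            // reuse the previous row object
                StringBuilder sb = new StringBuilder(1024);
                for (StructField f : fields) {
                    Object val = oi.getStructFieldData(row, f);  // null for SQL NULL
                    if (val != null) sb.append(val);
                    sb.append('|');                              // same separator as readFile_String()
                }
                System.out.println(sb);
            }
            rows.close();
        }
    }

Threading the previous row back into next() lets the reader recycle the row object instead of allocating one per record, which is why the deleted code carries lv_row through its loops.
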
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/executor/ResultIterator.java
----------------------------------------------------------------------
diff --git a/core/sql/executor/ResultIterator.java b/core/sql/executor/ResultIterator.java
deleted file mode 100644
index 14ef422..0000000
--- a/core/sql/executor/ResultIterator.java
+++ /dev/null
@@ -1,133 +0,0 @@
-// @@@ START COPYRIGHT @@@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// @@@ END COPYRIGHT @@@
-
-package org.trafodion.sql.HBaseAccess;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-
-
-public class ResultIterator {
- ResultScanner scanner;
- Result[] resultSet;
- Result row = null;
- scanFetchStep step;
- List<KeyValue> kvList;
- int listIndex = 0;
- int cellIndex;
- int numKVs;
- boolean isSingleRow = false;
-
- private enum scanFetchStep {
- SCAN_FETCH_NEXT_ROW,
- SCAN_FETCH_NEXT_COL,
- SCAN_FETCH_CLOSE
- } ;
-
- public ResultIterator(ResultScanner scanner) {
- this.scanner = scanner;
- resultSet = null;
- step = scanFetchStep.SCAN_FETCH_NEXT_ROW;
- }
-
- public ResultIterator(Result[] results) {
- this.scanner = null;
- resultSet = results;
- step = scanFetchStep.SCAN_FETCH_NEXT_ROW;
- }
-
- public ResultIterator(Result result) {
- this.scanner = null;
- resultSet = null;
- row = result;
- isSingleRow = true;
- step = scanFetchStep.SCAN_FETCH_NEXT_ROW;
- }
-
- KeyValue nextCell() throws IOException {
- while (true)
- {
- switch (step)
- {
- case SCAN_FETCH_NEXT_ROW:
- {
- if (isSingleRow == false) {
- if (scanner != null)
- row = scanner.next();
- else {
- if (listIndex == resultSet.length) {
- step = scanFetchStep.SCAN_FETCH_CLOSE;
- break;
- }
- row = resultSet[listIndex];
- listIndex++;
- }
- }
-
- if (row == null || row.isEmpty()) {
- step = scanFetchStep.SCAN_FETCH_CLOSE;
- break;
- }
-
- kvList = row.list();
- cellIndex = 0;
- numKVs = kvList.size();
-
- step = scanFetchStep.SCAN_FETCH_NEXT_COL;
- }
- break;
-
- case SCAN_FETCH_NEXT_COL:
- {
- KeyValue kv = kvList.get(cellIndex);
- cellIndex++;
- if (kv == null) {
- if (isSingleRow)
- step = scanFetchStep.SCAN_FETCH_CLOSE;
- else
- step = scanFetchStep.SCAN_FETCH_NEXT_ROW;
- break;
- }
-
- if (cellIndex == numKVs)
- if (isSingleRow)
- step = scanFetchStep.SCAN_FETCH_CLOSE;
- else
- step = scanFetchStep.SCAN_FETCH_NEXT_ROW;
-
- return kv;
- }
-
- case SCAN_FETCH_CLOSE:
- {
- return null;
- }
-
- }// switch
- } // while
-
- }
-
-}
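
ResultIterator is written as a resumable state machine because its C++ caller pulls a single KeyValue per JNI call and must pick up where it left off. Collapsed into an ordinary loop, the traversal amounts to the sketch below (against the HBase client API of that era; Result.list() was later deprecated in favor of rawCells()):

    import java.io.IOException;

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;

    public class CellStreamSketch {
        // Flattens scanner rows into one stream of cells, like the state
        // machine above but without the resumable one-cell-at-a-time fetch.
        static long dumpCells(ResultScanner scanner) throws IOException {
            long cells = 0;
            Result row;
            while ((row = scanner.next()) != null) {   // null: the scan is exhausted
                if (row.isEmpty()) continue;
                for (KeyValue kv : row.list()) {
                    System.out.println(kv);
                    cells++;
                }
            }
            return cells;
        }
    }
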
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/executor/ResultKeyValueList.java
----------------------------------------------------------------------
diff --git a/core/sql/executor/ResultKeyValueList.java b/core/sql/executor/ResultKeyValueList.java
deleted file mode 100644
index 54eed48..0000000
--- a/core/sql/executor/ResultKeyValueList.java
+++ /dev/null
@@ -1,100 +0,0 @@
-// @@@ START COPYRIGHT @@@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// @@@ END COPYRIGHT @@@
-
-package org.trafodion.sql.HBaseAccess;
-
-import java.util.List;
-import java.io.*;
-
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.client.Result;
-import java.nio.*;
-
-public class ResultKeyValueList {
- Result result;
- List<KeyValue> kvList;
-
- public ResultKeyValueList(Result result) {
- super();
- this.result = result;
- kvList = result.list();
- }
-
- byte[] getRowID() {
- if (result == null)
- return null;
- else
- return result.getRow();
- }
-
- byte[] getAllKeyValues() {
- if (kvList == null)
- return null;
- int numCols = kvList.size();
- byte[] rowID = result.getRow();
- int bufSize = rowID.length;
- bufSize += (64 * numCols);
- for (int i=0; i<numCols; i++) {
- bufSize += kvList.get(i).getLength();
- }
- ByteBuffer buf = ByteBuffer.allocate(bufSize);
- buf.order(ByteOrder.LITTLE_ENDIAN);
- // move in numCols
- buf.putInt(numCols);
- // move in rowID length and rowID
- buf.putInt(rowID.length);
- buf.put(rowID);
- // move in all descriptors
- for (int i=0; i<numCols; i++) {
- copyKVs(buf, kvList.get(i));
- }
- return buf.array();
- }
-
- void copyKVs(ByteBuffer buf, KeyValue kv)
- {
- buf.putInt(kv.getLength());
- int offset = kv.getOffset();
- buf.putInt(kv.getValueLength());
- buf.putInt(kv.getValueOffset() - offset);
- buf.putInt(kv.getQualifierLength());
- buf.putInt(kv.getQualifierOffset() - offset);
- buf.putInt(kv.getFamilyLength());
- buf.putInt(kv.getFamilyOffset() - offset);
- buf.putLong(kv.getTimestamp());
- buf.put(kv.getBuffer(), kv.getOffset(), kv.getLength());
- }
-
-
- int getSize() {
- if (kvList == null)
- return 0;
- else
- return kvList.size();
- }
-
- KeyValue getEntry(int i) {
- if (kvList == null)
- return null;
- else
- return kvList.get(i);
- }
-}
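
getAllKeyValues() above flattens a row into one little-endian buffer for the C++ executor: an int column count, a length-prefixed rowID, then per cell a fixed descriptor (total length; value, qualifier, and family lengths with offsets rebased to the start of that KeyValue; timestamp) followed by the raw KeyValue bytes. A minimal sketch of the length-prefix convention it relies on (helper names are illustrative):

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;

    public class LengthPrefixSketch {
        // Writes a length-prefixed field the way getAllKeyValues() writes the rowID.
        static void putField(ByteBuffer buf, byte[] field) {
            buf.putInt(field.length);
            buf.put(field);
        }

        // Reads it back the way a consumer would.
        static byte[] getField(ByteBuffer buf) {
            byte[] field = new byte[buf.getInt()];
            buf.get(field);
            return field;
        }

        public static void main(String[] args) {
            ByteBuffer buf = ByteBuffer.allocate(64).order(ByteOrder.LITTLE_ENDIAN);
            putField(buf, "row-1".getBytes());
            buf.flip();
            System.out.println(new String(getField(buf)));   // prints row-1
        }
    }

The 64 bytes per cell added to bufSize above is generous on purpose: the descriptor itself is seven ints plus a long (36 bytes), and overshooting the allocation is harmless while undershooting would throw BufferOverflowException.
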
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/executor/RowToInsert.java
----------------------------------------------------------------------
diff --git a/core/sql/executor/RowToInsert.java b/core/sql/executor/RowToInsert.java
deleted file mode 100644
index 92d8fbc..0000000
--- a/core/sql/executor/RowToInsert.java
+++ /dev/null
@@ -1,44 +0,0 @@
-// @@@ START COPYRIGHT @@@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// @@@ END COPYRIGHT @@@
-
-package org.trafodion.sql.HBaseAccess;
-
-import java.util.Vector;
-
-public class RowToInsert extends Vector<RowToInsert.ColToInsert> {
-
- public class ColToInsert {
- public byte[] qualName;
- public byte[] colValue;
- }
-
- private static final long serialVersionUID = 5066470006717527862L;
-
- public void addColumn(byte[] name, byte[] value) {
- ColToInsert col = new ColToInsert();
- col.qualName = name;
- col.colValue = value;
- add(col);
- }
-
-}
-
-
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/executor/RowsToInsert.java
----------------------------------------------------------------------
diff --git a/core/sql/executor/RowsToInsert.java b/core/sql/executor/RowsToInsert.java
deleted file mode 100644
index 8ca82bf..0000000
--- a/core/sql/executor/RowsToInsert.java
+++ /dev/null
@@ -1,57 +0,0 @@
-// @@@ START COPYRIGHT @@@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// @@@ END COPYRIGHT @@@
-
-package org.trafodion.sql.HBaseAccess;
-
-import java.util.Vector;
-
-public class RowsToInsert extends Vector<RowsToInsert.RowInfo> {
-
- public class RowInfo {
- public byte[] rowId;
- public Vector<RowsToInsert.ColToInsert> columns;
- }
-
- public class ColToInsert {
- public byte[] qualName;
- public byte[] colValue;
- }
-
- private static final long serialVersionUID = 5066470006717527863L;
-
- public void addRowId(byte[] rowId) {
- RowInfo rowInfo = new RowInfo();
- rowInfo.rowId = rowId;
- rowInfo.columns = new Vector<RowsToInsert.ColToInsert>();
- rowInfo.columns.clear();
- add(rowInfo);
- }
-
- public void addColumn(byte[] name, byte[] value) {
- ColToInsert col = new ColToInsert();
- col.qualName = name;
- col.colValue = value;
- if (size() > 0)
- get(size()-1).columns.add(col);
- // RowInfo.columns.add(col);
- }
-
-}
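
RowToInsert and RowsToInsert are plain Vector-based transports for the JNI layer: addRowId() opens a new row, and addColumn() appends a qualifier/value pair to the most recently opened one. A hypothetical usage sketch (package as it stood before this commit):

    import org.trafodion.sql.HBaseAccess.RowsToInsert;

    public class RowsToInsertSketch {
        public static void main(String[] args) {
            RowsToInsert rows = new RowsToInsert();
            rows.addRowId("row1".getBytes());
            rows.addColumn("cf:c1".getBytes(), "v1".getBytes()); // lands in row1
            rows.addColumn("cf:c2".getBytes(), "v2".getBytes()); // lands in row1
            rows.addRowId("row2".getBytes());
            rows.addColumn("cf:c1".getBytes(), "v3".getBytes()); // lands in row2
            System.out.println(rows.size() + " rows staged");    // prints: 2 rows staged
        }
    }

Note that addColumn() silently drops the cell when no row has been opened yet; callers are expected to always lead with addRowId().
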
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/executor/SequenceFileReader.cpp
----------------------------------------------------------------------
diff --git a/core/sql/executor/SequenceFileReader.cpp b/core/sql/executor/SequenceFileReader.cpp
index 065389f..4b93f2a 100644
--- a/core/sql/executor/SequenceFileReader.cpp
+++ b/core/sql/executor/SequenceFileReader.cpp
@@ -71,7 +71,7 @@ SequenceFileReader::~SequenceFileReader()
//////////////////////////////////////////////////////////////////////////////
SFR_RetCode SequenceFileReader::init()
{
- static char className[]="org/trafodion/sql/HBaseAccess/SequenceFileReader";
+ static char className[]="org/trafodion/sql/SequenceFileReader";
SFR_RetCode rc;
if (javaMethodsInitialized_)
@@ -489,7 +489,7 @@ SequenceFileWriter::~SequenceFileWriter()
//////////////////////////////////////////////////////////////////////////////
SFW_RetCode SequenceFileWriter::init()
{
- static char className[]="org/trafodion/sql/HBaseAccess/SequenceFileWriter";
+ static char className[]="org/trafodion/sql/SequenceFileWriter";
SFW_RetCode rc;
if (javaMethodsInitialized_)
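
The only change the C++ side needs is the class-name string, since JNI's FindClass takes slash-separated binary names. The new string corresponds to a Java file of roughly this shape (a sketch; the class body is unchanged by this commit and not shown in this hunk):

    // Slashes in the JNI string map to dots in the package name.
    package org.trafodion.sql;

    public class SequenceFileReader { /* ... */ }
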
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/executor/SequenceFileReader.java
----------------------------------------------------------------------
diff --git a/core/sql/executor/SequenceFileReader.java b/core/sql/executor/SequenceFileReader.java
deleted file mode 100644
index dbbe5c6..0000000
--- a/core/sql/executor/SequenceFileReader.java
+++ /dev/null
@@ -1,448 +0,0 @@
-// @@@ START COPYRIGHT @@@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// @@@ END COPYRIGHT @@@
-
-/**
- *
- */
-package org.trafodion.sql.HBaseAccess;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Properties;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.util.ReflectionUtils;
-//import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
-//import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-//import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
-//import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption;
-//import org.apache.hadoop.hive.serde2.objectinspector.StructField;
-//import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
-
-
-public class SequenceFileReader {
-
- Configuration conf = null; // File system configuration
- SequenceFile.Reader reader = null; // The HDFS SequenceFile reader object.
- Writable key = null;
- Writable row = null;
-// LazySimpleSerDe serde = null;
- boolean isEOF = false;
- String lastError = null;
-
- /**
- * Class Constructor
- */
- SequenceFileReader() {
- conf = new Configuration();
- conf.set("fs.hdfs.impl","org.apache.hadoop.hdfs.DistributedFileSystem");
- }
-
- String getLastError() {
- return lastError;
- }
-
- /**
- * Initialize the SerDe object. Needed only before calling fetchArrayOfColumns().
- * @param numColumns The number of columns in the table.
- * @param fieldDelim The delimiter between fields.
- * @param columns A comma delimited list of column names.
- * @param colTypes A comma delimited list of column types.
- * @param nullFormat NULL representation.
- */
-// public void initSerDe(String numColumns, String fieldDelim, String columns, String colTypes, String nullFormat) throws IllegalStateException {
-//
-// serde = new LazySimpleSerDe();
-// Properties tbl = new Properties();
-// tbl.setProperty("serialization.format", numColumns);
-// tbl.setProperty("field.delim", fieldDelim);
-// tbl.setProperty("columns", columns);
-// tbl.setProperty("columns.types", colTypes);
-// tbl.setProperty("serialization.null.format", nullFormat);
-// serde.initialize(conf, tbl);
-// }
-
- /**
- * Open the SequenceFile for reading.
- * @param path The HDFS path to the file.
- */
- public String open(String path) throws IOException {
-
- Path filename = new Path(path);
-
- reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(filename));
-
- key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
- row = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
-
- return null;
-
- }
-
- /**
- * Get the current position in the file.
- * @return The current position or -1 if error.
- */
- public long getPosition() throws IOException {
-
- lastError = null;
- if (reader == null) {
- lastError = "open() was not called first.";
- return -1;
- }
-
- return reader.getPosition();
- }
-
- /**
- * Have we reached the end of the file yet?
- * @return
- */
- public boolean isEOF() {
- return isEOF;
- }
-
- /**
- * Seek to the specified position in the file, and then to the beginning
- * of the record after the next sync mark.
- * @param pos Required file position.
- * @return null if OK, or error message.
- */
- public String seeknSync(long pos) throws IOException {
-
- if (reader == null) {
- return "open() was not called first.";
- }
-
- reader.sync(pos);
- return null;
- }
-
- /**
- * Fetch the next row as an array of columns.
- * @return An array of columns.
- */
-// public String[] fetchArrayOfColumns() throws IllegalStateException {
-// if (reader == null)
-// throw new IllegalStateException("open() was not called first.");
-// if (serde == null)
-// throw new IllegalStateException("initSerDe() was not called first.");
-//
-// ArrayList<String> result = new ArrayList<String>();
-// boolean theresMore = reader.next(key, row);
-// if (!theresMore)
-// return null;
-// StructObjectInspector soi = (StructObjectInspector) serde.getObjectInspector();
-// List<? extends StructField> fieldRefs = soi.getAllStructFieldRefs();
-// Object data = serde.deserialize(row);
-//
-// for (StructField fieldRef : fieldRefs) {
-// ObjectInspector oi = fieldRef.getFieldObjectInspector();
-// Object obj = soi.getStructFieldData(data, fieldRef);
-// Object column = convertLazyToJava(obj, oi);
-// if (column == null)
-// result.add(null);
-// else
-// result.add(column.toString());
-// }
-// String[] resultArray = new String[result.size()];
-// result.toArray(resultArray);
-// return resultArray;
-// }
-
- /**
- * Fetch the next row as a single String, that still needs to be parsed.
- * @return The next row.
- */
- public String fetchNextRow() throws IOException {
-
- lastError = null;
- if (reader == null) {
- lastError = "open() was not called first.";
- return null;
- }
-
- boolean result = reader.next(key, row);
- if (result) {
- return row.toString();
- }
- else {
- return null;
- }
- }
-
- /**
- * @param minSize Minimum size of the result. If the file is compressed,
- * the result may be much larger. The reading starts at the current
- * position in the file, and stops once the limit has been reached.
- * @return An array of result rows.
- * @throws IllegalStateException
- */
- public String[] fetchArrayOfRows(int minSize) throws IOException {
-
- lastError = "";
- if (reader == null) {
- lastError = "open() was not called first.";
- return null;
- }
-
- ArrayList<String> result = new ArrayList<String>();
- long initialPos = getPosition();
- boolean stop = false;
- do {
- String newRow = fetchNextRow();
-
- if (newRow==null && lastError!=null)
- return null;
-
- boolean reachedEOF = (newRow == null || newRow.isEmpty());
- if (!reachedEOF)
- result.add(newRow);
-
- long bytesRead = getPosition() - initialPos;
- stop = reachedEOF || (bytesRead > minSize);
- } while (!stop);
-
- String[] resultArray = new String[result.size()];
- result.toArray(resultArray);
- return resultArray;
- }
-
- /**
- * Read a block of data from the file and return it as an array of rows.
- * First sync to startOffset, and skip the first row, then keep reading
- * Until passing stopOffset and passing the next Sync marker.
- * @param startOffset
- * @param stopOffset
- * @return
- * @throws IllegalStateException
- * @throws IOException
- */
- public String[] fetchArrayOfRows(int startOffset, int stopOffset)
- throws IOException {
-
- lastError = "";
- if (reader == null) {
- lastError = "open() was not called first.";
- return null;
- }
-
- seeknSync(startOffset);
-
- ArrayList<String> result = new ArrayList<String>();
- boolean stop = false;
- do {
- long startingPosition = getPosition();
- String newRow = fetchNextRow();
-
- if (newRow==null && lastError!=null)
- return null;
-
- boolean reachedEOF = (newRow == null || newRow.isEmpty());
-
- boolean reachedSize = (startingPosition > stopOffset);
- boolean lastSyncSeen = (reachedSize && reader.syncSeen());
- // Stop reading if there is no more data, or if we have read
- // enough bytes and have seen the Sync mark.
- stop = reachedEOF || (reachedSize && lastSyncSeen);
-
- if (!stop)
- result.add(newRow);
-
- } while (!stop);
-
- String[] resultArray = new String[result.size()];
- result.toArray(resultArray);
- return resultArray;
- }
-
- /**
- * Fetch the next row from the file.
- * @param stopOffset File offset at which to start looking for a sync marker
- * @return The next row, or null if we have reached EOF or have passed stopOffset and then
- * the sync marker.
- */
- public String fetchNextRow(long stopOffset) throws IOException {
-
- lastError = "";
- if (reader == null) {
- lastError = "open() was not called first.";
- return null;
- }
-
- long startingPosition = getPosition();
-
- String newRow = fetchNextRow();
-
- if (newRow==null && lastError!=null)
- return null;
-
- if (newRow == null)
- isEOF = true;
-
- if ("".equals(newRow))
- newRow = null;
-
- // If we have already read past the stopOffset on a previous row,
- // and have seen the sync marker, then this row belongs to the next block.
- if ((startingPosition > stopOffset) && reader.syncSeen())
- newRow = null;
-
- return newRow;
- }
-
- /**
- * Close the reader.
- */
- public String close() {
-
- lastError = "";
- if (reader == null) {
- lastError = "open() was not called first.";
- return null;
- }
-
- IOUtils.closeStream(reader);
-
- return null;
- }
-
- private boolean ReadnPrint(int start, int end)
- throws IOException {
- System.out.println("Beginning position: " + getPosition());
- String[] batch;
- batch = fetchArrayOfRows(start, end);
- if (batch==null)
- return false;
-
- boolean theresMore = (batch.length > 0);
- for (String newRow : batch)
- System.out.println(newRow);
- System.out.println("Ending position: " + getPosition());
- System.out.println("===> Buffer Split <===");
- return theresMore;
- }
-
- private boolean ReadnPrint2(int start, int end) throws IOException {
- System.out.println("Read from: " + start + " to: " + end + ".");
- seeknSync(start);
- System.out.println("Beginning position: " + getPosition());
- String newRow = null;
- do {
- newRow = fetchNextRow(end);
-
- if (newRow != null)
- System.out.println(newRow);
- } while (newRow != null);
-
- System.out.println("Ending position: " + getPosition());
- System.out.println("===> Buffer Split <===");
- return !isEOF();
- }
-
- /**
- * @param args
- * @throws IOException
- */
- public static void main(String[] args) throws IOException {
-
- SequenceFileReader sfReader = new SequenceFileReader();
- byte[] fieldDelim = new byte[2];
- fieldDelim[0] = 1;
- fieldDelim[1] = 0;
- //sfReader.initSerDe("19", "\01",
- // "p_promo_sk,p_promo_id,p_start_date_sk,p_end_date_sk,p_item_sk,p_cost,p_response_target,p_promo_name,p_channel_dmail,p_channel_email,p_channel_catalog,p_channel_tv,p_channel_radio,p_channel_press,p_channel_event,p_channel_demo,p_channel_details,p_purpose,p_discount_active",
- // "int,string,int,int,int,float,int,string,string,string,string,string,string,string,string,string,string,string,string",
- // "NULL");
-
- //sfReader.open("hdfs://localhost:9000/user/hive/warehouse/promotion_seq/000000_0");
- sfReader.seeknSync(300);
-
- int opType = 4;
- switch (opType)
- {
-// case 1:
-// boolean theresMoreRows = true;
-// do {
-// String[] columns = sfReader.fetchArrayOfColumns();
-// theresMoreRows = (columns != null);
-// if (theresMoreRows)
-// {
-// for (String col : columns)
-// {
-// if (col == null)
-// System.out.print("<NULL>, ");
-// else
-// System.out.print(col + ", ");
-// }
-// System.out.println();
-// }
-// } while (theresMoreRows);
-// break;
-
- case 2: // Return row as String
- String row;
- do {
- row = sfReader.fetchNextRow();
- if (row != null)
- System.out.println(row);
- } while (row != null);
- break;
-
- case 3:
- case 4:
- int size = 3000;
- int start = 0;
- int end = size;
- boolean theresMore3 = true;
-
- while (theresMore3) {
- if (opType == 3)
- theresMore3 = sfReader.ReadnPrint(start, end);
- else
- theresMore3 = sfReader.ReadnPrint2(start, end);
- start += size;
- end += size;
- }
- break;
-
- }
-
- sfReader.close();
- }
-
-// private static Object convertLazyToJava(Object o, ObjectInspector oi) {
-// Object obj = ObjectInspectorUtils.copyToStandardObject(o, oi, ObjectInspectorCopyOption.JAVA);
-//
-// // for now, expose non-primitive as a string
-// // TODO: expose non-primitive as a structured object while maintaining JDBC compliance
-// if (obj != null && oi.getCategory() != ObjectInspector.Category.PRIMITIVE) {
-// obj = obj.toString();
-// }
-//
-// return obj;
-// }
-}
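
The split logic in the deleted reader leans on SequenceFile sync markers: sync(pos) positions the reader at the first record boundary after pos, and syncSeen() reports whether the last next() crossed a sync point, so adjacent readers given different offset ranges never return the same record twice. Condensed into one method (a sketch; the file path and printed value are illustrative):

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.SequenceFile;
    import org.apache.hadoop.io.Writable;
    import org.apache.hadoop.util.ReflectionUtils;

    public class SyncSplitReadSketch {
        // Reads the records belonging to the byte range [start, stop) of a SequenceFile.
        static void readSplit(String file, long start, long stop) throws IOException {
            Configuration conf = new Configuration();
            SequenceFile.Reader reader =
                new SequenceFile.Reader(conf, SequenceFile.Reader.file(new Path(file)));
            Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
            Writable val = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
            try {
                reader.sync(start);                  // first record boundary after 'start'
                while (reader.next(key, val)) {
                    System.out.println(val);
                    // Past 'stop' and over a sync mark: the rest is the next split's.
                    if (reader.getPosition() > stop && reader.syncSeen()) break;
                }
            } finally {
                reader.close();
            }
        }
    }

This is the same boundary contract that MapReduce input splits rely on when several workers read one file.
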
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/executor/SequenceFileWriter.java
----------------------------------------------------------------------
diff --git a/core/sql/executor/SequenceFileWriter.java b/core/sql/executor/SequenceFileWriter.java
deleted file mode 100644
index 5d12fbf..0000000
--- a/core/sql/executor/SequenceFileWriter.java
+++ /dev/null
@@ -1,467 +0,0 @@
-// @@@ START COPYRIGHT @@@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// @@@ END COPYRIGHT @@@
-
-/**
- *
- */
-package org.trafodion.sql.HBaseAccess;
-
-import java.io.IOException;
-import java.io.OutputStream;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.MasterNotRunningException;
-import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.TableSnapshotScanner;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.HFileArchiveUtil;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.ByteWritable;
-import org.apache.hadoop.io.BytesWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.compress.CodecPool;
-import org.apache.hadoop.io.compress.CompressionCodec;
-import org.apache.hadoop.io.compress.Compressor;
-import org.apache.hadoop.io.compress.GzipCodec;
-import org.apache.hadoop.io.SequenceFile.CompressionType;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.hadoop.io.compress.*;
-import org.apache.hadoop.io.compress.zlib.*;
-import org.apache.hadoop.fs.*;
-
-import java.io.*;
-import java.util.List;
-
-import org.apache.hadoop.util.*;
-import org.apache.hadoop.io.*;
-import org.apache.log4j.Logger;
-
-import com.google.common.collect.Lists;
-import com.google.protobuf.ServiceException;
-
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.fs.permission.AclEntry;
-import org.apache.hadoop.fs.permission.FsPermission;
-public class SequenceFileWriter {
-
- static Logger logger = Logger.getLogger(SequenceFileWriter.class.getName());
- Configuration conf = null; // File system configuration
- HBaseAdmin admin = null;
-
- SequenceFile.Writer writer = null;
-
- FSDataOutputStream fsOut = null;
- OutputStream outStream = null;
-
- FileSystem fs = null;
- /**
- * Class Constructor
- */
- SequenceFileWriter() throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException, IOException
- {
- init("", "");
- conf.set("fs.hdfs.impl","org.apache.hadoop.hdfs.DistributedFileSystem");
- }
-
-
- public String open(String path) {
- try {
- Path filename = new Path(path);
- writer = SequenceFile.createWriter(conf,
- SequenceFile.Writer.file(filename),
- SequenceFile.Writer.keyClass(ByteWritable.class),
- SequenceFile.Writer.valueClass(BytesWritable.class),
- SequenceFile.Writer.compression(CompressionType.NONE));
- return null;
- } catch (Exception e) {
- //e.printStackTrace();
- return e.getMessage();
- }
- }
-
- public String open(String path, int compressionType) {
- try {
- Path filename = new Path(path);
-
- CompressionType compType=null;
- switch (compressionType) {
- case 0:
- compType = CompressionType.NONE;
- break;
-
- case 1:
- compType = CompressionType.RECORD;
- break;
-
- case 2:
- compType = CompressionType.BLOCK;
- break;
-
- default:
- return "Wrong argument for compression type.";
- }
-
- writer = SequenceFile.createWriter(conf,
- SequenceFile.Writer.file(filename),
- SequenceFile.Writer.keyClass(BytesWritable.class),
- SequenceFile.Writer.valueClass(Text.class),
- SequenceFile.Writer.compression(compType));
- return null;
- } catch (Exception e) {
- //e.printStackTrace();
- return e.getMessage();
- }
- }
-
- public String write(String data) {
- if (writer == null)
- return "open() was not called first.";
-
- try {
- writer.append(new BytesWritable(), new Text(data.getBytes()));
- return null;
- } catch (IOException e) {
- //e.printStackTrace();
- return e.getMessage();
- }
- }
-
- public String close() {
- if (writer == null)
- return "open() was not called first.";
-
- try {
- writer.close();
- return null;
- } catch (Exception e) {
- //e.printStackTrace();
- return e.getMessage();
- }
- }
-
-
-
- boolean hdfsCreate(String fname , boolean compress) throws IOException
- {
- if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsCreate() - started" );
- Path filePath = null;
- if (!compress || (compress && fname.endsWith(".gz")))
- filePath = new Path(fname);
- else
- filePath = new Path(fname + ".gz");
-
- fs = FileSystem.get(filePath.toUri(),conf);
- fsOut = fs.create(filePath, true);
-
- outStream = fsOut;
-
- if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsCreate() - file created" );
- if (compress)
- {
- GzipCodec gzipCodec = (GzipCodec) ReflectionUtils.newInstance( GzipCodec.class, conf);
- Compressor gzipCompressor = CodecPool.getCompressor(gzipCodec);
- try
- {
- outStream = gzipCodec.createOutputStream(fsOut, gzipCompressor);
- }
- catch (IOException e)
- {
- if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsCreate() --exception :" + e);
- throw e;
- }
- }
-
- if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsCreate() - compressed output stream created" );
- return true;
- }
-
- boolean hdfsWrite(byte[] buff, long len) throws Exception,OutOfMemoryError
- {
-
- if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsWrite() - started" );
- try
- {
- outStream.write(buff);
- outStream.flush();
- }
- catch (Exception e)
- {
- if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsWrite() -- exception: " + e);
- throw e;
- }
- catch (OutOfMemoryError e1)
- {
- logger.debug("SequenceFileWriter.hdfsWrite() -- OutOfMemory Error: " + e1);
- throw e1;
- }
- if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsWrite() - bytes written and flushed:" + len );
-
- return true;
- }
-
- boolean hdfsClose() throws IOException
- {
- if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsClose() - started" );
- try
- {
- outStream.close();
- fsOut.close();
- }
- catch (IOException e)
- {
- if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsClose() - exception:" + e);
- throw e;
- }
- return true;
- }
-
-
- public boolean hdfsMergeFiles(String srcPathStr, String dstPathStr) throws Exception
- {
- if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsMergeFiles() - start");
- if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsMergeFiles() - source Path: " + srcPathStr +
- ", destination File:" + dstPathStr );
- try
- {
- Path srcPath = new Path(srcPathStr );
- srcPath = srcPath.makeQualified(srcPath.toUri(), null);
- FileSystem srcFs = FileSystem.get(srcPath.toUri(),conf);
-
- Path dstPath = new Path(dstPathStr);
- dstPath = dstPath.makeQualified(dstPath.toUri(), null);
- FileSystem dstFs = FileSystem.get(dstPath.toUri(),conf);
-
- if (dstFs.exists(dstPath))
- {
- if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsMergeFiles() - destination files exists" );
- // for this prototype we just delete the file-- will change in next code drops
- dstFs.delete(dstPath, false);
- // The caller should already have checked existence of file-- throw exception
- //throw new FileAlreadyExistsException(dstPath.toString());
- }
-
- Path tmpSrcPath = new Path(srcPath, "tmp");
-
- FileSystem.mkdirs(srcFs, tmpSrcPath,srcFs.getFileStatus(srcPath).getPermission());
- logger.debug("SequenceFileWriter.hdfsMergeFiles() - tmp folder created." );
- Path[] files = FileUtil.stat2Paths(srcFs.listStatus(srcPath));
- for (Path f : files)
- {
- srcFs.rename(f, tmpSrcPath);
- }
- // copyMerge and use false for the delete option since it removes the whole directory
- if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsMergeFiles() - copyMerge" );
- FileUtil.copyMerge(srcFs, tmpSrcPath, dstFs, dstPath, false, conf, null);
-
- if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsMergeFiles() - delete intermediate files" );
- srcFs.delete(tmpSrcPath, true);
- }
- catch (IOException e)
- {
- if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsMergeFiles() --exception:" + e);
- throw e;
- }
-
-
- return true;
- }
- public boolean hdfsCleanUnloadPath(String uldPathStr
- /*, boolean checkExistence, String mergeFileStr*/) throws Exception
- {
- if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsCleanUnloadPath() - start");
- logger.debug("SequenceFileWriter.hdfsCleanUnloadPath() - unload Path: " + uldPathStr );
-
- try
- {
- Path uldPath = new Path(uldPathStr );
- uldPath = uldPath.makeQualified(uldPath.toUri(), null);
- FileSystem srcFs = FileSystem.get(uldPath.toUri(),conf);
- if (!srcFs.exists(uldPath))
- {
- //unload location does not exist. hdfscreate will create it later
- //nothing to do
- logger.debug("SequenceFileWriter.hdfsCleanUnloadPath() -- unload location does not exist." );
- return true;
- }
-
- Path[] files = FileUtil.stat2Paths(srcFs.listStatus(uldPath));
- logger.debug("SequenceFileWriter.hdfsCleanUnloadPath() - delete files" );
- for (Path f : files){
- srcFs.delete(f, false);
- }
- }
- catch (IOException e)
- {
- logger.debug("SequenceFileWriter.hdfsCleanUnloadPath() -exception:" + e);
- throw e;
- }
-
- return true;
- }
-
- public boolean hdfsExists(String filePathStr) throws Exception
- {
- logger.debug("SequenceFileWriter.hdfsExists() - start");
- logger.debug("SequenceFileWriter.hdfsExists() - Path: " + filePathStr);
-
- try
- {
- //check existence of the merge Path
- Path filePath = new Path(filePathStr );
- filePath = filePath.makeQualified(filePath.toUri(), null);
- FileSystem mergeFs = FileSystem.get(filePath.toUri(),conf);
- if (mergeFs.exists( filePath))
- {
- logger.debug("SequenceFileWriter.hdfsExists() - Path: "
- + filePath + " exists" );
- return true;
- }
-
- } catch (IOException e) {
- logger.debug("SequenceFileWriter.hdfsExists() -exception:" + e);
- throw e;
- }
- return false;
- }
-
- public boolean hdfsDeletePath(String pathStr) throws Exception
- {
- if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsDeletePath() - start - Path: " + pathStr);
- try
- {
- Path delPath = new Path(pathStr );
- delPath = delPath.makeQualified(delPath.toUri(), null);
- FileSystem fs = FileSystem.get(delPath.toUri(),conf);
- fs.delete(delPath, true);
- }
- catch (IOException e)
- {
- if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.hdfsDeletePath() --exception:" + e);
- throw e;
- }
-
- return true;
- }
-
- private boolean init(String zkServers, String zkPort)
- throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException, IOException
- {
- logger.debug("SequenceFileWriter.init(" + zkServers + ", " + zkPort + ") called.");
- if (conf != null)
- return true;
- conf = HBaseConfiguration.create();
- if (zkServers.length() > 0)
- conf.set("hbase.zookeeper.quorum", zkServers);
- if (zkPort.length() > 0)
- conf.set("hbase.zookeeper.property.clientPort", zkPort);
- HBaseAdmin.checkHBaseAvailable(conf);
- return true;
- }
-
- public boolean createSnapshot( String tableName, String snapshotName)
- throws MasterNotRunningException, IOException, SnapshotCreationException,
- InterruptedException, ZooKeeperConnectionException, ServiceException
- {
- try
- {
- if (admin == null)
- admin = new HBaseAdmin(conf);
- admin.snapshot(snapshotName, tableName);
- if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.createSnapshot() - Snapshot created: " + snapshotName);
- }
- catch (Exception e)
- {
- if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.createSnapshot() - Exception: " + e);
- throw e;
- }
- return true;
- }
- public boolean verifySnapshot( String tableName, String snapshotName)
- throws MasterNotRunningException, IOException, SnapshotCreationException,
- InterruptedException, ZooKeeperConnectionException, ServiceException
- {
- try
- {
- if (admin == null)
- admin = new HBaseAdmin(conf);
- List<SnapshotDescription> lstSnaps = admin.listSnapshots();
-
- for (SnapshotDescription snpd : lstSnaps)
- {
- if (snpd.getName().compareTo(snapshotName) == 0 &&
- snpd.getTable().compareTo(tableName) == 0)
- {
- if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.verifySnapshot() - Snapshot verified: " + snapshotName);
- return true;
- }
- }
- }
- catch (Exception e)
- {
- if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.verifySnapshot() - Exception: " + e);
- throw e;
- }
- return false;
- }
-
- public boolean deleteSnapshot( String snapshotName)
- throws MasterNotRunningException, IOException, SnapshotCreationException,
- InterruptedException, ZooKeeperConnectionException, ServiceException
- {
- try
- {
- if (admin == null)
- admin = new HBaseAdmin(conf);
- admin.deleteSnapshot(snapshotName);
- if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.deleteSnapshot() - Snapshot deleted: " + snapshotName);
- }
- catch (Exception e)
- {
- if (logger.isDebugEnabled()) logger.debug("SequenceFileWriter.deleteSnapshot() - Exception: " + e);
- throw e;
- }
-
- return true;
- }
-
- public boolean release() throws IOException
- {
- if (admin != null)
- {
- admin.close();
- admin = null;
- }
- return true;
- }
-}
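
Besides SequenceFile output, the deleted writer serves plain and gzip-compressed HDFS streams (hdfsCreate/hdfsWrite/hdfsClose) by wrapping the FSDataOutputStream in a pooled GzipCodec compressor. The essential pattern as a standalone sketch (the path handling mirrors hdfsCreate(); returning the compressor to the pool is an addition the original omits):

    import java.io.IOException;
    import java.io.OutputStream;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.compress.CodecPool;
    import org.apache.hadoop.io.compress.Compressor;
    import org.apache.hadoop.io.compress.GzipCodec;
    import org.apache.hadoop.util.ReflectionUtils;

    public class GzipHdfsWriteSketch {
        static void writeCompressed(String pathStr, byte[] data) throws IOException {
            Configuration conf = new Configuration();
            Path path = new Path(pathStr.endsWith(".gz") ? pathStr : pathStr + ".gz");
            FileSystem fs = FileSystem.get(path.toUri(), conf);
            GzipCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
            Compressor compressor = CodecPool.getCompressor(codec);
            OutputStream out = codec.createOutputStream(fs.create(path, true), compressor);
            try {
                out.write(data);                        // compressed transparently
            } finally {
                out.close();                            // flushes the gzip trailer
                CodecPool.returnCompressor(compressor); // hand the compressor back
            }
        }
    }
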
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/executor/StringArrayList.java
----------------------------------------------------------------------
diff --git a/core/sql/executor/StringArrayList.java b/core/sql/executor/StringArrayList.java
deleted file mode 100644
index b0b7ed5..0000000
--- a/core/sql/executor/StringArrayList.java
+++ /dev/null
@@ -1,47 +0,0 @@
-// @@@ START COPYRIGHT @@@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// @@@ END COPYRIGHT @@@
-
-package org.trafodion.sql.HBaseAccess;
-
-import java.util.ArrayList;
-
-public class StringArrayList extends ArrayList<String> {
-
- private static final long serialVersionUID = -3557219338406352735L;
-
- void addElement(String st) {
- add(st);
- }
-
- String getElement(int i) {
- if (size() == 0)
- return null;
- else if (i < size())
- return get(i);
- else
- return null;
- }
-
- int getSize() {
- return size();
- }
-
-}
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/executor/org_trafodion_sql_HTableClient.h
----------------------------------------------------------------------
diff --git a/core/sql/executor/org_trafodion_sql_HTableClient.h b/core/sql/executor/org_trafodion_sql_HTableClient.h
new file mode 100644
index 0000000..e3c8837
--- /dev/null
+++ b/core/sql/executor/org_trafodion_sql_HTableClient.h
@@ -0,0 +1,43 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class org_trafodion_sql_HTableClient */
+
+#ifndef _Included_org_trafodion_sql_HTableClient
+#define _Included_org_trafodion_sql_HTableClient
+#ifdef __cplusplus
+extern "C" {
+#endif
+#undef org_trafodion_sql_HTableClient_GET_ROW
+#define org_trafodion_sql_HTableClient_GET_ROW 1L
+#undef org_trafodion_sql_HTableClient_BATCH_GET
+#define org_trafodion_sql_HTableClient_BATCH_GET 2L
+#undef org_trafodion_sql_HTableClient_SCAN_FETCH
+#define org_trafodion_sql_HTableClient_SCAN_FETCH 3L
+/*
+ * Class: org_trafodion_sql_HTableClient
+ * Method: setResultInfo
+ * Signature: (J[I[I[I[I[I[I[J[[B[[B[III)I
+ */
+JNIEXPORT jint JNICALL Java_org_trafodion_sql_HTableClient_setResultInfo
+ (JNIEnv *, jobject, jlong, jintArray, jintArray, jintArray, jintArray, jintArray, jintArray, jlongArray, jobjectArray, jobjectArray, jintArray, jint, jint);
+
+/*
+ * Class: org_trafodion_sql_HTableClient
+ * Method: cleanup
+ * Signature: (J)V
+ */
+JNIEXPORT void JNICALL Java_org_trafodion_sql_HTableClient_cleanup
+ (JNIEnv *, jobject, jlong);
+
+/*
+ * Class: org_trafodion_sql_HTableClient
+ * Method: setJavaObject
+ * Signature: (J)I
+ */
+JNIEXPORT jint JNICALL Java_org_trafodion_sql_HTableClient_setJavaObject
+ (JNIEnv *, jobject, jlong);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
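
[Editor's note] The machine-generated header above maps one-to-one onto native declarations in the Java class. Below is a hypothetical reconstruction of that Java side: the class name, method names, constants, and JNI signatures are taken directly from the header, and only the parameter names are guesses.

    package org.trafodion.sql;

    public class HTableClient {
        static final int GET_ROW    = 1;  // org_trafodion_sql_HTableClient_GET_ROW
        static final int BATCH_GET  = 2;  // org_trafodion_sql_HTableClient_BATCH_GET
        static final int SCAN_FETCH = 3;  // org_trafodion_sql_HTableClient_SCAN_FETCH

        // JNI signature (J[I[I[I[I[I[I[J[[B[[B[III)I
        native int setResultInfo(long jniObject,
                                 int[] kvLength, int[] kvValueOffset,
                                 int[] kvQualLength, int[] kvQualOffset,
                                 int[] kvFamLength, int[] kvFamOffset,
                                 long[] timestamps,
                                 byte[][] kvBuffers, byte[][] rowIds,
                                 int[] kvsPerRow,
                                 int numCellsReturned, int rowsReturned);

        // JNI signature (J)V
        native void cleanup(long jniObject);

        // JNI signature (J)I
        native int setJavaObject(long jniObject);
    }
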
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/nskgmake/Makerules.build
----------------------------------------------------------------------
diff --git a/core/sql/nskgmake/Makerules.build b/core/sql/nskgmake/Makerules.build
index 629cfbb..af55cbe 100755
--- a/core/sql/nskgmake/Makerules.build
+++ b/core/sql/nskgmake/Makerules.build
@@ -47,11 +47,6 @@ LSRC :=
YSRC :=
YINC :=
LEX_PREFIX := yy
-JSRC :=
-SPECIAL_JSRC :=
-JARPREFIX :=
-JARFILE :=
-JAR_APPEND :=
CFLAGS :=
CXXFLAGS :=
LDFLAGS :=
@@ -242,15 +237,7 @@ ifdef INSTALL_OBJ
$(call find_first,$(srcfile),$(SRCPATH)))))
endif
-# These are the rules dealing with Java.
-JAVA_OBJS :=
-
-ifdef JSRC
- $(eval $(call JAR_BUILD_template))
-endif
-
# These dependencies will rebuild all of the objects in a directory if the
# makefile for that directory changes.
# $(OBJ_PATHS): $(OBJDIR)/Makefile
# $(INSTALL_OBJ): $(OBJDIR)/Makefile
-# $(JAVA_OBJS): $(OBJDIR)/Makefile
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/nskgmake/Makerules.linux
----------------------------------------------------------------------
diff --git a/core/sql/nskgmake/Makerules.linux b/core/sql/nskgmake/Makerules.linux
index 2d07734..8bf6257 100755
--- a/core/sql/nskgmake/Makerules.linux
+++ b/core/sql/nskgmake/Makerules.linux
@@ -123,9 +123,6 @@ endif
# should be set up in other makefiles (e.g. sqlci and tdm_arkcmp makefiles).
#EARLY_DLLS :=
-ifeq ($(SQ_BUILD_TYPE),debug)
- JAVA_COMPILE_FLAGS=-g
-endif
LIBHDFS_INC=-I$(HADOOP_INC_DIR)
LIBHDFS_LIB=-ljvm -lhdfs
LIBHDFS_SO=libhdfs.so
@@ -410,11 +407,6 @@ linuxcleandebug linuxdebugclean linuxcleanrelease linuxreleaseclean : clean
echo rm -f $(NSKBIN)/$$i ;\
rm -f $(NSKBIN)/$$i ;\
done;\
- for i in $(notdir $(FINAL_JARS));\
- do \
- echo rm -f $(NSK_SQ)/export/lib/$$i ;\
- rm -f $(NSK_SQ)/export/lib/$$i ;\
- done;\
for i in $(filter $(POSSIBLE_NO_EXPORT_EXE_NAMES),\
$(notdir $(FINAL_EXES)));\
do \
@@ -452,11 +444,6 @@ linuxmklinksdebug linuxmklinksrelease: copytoolslibs
echo ln -sf $$OUTDIR/$$i $(NSKBIN);\
ln -sf $$OUTDIR/$$i $(NSKBIN);\
done;\
- for i in $(notdir $(FINAL_JARS));\
- do \
- echo ln -sf $$OUTDIR/$$i $(NSK_SQ)/export/lib/;\
- ln -sf $$OUTDIR/$$i $(NSK_SQ)/export/lib/;\
- done;\
for i in $(filter $(POSSIBLE_NO_EXPORT_EXE_NAMES),\
$(notdir $(FINAL_EXES)));\
do \
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/nskgmake/Makerules.mk
----------------------------------------------------------------------
diff --git a/core/sql/nskgmake/Makerules.mk b/core/sql/nskgmake/Makerules.mk
index a0e93a2..206a298 100755
--- a/core/sql/nskgmake/Makerules.mk
+++ b/core/sql/nskgmake/Makerules.mk
@@ -70,8 +70,6 @@ SHELL := sh
YACC = export BISON_PKGDATADIR=$(TOPDIR)/toolbin/bison; export M4=$(TOPDIR)/toolbin/m4; $(TOPDIR)/toolbin/bison.exe -p $(YACC_VAR_PREFIX)
LEX = $(TOPDIR)/toolbin/flex.exe -P$(YACC_VAR_PREFIX)
AWK := awk.exe
-JAVAC := $(JAVA_HOME)/bin/javac
-JAR := $(JAVA_HOME)/bin/jar
# Build everything by default
.DEFAULT_GOAL := buildall
@@ -96,9 +94,6 @@ LINK_LIB_DLL_ECHO_RULE = @echo "Creating export file and DLL .lib file $@";
LINK_DLL_ECHO_RULE = @echo "Linking DLL library $@";
BUILD_RC_ECHO_RULE = @echo "Building resource file $@";
LINK_EXE_ECHO_RULE = @echo "Linking executable $@";
-JAVAC_ECHO_RULE = @echo "Compiling $<";
-JAR_APPEND_ECHO_RULE = @echo "Appending to jar file $(JARFILE)";
-JAR_ECHO_RULE = @echo "Creating jar file $(JARFILE)";
LEX_ECHO_RULE = @echo "Generating C++ code from lex file $<";
YACC_ECHO_RULE = @echo "Generating C++ code from yacc file $<";
GENERATE_ECHO_RULE = @echo "Generating file $@";
@@ -158,7 +153,6 @@ FINAL_LIBS :=
FINAL_DLLS :=
FINAL_EXES :=
FINAL_INSTALL_OBJS :=
-FINAL_JARS :=
# These rules are used as part of a mechanism to compile the files
# located in different source locations. This template is called from
@@ -188,67 +182,6 @@ $(C_OBJ) : C_OBJ:=$(C_OBJ)
$(C_OBJ) : C_INC_OVERRIDE:=$(C_INC_OVERRIDE)
endef
-# The build_java_rule rules and the JAVA_BUILD_template are used for
-# compiling java classes into class files.
-compile_java_rule = $(JAVAC) ${JAVA_COMPILE_FLAGS} -d $(TARGOBJDIR)/java -classpath '$(CLASSPATH)' $<
-
-append_jar_rule = cp $(JAR_APPEND) $$(JARFILE);$(JAR) uvmf $$(JARMANIFEST) $$(JARFILE) -C $$(TARGOBJDIR)/java $$(PACKAGE)
-compile_jar_rule = $(JAR) cvmf $$(JARMANIFEST) $$(JARFILE)_temp -C $$(TARGOBJDIR)/java $$(PACKAGE) -C $$(TARGOBJDIR)/java $$(ORCPACKAGE); mv -f $$(JARFILE)_temp $$(JARFILE)
-
-build_java_rule = $(JAVAC_ECHO_RULE) \
- HEADING="Compiling $(<) --> $(@)"; $(starting_logfile) \
- CMD="$(compile_java_rule)"; $(capture_output)
-
-define JAVA_BUILD_template
- _dummy := $(if $(wildcard $(TARGOBJDIR)/java),,$(shell mkdir -p $(TARGOBJDIR)/java))
- ifneq (.,$(PACKAGE))
- $(TARGOBJDIR)/java/$(PACKAGE)/$(basename $(notdir $1)).class: $(1)
- $$(build_java_rule)
- else
- $(TARGOBJDIR)/java/$(basename $(notdir $1)).class: $(1)
- $$(build_java_rule)
- endif
-endef
-
-# The build_jar_rule creates the .jar file from the individual .class files.
-define JAR_BUILD_template
- ifneq (.,$(PACKAGE))
- JAVA_OBJS := $$(patsubst %.java,$$(TARGOBJDIR)/java/$$(PACKAGE)/%.class,\
- $$(JSRC) $$(SPECIAL_JSRC))
- else
- JAVA_OBJS := $$(patsubst %.java,$$(TARGOBJDIR)/java/%.class,\
- $$(JSRC) $$(SPECIAL_JSRC))
- PACKAGE := .
- endif
- JARFILE := $$(RESULTDIR)/$$(JARPREFIX).jar
- FINAL_JARS += $$(JARFILE)
-
- # Rules for building jar files
- ifneq (,$(JAR_APPEND))
- $$(JARFILE): $$(JAVA_OBJS) $(JAR_APPEND) $$(JARMANIFEST)
- $$(JAR_APPEND_ECHO_RULE) \
- HEADING="Compiling $$(<) --> $$(@)"; $$(starting_logfile) \
- CMD="$(append_jar_rule)"; $$(capture_output)
- else
- $$(JARFILE): $$(JAVA_OBJS) $$(JARMANIFEST)
- $$(JAR_ECHO_RULE) \
- HEADING="Compiling $$(<) --> $$(@)"; $$(starting_logfile) \
- CMD="$(compile_jar_rule)"; $$(capture_output)
- endif
-
- # Rules for compiling java files
- $$(foreach srcfile,$$(JSRC),$$(eval $$(call JAVA_BUILD_template,\
- $$(call find_first,$$(srcfile),$$(SRCPATH)))))
-
- # Make sure these variables are instantiated correctly.
- $$(JARFILE): TARGOBJDIR:=$$(TARGOBJDIR)
- $$(JARFILE): JARFILE:=$$(JARFILE)
- $$(JARFILE): PACKAGE:=$$(PACKAGE)
- $$(JARFILE): CLASSPATH:=$$(CLASSPATH)
- $$(JARFILE): JAVA_OBJS := $(JAVA_OBJS)
- $$(JARFILE): JARMANIFEST := $(JARMANIFEST)
-endef
-
compile_c_resultobj_rule = $(CXX) $(DEBUG_FLAGS) $(SQLCLIOPT) $(ALL_INCLUDES) -o $@ -c $<
build_c_resultobj_rule = $(COMPILE_ECHO_RULE) \
@@ -335,16 +268,16 @@ endif
.PHONY: $(MAKECMDGOALS)
-# DLL's should be build before executables, so we are adding this dependency here.
+# DLLs should be built before executables, so we are adding this dependency here.
# $(FINAL_EXES): $(FINAL_DLLS)
-# Some (soon maybe all) Java files get built through Maven
+# Java files get built through Maven
mavenbuild:
set -o pipefail && cd ..; $(MAVEN) -f pom.xml package -DskipTests | tee maven_build.log | grep -e '\[INFO\] Building' -e '\[INFO\] BUILD SUCCESS' -e 'ERROR'
cp -pf ../target/*.jar $(MY_SQROOT)/export/lib
# This is where the top-level is declared to build everything.
-buildall: $(FINAL_LIBS) $(FINAL_DLLS) $(FINAL_INSTALL_OBJS) $(FINAL_JARS) $(FINAL_EXES) mavenbuild
+buildall: $(FINAL_LIBS) $(FINAL_DLLS) $(FINAL_INSTALL_OBJS) $(FINAL_EXES) mavenbuild
clean:
@echo "Removing intermediate objects for $(TARGTYPE)/$(ARCHBITS)/$(FLAVOR)"
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/nskgmake/executor/Makefile
----------------------------------------------------------------------
diff --git a/core/sql/nskgmake/executor/Makefile b/core/sql/nskgmake/executor/Makefile
index d624b11..dd23a6e 100755
--- a/core/sql/nskgmake/executor/Makefile
+++ b/core/sql/nskgmake/executor/Makefile
@@ -139,57 +139,3 @@ EXTERN_LIBS := $(SP_EXPORT_LIB)/libwrappersq.so
endif
SYS_LIBS := -lrt -lpthread
SRCPATH := bin executor runtimestats porting_layer qmscommon
-
-### Java stuff - for building trafodion-HBaseAccess
-################################################
-
-PACKAGE := org/trafodion/sql/HBaseAccess
-ORCPACKAGE := org/apache/hadoop/hive/ql/io/orc
-JARPREFIX := trafodion-HBaseAccess-$(TRAFODION_VER)
-
-JSRC := RowToInsert.java \
- RowsToInsert.java \
- ResultIterator.java \
- StringArrayList.java \
- ByteArrayList.java \
- ResultKeyValueList.java \
- SequenceFileWriter.java \
- SequenceFileReader.java \
- HTableClient.java \
- HBaseClient.java \
- HiveClient.java \
- HBulkLoadClient.java \
- OrcFileReader.java
-
-# Set up explicit dependencies (necessary for parallel builds... and for correctness).
-$(TARGOBJDIR)/java/$(PACKAGE)/HTableClient.class : $(TARGOBJDIR)/java/$(PACKAGE)/RowToInsert.class
-$(TARGOBJDIR)/java/$(PACKAGE)/HTableClient.class : $(TARGOBJDIR)/java/$(PACKAGE)/RowsToInsert.class
-$(TARGOBJDIR)/java/$(PACKAGE)/HTableClient.class : $(TARGOBJDIR)/java/$(PACKAGE)/ResultIterator.class
-$(TARGOBJDIR)/java/$(PACKAGE)/HTableClient.class : $(TARGOBJDIR)/java/$(PACKAGE)/ByteArrayList.class
-$(TARGOBJDIR)/java/$(PACKAGE)/HTableClient.class : $(TARGOBJDIR)/java/$(PACKAGE)/ResultKeyValueList.class
-
-$(TARGOBJDIR)/java/$(PACKAGE)/HBulkLoadClient.class : $(TARGOBJDIR)/java/$(PACKAGE)/RowToInsert.class
-$(TARGOBJDIR)/java/$(PACKAGE)/HBulkLoadClient.class : $(TARGOBJDIR)/java/$(PACKAGE)/RowsToInsert.class
-$(TARGOBJDIR)/java/$(PACKAGE)/HBulkLoadClient.class : $(TARGOBJDIR)/java/$(PACKAGE)/HTableClient.class
-
-$(TARGOBJDIR)/java/$(PACKAGE)/HBaseClient.class : $(TARGOBJDIR)/java/$(PACKAGE)/RowToInsert.class
-$(TARGOBJDIR)/java/$(PACKAGE)/HBaseClient.class : $(TARGOBJDIR)/java/$(PACKAGE)/RowsToInsert.class
-$(TARGOBJDIR)/java/$(PACKAGE)/HBaseClient.class : $(TARGOBJDIR)/java/$(PACKAGE)/ResultIterator.class
-$(TARGOBJDIR)/java/$(PACKAGE)/HBaseClient.class : $(TARGOBJDIR)/java/$(PACKAGE)/StringArrayList.class
-$(TARGOBJDIR)/java/$(PACKAGE)/HBaseClient.class : $(TARGOBJDIR)/java/$(PACKAGE)/ByteArrayList.class
-$(TARGOBJDIR)/java/$(PACKAGE)/HBaseClient.class : $(TARGOBJDIR)/java/$(PACKAGE)/ResultKeyValueList.class
-$(TARGOBJDIR)/java/$(PACKAGE)/HBaseClient.class : $(TARGOBJDIR)/java/$(PACKAGE)/HTableClient.class
-$(TARGOBJDIR)/java/$(PACKAGE)/HBaseClient.class : $(TARGOBJDIR)/java/$(PACKAGE)/HBulkLoadClient.class
-
-
-$(TARGOBJDIR)/java/$(PACKAGE)/HiveClient.class : $(TARGOBJDIR)/java/$(PACKAGE)/StringArrayList.class
-
- JARMANIFEST := trafodion-HBaseAccess.jar.mf
-
-JSRC_DIR := $(MY_SQROOT)/../sql/executor
-CLASSPATH := $(TARGOBJDIR)/java:$(JSRC_DIR):$(ENV_CLASSPATH)
-
-
-$(JARMANIFEST) : $(MY_SQROOT)/export/include/SCMBuildMan.mf $(TOPDIR)/executor/trafodion-HBaseAccess.jar.version
- cat $(TOPDIR)/executor/trafodion-HBaseAccess.jar.version >$@
- cat $(MY_SQROOT)/export/include/SCMBuildMan.mf >>$@
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/nskgmake/ustat/Makefile
----------------------------------------------------------------------
diff --git a/core/sql/nskgmake/ustat/Makefile b/core/sql/nskgmake/ustat/Makefile
index 6fe06a1..78c8192 100755
--- a/core/sql/nskgmake/ustat/Makefile
+++ b/core/sql/nskgmake/ustat/Makefile
@@ -33,21 +33,8 @@ CPPSRC := hs_cli.cpp \
hs_util.cpp \
vers_libustat.cpp
-JSRC := ChgAutoList.java \
- UstatUtil.java
-
-PACKAGE := com/hp/mx_ustat
-JARPREFIX := mx_ustat
- JARMANIFEST := $(TOPDIR)/ustat/mx_ustat.jar.mf
-CLASSPATH := $(TARGOBJDIR)/java
-
YSRC := hs_yacc.y
LSRC := hs_lex.ll
YACC_VAR_PREFIX := ystat
LEX_PREFIX := ystat
-
-# Explicit dependencies needed
-$(JARMANIFEST) : $(MY_SQROOT)/export/include/SCMBuildMan.mf $(TOPDIR)/ustat/mx_ustat.jar.version
- cat $(TOPDIR)/ustat/mx_ustat.jar.version >$@
- cat $(MY_SQROOT)/export/include/SCMBuildMan.mf >>$@
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/pom.xml
----------------------------------------------------------------------
diff --git a/core/sql/pom.xml b/core/sql/pom.xml
index 53901fd..82c1d11 100755
--- a/core/sql/pom.xml
+++ b/core/sql/pom.xml
@@ -20,13 +20,62 @@
*/
-->
<repositories>
+ <repository>
+ <id>cloudera</id>
+ <url>https://repository.cloudera.com/artifactory/cloudera-repos/</url>
+ </repository>
</repositories>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+ <hadoop.version>2.5.0</hadoop.version>
+ <hbase.version>0.98.1-cdh5.1.0</hbase.version>
+ <hbase-trx.id>hbase-trx-cdh5_3</hbase-trx.id>
+ <hive.version>0.13.1</hive.version>
+ <thrift.version>0.9.0</thrift.version>
<java.version>1.7</java.version>
</properties>
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.hbase</groupId>
+ <artifactId>hbase-server</artifactId>
+ <version>${hbase.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hbase</groupId>
+ <artifactId>hbase-client</artifactId>
+ <version>${hbase.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hbase.client.transactional</groupId>
+ <artifactId>${hbase-trx.id}</artifactId>
+ <version>${env.TRAFODION_VER}</version>
+ <scope>system</scope>
+ <systemPath>${env.MY_SQROOT}/export/lib/${hbase-trx.id}-${env.TRAFODION_VER}.jar</systemPath>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hive</groupId>
+ <artifactId>hive-jdbc</artifactId>
+ <version>${hive.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hive</groupId>
+ <artifactId>hive-metastore</artifactId>
+ <version>${hive.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.thrift</groupId>
+ <artifactId>libthrift</artifactId>
+ <version>${thrift.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>com.google.protobuf</groupId>
+ <artifactId>protobuf-java</artifactId>
+ <version>2.5.0</version>
+ </dependency>
+ </dependencies>
+
<groupId>org.trafodion.sql</groupId>
<modelVersion>4.0.0</modelVersion>
<artifactId>trafodion-sql</artifactId>
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/qmscommon/QRLogger.cpp
----------------------------------------------------------------------
diff --git a/core/sql/qmscommon/QRLogger.cpp b/core/sql/qmscommon/QRLogger.cpp
index bd775e8..364c0d2 100644
--- a/core/sql/qmscommon/QRLogger.cpp
+++ b/core/sql/qmscommon/QRLogger.cpp
@@ -61,14 +61,14 @@ std::string CAT_SQL_HBASE = "SQL.HBase";
// these categories are currently not used
std::string CAT_SQL_QMP = "SQL.Qmp";
std::string CAT_SQL_QMM = "SQL.Qmm";
-std::string CAT_SQL_COMP_QR_DESC_GEN = "SQL.Comp.DescGen";
-std::string CAT_SQL_COMP_QR_HANDLER = "SQL.Comp.QRHandler";
-std::string CAT_SQL_COMP_QR_COMMON = "SQL.COMP.QRCommon";
-std::string CAT_SQL_COMP_QR_IPC = "SQL.COMP.QRCommon.IPC";
-std::string CAT_SQL_COMP_MV_REFRESH = "SQL.COMP.MV.REFRESH";
-std::string CAT_SQL_COMP_MVCAND = "SQL.Comp.MVCandidates";
-std::string CAT_SQL_MEMORY = "SQL.Memory";
-std::string CAT_SQL_COMP_RANGE = "SQL.COMP.Range";
+std::string CAT_SQL_COMP_QR_DESC_GEN = "SQL.COMP"; // ".DescGen";
+std::string CAT_SQL_COMP_QR_HANDLER = "SQL.COMP"; // ".QRHandler";
+std::string CAT_SQL_COMP_QR_COMMON = "SQL.COMP"; // ".QRCommon";
+std::string CAT_SQL_COMP_QR_IPC = "SQL.COMP"; // ".QRCommon.IPC";
+std::string CAT_SQL_COMP_MV_REFRESH = "SQL.COMP"; // ".MV.REFRESH";
+std::string CAT_SQL_COMP_MVCAND = "SQL.COMP"; // ".MVCandidates";
+std::string CAT_SQL_MEMORY = "SQL.COMP"; // ".Memory";
+std::string CAT_SQL_COMP_RANGE = "SQL.COMP"; // ".Range";
std::string CAT_QR_TRACER = "QRCommon.Tracer";
std::string CAT_SQL_QMS = "SQL.Qms";
std::string CAT_SQL_QMS_MAIN = "SQL.Qms.Main";
@@ -78,7 +78,7 @@ std::string CAT_SQL_MVMEMO_STATS = "SQL.Qms.MvmemoStats";
std::string CAT_SQL_QMS_GRP_LATTCE_INDX = "SQL.Qms.LatticeIndex";
std::string CAT_SQL_QMS_MATCHTST_MVDETAILS = "SQL.Qms.MatchTest";
std::string CAT_SQL_QMS_XML = "SQL.Qms.XML";
-std::string CAT_SQL_COMP_XML = "SQL.Comp.XML";
+std::string CAT_SQL_COMP_XML = "SQL.COMP"; // ".XML";
// **************************************************************************
// **************************************************************************
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/a44823fe/core/sql/src/main/java/org/trafodion/sql/ByteArrayList.java
----------------------------------------------------------------------
diff --git a/core/sql/src/main/java/org/trafodion/sql/ByteArrayList.java b/core/sql/src/main/java/org/trafodion/sql/ByteArrayList.java
new file mode 100644
index 0000000..46b81fe
--- /dev/null
+++ b/core/sql/src/main/java/org/trafodion/sql/ByteArrayList.java
@@ -0,0 +1,54 @@
+// @@@ START COPYRIGHT @@@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// @@@ END COPYRIGHT @@@
+
+package org.trafodion.sql;
+
+import java.util.ArrayList;
+
+public class ByteArrayList extends ArrayList<byte[]> {
+
+ private static final long serialVersionUID = -3557219337406352735L;
+
+ void addElement(byte[] ba) {
+ add(ba);
+ }
+
+ byte[] getElement(int i) {
+ if (size() == 0)
+ return null;
+ else if (i < size())
+ return get(i);
+ else
+ return null;
+ }
+
+ int getSize() {
+ return size();
+ }
+
+ int getEntrySize(int i) {
+ return get(i).length;
+ }
+
+ byte[] getEntry(int i) {
+ return get(i);
+ }
+}
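
[Editor's note] A short same-package usage sketch of the new wrapper (the demo class is hypothetical): getEntrySize() and getEntry() let a caller size a copy buffer before fetching the bytes, which is convenient for a native (JNI) consumer.

    package org.trafodion.sql;

    public class ByteArrayListDemo {
        public static void main(String[] args) {
            ByteArrayList rowIds = new ByteArrayList();
            rowIds.addElement(new byte[] { 0x01, 0x02 });
            for (int i = 0; i < rowIds.getSize(); i++) {
                byte[] buf = new byte[rowIds.getEntrySize(i)];              // size the buffer first...
                System.arraycopy(rowIds.getEntry(i), 0, buf, 0, buf.length); // ...then copy the entry
            }
        }
    }
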