You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@trafodion.apache.org by se...@apache.org on 2018/08/23 22:24:42 UTC
[1/4] trafodion git commit: [TRAFODION-3110] Refactor LOB access to
use the new implementation of HdfsClient
Repository: trafodion
Updated Branches:
refs/heads/master 31cab907a -> 08e0ab09e
[TRAFODION-3110] Refactor LOB access to use the new implementation of HdfsClient
LOB: Extract lobtofile() to a hdfs file returns 8442 error
Implemented the missing code to support this functionality via the new
implementation.
Project: http://git-wip-us.apache.org/repos/asf/trafodion/repo
Commit: http://git-wip-us.apache.org/repos/asf/trafodion/commit/ba00576e
Tree: http://git-wip-us.apache.org/repos/asf/trafodion/tree/ba00576e
Diff: http://git-wip-us.apache.org/repos/asf/trafodion/diff/ba00576e
Branch: refs/heads/master
Commit: ba00576e1f47a9f0e0b3f344da8742aeecbc3ce4
Parents: 5e8bfc7
Author: selvaganesang <se...@esgyn.com>
Authored: Tue Aug 14 03:13:32 2018 +0000
Committer: selvaganesang <se...@esgyn.com>
Committed: Tue Aug 14 18:17:01 2018 +0000
----------------------------------------------------------------------
core/sql/executor/ExHbaseAccess.cpp | 2 +-
core/sql/executor/ExHdfsScan.cpp | 2 +-
core/sql/executor/HdfsClient_JNI.cpp | 56 ++++++++++++++++-
core/sql/executor/HdfsClient_JNI.h | 6 +-
core/sql/exp/ExpLOBaccess.cpp | 53 ++++++++++++++--
core/sql/optimizer/HDFSHook.cpp | 64 +++++++++-----------
core/sql/regress/executor/EXPECTED130 | 43 ++++++++-----
core/sql/regress/executor/TEST130 | 9 ++-
.../main/java/org/trafodion/sql/HDFSClient.java | 41 +++++++++++--
9 files changed, 205 insertions(+), 71 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/trafodion/blob/ba00576e/core/sql/executor/ExHbaseAccess.cpp
----------------------------------------------------------------------
diff --git a/core/sql/executor/ExHbaseAccess.cpp b/core/sql/executor/ExHbaseAccess.cpp
index 13146a6..461c5dc 100644
--- a/core/sql/executor/ExHbaseAccess.cpp
+++ b/core/sql/executor/ExHbaseAccess.cpp
@@ -3266,7 +3266,7 @@ void ExHbaseAccessTcb::handleException(NAHeap *heap,
if (!loggingFileCreated_) {
logFileHdfsClient_ = HdfsClient::newInstance((NAHeap *)getHeap(), NULL, hdfsClientRetcode);
if (hdfsClientRetcode == HDFS_CLIENT_OK)
- hdfsClientRetcode = logFileHdfsClient_->hdfsCreate(loggingFileName_, TRUE, FALSE);
+ hdfsClientRetcode = logFileHdfsClient_->hdfsCreate(loggingFileName_, TRUE, FALSE, FALSE);
if (hdfsClientRetcode == HDFS_CLIENT_OK)
loggingFileCreated_ = TRUE;
else
http://git-wip-us.apache.org/repos/asf/trafodion/blob/ba00576e/core/sql/executor/ExHdfsScan.cpp
----------------------------------------------------------------------
diff --git a/core/sql/executor/ExHdfsScan.cpp b/core/sql/executor/ExHdfsScan.cpp
index d4cf717..c49a6a0 100644
--- a/core/sql/executor/ExHdfsScan.cpp
+++ b/core/sql/executor/ExHdfsScan.cpp
@@ -2167,7 +2167,7 @@ void ExHdfsScanTcb::handleException(NAHeap *heap,
if (!loggingFileCreated_) {
logFileHdfsClient_ = HdfsClient::newInstance((NAHeap *)getHeap(), NULL, hdfsClientRetcode);
if (hdfsClientRetcode == HDFS_CLIENT_OK)
- hdfsClientRetcode = logFileHdfsClient_->hdfsCreate(loggingFileName_, TRUE, FALSE);
+ hdfsClientRetcode = logFileHdfsClient_->hdfsCreate(loggingFileName_, TRUE, FALSE, FALSE);
if (hdfsClientRetcode == HDFS_CLIENT_OK)
loggingFileCreated_ = TRUE;
else
http://git-wip-us.apache.org/repos/asf/trafodion/blob/ba00576e/core/sql/executor/HdfsClient_JNI.cpp
----------------------------------------------------------------------
diff --git a/core/sql/executor/HdfsClient_JNI.cpp b/core/sql/executor/HdfsClient_JNI.cpp
index 5ae2805..51bc30e 100644
--- a/core/sql/executor/HdfsClient_JNI.cpp
+++ b/core/sql/executor/HdfsClient_JNI.cpp
@@ -349,6 +349,8 @@ static const char* const hdfsClientErrorEnumStr[] =
,"Java exception in HdfsClient::hdfsExists()."
,"JNI NewStringUTF() in HdfsClient::hdfsDeletePath()."
,"Java exception in HdfsClient::hdfsDeletePath()."
+ ,"JNI NewStringUTF() in HdfsClient::hdfsDeleteFiles()."
+ ,"Java exception in HdfsClient::hdfsDeleteFiles()."
,"Error in HdfsClient::setHdfsFileInfo()."
,"Error in HdfsClient::hdfsListDirectory()."
,"Java exception in HdfsClient::hdfsListDirectory()."
@@ -453,7 +455,7 @@ HDFS_Client_RetCode HdfsClient::init()
JavaMethods_[JM_CTOR ].jm_name = "<init>";
JavaMethods_[JM_CTOR ].jm_signature = "()V";
JavaMethods_[JM_HDFS_CREATE ].jm_name = "hdfsCreate";
- JavaMethods_[JM_HDFS_CREATE ].jm_signature = "(Ljava/lang/String;ZZ)Z";
+ JavaMethods_[JM_HDFS_CREATE ].jm_signature = "(Ljava/lang/String;ZZZ)Z";
JavaMethods_[JM_HDFS_OPEN ].jm_name = "hdfsOpen";
JavaMethods_[JM_HDFS_OPEN ].jm_signature = "(Ljava/lang/String;Z)Z";
JavaMethods_[JM_HDFS_WRITE ].jm_name = "hdfsWrite";
@@ -472,6 +474,8 @@ HDFS_Client_RetCode HdfsClient::init()
JavaMethods_[JM_HDFS_EXISTS].jm_signature = "(Ljava/lang/String;)Z";
JavaMethods_[JM_HDFS_DELETE_PATH].jm_name = "hdfsDeletePath";
JavaMethods_[JM_HDFS_DELETE_PATH].jm_signature = "(Ljava/lang/String;)Z";
+ JavaMethods_[JM_HDFS_DELETE_FILES].jm_name = "hdfsDeleteFiles";
+ JavaMethods_[JM_HDFS_DELETE_FILES].jm_signature = "(Ljava/lang/String;Ljava/lang/String;)Z";
JavaMethods_[JM_HDFS_LIST_DIRECTORY].jm_name = "hdfsListDirectory";
JavaMethods_[JM_HDFS_LIST_DIRECTORY].jm_signature = "(Ljava/lang/String;J)I";
JavaMethods_[JM_HIVE_TBL_MAX_MODIFICATION_TS].jm_name = "getHiveTableMaxModificationTs";
@@ -514,7 +518,7 @@ void HdfsClient::setPath(const char *path)
strcpy(path_, path);
}
-HDFS_Client_RetCode HdfsClient::hdfsCreate(const char* path, NABoolean overwrite, NABoolean compress)
+HDFS_Client_RetCode HdfsClient::hdfsCreate(const char* path, NABoolean overwrite, NABoolean append, NABoolean compress)
{
QRLogger::log(CAT_SQL_HDFS, LL_DEBUG, "HdfsClient::hdfsCreate(%s) called.", path);
@@ -530,12 +534,13 @@ HDFS_Client_RetCode HdfsClient::hdfsCreate(const char* path, NABoolean overwrite
jboolean j_compress = compress;
jboolean j_overwrite = overwrite;
+ jboolean j_append = append;
if (hdfsStats_ != NULL)
hdfsStats_->getHdfsTimer().start();
tsRecentJMFromJNI = JavaMethods_[JM_HDFS_CREATE].jm_full_name;
- jboolean jresult = jenv_->CallBooleanMethod(javaObj_, JavaMethods_[JM_HDFS_CREATE].methodID, js_path, j_overwrite, j_compress);
+ jboolean jresult = jenv_->CallBooleanMethod(javaObj_, JavaMethods_[JM_HDFS_CREATE].methodID, js_path, j_overwrite, j_append, j_compress);
if (hdfsStats_ != NULL) {
hdfsStats_->incMaxHdfsIOTime(hdfsStats_->getHdfsTimer().stop());
hdfsStats_->incHdfsCalls();
@@ -964,6 +969,51 @@ HDFS_Client_RetCode HdfsClient::hdfsDeletePath( const NAString& delPath)
return HDFS_CLIENT_OK;
}
+HDFS_Client_RetCode HdfsClient::hdfsDeleteFiles(const NAString& dirPath, const char *startingFileName)
+{
+ QRLogger::log(CAT_SQL_HDFS, LL_DEBUG, "HdfsClient::hdfsDeleteFiles(%s, %s) called.",
+ dirPath.data(), startingFileName);
+ if (initJNIEnv() != JOI_OK)
+ return HDFS_CLIENT_ERROR_HDFS_DELETE_FILES_PARAM;
+ if (getInstance() == NULL)
+ return HDFS_CLIENT_ERROR_HDFS_DELETE_FILES_PARAM;
+
+ jstring js_dirPath = jenv_->NewStringUTF(dirPath.data());
+ if (js_dirPath == NULL) {
+ GetCliGlobals()->setJniErrorStr(getErrorText(HDFS_CLIENT_ERROR_HDFS_DELETE_FILES_PARAM));
+ jenv_->PopLocalFrame(NULL);
+ return HDFS_CLIENT_ERROR_HDFS_DELETE_FILES_PARAM;
+ }
+
+ jstring js_startingFileName = jenv_->NewStringUTF(startingFileName);
+ if (js_startingFileName == NULL) {
+ GetCliGlobals()->setJniErrorStr(getErrorText(HDFS_CLIENT_ERROR_HDFS_DELETE_FILES_PARAM));
+ jenv_->PopLocalFrame(NULL);
+ return HDFS_CLIENT_ERROR_HDFS_DELETE_FILES_PARAM;
+ }
+
+ tsRecentJMFromJNI = JavaMethods_[JM_HDFS_DELETE_FILES].jm_full_name;
+ jboolean jresult = jenv_->CallStaticBooleanMethod(javaClass_, JavaMethods_[JM_HDFS_DELETE_FILES].methodID,
+ js_dirPath, js_startingFileName);
+
+ if (jenv_->ExceptionCheck())
+ {
+ getExceptionDetails(__FILE__, __LINE__, "HdfsClient::hdfsDeleteFiles()");
+ jenv_->PopLocalFrame(NULL);
+ return HDFS_CLIENT_ERROR_HDFS_DELETE_FILES_EXCEPTION;
+ }
+
+ if (jresult == false)
+ {
+ logError(CAT_SQL_HDFS, "HdfsClient::hdfsDeleteFiles()", getLastError());
+ jenv_->PopLocalFrame(NULL);
+ return HDFS_CLIENT_ERROR_HDFS_DELETE_FILES_EXCEPTION;
+ }
+
+ jenv_->PopLocalFrame(NULL);
+ return HDFS_CLIENT_OK;
+}
+
HDFS_Client_RetCode HdfsClient::hdfsListDirectory(const char *pathStr, HDFS_FileInfo **hdfsFileInfo, int *numFiles)
{
QRLogger::log(CAT_SQL_HDFS, LL_DEBUG, "HdfsClient::hdfsListDirectory(%s) called.", pathStr);
http://git-wip-us.apache.org/repos/asf/trafodion/blob/ba00576e/core/sql/executor/HdfsClient_JNI.h
----------------------------------------------------------------------
diff --git a/core/sql/executor/HdfsClient_JNI.h b/core/sql/executor/HdfsClient_JNI.h
index c3e6518..0791b25 100644
--- a/core/sql/executor/HdfsClient_JNI.h
+++ b/core/sql/executor/HdfsClient_JNI.h
@@ -142,6 +142,8 @@ typedef enum {
,HDFS_CLIENT_ERROR_HDFS_EXISTS_FILE_EXISTS
,HDFS_CLIENT_ERROR_HDFS_DELETE_PATH_PARAM
,HDFS_CLIENT_ERROR_HDFS_DELETE_PATH_EXCEPTION
+ ,HDFS_CLIENT_ERROR_HDFS_DELETE_FILES_PARAM
+ ,HDFS_CLIENT_ERROR_HDFS_DELETE_FILES_EXCEPTION
,HDFS_CLIENT_ERROR_SET_HDFSFILEINFO
,HDFS_CLIENT_ERROR_HDFS_LIST_DIR_PARAM
,HDFS_CLIENT_ERROR_HDFS_LIST_DIR_EXCEPTION
@@ -184,7 +186,7 @@ public:
void setHdfsStats(ExHdfsScanStats *hdfsStats)
{ hdfsStats_ = hdfsStats; }
HDFS_Client_RetCode init();
- HDFS_Client_RetCode hdfsCreate(const char* path, NABoolean overwrite, NABoolean compress);
+ HDFS_Client_RetCode hdfsCreate(const char* path, NABoolean overwrite, NABoolean append, NABoolean compress);
HDFS_Client_RetCode hdfsOpen(const char* path, NABoolean compress);
Int64 hdfsSize(HDFS_Client_RetCode &hdfsClientRetcode);
Int32 hdfsWrite(const char* data, Int64 size, HDFS_Client_RetCode &hdfsClientRetcode, int maxChunkSize = 0);
@@ -200,6 +202,7 @@ public:
static HDFS_Client_RetCode hdfsCleanUnloadPath(const NAString& uldPath );
static HDFS_Client_RetCode hdfsExists(const NAString& uldPath, NABoolean & exists );
static HDFS_Client_RetCode hdfsDeletePath(const NAString& delPath);
+ static HDFS_Client_RetCode hdfsDeleteFiles(const NAString& dirPath, const char *startingFileName);
static HDFS_Client_RetCode getHiveTableMaxModificationTs(Int64& maxModificationTs, const char * tableDirPaths, int levelDeep);
// Get the hdfs URL.
// buffer is the buffer pre-allocated to hold the result
@@ -221,6 +224,7 @@ private:
JM_HDFS_CLEAN_UNLOAD_PATH,
JM_HDFS_EXISTS,
JM_HDFS_DELETE_PATH,
+ JM_HDFS_DELETE_FILES,
JM_HDFS_LIST_DIRECTORY,
JM_HIVE_TBL_MAX_MODIFICATION_TS,
JM_GET_FS_DEFAULT_NAME,
http://git-wip-us.apache.org/repos/asf/trafodion/blob/ba00576e/core/sql/exp/ExpLOBaccess.cpp
----------------------------------------------------------------------
diff --git a/core/sql/exp/ExpLOBaccess.cpp b/core/sql/exp/ExpLOBaccess.cpp
index bb9b580..e3c086a 100644
--- a/core/sql/exp/ExpLOBaccess.cpp
+++ b/core/sql/exp/ExpLOBaccess.cpp
@@ -183,7 +183,7 @@ Ex_Lob_Error ExLob::initialize(const char *lobFile, Ex_Lob_Mode mode,
if (! useLibHdfs_) {
if (mode == EX_LOB_CREATE) {
- hdfsClientRetcode = hdfsClient_->hdfsCreate(lobDataFile_.data(), FALSE, FALSE);
+ hdfsClientRetcode = hdfsClient_->hdfsCreate(lobDataFile_.data(), FALSE, TRUE, FALSE);
if (hdfsClientRetcode != HDFS_CLIENT_OK)
return LOB_DATA_FILE_CREATE_ERROR;
}
@@ -1783,7 +1783,7 @@ Ex_Lob_Error ExLob::allocateDesc(ULng32 size, Int64 &descNum, Int64 &dataOffset,
if (! useLibHdfs_) {
if (size == 0) {
// Delete and Create the Hdfs file by passing overwrite to TRUE
- hdfsClientRetcode = hdfsClient_->hdfsCreate(lobDataFile_.data(), TRUE, FALSE);
+ hdfsClientRetcode = hdfsClient_->hdfsCreate(lobDataFile_.data(), TRUE, FALSE, FALSE);
if (hdfsClientRetcode != HDFS_CLIENT_OK)
return LOB_DATA_FILE_WRITE_ERROR;
else {
@@ -1955,7 +1955,7 @@ Ex_Lob_Error ExLob::compactLobDataFile(ExLobInMemoryDescChunksEntry *dcArray,Int
return LOB_DATA_FILE_OPEN_ERROR;
}
- hdfsClientRetcode = dstHdfsClient->hdfsCreate(tmpLobDataFile, TRUE, FALSE);
+ hdfsClientRetcode = dstHdfsClient->hdfsCreate(tmpLobDataFile, TRUE, FALSE, FALSE);
if (hdfsClientRetcode != HDFS_CLIENT_OK)
{
// extract substring small enough to fit in logBuf
@@ -2455,16 +2455,59 @@ Ex_Lob_Error ExLob::readDataToHdfsFile(char *tgtFileName, Int64 offset, Int64 s
Int64 srcLen = size;
Int64 srcOffset = offset;
Int64 tgtOffset = 0;
- char *lobData = 0;
+ char *lobData = NULL;
Int64 chunkSize = 0;
hdfsFile fdTgtFile;
char logBuf[4096];
+ writeOperLen = 0;
+ HdfsClient *tgtHdfsClient;
+ HDFS_Client_RetCode hdfsClientRetcode;
+ NABoolean overwrite = TRUE;
+ NABoolean append = FALSE;
+ Int64 remainLen = size;
+ Int64 pos = offset;
+ Int64 readLen;
lobDebugInfo("In ExLob::readDataToHdfsFile",0,__LINE__,lobTrace_);
// open and write to the target file
int openFlags = O_WRONLY;
+ if (! useLibHdfs_) {
+ if (((LobTgtFileFlags)fileflags == Lob_Error_Or_Create) ||
+ ((LobTgtFileFlags)fileflags == Lob_Truncate_Or_Error))
+ overwrite = FALSE;
+ if ((LobTgtFileFlags)fileflags == Lob_Append_Or_Error)
+ append = TRUE;
+ tgtHdfsClient = HdfsClient::newInstance(getLobGlobalHeap(), NULL, hdfsClientRetcode);
+ ex_assert(hdfsClientRetcode == HDFS_CLIENT_OK, "Internal error: HdfsClient::newInstance returned an error");
+ if (tgtHdfsClient->hdfsCreate(tgtFileName, overwrite, append, FALSE) != HDFS_CLIENT_OK)
+ return LOB_TARGET_FILE_OPEN_ERROR;
+ Int32 bytesRead;
+ Int32 bytesWritten;
+ while (remainLen > 0)
+ {
+ if (remainLen > lobMaxChunkMemLen)
+ readLen = lobMaxChunkMemLen;
+ else
+ readLen = remainLen;
+ if (lobData == NULL)
+ lobData = new (lobGlobalHeap_) char[readLen];
+ bytesRead = hdfsClient_->hdfsRead(pos, lobData, readLen, hdfsClientRetcode);
+ if (hdfsClientRetcode == HDFS_CLIENT_OK)
+ bytesWritten = tgtHdfsClient->hdfsWrite(lobData, bytesRead, hdfsClientRetcode, lobMaxChunkMemLen);
+ if (hdfsClientRetcode == HDFS_CLIENT_OK) {
+ pos += bytesRead;
+ remainLen -= bytesRead;
+ writeOperLen += bytesWritten;
+ } else {
+ NADELETEBASIC(lobData, lobGlobalHeap_);
+ HdfsClient::deleteInstance(tgtHdfsClient);
+ return LOB_DATA_READ_ERROR;
+ }
+ }
+ HdfsClient::deleteInstance(tgtHdfsClient);
+ return LOB_OPER_OK;
+ }
if ((LobTgtFileFlags)fileflags == Lob_Append_Or_Error )
openFlags |= O_APPEND;
-
//hdfsFile fdTgtFile = hdfsOpenFile(fs_,tgtFileName, openFlags, 0,0,0);
if (hdfsExists(fs_,tgtFileName) == 0)
{
http://git-wip-us.apache.org/repos/asf/trafodion/blob/ba00576e/core/sql/optimizer/HDFSHook.cpp
----------------------------------------------------------------------
diff --git a/core/sql/optimizer/HDFSHook.cpp b/core/sql/optimizer/HDFSHook.cpp
index 4082679..ed9b2fd 100644
--- a/core/sql/optimizer/HDFSHook.cpp
+++ b/core/sql/optimizer/HDFSHook.cpp
@@ -316,7 +316,6 @@ void HHDFSFileStats::populate(hdfsFS fs, hdfsFileInfo *fileInfo,
// instances have and we have exhausted all data content in the block.
// We will keep reading if the current block does not contain
// any instance of the record separator.
- //
hdfsFile file =
hdfsOpenFile(fs, fileInfo->mName,
O_RDONLY,
@@ -324,7 +323,6 @@ void HHDFSFileStats::populate(hdfsFS fs, hdfsFileInfo *fileInfo,
0, // replication, take the default size
fileInfo->mBlockSize // blocksize
);
-
if ( file != NULL ) {
tOffset offset = 0;
tSize bufLen = sampleBufferSize;
@@ -332,9 +330,9 @@ void HHDFSFileStats::populate(hdfsFS fs, hdfsFileInfo *fileInfo,
buffer[bufLen] = 0; // extra null at the end to protect strchr()
// to run over the buffer.
-
+
NABoolean sampleDone = FALSE;
-
+
Int32 totalSamples = 10;
Int32 totalLen = 0;
Int32 recordPrefixLen = 0;
@@ -342,13 +340,12 @@ void HHDFSFileStats::populate(hdfsFS fs, hdfsFileInfo *fileInfo,
while (!sampleDone) {
tSize szRead = hdfsPread(fs, file, offset, buffer, bufLen);
-
if ( szRead <= 0 )
break;
CMPASSERT(szRead <= bufLen);
-
- char* pos = NULL;
+
+ char* pos = NULL;
//if (isSequenceFile && offset==0 && memcmp(buffer, "SEQ6", 4) == 0)
// isSequenceFile_ = TRUE;
@@ -358,47 +355,41 @@ void HHDFSFileStats::populate(hdfsFS fs, hdfsFileInfo *fileInfo,
for (Int32 i=0; i<totalSamples; i++ ) {
- if ( (pos=strchr(start, recordTerminator)) ) {
+ if ( (pos=strchr(start, recordTerminator)) ) {
- totalLen += pos - start + 1 + recordPrefixLen;
- samples++;
+ totalLen += pos - start + 1 + recordPrefixLen;
+ samples++;
- start = pos+1;
+ start = pos+1;
- if ( start > buffer + szRead ) {
- sampleDone = TRUE;
- break;
- }
+ if ( start > buffer + szRead ) {
+ sampleDone = TRUE;
+ break;
+ }
- recordPrefixLen = 0;
+ recordPrefixLen = 0;
- } else {
- recordPrefixLen += szRead - (start - buffer + 1);
- break;
- }
- }
-
-
- if ( samples > 0 )
+ } else {
+ recordPrefixLen += szRead - (start - buffer + 1);
+ break;
+ }
+ }
+ if ( samples > 0 )
break;
- else
+ else
offset += szRead;
- }
-
- NADELETEBASIC(buffer, heap_);
-
- if ( samples > 0 ) {
- sampledBytes_ += totalLen;
- sampledRows_ += samples;
- }
-
- hdfsCloseFile(fs, file);
+ }
+ NADELETEBASIC(buffer, heap_);
+ if ( samples > 0 ) {
+ sampledBytes_ += totalLen;
+ sampledRows_ += samples;
+ }
+ hdfsCloseFile(fs, file);
} else {
diags.recordError(NAString("Unable to open HDFS file ") + fileInfo->mName,
"HHDFSFileStats::populate");
}
}
-
if (blockSize_)
{
numBlocks_ = totalSize_ / blockSize_;
@@ -410,7 +401,6 @@ void HHDFSFileStats::populate(hdfsFS fs, hdfsFileInfo *fileInfo,
diags.recordError(NAString("Could not determine block size of HDFS file ") + fileInfo->mName,
"HHDFSFileStats::populate");
}
-
if ( totalSize_ > 0 && diags.isSuccess())
{
http://git-wip-us.apache.org/repos/asf/trafodion/blob/ba00576e/core/sql/regress/executor/EXPECTED130
----------------------------------------------------------------------
diff --git a/core/sql/regress/executor/EXPECTED130 b/core/sql/regress/executor/EXPECTED130
index 7ad8bbf..506dda2 100644
--- a/core/sql/regress/executor/EXPECTED130
+++ b/core/sql/regress/executor/EXPECTED130
@@ -63,9 +63,9 @@ C1
C1 C2
----------- ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
----------------------------------------
- 1 LOBH0000000200010492540128525785049519492540128543818753118212384275392041945020"TRAFODION"."LOB130"
- 2 LOBH0000000200010492540128525785049519492540128544675708418212384275402066673020"TRAFODION"."LOB130"
- 3 LOBH0000000200010492540128525785049519492540128545449436418212384275409949492020"TRAFODION"."LOB130"
+ 1 LOBH0000000200010189730973312766405019189730973325743020118212400522666936189020"TRAFODION"."LOB130"
+ 2 LOBH0000000200010189730973312766405019189730973325822915418212400522668231546020"TRAFODION"."LOB130"
+ 3 LOBH0000000200010189730973312766405019189730973325854670118212400522668551834020"TRAFODION"."LOB130"
--- 3 row(s) selected.
>>
@@ -668,7 +668,7 @@ And the dish ran away with the fork !
>>sh grep "^LOBH" TMP130 | sed "s/^/extract lobtofile(LOB '/g" | sed "s/$/' , 'tlob130_txt1.txt');/g" >> t130_extract_command;
>>
>>obey t130_extract_command;
->>extract lobtofile(LOB 'LOBH0000000200010492540128525796887519492540128660413814518212384276559256768020"TRAFODION"."LOB130"
' , 'tlob130_txt1.txt');
+>>extract lobtofile(LOB 'LOBH0000000200010189730973312770821219189730973368818826218212400523097801680020"TRAFODION"."LOB130"
' , 'tlob130_txt1.txt');
Success. Targetfile :tlob130_txt1.txt Length : 19
--- SQL operation complete.
@@ -684,7 +684,7 @@ Success. Targetfile :tlob130_txt1.txt Length : 19
>>sh rm t130_extract_command;
>>sh grep "^LOBH" TMP130 | sed "s/^/extract lobtofile(LOB '/g" | sed "s/$/' , 'tlob130_deep.jpg');/g" >> t130_extract_command;
>>obey t130_extract_command;
->>extract lobtofile(LOB 'LOBH0000000200010492540128525797109219492540128672143163118212384276675380782020"TRAFODION"."LOB130"
' , 'tlob130_deep.jpg');
+>>extract lobtofile(LOB 'LOBH0000000200010189730973312770991919189730973372704796118212400523136699585020"TRAFODION"."LOB130"
' , 'tlob130_deep.jpg');
Success. Targetfile :tlob130_deep.jpg Length : 159018
--- SQL operation complete.
@@ -700,7 +700,7 @@ Success. Targetfile :tlob130_deep.jpg Length : 159018
>>sh grep "^LOBH" TMP130 | sed "s/^/extract lobtofile(LOB '/g" | sed "s/$/' , 'tlob130_anoush.jpg');/g" >> t130_extract_command;
>>
>>obey t130_extract_command;
->>extract lobtofile(LOB 'LOBH0000000200010492540128525797109219492540128672143163118212384276675380782020"TRAFODION"."LOB130"
' , 'tlob130_anoush.jpg');
+>>extract lobtofile(LOB 'LOBH0000000200010189730973312770991919189730973372704796118212400523136699585020"TRAFODION"."LOB130"
' , 'tlob130_anoush.jpg');
Success. Targetfile :tlob130_anoush.jpg Length : 230150
--- SQL operation complete.
@@ -819,9 +819,12 @@ And the dish ran away with the fork !
>>log;
>>sh rm t130_extract_command;
>>
->>sh grep "^LOBH" TMP130 | sed "s/^/extract lobtofile(LOB '/g" | sed "s/$/' , 'hdfs:\/\/\/user/trafodion/lobs\/tlob130_txt2.txt');/g" >> t130_extract_command;
->>
+>>sh grep "^LOBH" TMP130 | sed "s/^/extract lobtofile(LOB '/g" | sed "s/$/' , 'hdfs:\/\/\/user\/trafodion\/lobs\/tlob130_txt2.txt');/g" >> t130_extract_command;
>>obey t130_extract_command;
+>>extract lobtofile(LOB 'LOBH0000000200010189730973312772090419189730973379454400718212400523204167938020"TRAFODION"."LOB130"
' , 'hdfs:///user/trafodion/lobs/tlob130_txt2.txt');
+Success. Targetfile :hdfs:///user/trafodion/lobs/tlob130_txt2.txt Length : 19
+
+--- SQL operation complete.
>>
>>--binary input/update
>>
@@ -832,8 +835,12 @@ And the dish ran away with the fork !
>>
>>log;
>>sh rm t130_extract_command;
->>sh grep "^LOBH" TMP130 | sed "s/^/extract lobtofile(LOB '/g" | sed "s/$/' , 'hdfs:\/\/\/user/trafodion/lobs\/tlob130_deep.jpg');/g" >> t130_extract_command;
+>>sh grep "^LOBH" TMP130 | sed "s/^/extract lobtofile(LOB '/g" | sed "s/$/' , 'hdfs:\/\/\/user\/trafodion\/lobs\/tlob130_deep.jpg');/g" >> t130_extract_command;
>>obey t130_extract_command;
+>>extract lobtofile(LOB 'LOBH0000000200010189730973312772260419189730973382656270118212400523236180902020"TRAFODION"."LOB130"
' , 'hdfs:///user/trafodion/lobs/tlob130_deep.jpg');
+Success. Targetfile :hdfs:///user/trafodion/lobs/tlob130_deep.jpg Length : 159018
+
+--- SQL operation complete.
>>
>>update tlob130bin2 set c2=filetolob('anoush.jpg') ;
@@ -843,9 +850,13 @@ And the dish ran away with the fork !
>>
>>log;
>>sh rm t130_extract_command;
->>sh grep "^LOBH" TMP130 | sed "s/^/extract lobtofile(LOB '/g" | sed "s/$/' , 'hdfs:\/\/\/user/trafodion/lobs\/tlob130_anoush.jpg');/g" >> t130_extract_command;
+>>sh grep "^LOBH" TMP130 | sed "s/^/extract lobtofile(LOB '/g" | sed "s/$/' , 'hdfs:\/\/\/user\/trafodion\/lobs\/tlob130_anoush.jpg');/g" >> t130_extract_command;
>>
>>obey t130_extract_command;
+>>extract lobtofile(LOB 'LOBH0000000200010189730973312770991919189730973372704796118212400523136699585020"TRAFODION"."LOB130"
' , 'hdfs:///user/trafodion/lobs/tlob130_anoush.jpg');
+Success. Targetfile :hdfs:///user/trafodion/lobs/tlob130_anoush.jpg Length : 230150
+
+--- SQL operation complete.
>>
>>
>>sh clitestdriver 2 < TEST130_argfile 2>&1 | tee -a LOG130;
@@ -942,8 +953,12 @@ Success. Targetfile :tlob130_deep2.jpg Length : 159018
>>
>>log;
>>sh rm t130_extract_command;
->>sh grep "^LOBH" TMP130 | sed "s/^/extract lobtofile(LOB '/g" | sed "s/$/' , 'hdfs:\/\/\/user/trafodion/lobs\/tlob130_anoush2.jpg');/g" >> t130_extract_command;
+>>sh grep "^LOBH" TMP130 | sed "s/^/extract lobtofile(LOB '/g" | sed "s/$/' , 'hdfs:\/\/\/user\/trafodion\/lobs\/tlob130_anoush2.jpg');/g" >> t130_extract_command;
>>obey t130_extract_command;
+>>extract lobtofile(LOB 'LOBH0000000200030189730973312773623919189730973392250459518212400523332159942020"TRAFODION"."LOB130"
' , 'hdfs:///user/trafodion/lobs/tlob130_anoush2.jpg');
+Success. Targetfile :hdfs:///user/trafodion/lobs/tlob130_anoush2.jpg Length : 230150
+
+--- SQL operation complete.
>>
>>-- combination blob and clob columns
>>create table tlob130bt (c1 int not null, c2 int, c3 blob, c4 clob, primary key (c1));
@@ -1202,14 +1217,14 @@ TRAFODION
>>sh rm t130_extract_command;
>>sh grep "^LOBH" TMP130 | sed "s/^/extract name(LOB '/g" | sed "s/$/');/g" >> t130_extract_command;
>>obey t130_extract_command;
->>extract name(LOB 'LOBH0000000200020492540128525808308619492540128781847368018212384277774048076020"TRAFODION"."LOB130"
');
- LOB filename : /user/trafodion/lobs/LOBP_04925401285258083086_0002
+>>extract name(LOB 'LOBH0000000200020189730973312776402419189730973430096815918212400523710965517020"TRAFODION"."LOB130"
');
+ LOB filename : /user/trafodion/lobs/LOBP_01897309733127764024_0002
--- SQL operation complete.
>>sh rm t130_extract_command;
>>sh grep "^LOBH" TMP130 | sed "s/^/extract offset(LOB '/g" | sed "s/$/');/g" >> t130_extract_command;
>>obey t130_extract_command;
->>extract offset(LOB 'LOBH0000000200020492540128525808308619492540128781847368018212384277774048076020"TRAFODION"."LOB130"
');
+>>extract offset(LOB 'LOBH0000000200020189730973312776402419189730973430096815918212400523710965517020"TRAFODION"."LOB130"
');
LOB Offset : 43
--- SQL operation complete.
http://git-wip-us.apache.org/repos/asf/trafodion/blob/ba00576e/core/sql/regress/executor/TEST130
----------------------------------------------------------------------
diff --git a/core/sql/regress/executor/TEST130 b/core/sql/regress/executor/TEST130
index 72fea33..5029c1d 100755
--- a/core/sql/regress/executor/TEST130
+++ b/core/sql/regress/executor/TEST130
@@ -400,8 +400,7 @@ log;
log LOG130;
sh rm t130_extract_command;
-sh grep "^LOBH" TMP130 | sed "s/^/extract lobtofile(LOB '/g" | sed "s/$/' , 'hdfs:\/\/\/user/trafodion/lobs\/tlob130_txt2.txt');/g" >> t130_extract_command;
-
+sh grep "^LOBH" TMP130 | sed "s/^/extract lobtofile(LOB '/g" | sed "s/$/' , 'hdfs:\/\/\/user\/trafodion\/lobs\/tlob130_txt2.txt');/g" >> t130_extract_command;
obey t130_extract_command;
--binary input/update
@@ -416,7 +415,7 @@ log;
log LOG130;
sh rm t130_extract_command;
-sh grep "^LOBH" TMP130 | sed "s/^/extract lobtofile(LOB '/g" | sed "s/$/' , 'hdfs:\/\/\/user/trafodion/lobs\/tlob130_deep.jpg');/g" >> t130_extract_command;
+sh grep "^LOBH" TMP130 | sed "s/^/extract lobtofile(LOB '/g" | sed "s/$/' , 'hdfs:\/\/\/user\/trafodion\/lobs\/tlob130_deep.jpg');/g" >> t130_extract_command;
obey t130_extract_command;
update tlob130bin2 set c2=filetolob('anoush.jpg') ;
@@ -430,7 +429,7 @@ log;
log LOG130;
sh rm t130_extract_command;
-sh grep "^LOBH" TMP130 | sed "s/^/extract lobtofile(LOB '/g" | sed "s/$/' , 'hdfs:\/\/\/user/trafodion/lobs\/tlob130_anoush.jpg');/g" >> t130_extract_command;
+sh grep "^LOBH" TMP130 | sed "s/^/extract lobtofile(LOB '/g" | sed "s/$/' , 'hdfs:\/\/\/user\/trafodion\/lobs\/tlob130_anoush.jpg');/g" >> t130_extract_command;
obey t130_extract_command;
@@ -481,7 +480,7 @@ log;
log LOG130;
sh rm t130_extract_command;
-sh grep "^LOBH" TMP130 | sed "s/^/extract lobtofile(LOB '/g" | sed "s/$/' , 'hdfs:\/\/\/user/trafodion/lobs\/tlob130_anoush2.jpg');/g" >> t130_extract_command;
+sh grep "^LOBH" TMP130 | sed "s/^/extract lobtofile(LOB '/g" | sed "s/$/' , 'hdfs:\/\/\/user\/trafodion\/lobs\/tlob130_anoush2.jpg');/g" >> t130_extract_command;
obey t130_extract_command;
-- combination blob and clob columns
http://git-wip-us.apache.org/repos/asf/trafodion/blob/ba00576e/core/sql/src/main/java/org/trafodion/sql/HDFSClient.java
----------------------------------------------------------------------
diff --git a/core/sql/src/main/java/org/trafodion/sql/HDFSClient.java b/core/sql/src/main/java/org/trafodion/sql/HDFSClient.java
index 1995851..3364543 100644
--- a/core/sql/src/main/java/org/trafodion/sql/HDFSClient.java
+++ b/core/sql/src/main/java/org/trafodion/sql/HDFSClient.java
@@ -406,8 +406,9 @@ public class HDFSClient
return isEOF_;
}
- boolean hdfsCreate(String fname , boolean overwrite, boolean compress) throws IOException
+ boolean hdfsCreate(String fname , boolean overwrite, boolean append, boolean compress) throws IOException
{
+ boolean fileExists = false;
filename_ = fname;
if (logger_.isDebugEnabled())
logger_.debug("HDFSClient.hdfsCreate() - started" );
@@ -423,12 +424,16 @@ public class HDFSClient
{
if (overwrite)
fs_.delete(filepath_);
- else
+ else if (!append)
throw new IOException(filepath_ + " already exists");
+ else
+ fileExists = true;
}
FSDataOutputStream fsOut = null;
- fsOut = fs_.create(filepath_);
- fsOut.close();
+ if (!fileExists) {
+ fsOut = fs_.create(filepath_);
+ fsOut.close();
+ }
return true;
}
@@ -692,6 +697,34 @@ public class HDFSClient
return true;
}
+ public static boolean hdfsDeleteFiles(String dirPathStr, String startingFileName) throws IOException
+ {
+ if (logger_.isDebugEnabled())
+ logger_.debug("HDFSClient.hdfsDeleteFiles(" + dirPathStr + ", " + startingFileName +")");
+
+ Path dirPath = new Path(dirPathStr );
+ FileSystem fs = FileSystem.get(dirPath.toUri(), config_);
+ FileStatus[] fileStatus;
+ if (fs.isDirectory(dirPath))
+ fileStatus = fs.listStatus(dirPath);
+ else
+ throw new IOException("The path " + dirPath + " is not a directory");
+ FileStatus aFileStatus;
+ if (fileStatus != null) {
+ for (int i = 0; i < fileStatus.length; i++)
+ {
+ aFileStatus = fileStatus[i];
+ if (! aFileStatus.isDirectory()) {
+ String pathName = aFileStatus.getPath().toString();
+ String filenameParts[] = pathName.split(dirPathStr);
+ if (filenameParts.length == 2 && filenameParts[1].startsWith(startingFileName))
+ fs.delete(aFileStatus.getPath());
+ }
+ }
+ }
+ return true;
+ }
+
public int hdfsListDirectory(String pathStr, long hdfsClientJniObj) throws IOException
{
if (logger_.isDebugEnabled())
[2/4] trafodion git commit: Merge branch 'master' of
github.com:apache/trafodion into trafodion-3110
Posted by se...@apache.org.
Merge branch 'master' of github.com:apache/trafodion into trafodion-3110
Project: http://git-wip-us.apache.org/repos/asf/trafodion/repo
Commit: http://git-wip-us.apache.org/repos/asf/trafodion/commit/39e8791b
Tree: http://git-wip-us.apache.org/repos/asf/trafodion/tree/39e8791b
Diff: http://git-wip-us.apache.org/repos/asf/trafodion/diff/39e8791b
Branch: refs/heads/master
Commit: 39e8791beee87ada7f672c7f234bd1da59dbbbfd
Parents: ba00576 bd3facc
Author: selvaganesang <se...@esgyn.com>
Authored: Wed Aug 22 18:25:31 2018 +0000
Committer: selvaganesang <se...@esgyn.com>
Committed: Wed Aug 22 18:25:31 2018 +0000
----------------------------------------------------------------------
core/sqf/monitor/linux/cluster.cxx | 4 +-
core/sqf/monitor/linux/redirector.cxx | 2 +-
core/sqf/src/seabed/src/sockstream.cpp | 1 +
core/sqf/src/seabed/src/stream.cpp | 2 +
core/sql/executor/ExExeUtilCli.cpp | 67 +++++++----
core/sql/executor/ex_tuple_flow.cpp | 4 +
core/sql/generator/Generator.cpp | 6 +-
core/sql/generator/Generator.h | 1 +
core/sql/optimizer/BindRI.cpp | 49 ++------
core/sql/regress/hive/DIFF006.KNOWN | 114 +++++++++++++++++++
core/sql/regress/hive/TEST006 | 6 +
core/sql/regress/tools/regress-filter-linux | 2 +-
core/sql/sqlcat/TrafDDLdesc.h | 12 +-
.../main/java/org/trafodion/sql/HDFSClient.java | 45 +++++---
.../_chapters/sql_language_elements.adoc | 109 ++++++++++++++++--
15 files changed, 332 insertions(+), 92 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/trafodion/blob/39e8791b/core/sql/src/main/java/org/trafodion/sql/HDFSClient.java
----------------------------------------------------------------------
[3/4] trafodion git commit: [TRAFODION-3110] LOB: Extract lobtofile()
to a hdfs file returns 8442 error
Posted by se...@apache.org.
[TRAFODION-3110] LOB: Extract lobtofile() to a hdfs file returns 8442 error
Fixes as per the review comments for the commit ba00576e1f47a9f0e0b3f344da8742aeecbc3ce4
Also took care of truncate option.
Project: http://git-wip-us.apache.org/repos/asf/trafodion/repo
Commit: http://git-wip-us.apache.org/repos/asf/trafodion/commit/82ef6c1c
Tree: http://git-wip-us.apache.org/repos/asf/trafodion/tree/82ef6c1c
Diff: http://git-wip-us.apache.org/repos/asf/trafodion/diff/82ef6c1c
Branch: refs/heads/master
Commit: 82ef6c1c9487404b5a2e95993e30ee7fefd1fcdc
Parents: 39e8791
Author: selvaganesang <se...@esgyn.com>
Authored: Thu Aug 23 17:49:03 2018 +0000
Committer: selvaganesang <se...@esgyn.com>
Committed: Thu Aug 23 17:49:03 2018 +0000
----------------------------------------------------------------------
core/sql/exp/ExpLOBaccess.cpp | 122 +++++++++++++++++++++----------------
1 file changed, 69 insertions(+), 53 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/trafodion/blob/82ef6c1c/core/sql/exp/ExpLOBaccess.cpp
----------------------------------------------------------------------
diff --git a/core/sql/exp/ExpLOBaccess.cpp b/core/sql/exp/ExpLOBaccess.cpp
index e3c086a..ab592f4 100644
--- a/core/sql/exp/ExpLOBaccess.cpp
+++ b/core/sql/exp/ExpLOBaccess.cpp
@@ -2462,7 +2462,7 @@ Ex_Lob_Error ExLob::readDataToHdfsFile(char *tgtFileName, Int64 offset, Int64 s
writeOperLen = 0;
HdfsClient *tgtHdfsClient;
HDFS_Client_RetCode hdfsClientRetcode;
- NABoolean overwrite = TRUE;
+ NABoolean overwrite = FALSE;
NABoolean append = FALSE;
Int64 remainLen = size;
Int64 pos = offset;
@@ -2471,41 +2471,17 @@ Ex_Lob_Error ExLob::readDataToHdfsFile(char *tgtFileName, Int64 offset, Int64 s
// open and write to the target file
int openFlags = O_WRONLY;
if (! useLibHdfs_) {
- if (((LobTgtFileFlags)fileflags == Lob_Error_Or_Create) ||
- ((LobTgtFileFlags)fileflags == Lob_Truncate_Or_Error))
- overwrite = FALSE;
- if ((LobTgtFileFlags)fileflags == Lob_Append_Or_Error)
+ if (((LobTgtFileFlags)fileflags == Lob_Truncate_Or_Error) ||
+ ((LobTgtFileFlags)fileflags == Lob_Truncate_Or_Create))
+ overwrite = TRUE;
+ if (((LobTgtFileFlags)fileflags == Lob_Append_Or_Error) ||
+ ((LobTgtFileFlags)fileflags == Lob_Append_Or_Create))
append = TRUE;
tgtHdfsClient = HdfsClient::newInstance(getLobGlobalHeap(), NULL, hdfsClientRetcode);
ex_assert(hdfsClientRetcode == HDFS_CLIENT_OK, "Internal error: HdfsClient::newInstance returned an error");
if (tgtHdfsClient->hdfsCreate(tgtFileName, overwrite, append, FALSE) != HDFS_CLIENT_OK)
return LOB_TARGET_FILE_OPEN_ERROR;
- Int32 bytesRead;
- Int32 bytesWritten;
- while (remainLen > 0)
- {
- if (remainLen > lobMaxChunkMemLen)
- readLen = lobMaxChunkMemLen;
- else
- readLen = remainLen;
- if (lobData == NULL)
- lobData = new (lobGlobalHeap_) char[readLen];
- bytesRead = hdfsClient_->hdfsRead(pos, lobData, readLen, hdfsClientRetcode);
- if (hdfsClientRetcode == HDFS_CLIENT_OK)
- bytesWritten = tgtHdfsClient->hdfsWrite(lobData, bytesRead, hdfsClientRetcode, lobMaxChunkMemLen);
- if (hdfsClientRetcode == HDFS_CLIENT_OK) {
- pos += bytesRead;
- remainLen -= bytesRead;
- writeOperLen += bytesWritten;
- } else {
- NADELETEBASIC(lobData, lobGlobalHeap_);
- HdfsClient::deleteInstance(tgtHdfsClient);
- return LOB_DATA_READ_ERROR;
- }
- }
- HdfsClient::deleteInstance(tgtHdfsClient);
- return LOB_OPER_OK;
- }
+ } else {
if ((LobTgtFileFlags)fileflags == Lob_Append_Or_Error )
openFlags |= O_APPEND;
//hdfsFile fdTgtFile = hdfsOpenFile(fs_,tgtFileName, openFlags, 0,0,0);
@@ -2537,13 +2513,42 @@ Ex_Lob_Error ExLob::readDataToHdfsFile(char *tgtFileName, Int64 offset, Int64 s
return LOB_TARGET_FILE_OPEN_ERROR;
}
}
+ }
+ if (!multipleChunks) {
+ if (! useLibHdfs_) {
+ Int32 bytesRead;
+ Int32 bytesWritten;
+ while (remainLen > 0)
+ {
+ if (remainLen > lobMaxChunkMemLen)
+ readLen = lobMaxChunkMemLen;
+ else
+ readLen = remainLen;
+ if (lobData == NULL)
+ lobData = new (lobGlobalHeap_) char[readLen];
+ bytesRead = hdfsClient_->hdfsRead(pos, lobData, readLen, hdfsClientRetcode);
+ if (hdfsClientRetcode == HDFS_CLIENT_OK)
+ bytesWritten = tgtHdfsClient->hdfsWrite(lobData, bytesRead, hdfsClientRetcode, lobMaxChunkMemLen);
+ if (hdfsClientRetcode == HDFS_CLIENT_OK) {
+ pos += bytesRead;
+ remainLen -= bytesRead;
+ writeOperLen += bytesWritten;
+ } else {
+ NADELETEBASIC(lobData, lobGlobalHeap_);
+ HdfsClient::deleteInstance(tgtHdfsClient);
+ return LOB_DATA_READ_ERROR;
+ }
+ }
+ HdfsClient::deleteInstance(tgtHdfsClient);
+ return LOB_OPER_OK;
+ } // !multipleChunk && !useLibHdfs
+ else {
+ if ((srcLen < lobMaxChunkMemLen))
+ {
+ lobDebugInfo("Reading in single chunk",0,__LINE__,lobTrace_);
+ lobData = (char *) (getLobGlobalHeap())->allocateMemory(srcLen);
- if ((srcLen < lobMaxChunkMemLen) && (multipleChunks ==FALSE)) // simple single I/O case
- {
- lobDebugInfo("Reading in single chunk",0,__LINE__,lobTrace_);
- lobData = (char *) (getLobGlobalHeap())->allocateMemory(srcLen);
-
- if (lobData == NULL)
+ if (lobData == NULL)
{
return LOB_SOURCE_DATA_ALLOC_ERROR;
}
@@ -2567,23 +2572,22 @@ Ex_Lob_Error ExLob::readDataToHdfsFile(char *tgtFileName, Int64 offset, Int64 s
}
getLobGlobalHeap()->deallocateMemory(lobData);
}
- else
- {// multiple chunks to read
+ } // !multipleChunk && useLibHdfs
+ } // !multipleChunk
+ else {// multiple chunks to read
lobDebugInfo("Reading in multiple chunks into local file",0,__LINE__,lobTrace_);
err = openCursor(handleIn,
handleInLen,
transId);
if (err != LOB_OPER_OK)
return err;
+ chunkSize = MINOF(srcLen, lobMaxChunkMemLen);
+ lobData = (char *) (getLobGlobalHeap())->allocateMemory(chunkSize);
+ if (lobData == NULL)
+ return LOB_SOURCE_DATA_ALLOC_ERROR;
while ( srcLen > 0)
{
chunkSize = MINOF(srcLen, lobMaxChunkMemLen);
- lobData = (char *) (getLobGlobalHeap())->allocateMemory(chunkSize);
- if (lobData == NULL)
- {
- getLobGlobalHeap()->deallocateMemory(lobData);
- return LOB_SOURCE_DATA_ALLOC_ERROR;
- }
//handle reading the multiple chunks like a cursor
err = readCursor(lobData,chunkSize, handleIn,
handleInLen, operLen, transId);
@@ -2594,6 +2598,15 @@ Ex_Lob_Error ExLob::readDataToHdfsFile(char *tgtFileName, Int64 offset, Int64 s
getLobGlobalHeap()->deallocateMemory(lobData);
return LOB_DATA_READ_ERROR;
}
+ if (!useLibHdfs_) {
+ writeOperLen += tgtHdfsClient->hdfsWrite(lobData, chunkSize, hdfsClientRetcode, lobMaxChunkMemLen);
+ if (hdfsClientRetcode != HDFS_CLIENT_OK) {
+ NADELETEBASIC(lobData, lobGlobalHeap_);
+ HdfsClient::deleteInstance(tgtHdfsClient);
+ return LOB_TARGET_FILE_WRITE_ERROR;
+ }
+ }
+ else {
writeOperLen += hdfsWrite(fs_,fdTgtFile,lobData, chunkSize);
if (writeOperLen <= 0)
{
@@ -2605,18 +2618,21 @@ Ex_Lob_Error ExLob::readDataToHdfsFile(char *tgtFileName, Int64 offset, Int64 s
getLobGlobalHeap()->deallocateMemory(lobData);
return LOB_DATA_FLUSH_ERROR;
}
- getLobGlobalHeap()->deallocateMemory(lobData);
+ }
srcLen -= chunkSize;
-
}
closeCursor(handleIn,
handleInLen,transId);
- }
- hdfsCloseFile(fs_, fdTgtFile);
- fdTgtFile=NULL;
- hdfsCloseFile(fs_,fdData_);
- fdData_=NULL;
-
+ }
+ getLobGlobalHeap()->deallocateMemory(lobData);
+ if (! useLibHdfs_) {
+ HdfsClient::deleteInstance(tgtHdfsClient);
+ } else {
+ hdfsCloseFile(fs_, fdTgtFile);
+ fdTgtFile=NULL;
+ hdfsCloseFile(fs_,fdData_);
+ fdData_=NULL;
+ }
return LOB_OPER_OK;
}
[4/4] trafodion git commit: Merge PR 1696 [TRAFODION-3110] Refactor
LOB access to use the new implementation of HdfsClient
Posted by se...@apache.org.
Merge PR 1696 [TRAFODION-3110] Refactor LOB access to use the new implementation of HdfsClient
Project: http://git-wip-us.apache.org/repos/asf/trafodion/repo
Commit: http://git-wip-us.apache.org/repos/asf/trafodion/commit/08e0ab09
Tree: http://git-wip-us.apache.org/repos/asf/trafodion/tree/08e0ab09
Diff: http://git-wip-us.apache.org/repos/asf/trafodion/diff/08e0ab09
Branch: refs/heads/master
Commit: 08e0ab09ef1963658637b21b65638a336c2d4bdf
Parents: 31cab90 82ef6c1
Author: selvaganesang <se...@apache.org>
Authored: Thu Aug 23 22:21:53 2018 +0000
Committer: selvaganesang <se...@apache.org>
Committed: Thu Aug 23 22:21:53 2018 +0000
----------------------------------------------------------------------
core/sql/executor/ExHbaseAccess.cpp | 2 +-
core/sql/executor/ExHdfsScan.cpp | 2 +-
core/sql/executor/HdfsClient_JNI.cpp | 56 ++++++++-
core/sql/executor/HdfsClient_JNI.h | 6 +-
core/sql/exp/ExpLOBaccess.cpp | 113 ++++++++++++++-----
core/sql/optimizer/HDFSHook.cpp | 64 +++++------
core/sql/regress/executor/EXPECTED130 | 43 ++++---
core/sql/regress/executor/TEST130 | 9 +-
.../main/java/org/trafodion/sql/HDFSClient.java | 41 ++++++-
9 files changed, 243 insertions(+), 93 deletions(-)
----------------------------------------------------------------------