Posted to commits@trafodion.apache.org by an...@apache.org on 2016/05/31 22:53:36 UTC

[1/8] incubator-trafodion git commit: hive data modification detection: commit #1

Repository: incubator-trafodion
Updated Branches:
  refs/heads/master d19936293 -> c39d3abf6


hive data modification detection: commit #1


Project: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/commit/f4728220
Tree: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/tree/f4728220
Diff: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/diff/f4728220

Branch: refs/heads/master
Commit: f4728220c7a21f06c5c295e7a4c515a09a1a219d
Parents: c41b39b
Author: Anoop Sharma <an...@esgyn.com>
Authored: Sun May 22 04:26:45 2016 +0000
Committer: Anoop Sharma <an...@esgyn.com>
Committed: Sun May 22 04:26:45 2016 +0000

----------------------------------------------------------------------
 core/sql/bin/SqlciErrors.txt          |  2 +
 core/sql/cli/SessionDefaults.cpp      |  3 +
 core/sql/comexe/ComTdbHdfsScan.cpp    | 33 +++++++++--
 core/sql/comexe/ComTdbHdfsScan.h      | 26 +++++++--
 core/sql/executor/ExHdfsScan.cpp      | 76 +++++++++++++++++++++++--
 core/sql/executor/ExHdfsScan.h        | 13 +++--
 core/sql/exp/ExpLOBaccess.cpp         | 90 +++++++++++++++++++++++++++---
 core/sql/exp/ExpLOBaccess.h           | 79 +++++++++++++++-----------
 core/sql/exp/ExpLOBenums.h            |  3 +
 core/sql/exp/ExpLOBinterface.cpp      | 46 ++++++++++++++-
 core/sql/exp/ExpLOBinterface.h        | 13 +++++
 core/sql/generator/GenRelScan.cpp     | 47 +++++++++-------
 core/sql/optimizer/HDFSHook.cpp       |  5 ++
 core/sql/optimizer/HDFSHook.h         |  4 ++
 core/sql/regress/executor/EXPECTED020 |  4 ++
 core/sql/sqlcomp/DefaultConstants.h   |  4 ++
 core/sql/sqlcomp/nadefaults.cpp       |  2 +
 17 files changed, 369 insertions(+), 81 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/f4728220/core/sql/bin/SqlciErrors.txt
----------------------------------------------------------------------
diff --git a/core/sql/bin/SqlciErrors.txt b/core/sql/bin/SqlciErrors.txt
index cf532fe..4aa8cf1 100644
--- a/core/sql/bin/SqlciErrors.txt
+++ b/core/sql/bin/SqlciErrors.txt
@@ -1550,6 +1550,8 @@ $1~String1 --------------------------------
 8432 22003 99999 BEGINNER MINOR LOGONLY A negative value cannot be converted to an unsigned numeric datatype.$0~string0
 8433 22003 99999 BEGINNER MINOR LOGONLY Invalid $0~string0 character encountered in $1~string1.
 8434 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Invalid target column for LOB function. The column needs to be blob/clob type. 
+8435 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU --- unused ---
+8436 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Mismatch detected between compiletime and runtime hive table definitions.
 8440 ZZZZZ 99999 BEGINNER MAJOR DBADMIN The size of the history buffer is too small to execute one or more of the OLAP Windowed Functions in the query. 
 8441 ZZZZZ 99999 BEGINNER MAJOR DBADMIN one or more of the OLAP Windowed Functions in the query may require overflow which is not supported yet.
 8442 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Unable to access $0~string0 interface. Call to $1~string1 returned error $2~string2($0~int0). Error detail $1~int1.

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/f4728220/core/sql/cli/SessionDefaults.cpp
----------------------------------------------------------------------
diff --git a/core/sql/cli/SessionDefaults.cpp b/core/sql/cli/SessionDefaults.cpp
index 1ce2145..372943f 100644
--- a/core/sql/cli/SessionDefaults.cpp
+++ b/core/sql/cli/SessionDefaults.cpp
@@ -740,6 +740,7 @@ static const QueryString cqdInfo[] =
   {"unique_hash_joins"}, {"OFF"}
 , {"transform_to_sidetree_insert"}, {"OFF"}
 , {"METADATA_CACHE_SIZE"}, {"0"}
+, {"QUERY_CACHE"}, {"0"}
 };
 
 static const AQRInfo::AQRErrorMap aqrErrorMap[] = 
@@ -771,6 +772,8 @@ static const AQRInfo::AQRErrorMap aqrErrorMap[] =
   // parallel purgedata failed
   AQREntry(   8022,      0,      3,    60,      0,   0, "",    0,     1),
 
+  AQREntry(   8436,      0,      1,     0,      0,   1, "04",  0,     0),
+
   // FS memory errors
   AQREntry(   8550,     30,      1,    60,      0,   0, "",    0,     0),
   AQREntry(   8550,     31,      1,    60,      0,   0, "",    0,     0),

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/f4728220/core/sql/comexe/ComTdbHdfsScan.cpp
----------------------------------------------------------------------
diff --git a/core/sql/comexe/ComTdbHdfsScan.cpp b/core/sql/comexe/ComTdbHdfsScan.cpp
index 0f42446..a6aac21 100755
--- a/core/sql/comexe/ComTdbHdfsScan.cpp
+++ b/core/sql/comexe/ComTdbHdfsScan.cpp
@@ -65,9 +65,14 @@ ComTdbHdfsScan::ComTdbHdfsScan(
                                Cardinality estimatedRowCount,
                                Int32  numBuffers,
                                UInt32  bufferSize,
-                               char * errCountTable = NULL,
-                               char * loggingLocation = NULL,
-                               char * errCountId = NULL
+                               char * errCountTable,
+                               char * loggingLocation,
+                               char * errCountId,
+
+                               char * hdfsFilesDir,
+                               Int64  modTSforDir,
+                               Lng32  numFilesInDir
+
                                )
 : ComTdb( ComTdb::ex_HDFS_SCAN,
             eye_HDFS_SCAN,
@@ -107,7 +112,10 @@ ComTdbHdfsScan::ComTdbHdfsScan(
   flags_(0),
   errCountTable_(errCountTable),
   loggingLocation_(loggingLocation),
-  errCountRowId_(errCountId)
+  errCountRowId_(errCountId),
+  hdfsFilesDir_(hdfsFilesDir),
+  modTSforDir_(modTSforDir),
+  numFilesInDir_(numFilesInDir)
 {};
 
 ComTdbHdfsScan::~ComTdbHdfsScan()
@@ -142,6 +150,9 @@ Long ComTdbHdfsScan::pack(void * space)
   errCountTable_.pack(space);
   loggingLocation_.pack(space);
   errCountRowId_.pack(space);
+
+  hdfsFilesDir_.pack(space);
+
   return ComTdb::pack(space);
 }
 
@@ -173,6 +184,9 @@ Lng32 ComTdbHdfsScan::unpack(void * base, void * reallocator)
   if (errCountTable_.unpack(base)) return -1;
   if (loggingLocation_.unpack(base)) return -1;
   if (errCountRowId_.unpack(base)) return -1;
+
+  if (hdfsFilesDir_.unpack(base)) return -1;
+
   return ComTdb::unpack(base, reallocator);
 }
 
@@ -419,6 +433,17 @@ void ComTdbHdfsScan::displayContents(Space * space,ULng32 flag)
                                                    sizeof(short));
             }
         }
+
+      if (hdfsFilesDir_)
+        {
+          str_sprintf(buf, "hdfsDir: %s", hdfsFilesDir_);
+          space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short));
+
+          str_sprintf(buf, "modTSforDir_ = %Ld, numFilesInDir_ = %d",
+                      modTSforDir_, numFilesInDir_);
+          space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short));
+        }
+
     }
 
   if(flag & 0x00000001)

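
The three new members slot into the TDB's explicit byte layout: the old 12-byte filler at bytes 188-199 shrinks to 4 bytes so that hdfsFilesDir_ lands on an 8-byte boundary at 192, followed by the Int64 timestamp and the Lng32 file count, with a fresh 12-byte filler keeping the tail a multiple of 8. A standalone sketch that checks those offsets, modelling the Trafodion typedefs with fixed-width equivalents (an assumption; NABasicPtr is treated as an 8-byte pointer, as the offset comments imply):

#include <cstddef>
#include <cstdint>

struct TailLayout                 // models bytes 184 - 223 of ComTdbHdfsScan
{
  uint32_t hiveScanMode_;         // 184 - 187
  char     filler1_[4];           // 188 - 191
  int64_t  hdfsFilesDir_;         // 192 - 199 (NABasicPtr, assumed 8 bytes)
  int64_t  modTSforDir_;          // 200 - 207
  int32_t  numFilesInDir_;        // 208 - 211
  char     filler2_[12];          // 212 - 223
};

static_assert(offsetof(TailLayout, hdfsFilesDir_)  ==  8, "ptr starts at 192");
static_assert(offsetof(TailLayout, modTSforDir_)   == 16, "Int64 starts at 200");
static_assert(offsetof(TailLayout, numFilesInDir_) == 24, "Lng32 starts at 208");
static_assert(sizeof(TailLayout) == 40, "tail covers bytes 184 - 223");

int main() { return 0; }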
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/f4728220/core/sql/comexe/ComTdbHdfsScan.h
----------------------------------------------------------------------
diff --git a/core/sql/comexe/ComTdbHdfsScan.h b/core/sql/comexe/ComTdbHdfsScan.h
index 0842c19..0b17947 100755
--- a/core/sql/comexe/ComTdbHdfsScan.h
+++ b/core/sql/comexe/ComTdbHdfsScan.h
@@ -131,8 +131,17 @@ class ComTdbHdfsScan : public ComTdb
   NABasicPtr loggingLocation_;                                // 168 - 175
   NABasicPtr errCountRowId_;                                  // 176 - 183
   UInt32  hiveScanMode_;                                      // 184 - 187
-  char fillersComTdbHdfsScan1_[12];                           // 188 - 199
 
+  char fillersComTdbHdfsScan1_[4];                           // 188 - 191
+
+  // next 3 params used to check if data under hdfsFileDir
+  // was modified after query was compiled.
+  NABasicPtr hdfsFilesDir_;                                    // 192 - 199
+  Int64  modTSforDir_;                                         // 200 - 207
+  Lng32  numFilesInDir_;                                       // 208 - 211
+
+  char fillersComTdbHdfsScan2_[12];                           // 212 - 223
+    
 public:
   enum HDFSFileType
   {
@@ -178,10 +187,17 @@ public:
 		 queue_index up,
 		 Cardinality estimatedRowCount,
                  Int32  numBuffers,
-                 UInt32  bufferSize
-                 , char * errCountTable
-                 , char * loggingLocation
-                 , char * errCountId
+                 UInt32  bufferSize,
+
+                 char * errCountTable = NULL,
+                 char * loggingLocation = NULL,
+                 char * errCountId = NULL,
+
+                 // next 3 params used to check if data under hdfsFileDir
+                 // was modified after query was compiled.
+                 char * hdfsFilesDir  = NULL,
+                 Int64  modTSforDir   = -1,
+                 Lng32  numFilesInDir = -1
                  );
 
   ~ComTdbHdfsScan();

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/f4728220/core/sql/executor/ExHdfsScan.cpp
----------------------------------------------------------------------
diff --git a/core/sql/executor/ExHdfsScan.cpp b/core/sql/executor/ExHdfsScan.cpp
index 1278c3a..dbb5e7c 100644
--- a/core/sql/executor/ExHdfsScan.cpp
+++ b/core/sql/executor/ExHdfsScan.cpp
@@ -115,6 +115,7 @@ ExHdfsScanTcb::ExHdfsScanTcb(
   , numBytesProcessedInRange_(0)
   , exception_(FALSE)
   , checkRangeDelimiter_(FALSE)
+  , dataModCheckDone_(FALSE)
 {
   Space * space = (glob ? glob->getSpace() : 0);
   CollHeap * heap = (glob ? glob->getDefaultHeap() : 0);
@@ -394,7 +395,7 @@ ExWorkProcRetcode ExHdfsScanTcb::work()
 
 	    if (hdfsScanTdb().getHdfsFileInfoList()->isEmpty())
 	      {
-		step_ = DONE;
+                step_ = CHECK_FOR_DATA_MOD_AND_DONE;
 		break;
 	      }
 
@@ -410,16 +411,76 @@ ExWorkProcRetcode ExHdfsScanTcb::work()
 
 	    hdfsScanBufMaxSize_ = hdfsScanTdb().hdfsBufSize_;
 
+            dataModCheckDone_ = FALSE;
+
 	    if (numRanges_ > 0)
-              step_ = INIT_HDFS_CURSOR;
+              step_ = CHECK_FOR_DATA_MOD;
             else
-              step_ = DONE;
+              step_ = CHECK_FOR_DATA_MOD_AND_DONE;
 	  }
 	  break;
 
+        case CHECK_FOR_DATA_MOD:
+        case CHECK_FOR_DATA_MOD_AND_DONE:
+          {
+            char * dirPath = hdfsScanTdb().hdfsFilesDir_;
+            if (! dirPath)
+              dataModCheckDone_ = TRUE;
+
+            if (NOT dataModCheckDone_)
+              {
+                Int64 modTS = hdfsScanTdb().modTSforDir_;
+                Lng32 numFilesInDir = hdfsScanTdb().numFilesInDir_;
+
+                retcode = ExpLOBinterfaceDataModCheck
+                  (lobGlob_,
+                   dirPath,
+                   hdfsScanTdb().hostName_,
+                   hdfsScanTdb().port_,
+                   modTS,
+                   numFilesInDir);
+                
+                if (retcode < 0)
+                  {
+                    Lng32 cliError = 0;
+		    
+                    Lng32 intParam1 = -retcode;
+                    ComDiagsArea * diagsArea = NULL;
+                    ExRaiseSqlError(getHeap(), &diagsArea, 
+                                    (ExeErrorCode)(EXE_ERROR_FROM_LOB_INTERFACE),
+                                    NULL, &intParam1, 
+                                    &cliError, 
+                                    NULL, 
+                                    "HDFS",
+                                    (char*)"ExpLOBInterfaceDataModCheck",
+                                    getLobErrStr(intParam1));
+                    pentry_down->setDiagsArea(diagsArea);
+                    step_ = HANDLE_ERROR_AND_DONE;
+                    break;
+                  }  
+
+                if (retcode == 1) // check failed
+                  {
+                    ComDiagsArea * diagsArea = NULL;
+                    ExRaiseSqlError(getHeap(), &diagsArea, 
+                                    (ExeErrorCode)(8436));
+                    pentry_down->setDiagsArea(diagsArea);
+                    step_ = HANDLE_ERROR_AND_DONE;
+                    break;
+                  }
+
+                dataModCheckDone_ = TRUE;
+              }
+
+            if (step_ == CHECK_FOR_DATA_MOD_AND_DONE)
+              step_ = DONE;
+            else
+              step_ = INIT_HDFS_CURSOR;
+          }
+          break;
+
 	case INIT_HDFS_CURSOR:
 	  {
-
             hdfo_ = (HdfsFileInfo*)
               hdfsScanTdb().getHdfsFileInfoList()->get(currRangeNum_);
             if ((hdfo_->getBytesToRead() == 0) && 
@@ -569,10 +630,11 @@ ExWorkProcRetcode ExHdfsScanTcb::work()
             trailingPrevRead_ = 0; 
             firstBufOfFile_ = true;
             numBytesProcessedInRange_ = 0;
+
             step_ = GET_HDFS_DATA;
           }
           break;
-	  
+
 	case GET_HDFS_DATA:
 	  {
 	    Int64 bytesToRead = hdfsScanBufMaxSize_ - trailingPrevRead_;
@@ -1228,8 +1290,10 @@ ExWorkProcRetcode ExHdfsScanTcb::work()
             workAtp_->getDiagsArea()->clear();
 	}
 	break;
+
         case HANDLE_ERROR_WITH_CLOSE:
 	case HANDLE_ERROR:
+	case HANDLE_ERROR_AND_DONE:
 	  {
 	    if (qparent_.up->isFull())
 	      return WORK_OK;
@@ -1258,6 +1322,8 @@ ExWorkProcRetcode ExHdfsScanTcb::work()
 	    
             if (step_ == HANDLE_ERROR_WITH_CLOSE)
                step_ = CLOSE_HDFS_CURSOR;
+            else if (step_ == HANDLE_ERROR_AND_DONE)
+              step_ = DONE;
             else
 	       step_ = ERROR_CLOSE_FILE;
 	    break;
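
The new CHECK_FOR_DATA_MOD states map the three outcomes of ExpLOBinterfaceDataModCheck() onto the scan's state machine: a negative value is a LOB-interface failure (raised as EXE_ERROR_FROM_LOB_INTERFACE), 1 means the directory changed after compile time (raised as error 8436, which the new aqrErrorMap entry in SessionDefaults.cpp appears to make automatically retryable), and 0 lets the scan continue. A minimal sketch of that convention outside the executor; checkHiveDataMod() and fakeDataModCheck() are hypothetical stand-ins, and only the meaning of the return values comes from this commit:

#include <cstdio>

enum CheckResult { CHECK_OK, CHECK_DATA_MODIFIED, CHECK_LOB_ERROR };

// Stand-in for ExpLOBinterfaceDataModCheck(): < 0 error, 1 mismatch, 0 ok.
static int fakeDataModCheck() { return 1; }

static CheckResult checkHiveDataMod()
{
  int rc = fakeDataModCheck();
  if (rc < 0)
    {
      // the scan raises EXE_ERROR_FROM_LOB_INTERFACE with -rc as the detail
      std::printf("LOB interface error %d\n", -rc);
      return CHECK_LOB_ERROR;
    }
  if (rc == 1)
    {
      // the scan raises SQL error 8436 (compile-time/runtime hive mismatch)
      std::printf("hive table changed since compile time (8436)\n");
      return CHECK_DATA_MODIFIED;
    }
  return CHECK_OK;
}

int main() { return checkHiveDataMod() == CHECK_OK ? 0 : 1; }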

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/f4728220/core/sql/executor/ExHdfsScan.h
----------------------------------------------------------------------
diff --git a/core/sql/executor/ExHdfsScan.h b/core/sql/executor/ExHdfsScan.h
index 8cd4690..453596e 100644
--- a/core/sql/executor/ExHdfsScan.h
+++ b/core/sql/executor/ExHdfsScan.h
@@ -162,18 +162,21 @@ protected:
     NOT_STARTED
   , INIT_HDFS_CURSOR
   , OPEN_HDFS_CURSOR
+  , CHECK_FOR_DATA_MOD
+  , CHECK_FOR_DATA_MOD_AND_DONE
   , GET_HDFS_DATA
   , CLOSE_HDFS_CURSOR
   , PROCESS_HDFS_ROW
   , RETURN_ROW
   , REPOS_HDFS_DATA
-  ,CLOSE_FILE
-  ,ERROR_CLOSE_FILE
-  ,COLLECT_STATS
+  , CLOSE_FILE
+  , ERROR_CLOSE_FILE
+  , COLLECT_STATS
   , HANDLE_ERROR
-  ,HANDLE_EXCEPTION
+  , HANDLE_EXCEPTION
   , DONE
   , HANDLE_ERROR_WITH_CLOSE
+  , HANDLE_ERROR_AND_DONE
   } step_,nextStep_;
 
   /////////////////////////////////////////////////////
@@ -287,6 +290,8 @@ protected:
   NABoolean exception_;
   ComCondition * lastErrorCnd_;
   NABoolean checkRangeDelimiter_;
+
+  NABoolean dataModCheckDone_;
 };
 
 class ExOrcScanTcb  : public ExHdfsScanTcb

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/f4728220/core/sql/exp/ExpLOBaccess.cpp
----------------------------------------------------------------------
diff --git a/core/sql/exp/ExpLOBaccess.cpp b/core/sql/exp/ExpLOBaccess.cpp
index 929db63..5c1d2fa 100644
--- a/core/sql/exp/ExpLOBaccess.cpp
+++ b/core/sql/exp/ExpLOBaccess.cpp
@@ -131,11 +131,11 @@ Ex_Lob_Error ExLob::initialize(char *lobFile, Ex_Lob_Mode mode,
 	  dir_ = string(dir);
 	}
 
-   
-      snprintf(lobDataFile_, MAX_LOB_FILE_NAME_LEN, "%s/%s", dir_.c_str(), lobFile);
+      if (lobFile)
+        snprintf(lobDataFile_, MAX_LOB_FILE_NAME_LEN, "%s/%s", dir_.c_str(), lobFile);
       
     } 
-  else 
+  else if (lobFile)
     { 
       snprintf(lobDataFile_, MAX_LOB_FILE_NAME_LEN, "%s", lobFile);
       
@@ -153,7 +153,8 @@ Ex_Lob_Error ExLob::initialize(char *lobFile, Ex_Lob_Mode mode,
 
   hdfsServer_ = hdfsServer;
   hdfsPort_ = hdfsPort;
-  lobLocation_ = lobLocation;
+  if (lobLocation)
+    lobLocation_ = lobLocation;
   clock_gettime(CLOCK_MONOTONIC, &startTime);
 
   if (lobGlobals->getHdfsFs() == NULL)
@@ -377,6 +378,62 @@ Ex_Lob_Error ExLob::writeDataSimple(char *data, Int64 size, LobsSubOper subOpera
 
     return LOB_OPER_OK;
 }
+
+Ex_Lob_Error ExLob::dataModCheck(
+       char * dirPath, 
+       Int64  inputModTS,
+       Lng32  inputNumFilesInDir,
+       Lng32  &numFilesInDir)
+{
+  // find mod time of dir
+  hdfsFileInfo *fileInfos = hdfsGetPathInfo(fs_, dirPath);
+  if (fileInfos == NULL)
+    {
+      return LOB_DATA_FILE_NOT_FOUND_ERROR;
+    }
+
+  Int64 currModTS = fileInfos[0].mLastMod;
+  hdfsFreeFileInfo(fileInfos, 1);
+  if ((inputModTS > 0) &&
+      (currModTS > inputModTS))
+    return LOB_DATA_MOD_CHECK_ERROR;
+
+  // find number of files in dirPath.
+  Lng32 currNumFilesInDir = 0;
+  fileInfos = hdfsListDirectory(fs_, dirPath, &currNumFilesInDir);
+  if ((currNumFilesInDir > 0) && (fileInfos == NULL))
+    {
+      return LOB_DATA_FILE_NOT_FOUND_ERROR;
+    }
+
+  NABoolean failed = FALSE;
+  for (Lng32 i = 0; ((NOT failed) && (i < currNumFilesInDir)); i++)
+    {
+      hdfsFileInfo &fileInfo = fileInfos[i];
+      if (fileInfo.mKind == kObjectKindDirectory)
+        {
+          if (dataModCheck(fileInfo.mName, inputModTS, 
+                           inputNumFilesInDir, numFilesInDir) ==
+              LOB_DATA_MOD_CHECK_ERROR)
+            {
+              failed = TRUE;
+            }
+        }
+      else if (fileInfo.mKind == kObjectKindFile)
+        {
+          numFilesInDir++;
+          if (numFilesInDir > inputNumFilesInDir)
+            failed = TRUE;
+        }
+    }
+
+  hdfsFreeFileInfo(fileInfos, currNumFilesInDir);
+  if (failed)
+    return LOB_DATA_MOD_CHECK_ERROR;
+
+  return LOB_OPER_OK;
+}
+
 Ex_Lob_Error ExLob::emptyDirectory()
 {
     Ex_Lob_Error err;
@@ -2040,8 +2097,8 @@ Ex_Lob_Error ExLobsOper (
 			 LobsStorage storage,           // storage type
 			 char        *source,           // source (memory addr, filename, foreign lob etc)
 			 Int64       sourceLen,         // source len (memory len, foreign desc offset etc)
-			 Int64 cursorBytes,
-			 char *cursorId,
+			 Int64       cursorBytes,
+			 char        *cursorId,
 			 LobsOper    operation,         // LOB operation
 			 LobsSubOper subOperation,      // LOB sub operation
 			 Int64       waited,            // waited or nowaited
@@ -2315,7 +2372,6 @@ Ex_Lob_Error ExLobsOper (
         lobDebugInfo("purgeLob failed ",err,__LINE__,lobGlobals->lobTrace_);
       break;
 
-
     case Lob_Stats:
       err = lobPtr->readStats(source);
       lobPtr->initStats(); // because file may remain open across cursors
@@ -2323,10 +2379,28 @@ Ex_Lob_Error ExLobsOper (
 
     case Lob_Empty_Directory:
       lobPtr->initialize(fileName, EX_LOB_RW,
-			 dir, storage, hdfsServer, hdfsPort, dir,bufferSize, replication, blockSize);
+			 dir, storage, hdfsServer, hdfsPort, dir, bufferSize, replication, blockSize);
       err = lobPtr->emptyDirectory();
       break;
 
+    case Lob_Data_Mod_Check:
+      {
+        lobPtr->initialize(NULL, EX_LOB_RW,
+                           NULL, storage, hdfsServer, hdfsPort, NULL, 
+                           bufferSize, replication, blockSize);
+
+        Int64 inputModTS = *(Int64*)blackBox;
+        Int32 inputNumFilesInDir = 
+          *(Lng32*)&((char*)blackBox)[sizeof(inputModTS)];
+        Int32 numFilesInDir = 0;
+        err = lobPtr->dataModCheck(dir, inputModTS, 
+                                   inputNumFilesInDir, numFilesInDir);
+        if ((err == LOB_OPER_OK) &&
+            (numFilesInDir != inputNumFilesInDir))
+          err = LOB_DATA_MOD_CHECK_ERROR;
+      }
+      break;
+
     case Lob_Cleanup:
       delete lobGlobals;
       break;
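
ExLob::dataModCheck() walks dirPath recursively: the check fails if any directory's mLastMod is newer than the compile-time timestamp, or if the running file count exceeds the compile-time count; the Lob_Data_Mod_Check case then also requires the final count to match exactly. A self-contained sketch of the same walk against libhdfs (hdfs.h), assuming a reachable namenode; checkDirUnchanged() and hiveDirUnchanged() are illustrative names, not the committed API:

#include <cstdint>
#include "hdfs.h"

// Sketch of the directory-modification walk; checkDirUnchanged() is an
// illustrative name, not the committed ExLob::dataModCheck() signature.
static bool checkDirUnchanged(hdfsFS fs, const char *dirPath,
                              int64_t compileModTS, int compileNumFiles,
                              int &numFilesSeen)
{
  hdfsFileInfo *info = hdfsGetPathInfo(fs, dirPath);
  if (info == NULL)
    return false;                    // path is gone: treat as a failed check
  int64_t dirModTS = info->mLastMod;
  hdfsFreeFileInfo(info, 1);
  if (compileModTS > 0 && dirModTS > compileModTS)
    return false;                    // directory touched after compile time

  int numEntries = 0;
  hdfsFileInfo *entries = hdfsListDirectory(fs, dirPath, &numEntries);
  if (numEntries > 0 && entries == NULL)
    return false;

  bool ok = true;
  for (int i = 0; ok && i < numEntries; i++)
    {
      if (entries[i].mKind == kObjectKindDirectory)
        ok = checkDirUnchanged(fs, entries[i].mName, compileModTS,
                               compileNumFiles, numFilesSeen);
      else if (entries[i].mKind == kObjectKindFile)
        ok = (++numFilesSeen <= compileNumFiles);
    }
  if (entries)
    hdfsFreeFileInfo(entries, numEntries);
  return ok;
}

// Mirrors the Lob_Data_Mod_Check case: after the walk, the file count must
// equal the compile-time count exactly, otherwise files were added or dropped.
static bool hiveDirUnchanged(hdfsFS fs, const char *dir,
                             int64_t modTS, int numFiles)
{
  int seen = 0;
  return checkDirUnchanged(fs, dir, modTS, numFiles, seen) && seen == numFiles;
}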

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/f4728220/core/sql/exp/ExpLOBaccess.h
----------------------------------------------------------------------
diff --git a/core/sql/exp/ExpLOBaccess.h b/core/sql/exp/ExpLOBaccess.h
index 452f769..138116c 100644
--- a/core/sql/exp/ExpLOBaccess.h
+++ b/core/sql/exp/ExpLOBaccess.h
@@ -436,41 +436,41 @@ class ExLob
   Ex_Lob_Error insertData(char *data, Int64 size, LobsSubOper so,Int64 headDescNum, Int64 &operLen, Int64 lobMaxSize, Int64 lobMaxChunkMemSize,char *handleIn,Int32 handleInLen, char *blackBox, Int32 blackBoxLen, char * handleOut, Int32 &handleOutLen, void *lobGlobals);
   Ex_Lob_Error append(char *data, Int64 size, LobsSubOper so, Int64 headDescNum, Int64 &operLen, Int64 lobMaxSize, Int64 lobMaxChunkMemLen,Int64 lobGCLimit, char *handleIn,Int32 handleInLen, char * handleOut, Int32 &handleOutLen, void *lobGlobals);
   Ex_Lob_Error update(char *data, Int64 size, LobsSubOper so,Int64 headDescNum, Int64 &operLen, Int64 lobMaxSize,Int64 lobMaxChunkMemLen,Int64 lobGCLimit,char *handleIn,Int32 handleInLen, char * handleOut, Int32 &handleOutLen, void *lobGlobals);
-    Ex_Lob_Error readSourceFile(char *srcfile, char *&fileData, Int32 &size, Int64 offset);
-    Ex_Lob_Error readHdfsSourceFile(char *srcfile, char *&fileData, Int32 &size, Int64 offset);
-    Ex_Lob_Error readLocalSourceFile(char *srcfile, char *&fileData, Int32 &size, Int64 offset);
-    Ex_Lob_Error readExternalSourceFile(char *srcfile, char *&fileData, Int32 &size, Int64 offset);
-    Ex_Lob_Error statSourceFile(char *srcfile, Int64 &sourceEOF);
-    Ex_Lob_Error delDesc(char *handleIn, Int32 handleInLen, Int64 transId);
-    Ex_Lob_Error purgeLob();
-    Ex_Lob_Error closeFile();
-    LobInputOutputFileType fileType(char *ioFileName);
-    Ex_Lob_Error closeCursor(char *handleIn, Int32 handleInLen);
-    Ex_Lob_Error closeDataCursorSimple(char *fileName, ExLobGlobals *lobGlobals);
-   
-    Ex_Lob_Error doSanityChecks(char *dir, LobsStorage storage,
-                                Int32 handleInLen, Int32 handleOutLen, 
-                                Int32 blackBoxLen);
+  Ex_Lob_Error readSourceFile(char *srcfile, char *&fileData, Int32 &size, Int64 offset);
+  Ex_Lob_Error readHdfsSourceFile(char *srcfile, char *&fileData, Int32 &size, Int64 offset);
+  Ex_Lob_Error readLocalSourceFile(char *srcfile, char *&fileData, Int32 &size, Int64 offset);
+  Ex_Lob_Error readExternalSourceFile(char *srcfile, char *&fileData, Int32 &size, Int64 offset);
+  Ex_Lob_Error statSourceFile(char *srcfile, Int64 &sourceEOF);
+  Ex_Lob_Error delDesc(char *handleIn, Int32 handleInLen, Int64 transId);
+  Ex_Lob_Error purgeLob();
+  Ex_Lob_Error closeFile();
+  LobInputOutputFileType fileType(char *ioFileName);
+  Ex_Lob_Error closeCursor(char *handleIn, Int32 handleInLen);
+  Ex_Lob_Error closeDataCursorSimple(char *fileName, ExLobGlobals *lobGlobals);
+  
+  Ex_Lob_Error doSanityChecks(char *dir, LobsStorage storage,
+                              Int32 handleInLen, Int32 handleOutLen, 
+                              Int32 blackBoxLen);
   Ex_Lob_Error allocateDesc(unsigned int size, Int64 &descNum, Int64 &dataOffset,Int64 lobMaxSize,Int64 lobMaxChunkMemSize, char *handleIn, Int32 handleInLen,Int64 lobGCLimit, void *lobGlobals);
-    Ex_Lob_Error readStats(char *buffer);
-    Ex_Lob_Error initStats();
-
-    Ex_Lob_Error insertDesc(Int64 offset, Int64 size,  char *handleIn, Int32 handleInLen,  char *handleOut, Int32 &handleOutLen, char *blackBox, Int32 blackBoxLen,void *lobGlobals) ;
-
-    Ex_Lob_Error lockDesc();
-    Ex_Lob_Error unlockDesc();
-    char *getDataFileName() { return lobDataFile_; }
-   
-    int getErrNo();
-
+  Ex_Lob_Error readStats(char *buffer);
+  Ex_Lob_Error initStats();
   
-    Ex_Lob_Error getDesc(ExLobDesc &desc,char * handleIn, Int32 handleInLen, char *blackBox, Int32 *blackBoxLen, char * handleOut, Int32 &handleOutLen, Int64 transId);
-
-    Ex_Lob_Error writeData(Int64 offset, char *data, Int32 size, Int64 &operLen);
+  Ex_Lob_Error insertDesc(Int64 offset, Int64 size,  char *handleIn, Int32 handleInLen,  char *handleOut, Int32 &handleOutLen, char *blackBox, Int32 blackBoxLen,void *lobGlobals) ;
+  
+  Ex_Lob_Error lockDesc();
+  Ex_Lob_Error unlockDesc();
+  char *getDataFileName() { return lobDataFile_; }
+  
+  int getErrNo();
+  
+  
+  Ex_Lob_Error getDesc(ExLobDesc &desc,char * handleIn, Int32 handleInLen, char *blackBox, Int32 *blackBoxLen, char * handleOut, Int32 &handleOutLen, Int64 transId);
+  
+  Ex_Lob_Error writeData(Int64 offset, char *data, Int32 size, Int64 &operLen);
   Ex_Lob_Error readDataToMem(char *memAddr, Int64 offset, Int64 size,
                              Int64 &operLen,char *handleIn, Int32 handleLenIn, 
                              NABoolean multipleChunks, Int64 transId);
-   
+  
   Ex_Lob_Error readDataToLocalFile(char *fileName, Int64 offset, Int64 size,Int64 &operLen,Int64 lobMaxChunkMemLen ,Int32 fileFlags,char *handleIn,Int32 handleInLen, NABoolean multipleChunks,Int64 transId);
   Ex_Lob_Error readDataToHdfsFile(char *fileName, Int64 offset, Int64 size, Int64 &operLen,Int64 lobMaxChunkMemLen, Int32 fileflags,char *handleIn,Int32 handleInLen, NABoolean multipleChunks,Int64 transId);
   Ex_Lob_Error readDataToExternalFile(char *tgtFileName,  Int64 offset, Int64 size, Int64 &operLen, Int64 lobMaxChunkMemLen, Int32 fileflags,char *handleIn,Int32 handleInLen, NABoolean multipleChunks,Int64 transId);
@@ -479,9 +479,22 @@ class ExLob
   Ex_Lob_Error  restoreLobDataFile();
   Ex_Lob_Error purgeBackupLobDataFile();
 
-    Ex_Lob_Error emptyDirectory();
-    ExLobStats *getStats() { return &stats_; }
-    NAHeap *getLobGlobalHeap() { return lobGlobalHeap_;}
+  // dirPath: path to needed directory (includes directory name)
+  // modTS is the latest timestamp on any file/dir under dirPath.
+  // numFilesInDir is the total number of files under dirPath.
+  // This method validates that the current modTS is not greater than the input modTS
+  // and that the current number of files in dirPath is the same as the input numFilesInDir.
+  // If either condition is not true, the check fails.
+  // Return: LOB_OPER_OK, if passes. LOB_DATA_MOD_CHECK_ERROR, if fails.
+  Ex_Lob_Error dataModCheck(
+       char * dirPath, 
+       Int64  modTS,
+       Lng32  inputNumFilesInDir,
+       Lng32  &numFilesInDir);
+
+  Ex_Lob_Error emptyDirectory();
+  ExLobStats *getStats() { return &stats_; }
+  NAHeap *getLobGlobalHeap() { return lobGlobalHeap_;}
   ExLobRequest *getRequest() { return &request_; }
   
   //The next 2 functions are not active at this point. They serve as an example

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/f4728220/core/sql/exp/ExpLOBenums.h
----------------------------------------------------------------------
diff --git a/core/sql/exp/ExpLOBenums.h b/core/sql/exp/ExpLOBenums.h
index 99e072d..12647be 100644
--- a/core/sql/exp/ExpLOBenums.h
+++ b/core/sql/exp/ExpLOBenums.h
@@ -92,6 +92,7 @@ typedef enum {
   LOB_INVALID_ERROR_VAL,
   LOB_MAX_LIMIT_ERROR = 560,
   LOB_TARGET_FILE_EXISTS_ERROR,
+  LOB_DATA_MOD_CHECK_ERROR,
   LOB_MAX_ERROR_NUM     // keep this as the last element in enum list.
 } Ex_Lob_Error;
 
@@ -159,6 +160,7 @@ static const char * const lobErrorEnumStr[] =
   "LOB_INVALID_ERROR_VAL", 
   "LOB_MAX_LIMIT_ERROR", //560
   "LOB_TGT_FILE_EXISTS_ERROR",
+  "LOB_DATA_MOD_CHECK_ERROR",
   "LOB_MAX_ERROR_NUM"     // keep this as the last element in enum list.
 };
 
@@ -236,6 +238,7 @@ typedef enum {
    Lob_Print, // debugging purposes
 
    Lob_Empty_Directory,
+   Lob_Data_Mod_Check,
 
    Lob_Cleanup, // destroy everything under globals
    Lob_PerformGC,

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/f4728220/core/sql/exp/ExpLOBinterface.cpp
----------------------------------------------------------------------
diff --git a/core/sql/exp/ExpLOBinterface.cpp b/core/sql/exp/ExpLOBinterface.cpp
index 23a2083..a984635 100644
--- a/core/sql/exp/ExpLOBinterface.cpp
+++ b/core/sql/exp/ExpLOBinterface.cpp
@@ -220,7 +220,6 @@ Lng32 ExpLOBinterfaceCreate(
                    bufferSize ,
                    replication,
                    blockSize
-		   
 		   );
 
   if (err != LOB_OPER_OK)
@@ -229,6 +228,51 @@ Lng32 ExpLOBinterfaceCreate(
     return 0;
 }
 
+// Return: 1, if check fails. 
+//         0, if check passes. 
+//         -LOB_*_ERROR, if error.
+Lng32 ExpLOBinterfaceDataModCheck(void * lobGlob,
+                                  char * dirPath,
+                                  char * lobHdfsServer,
+                                  Lng32  lobHdfsPort,
+                                  Int64  modTS,
+                                  Lng32  numFilesInDir)
+{
+  Ex_Lob_Error err;
+
+  Int64 dummyParam=0;
+  Int32 dummyParam2 = 0;
+  Ex_Lob_Error status;
+  Int64 cliError = -1;
+
+  char dirInfoBuf[100];
+  *(Int64*)dirInfoBuf = modTS;
+  *(Lng32*)&dirInfoBuf[sizeof(modTS)] = numFilesInDir;
+  Lng32 dirInfoBufLen = sizeof(modTS) + sizeof(numFilesInDir);
+  err = ExLobsOper((char*)"",
+                   NULL, 0,
+                   lobHdfsServer, lobHdfsPort,
+                   NULL, dummyParam2, 0, dummyParam,
+                   dummyParam, 0, dummyParam, status, cliError,
+                   dirPath, (LobsStorage)Lob_HDFS_File,
+                   NULL, 0,
+		   0,NULL,
+                   Lob_Data_Mod_Check,
+                   Lob_None,
+                   1, // waited op
+                   lobGlob,
+                   0, 
+                   dirInfoBuf, dirInfoBufLen
+                   );
+
+  if (err == LOB_DATA_MOD_CHECK_ERROR)
+    return 1;
+  else if (err != LOB_OPER_OK)
+    return -(short)err;
+  else
+    return 0;
+}
+
 Lng32 ExpLOBinterfaceEmptyDirectory(
                             void * lobGlob,
                             char * lobName,

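
ExpLOBinterfaceDataModCheck() hands the compile-time values to ExLobsOper() through the generic blackBox argument: an Int64 modification timestamp immediately followed by a 32-bit file count, 12 bytes in all. A small standalone sketch of that layout, using memcpy rather than the direct pointer casts in the commit and modelling Int64/Lng32 as int64_t/int32_t (an assumption about the Trafodion typedef widths):

#include <cassert>
#include <cstdint>
#include <cstring>

int main()
{
  // Sender side (ExpLOBinterfaceDataModCheck): pack timestamp, then count.
  char dirInfoBuf[100];
  int64_t modTS = 1464000000;      // example compile-time timestamp
  int32_t numFilesInDir = 17;      // example compile-time file count
  std::memcpy(dirInfoBuf, &modTS, sizeof(modTS));
  std::memcpy(dirInfoBuf + sizeof(modTS), &numFilesInDir, sizeof(numFilesInDir));
  int dirInfoBufLen = sizeof(modTS) + sizeof(numFilesInDir);

  // Receiver side (the Lob_Data_Mod_Check case): read them back in order.
  int64_t inModTS;
  int32_t inNumFiles;
  std::memcpy(&inModTS, dirInfoBuf, sizeof(inModTS));
  std::memcpy(&inNumFiles, dirInfoBuf + sizeof(inModTS), sizeof(inNumFiles));

  assert(dirInfoBufLen == 12 && inModTS == modTS && inNumFiles == numFilesInDir);
  return 0;
}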
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/f4728220/core/sql/exp/ExpLOBinterface.h
----------------------------------------------------------------------
diff --git a/core/sql/exp/ExpLOBinterface.h b/core/sql/exp/ExpLOBinterface.h
index 26f2ad4..a9b7597 100644
--- a/core/sql/exp/ExpLOBinterface.h
+++ b/core/sql/exp/ExpLOBinterface.h
@@ -291,6 +291,19 @@ Lng32 ExpLOBinterfacePerformGC(void *& lobGlob, char *lobName,void *descChunksAr
 Lng32 ExpLOBinterfaceRestoreLobDataFile(void *& lobGlob, char *hdfsServer, Int32 hdfsPort,char *lobLoc,char *lobName);
 Lng32 ExpLOBinterfacePurgeBackupLobDataFile(void *& lobGlob,  char *hdfsServer, Int32 hdfsPort,char *lobLoc,char *lobName);
 
+// dirPath: path to needed directory (includes directory name)
+// modTS is the latest timestamp on any file/dir under dirPath.
+// numFilesInDir is the total number of files under dirPath.
+// This method validates that the current modTS is not greater than the input modTS
+// and that the current number of files in dirPath is the same as the input numFilesInDir.
+// If either condition is not true, the check fails.
+// Return: 1, if the check fails. 0, if it passes. -LOB_*_ERROR, if error.
+Lng32 ExpLOBinterfaceDataModCheck(void * lobGlob,
+                                  char * dirPath,
+                                  char * lobHdfsServer,
+                                  Lng32  lobHdfsPort,
+                                  Int64  modTS,
+                                  Lng32  numFilesInDir);
 
 Lng32 ExpLOBinterfaceEmptyDirectory(void * lobGlob,
                             char * lobName,

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/f4728220/core/sql/generator/GenRelScan.cpp
----------------------------------------------------------------------
diff --git a/core/sql/generator/GenRelScan.cpp b/core/sql/generator/GenRelScan.cpp
index 827ed90..a781815 100644
--- a/core/sql/generator/GenRelScan.cpp
+++ b/core/sql/generator/GenRelScan.cpp
@@ -360,29 +360,16 @@ short FileScan::genForTextAndSeq(Generator * generator,
   const NABoolean isSequenceFile = hTabStats->isSequenceFile();
 
   HiveFileIterator hfi;
-  NABoolean firstFile = TRUE;
   hdfsPort = 0;
   hdfsHostName = NULL;
   
-  while (firstFile && getHiveSearchKey()->getNextFile(hfi))
-    {
-      const HHDFSFileStats * hFileStats = hfi.getFileStats();
-      if (firstFile)
-        {
-          // determine connection info (host and port) from the first file
-          NAString dummy, hostName;
-          NABoolean result;
-          result = ((HHDFSTableStats*)hTabStats)->splitLocation
-            (hFileStats->getFileName().data(), hostName, hdfsPort, dummy) ;
-          
-          GenAssert(result, "Invalid Hive directory name");
-
-          hdfsHostName = 
-            space->AllocateAndCopyToAlignedSpace(hostName, 0);
-
-          firstFile = FALSE;
-        }
-    }
+  // determine host and port from dir name
+  NAString dummy, hostName;
+  NABoolean result = ((HHDFSTableStats*)hTabStats)->splitLocation
+    (hTabStats->tableDir().data(), hostName, hdfsPort, dummy) ;
+  GenAssert(result, "Invalid Hive directory name");
+  hdfsHostName = 
+        space->AllocateAndCopyToAlignedSpace(hostName, 0);
 
   hdfsFileInfoList = new(space) Queue(space);
   hdfsFileRangeBeginList = new(space) Queue(space);
@@ -1159,6 +1146,22 @@ if (hTabStats->isOrcFile())
   char * tablename = 
     space->AllocateAndCopyToAlignedSpace(GenGetQualifiedName(getIndexDesc()->getNAFileSet()->getFileSetName()), 0);
 
+  // info needed to validate hdfs file structs
+  //  const HHDFSTableStats* hTabStats = 
+  //    getIndexDesc()->getNAFileSet()->getHHDFSTableStats();
+  char * hdfsDir = NULL;
+  Int64 modTS = -1;
+  Lng32 numFilesInDir = -1;
+  if (CmpCommon::getDefault(HIVE_DATA_MOD_CHECK) == DF_ON)
+    {
+      hdfsDir =
+        space->allocateAndCopyToAlignedSpace(hTabStats->tableDir().data(),
+                                             hTabStats->tableDir().length(),
+                                             0);
+      modTS = hTabStats->getModificationTS();
+      numFilesInDir =  hTabStats->getNumFiles();
+    }
+
   // create hdfsscan_tdb
   ComTdbHdfsScan *hdfsscan_tdb = new(space) 
     ComTdbHdfsScan(
@@ -1197,7 +1200,9 @@ if (hTabStats->isOrcFile())
 		   buffersize,
 		   errCountTab,
 		   logLocation,
-		   errCountRowId
+		   errCountRowId,
+
+                   hdfsDir, modTS, numFilesInDir
 		   );
 
   generator->initTdbFields(hdfsscan_tdb);

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/f4728220/core/sql/optimizer/HDFSHook.cpp
----------------------------------------------------------------------
diff --git a/core/sql/optimizer/HDFSHook.cpp b/core/sql/optimizer/HDFSHook.cpp
index 7a6e86d..90df234 100644
--- a/core/sql/optimizer/HDFSHook.cpp
+++ b/core/sql/optimizer/HDFSHook.cpp
@@ -559,6 +559,8 @@ void HHDFSListPartitionStats::populate(hdfsFS fs,
     }
   else
     {
+      dirInfo_ = *dirInfo;
+
       // list all the files in this directory, they all belong
       // to this partition and either belong to a specific bucket
       // or to the default bucket
@@ -1008,6 +1010,9 @@ void HHDFSTableStats::processDirectory(const NAString &dir, Int32 numOfBuckets,
       totalNumPartitions_++;
       // aggregate stats
       add(partStats);
+
+      if (partStats->dirInfo()->mLastMod > modificationTS_)
+        modificationTS_ = partStats->dirInfo()->mLastMod;
     }
 }
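
HHDFSTableStats::processDirectory() now keeps modificationTS_ as the newest mLastMod seen across the partition directories; that aggregate is what GenRelScan.cpp later stores in the TDB as modTSforDir_. A tiny illustration of the aggregation, assuming the per-partition timestamps have been collected into a vector:

#include <algorithm>
#include <cstdint>
#include <vector>

// The table-level timestamp is the max of the partition directories'
// mLastMod values; -1 matches the "unknown" default used in the TDBs.
int64_t aggregateModTS(const std::vector<int64_t> &partitionLastMod)
{
  int64_t modTS = -1;
  for (int64_t ts : partitionLastMod)
    modTS = std::max(modTS, ts);
  return modTS;
}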
 

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/f4728220/core/sql/optimizer/HDFSHook.h
----------------------------------------------------------------------
diff --git a/core/sql/optimizer/HDFSHook.h b/core/sql/optimizer/HDFSHook.h
index cbe634c..1ab474c 100644
--- a/core/sql/optimizer/HDFSHook.h
+++ b/core/sql/optimizer/HDFSHook.h
@@ -223,6 +223,8 @@ public:
   Int32 getNumOfBuckets() const { return (defaultBucketIdx_ ? defaultBucketIdx_ : 1); }
   Int32 getLastValidBucketIndx() const               { return defaultBucketIdx_; }
 
+  const hdfsFileInfo * dirInfo() const {return &dirInfo_; }
+
   void populate(hdfsFS fs, const NAString &dir, Int32 numOfBuckets, 
                 HHDFSDiags &diags,
                 NABoolean doEsTimation, char recordTerminator);
@@ -246,6 +248,8 @@ private:
   NABoolean doEstimation_;
   char recordTerminator_;
   
+  hdfsFileInfo dirInfo_;
+
   NAMemory *heap_;
 };
 

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/f4728220/core/sql/regress/executor/EXPECTED020
----------------------------------------------------------------------
diff --git a/core/sql/regress/executor/EXPECTED020 b/core/sql/regress/executor/EXPECTED020
index 5d2f932..5edccba 100644
--- a/core/sql/regress/executor/EXPECTED020
+++ b/core/sql/regress/executor/EXPECTED020
@@ -105,6 +105,7 @@ A            B
      4023              0          1          0          1
      4039              0          1          0          1
      4419              0          1          0          4
+     8436              0          1          0          0
      8550             30          1         60          0
      8550             31          1         60          0
      8550             33          1         60          0
@@ -172,6 +173,7 @@ A            B
      4023              0          1          0          1
      4039              0          1          0          1
      4419              0          1          0          4
+     8436              0          1          0          0
      8550             30          1         60          0
      8550             31          1         60          0
      8550             33          1         60          0
@@ -238,6 +240,7 @@ A            B
      4023              0          1          0          1
      4039              0          1          0          1
      4419              0          1          0          4
+     8436              0          1          0          0
      8550             30          1         60          0
      8550             31          1         60          0
      8550             33          1         60          0
@@ -305,6 +308,7 @@ A            B
      4023              0          1          0          1
      4039              0          1          0          1
      4419              0          1          0          4
+     8436              0          1          0          0
      8550             30          1         60          0
      8550             31          1         60          0
      8550             33          1         60          0

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/f4728220/core/sql/sqlcomp/DefaultConstants.h
----------------------------------------------------------------------
diff --git a/core/sql/sqlcomp/DefaultConstants.h b/core/sql/sqlcomp/DefaultConstants.h
index e8cf41d..a48f1c8 100644
--- a/core/sql/sqlcomp/DefaultConstants.h
+++ b/core/sql/sqlcomp/DefaultConstants.h
@@ -3817,6 +3817,10 @@ enum DefaultConstants
   //     // 2 : todo
   HIVE_SCAN_SPECIAL_MODE,
 
+  // if set, data modification check is done at runtime before running
+  // a query.
+  HIVE_DATA_MOD_CHECK,
+
   // This enum constant must be the LAST one in the list; it's a count,
   // not an Attribute (it's not IN DefaultDefaults; it's the SIZE of it)!
   __NUM_DEFAULT_ATTRIBUTES

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/f4728220/core/sql/sqlcomp/nadefaults.cpp
----------------------------------------------------------------------
diff --git a/core/sql/sqlcomp/nadefaults.cpp b/core/sql/sqlcomp/nadefaults.cpp
index 1eb05cf..8f5acbe 100644
--- a/core/sql/sqlcomp/nadefaults.cpp
+++ b/core/sql/sqlcomp/nadefaults.cpp
@@ -1957,6 +1957,8 @@ SDDkwd__(EXE_DIAGNOSTIC_EVENTS,		"OFF"),
 
   DD_____(HIVE_CATALOG,                                ""),
 
+  DDkwd__(HIVE_DATA_MOD_CHECK,                  "ON"),
+
   DDkwd__(HIVE_DEFAULT_CHARSET,            (char *)SQLCHARSETSTRING_UTF8),
   DD_____(HIVE_DEFAULT_SCHEMA,                  "HIVE"),
   DD_____(HIVE_FILE_CHARSET,                    ""),



[6/8] incubator-trafodion git commit: hive data modification detection: commit #3

Posted by an...@apache.org.
hive data modification detection: commit #3


Project: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/commit/b1a8f024
Tree: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/tree/b1a8f024
Diff: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/diff/b1a8f024

Branch: refs/heads/master
Commit: b1a8f024b5a3d4db368b51bbd2b6aac7035b9571
Parents: fa70e68
Author: Anoop Sharma <an...@esgyn.com>
Authored: Mon May 30 13:48:16 2016 +0000
Committer: Anoop Sharma <an...@esgyn.com>
Committed: Mon May 30 13:48:16 2016 +0000

----------------------------------------------------------------------
 core/sql/comexe/ComTdbFastTransport.cpp |   3 +-
 core/sql/comexe/ComTdbFastTransport.h   |   5 +-
 core/sql/executor/ExFastTransport.cpp   |  87 +++++++++--
 core/sql/executor/ExFastTransport.h     |   5 +
 core/sql/exp/ExpLOBaccess.cpp           | 155 +++++++++++++++++---
 core/sql/exp/ExpLOBaccess.h             |   4 +-
 core/sql/generator/GenFastTransport.cpp |  60 +++++---
 core/sql/optimizer/HDFSHook.cpp         |   3 +-
 core/sql/regress/hive/EXPECTED003       |   9 --
 core/sql/regress/hive/EXPECTED005       |   8 +-
 core/sql/regress/hive/EXPECTED006       |   3 -
 core/sql/regress/hive/EXPECTED018       | 208 +++++++++++++++------------
 core/sql/regress/hive/TEST003           |   3 -
 core/sql/regress/hive/TEST006           |   1 -
 core/sql/regress/hive/TEST018           |   4 +-
 15 files changed, 386 insertions(+), 172 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/b1a8f024/core/sql/comexe/ComTdbFastTransport.cpp
----------------------------------------------------------------------
diff --git a/core/sql/comexe/ComTdbFastTransport.cpp b/core/sql/comexe/ComTdbFastTransport.cpp
index d995fdb..f492e8f 100644
--- a/core/sql/comexe/ComTdbFastTransport.cpp
+++ b/core/sql/comexe/ComTdbFastTransport.cpp
@@ -98,7 +98,8 @@ ComTdbFastExtract::ComTdbFastExtract(
   hdfsIOBufferSize_(hdfBuffSize),
   hdfsReplication_(replication),
   ioTimeout_(ioTimeout),
-  childDataRowLen_(childDataRowLen)
+  childDataRowLen_(childDataRowLen),
+  modTSforDir_(-1)
 {
 
 }

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/b1a8f024/core/sql/comexe/ComTdbFastTransport.h
----------------------------------------------------------------------
diff --git a/core/sql/comexe/ComTdbFastTransport.h b/core/sql/comexe/ComTdbFastTransport.h
index 1ae7625..98e93eb 100644
--- a/core/sql/comexe/ComTdbFastTransport.h
+++ b/core/sql/comexe/ComTdbFastTransport.h
@@ -401,6 +401,8 @@ public:
     return childDataRowLen_;
   }
 
+  void setModTSforDir(Int64 v) { modTSforDir_ = v; }
+  Int64 getModTSforDir() const { return modTSforDir_; }
 
 protected:
   NABasicPtr   targetName_;                                  // 00 - 07
@@ -427,9 +429,10 @@ protected:
   UInt16       ioTimeout_;                                   // 128 - 129
   UInt16       filler_;                                      // 130 - 131
   UInt32       childDataRowLen_;                             // 132 - 135
+  Int64        modTSforDir_;                                 // 136 - 143
 
   // Make sure class size is a multiple of 8
-  char fillerComTdbFastTransport_[8];                        // 136 - 143
+  char fillerComTdbFastTransport_[8];                        // 144 - 151
 
 };
 

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/b1a8f024/core/sql/executor/ExFastTransport.cpp
----------------------------------------------------------------------
diff --git a/core/sql/executor/ExFastTransport.cpp b/core/sql/executor/ExFastTransport.cpp
index b881b14..45c3959 100644
--- a/core/sql/executor/ExFastTransport.cpp
+++ b/core/sql/executor/ExFastTransport.cpp
@@ -89,6 +89,7 @@ ExFastExtractTcb::ExFastExtractTcb(
   , sourceFieldsConvIndex_(NULL)
   , currBuffer_(NULL)
   , bufferAllocFailuresCount_(0)
+  , modTS_(-1)
 {
   
   ex_globals *stmtGlobals = getGlobals();
@@ -482,6 +483,16 @@ Lng32 ExHdfsFastExtractTcb::lobInterfaceCreate()
 
 }
 
+Lng32 ExHdfsFastExtractTcb::lobInterfaceDataModCheck()
+{
+  return ExpLOBinterfaceDataModCheck(lobGlob_,
+                                     targetLocation_,
+                                     hdfsHost_,
+                                     hdfsPort_,
+                                     myTdb().getModTSforDir(),
+                                     0);
+}
+
 
 Lng32 ExHdfsFastExtractTcb::lobInterfaceClose()
 {
@@ -530,13 +541,13 @@ Int32 ExHdfsFastExtractTcb::fixup()
 
   ex_tcb::fixup();
 
-
   if(!myTdb().getSkipWritingToFiles() &&
      !myTdb().getBypassLibhdfs())
 
     ExpLOBinterfaceInit
       (lobGlob_, getGlobals()->getDefaultHeap(),TRUE);
 
+  modTS_ = myTdb().getModTSforDir();
 
   return 0;
 }
@@ -681,9 +692,62 @@ ExWorkProcRetcode ExHdfsFastExtractTcb::work()
     {
     case EXTRACT_NOT_STARTED:
     {
+      pstate.step_= EXTRACT_CHECK_MOD_TS;
+    }
+    break;
+
+    case EXTRACT_CHECK_MOD_TS:
+    {
+      if ((! myTdb().getTargetFile()) ||
+          (myTdb().getModTSforDir() == -1))
+        {
+          pstate.step_ = EXTRACT_INITIALIZE;
+          break;
+        }
+
+      numBuffers_ = 0;
+
+      memset (hdfsHost_, '\0', sizeof(hdfsHost_));
+      strncpy(hdfsHost_, myTdb().getHdfsHostName(), sizeof(hdfsHost_));
+      hdfsPort_ = myTdb().getHdfsPortNum();
+      memset (fileName_, '\0', sizeof(fileName_));
+      memset (targetLocation_, '\0', sizeof(targetLocation_));
+      snprintf(targetLocation_,999, "%s", myTdb().getTargetName());
+
+      retcode = lobInterfaceDataModCheck();
+      if (retcode < 0)
+      {
+        Lng32 cliError = 0;
+        
+        Lng32 intParam1 = -retcode;
+        ComDiagsArea * diagsArea = NULL;
+        ExRaiseSqlError(getHeap(), &diagsArea, 
+                        (ExeErrorCode)(EXE_ERROR_FROM_LOB_INTERFACE),
+                        NULL, &intParam1, 
+                        &cliError, 
+                        NULL, 
+                        "HDFS",
+                        (char*)"ExpLOBInterfaceDataModCheck",
+                        getLobErrStr(intParam1));
+        pentry_down->setDiagsArea(diagsArea);
+        pstate.step_ = EXTRACT_ERROR;
+        break;
+      }
+      
+      if (retcode == 1) // check failed
+      {
+        ComDiagsArea * diagsArea = NULL;
+        ExRaiseSqlError(getHeap(), &diagsArea, 
+                        (ExeErrorCode)(8436));
+        pentry_down->setDiagsArea(diagsArea);
+        pstate.step_ = EXTRACT_ERROR;
+        break;
+      }
+      
       pstate.step_= EXTRACT_INITIALIZE;
     }
-    //  no break here
+    break;
+    
     case EXTRACT_INITIALIZE:
     {
       pstate.processingStarted_ = FALSE;
@@ -798,7 +862,7 @@ ExWorkProcRetcode ExHdfsFastExtractTcb::work()
               break;
             }
           }
-
+            
           if (feStats)
           {
             feStats->setPartitionNumber(fileNum);
@@ -1123,13 +1187,16 @@ ExWorkProcRetcode ExHdfsFastExtractTcb::work()
         }
         else  if (myTdb().getBypassLibhdfs())
         {
-          sfwRetCode = sequenceFileWriter_->hdfsClose();
-          if (!errorOccurred_ && sfwRetCode != SFW_OK )
-          {
-            createSequenceFileError(sfwRetCode);
-            pstate.step_ = EXTRACT_ERROR;
-            break;
-          }
+          if (sequenceFileWriter_)
+            {
+              sfwRetCode = sequenceFileWriter_->hdfsClose();
+              if (!errorOccurred_ && sfwRetCode != SFW_OK )
+                {
+                  createSequenceFileError(sfwRetCode);
+                  pstate.step_ = EXTRACT_ERROR;
+                  break;
+                }
+            }
         }
         else
         {

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/b1a8f024/core/sql/executor/ExFastTransport.h
----------------------------------------------------------------------
diff --git a/core/sql/executor/ExFastTransport.h b/core/sql/executor/ExFastTransport.h
index 2df9fe9..5de05aa 100644
--- a/core/sql/executor/ExFastTransport.h
+++ b/core/sql/executor/ExFastTransport.h
@@ -255,6 +255,7 @@ public:
   enum FastExtractStates
   {
     EXTRACT_NOT_STARTED = 0,
+    EXTRACT_CHECK_MOD_TS,
     EXTRACT_INITIALIZE,
     EXTRACT_PASS_REQUEST_TO_CHILD,
     EXTRACT_RETURN_ROWS_FROM_CHILD,
@@ -366,6 +367,9 @@ protected:
   time_t              tstart_;
 
   UInt32             bufferAllocFailuresCount_;
+
+  // modification timestamp of root dir location.
+  Int64              modTS_;
 }; // class ExFastExtractTcb
 /////////////////////////////////////////////////////
 
@@ -401,6 +405,7 @@ protected:
   Lng32 lobInterfaceInsert(ssize_t bytesToWrite);
   Lng32 lobInterfaceCreate();
   Lng32 lobInterfaceClose();
+  Lng32 lobInterfaceDataModCheck();
 
   virtual void insertUpQueueEntry(ex_queue::up_status status,
                           ComDiagsArea *diags,

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/b1a8f024/core/sql/exp/ExpLOBaccess.cpp
----------------------------------------------------------------------
diff --git a/core/sql/exp/ExpLOBaccess.cpp b/core/sql/exp/ExpLOBaccess.cpp
index 3ac537b..7db4a40 100644
--- a/core/sql/exp/ExpLOBaccess.cpp
+++ b/core/sql/exp/ExpLOBaccess.cpp
@@ -110,8 +110,7 @@ ExLob::~ExLob()
    
 }
 
-__thread hdfsFS *globalFS = NULL;
- 
+#ifdef __ignore
 Ex_Lob_Error ExLob::initialize(char *lobFile, Ex_Lob_Mode mode, 
                                char *dir, 
 			       LobsStorage storage,
@@ -147,20 +146,13 @@ Ex_Lob_Error ExLob::initialize(char *lobFile, Ex_Lob_Mode mode,
   hdfsServer_ = hdfsServer;
   hdfsPort_ = hdfsPort;
 
-  if (globalFS == NULL)
-    {
-      globalFS = new hdfsFS;
-      *globalFS = NULL;
-    }
-  
-  if (*globalFS == NULL)
+  if (fs_ == NULL)
     {
-      *globalFS = hdfsConnect(hdfsServer_, hdfsPort_);
-      if (*globalFS == NULL)
+      fs_ = hdfsConnect(hdfsServer_, hdfsPort_);
+      if (fs_ == NULL)
         return LOB_HDFS_CONNECT_ERROR;
     }
 
-  fs_ = *globalFS;
   if (lobGlobals)
     lobGlobals->setHdfsFs(fs_);
   
@@ -214,6 +206,103 @@ Ex_Lob_Error ExLob::initialize(char *lobFile, Ex_Lob_Mode mode,
   return LOB_OPER_OK;
     
 }
+#endif
+
+Ex_Lob_Error ExLob::initialize(char *lobFile, Ex_Lob_Mode mode, 
+                               char *dir, 
+			       LobsStorage storage,
+                               char *hdfsServer, Int64 hdfsPort,
+                               char *lobLocation,
+                               int bufferSize , short replication ,
+                               int blockSize, Int64 lobMaxSize, ExLobGlobals *lobGlobals)
+{
+  int openFlags;
+  mode_t filePerms = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH;
+  struct timespec startTime;
+  struct timespec endTime;
+  Int64 secs, nsecs, totalnsecs;
+ 
+  if (dir) 
+    {
+      if (dir_.empty()) 
+	{
+	  dir_ = string(dir);
+	}
+
+      if (lobFile)
+        snprintf(lobDataFile_, MAX_LOB_FILE_NAME_LEN, "%s/%s", dir_.c_str(), 
+                 lobFile);
+      
+    } 
+  else 
+    { 
+      if (lobFile)
+        snprintf(lobDataFile_, MAX_LOB_FILE_NAME_LEN, "%s", lobFile);
+      
+    }
+
+  if (storage_ != Lob_Invalid_Storage) 
+    {
+      return LOB_INIT_ERROR;
+    } else 
+    {
+      storage_ = storage;
+    }
+
+  stats_.init(); 
+
+  hdfsServer_ = hdfsServer;
+  hdfsPort_ = hdfsPort;
+  lobLocation_ = lobLocation;
+  clock_gettime(CLOCK_MONOTONIC, &startTime);
+
+  if (lobGlobals->getHdfsFs() == NULL)
+    {
+      fs_ = hdfsConnect(hdfsServer_, hdfsPort_);
+      if (fs_ == NULL) 
+	return LOB_HDFS_CONNECT_ERROR;
+      lobGlobals->setHdfsFs(fs_);
+    } 
+  else 
+    {
+      fs_ = lobGlobals->getHdfsFs();
+    }
+
+  clock_gettime(CLOCK_MONOTONIC, &endTime);
+
+  secs = endTime.tv_sec - startTime.tv_sec;
+  nsecs = endTime.tv_nsec - startTime.tv_nsec;
+  if (nsecs < 0) 
+    {
+      secs--;
+      nsecs += NUM_NSECS_IN_SEC;
+    }
+  totalnsecs = (secs * NUM_NSECS_IN_SEC) + nsecs;
+  stats_.hdfsConnectionTime += totalnsecs;
+    
+  if (mode == EX_LOB_CREATE) 
+    { 
+      // check if file is already created
+      hdfsFileInfo *fInfo = hdfsGetPathInfo(fs_, lobDataFile_);
+      if (fInfo != NULL) 
+	{
+	  hdfsFreeFileInfo(fInfo, 1);
+	  return LOB_DATA_FILE_CREATE_ERROR;
+	} 
+      openFlags = O_WRONLY | O_CREAT;   
+      fdData_ = hdfsOpenFile(fs_, lobDataFile_, openFlags, bufferSize, replication, blockSize);
+      if (!fdData_) 
+	{
+          return LOB_DATA_FILE_CREATE_ERROR;
+	}
+      hdfsCloseFile(fs_, fdData_);
+      fdData_ = NULL;
+     
+    }
+  lobGlobalHeap_ = lobGlobals->getHeap();    
+  return LOB_OPER_OK;
+    
+}
 
 Ex_Lob_Error ExLob::fetchCursor(char *handleIn, Int32 handleLenIn, Int64 &outOffset, Int64 &outSize,NABoolean &isEOD, Int64 transId) 
 {
@@ -443,13 +532,24 @@ Ex_Lob_Error ExLob::dataModCheck2(
 Ex_Lob_Error ExLob::dataModCheck(
        char * dirPath, 
        Int64  inputModTS,
-       Lng32  numOfPartLevels)
+       Lng32  numOfPartLevels,
+       ExLobGlobals *lobGlobals)
 {
   // find mod time of root dir
   hdfsFileInfo *fileInfos = hdfsGetPathInfo(fs_, dirPath);
   if (fileInfos == NULL)
     {
-      return LOB_DATA_FILE_NOT_FOUND_ERROR;
+      hdfsDisconnect(fs_);
+      fs_ = hdfsConnect(hdfsServer_, hdfsPort_);
+      if (fs_ == NULL)
+        return LOB_HDFS_CONNECT_ERROR;
+
+      fileInfos = hdfsGetPathInfo(fs_, dirPath);
+      if (fileInfos == NULL)
+        return LOB_DIR_NAME_ERROR;
+
+      if (lobGlobals)
+        lobGlobals->setHdfsFs(fs_);
     }
 
   Int64 currModTS = fileInfos[0].mLastMod;
@@ -471,12 +571,18 @@ Ex_Lob_Error ExLob::emptyDirectory()
     Ex_Lob_Error err;
 
     int numExistingFiles=0;
-    hdfsFileInfo *fileInfos = hdfsListDirectory(fs_, lobDataFile_, &numExistingFiles);
+    hdfsFileInfo *fileInfos = hdfsGetPathInfo(fs_, lobDataFile_);
     if (fileInfos == NULL)
-    {
-       return LOB_DATA_FILE_NOT_FOUND_ERROR; //here a directory
-    }
+      {
+        return LOB_DATA_FILE_NOT_FOUND_ERROR; //here a directory
+      }
 
+    fileInfos = hdfsListDirectory(fs_, lobDataFile_, &numExistingFiles);
+    if (fileInfos == NULL)
+      {
+        return LOB_OPER_OK;
+      }
+    
     for (int i = 0; i < numExistingFiles; i++) 
     {
 #ifdef USE_HADOOP_1
@@ -2167,7 +2273,8 @@ Ex_Lob_Error ExLobsOper (
 
   if (globPtr == NULL)
     {
-      if (operation == Lob_Init)
+      if ((operation == Lob_Init) ||
+          (operation == Lob_Data_Mod_Check))
 	{
 	  globPtr = (void *) new ExLobGlobals();
 	  if (globPtr == NULL) 
@@ -2176,14 +2283,16 @@ Ex_Lob_Error ExLobsOper (
 	  lobGlobals = (ExLobGlobals *)globPtr;
 
 	  err = lobGlobals->initialize(); 
-	  return err;
+          if (err != LOB_OPER_OK)
+            return err;
 	}
       else
 	{
 	  return LOB_GLOB_PTR_ERROR;
 	}
     }
-  else
+
+  if ((globPtr != NULL) && (operation != Lob_Init))
     {
       lobGlobals = (ExLobGlobals *)globPtr;
 
@@ -2236,6 +2345,7 @@ Ex_Lob_Error ExLobsOper (
   */
   switch(operation)
     {
+    case Lob_Init:
     case Lob_Create:
       break;
 
@@ -2425,7 +2535,8 @@ Ex_Lob_Error ExLobsOper (
         Int64 inputModTS = *(Int64*)blackBox;
         Int32 inputNumOfPartLevels = 
           *(Lng32*)&((char*)blackBox)[sizeof(inputModTS)];
-        err = lobPtr->dataModCheck(dir, inputModTS, inputNumOfPartLevels);
+        err = lobPtr->dataModCheck(dir, inputModTS, inputNumOfPartLevels,
+                                   lobGlobals);
       }
       break;
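
Commit #3 also hardens dataModCheck() against a stale cached hdfsFS handle: if hdfsGetPathInfo() fails, it disconnects, reconnects to the namenode, retries the lookup once, and republishes the new handle via lobGlobals->setHdfsFs(). A minimal sketch of that retry, assuming libhdfs; retryGetPathInfo() is an illustrative helper and lobGlobals is not modelled here:

#include "hdfs.h"

// retryGetPathInfo() is an illustrative helper; hdfsConnect/hdfsGetPathInfo
// are real libhdfs calls. On success the (possibly reconnected) handle is
// returned to the caller through fs.
static hdfsFileInfo *retryGetPathInfo(hdfsFS &fs, const char *host, tPort port,
                                      const char *dirPath)
{
  hdfsFileInfo *info = hdfsGetPathInfo(fs, dirPath);
  if (info != NULL)
    return info;

  // First attempt failed: the cached connection may be stale. Reconnect
  // once and retry, as ExLob::dataModCheck() now does.
  hdfsDisconnect(fs);
  fs = hdfsConnect(host, port);
  if (fs == NULL)
    return NULL;                         // LOB_HDFS_CONNECT_ERROR in the commit
  return hdfsGetPathInfo(fs, dirPath);   // NULL here maps to LOB_DIR_NAME_ERROR
}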
 

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/b1a8f024/core/sql/exp/ExpLOBaccess.h
----------------------------------------------------------------------
diff --git a/core/sql/exp/ExpLOBaccess.h b/core/sql/exp/ExpLOBaccess.h
index 518fbd7..416529d 100644
--- a/core/sql/exp/ExpLOBaccess.h
+++ b/core/sql/exp/ExpLOBaccess.h
@@ -489,7 +489,9 @@ class ExLob
   Ex_Lob_Error dataModCheck(
        char * dirPath, 
        Int64  modTS,
-       Lng32  numOfPartLevels);
+       Lng32  numOfPartLevels,
+       ExLobGlobals *lobGlobals);
+
   Ex_Lob_Error dataModCheck2(
        char * dirPath, 
        Int64  modTS,

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/b1a8f024/core/sql/generator/GenFastTransport.cpp
----------------------------------------------------------------------
diff --git a/core/sql/generator/GenFastTransport.cpp b/core/sql/generator/GenFastTransport.cpp
index 953176b..7e1ee3e 100644
--- a/core/sql/generator/GenFastTransport.cpp
+++ b/core/sql/generator/GenFastTransport.cpp
@@ -562,6 +562,18 @@ PhysicalFastExtract::codeGen(Generator *generator)
     newRecordSep[1] = '\0';
   }
 
+  Int64 modTS = -1;
+  if ((CmpCommon::getDefault(HIVE_DATA_MOD_CHECK) == DF_ON) &&
+      (isHiveInsert()) &&
+      (getHiveTableDesc() && getHiveTableDesc()->getNATable() && 
+       getHiveTableDesc()->getNATable()->getClusteringIndex()))
+    {
+      const HHDFSTableStats* hTabStats = 
+        getHiveTableDesc()->getNATable()->getClusteringIndex()->getHHDFSTableStats();
+
+      modTS = hTabStats->getModificationTS();
+    }
+
   targetName = AllocStringInSpace(*space, (char *)getTargetName().data());
   hdfsHostName = AllocStringInSpace(*space, (char *)getHdfsHostName().data());
   hiveTableName = AllocStringInSpace(*space, (char *)getHiveTableName().data());
@@ -570,27 +582,27 @@ PhysicalFastExtract::codeGen(Generator *generator)
   recordSeparator = AllocStringInSpace(*space, newRecordSep);
   nullString = AllocStringInSpace(*space, (char *)getNullString().data());
 
-   result = ft_codegen(generator,
-                       *this,              // RelExpr &relExpr
-                       newTdb,             // ComTdbUdr *&newTdb
-                       estimatedRowCount,
-                       targetName,
-                       hdfsHostName,
-                       hdfsPortNum,
-                       hiveTableName,
-                       delimiter,
-                       header,
-                       nullString,
-                       recordSeparator,
-                       downQueueMaxSize,
-                       upQueueMaxSize,
-                       outputBufferSize,
-                       requestBufferSize,
-                       replyBufferSize,
-                       numOutputBuffers,
-                       childTdb,
-                       isSequenceFile());
-
+  result = ft_codegen(generator,
+                      *this,              // RelExpr &relExpr
+                      newTdb,             // ComTdbUdr *&newTdb
+                      estimatedRowCount,
+                      targetName,
+                      hdfsHostName,
+                      hdfsPortNum,
+                      hiveTableName,
+                      delimiter,
+                      header,
+                      nullString,
+                      recordSeparator,
+                      downQueueMaxSize,
+                      upQueueMaxSize,
+                      outputBufferSize,
+                      requestBufferSize,
+                      replyBufferSize,
+                      numOutputBuffers,
+                      childTdb,
+                      isSequenceFile());
+  
   if (!generator->explainDisabled())
   {
     generator->setExplainTuple(addExplainInfo(newTdb, firstExplainTuple, 0, generator));
@@ -626,8 +638,10 @@ PhysicalFastExtract::codeGen(Generator *generator)
     else
     GenAssert(0, "Unexpected Fast Extract compression type")
   }
-     if((ActiveSchemaDB()->getDefaults()).getToken(FAST_EXTRACT_DIAGS) == DF_ON)
-    	 newTdb->setPrintDiags(1);
+  if((ActiveSchemaDB()->getDefaults()).getToken(FAST_EXTRACT_DIAGS) == DF_ON)
+    newTdb->setPrintDiags(1);
+
+  newTdb->setModTSforDir(modTS);
 
   return result;
 }

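The net effect of the generator change above: at compile time the Hive table's directory timestamp is read from the cached HHDFSTableStats and frozen into the TDB via setModTSforDir(), and at run time the operator compares it against the live directory so a mismatch can surface as error 8436 and be retried. A toy model of that compile-then-verify-then-retry pattern follows, with invented CompiledPlan/runPlan stand-ins rather than the real TDB and executor classes.

  // compile_then_verify.cpp -- toy model: record a directory timestamp at
  // compile time, re-check it at run time, recompile and retry on mismatch.
  #include <cstdint>
  #include <iostream>
  #include <string>

  struct CompiledPlan {
    std::string dir;
    int64_t modTSforDir;       // analogous to setModTSforDir() on the TDB
  };

  // Stand-in for asking HDFS for the directory's current mtime.
  static int64_t currentDirModTS(const std::string &, int64_t fakeClock)
  { return fakeClock; }

  static CompiledPlan compilePlan(const std::string &dir, int64_t fakeClock)
  { return CompiledPlan{dir, currentDirModTS(dir, fakeClock)}; }

  // 0 on success, 8436 when the directory changed underneath the plan.
  static int runPlan(const CompiledPlan &p, int64_t fakeClock)
  {
    if (currentDirModTS(p.dir, fakeClock) != p.modTSforDir)
      return 8436;             // compile time / runtime mismatch
    std::cout << "ran " << p.dir << " against a consistent snapshot\n";
    return 0;
  }

  int main()
  {
    int64_t clock = 100;                      // pretend mtime
    CompiledPlan plan = compilePlan("/hive/t1", clock);

    clock = 250;                              // someone rewrote the table
    if (runPlan(plan, clock) == 8436) {       // AQR-style single retry
      std::cout << "mismatch: refresh metadata, recompile, retry\n";
      plan = compilePlan("/hive/t1", clock);
      runPlan(plan, clock);
    }
    return 0;
  }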
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/b1a8f024/core/sql/optimizer/HDFSHook.cpp
----------------------------------------------------------------------
diff --git a/core/sql/optimizer/HDFSHook.cpp b/core/sql/optimizer/HDFSHook.cpp
index 4d3f9b6..fda6611 100644
--- a/core/sql/optimizer/HDFSHook.cpp
+++ b/core/sql/optimizer/HDFSHook.cpp
@@ -1070,7 +1070,8 @@ void HHDFSTableStats::print(FILE *ofd)
   fprintf(ofd,"====================================================================\n");
 }
 
-extern __thread hdfsFS *globalFS;
+//extern __thread hdfsFS *globalFS;
+hdfsFS *globalFS;
 
 NABoolean HHDFSTableStats::connectHDFS(const NAString &host, Int32 port)
 {

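The HDFSHook change above drops the thread-local extern declaration of globalFS in favor of a single process-wide definition. A minimal sketch of one common way such a shared handle is managed, assuming (as the libhdfs documentation states) that the client handle may be shared across threads; this is not the actual HHDFSTableStats::connectHDFS code.

  // shared_fs_handle.cpp -- sketch of a lazily created, process-wide hdfsFS
  // replacing a per-thread handle; requires libhdfs.
  #include <hdfs.h>
  #include <mutex>

  static hdfsFS globalFS = NULL;       // one connection shared by all threads
  static std::once_flag fsOnce;

  // Connect on first use; later callers (whatever host/port they pass) get
  // the handle created by the first caller.
  hdfsFS getGlobalFS(const char *host, tPort port)
  {
    std::call_once(fsOnce, [&]() { globalFS = hdfsConnect(host, port); });
    return globalFS;
  }

  int main()
  {
    return getGlobalFS("default", 0) != NULL ? 0 : 1;
  }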
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/b1a8f024/core/sql/regress/hive/EXPECTED003
----------------------------------------------------------------------
diff --git a/core/sql/regress/hive/EXPECTED003 b/core/sql/regress/hive/EXPECTED003
index 46aede2..79cdb5a 100644
--- a/core/sql/regress/hive/EXPECTED003
+++ b/core/sql/regress/hive/EXPECTED003
@@ -5,9 +5,6 @@
 >>cqd attempt_esp_parallelism 'off';
 
 --- SQL operation complete.
->>cqd AUTO_QUERY_RETRY 'OFF';
-
---- SQL operation complete.
 >>cqd hive_max_esps  '1';
 
 --- SQL operation complete.
@@ -152,9 +149,6 @@ P_PROMO_SK   P_PROMO_ID                 P_START_DATE_SK  P_END_DATE_SK  P_ITEM_S
 >>
 >>--try new HIVE SYNTAX
 >>--------------
->>cqd query_cache '0';
-
---- SQL operation complete.
 >>insert into TABLE hive.ins_customer_address select * from hive.customer_address;
 
 --- 50000 row(s) inserted.
@@ -529,9 +523,6 @@ T_TIME_SK    T_TIME_ID                  T_TIME       T_HOUR       T_MINUTE     T
 >>cqd HIVE_MAX_STRING_LENGTH '25' ;
 
 --- SQL operation complete.
->>cqd query_cache '0';
-
---- SQL operation complete.
 >>control query shape esp_exchange(cut);
 
 --- SQL operation complete.

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/b1a8f024/core/sql/regress/hive/EXPECTED005
----------------------------------------------------------------------
diff --git a/core/sql/regress/hive/EXPECTED005 b/core/sql/regress/hive/EXPECTED005
index 3c286d6..8e26184 100644
--- a/core/sql/regress/hive/EXPECTED005
+++ b/core/sql/regress/hive/EXPECTED005
@@ -226,9 +226,9 @@ xyz
 C_PREFERRED_CUST_FLAG      (EXPR)              
 -------------------------  --------------------
 
-                                          19631
+                                           1384
+N                                         19631
 Y                                         18984
-?                                          1384
 
 --- 3 row(s) selected.
 >>execute s3;
@@ -316,9 +316,9 @@ A            B
 C_PREFERRED_CUST_FLAG      (EXPR)              
 -------------------------  --------------------
 
-                                          19631
+                                           1384
+N                                         19631
 Y                                         18984
-?                                          1384
 
 --- 3 row(s) selected.
 >>execute s4;

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/b1a8f024/core/sql/regress/hive/EXPECTED006
----------------------------------------------------------------------
diff --git a/core/sql/regress/hive/EXPECTED006 b/core/sql/regress/hive/EXPECTED006
index 645cca9..87f630e 100644
--- a/core/sql/regress/hive/EXPECTED006
+++ b/core/sql/regress/hive/EXPECTED006
@@ -4,9 +4,6 @@
 --- SQL operation complete.
 >>set terminal_charset utf8;
 >>
->>cqd AUTO_QUERY_RETRY 'OFF';
-
---- SQL operation complete.
 >>cqd HIVE_MAX_STRING_LENGTH '25' ;
 
 --- SQL operation complete.

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/b1a8f024/core/sql/regress/hive/EXPECTED018
----------------------------------------------------------------------
diff --git a/core/sql/regress/hive/EXPECTED018 b/core/sql/regress/hive/EXPECTED018
index 01f4e0b..2c34c31 100644
--- a/core/sql/regress/hive/EXPECTED018
+++ b/core/sql/regress/hive/EXPECTED018
@@ -150,9 +150,9 @@ Task:  CLEANUP         Status: Started    Object: TRAFODION.HBASE.CUSTOMER_ADDRE
 Task:  CLEANUP         Status: Ended      Object: TRAFODION.HBASE.CUSTOMER_ADDRESS
 Task:  PREPARATION     Status: Started    Object: TRAFODION.HBASE.CUSTOMER_ADDRESS
        Rows Processed: 50000 
-Task:  PREPARATION     Status: Ended      ET: 00:00:09.173
+Task:  PREPARATION     Status: Ended      ET: 00:00:06.864
 Task:  COMPLETION      Status: Started    Object: TRAFODION.HBASE.CUSTOMER_ADDRESS
-Task:  COMPLETION      Status: Ended      ET: 00:00:00.304
+Task:  COMPLETION      Status: Ended      ET: 00:00:00.262
 
 --- 50000 row(s) loaded.
 >>--
@@ -181,9 +181,9 @@ Task:  CLEANUP         Status: Started    Object: TRAFODION.HBASE.CUSTOMER_DEMOG
 Task:  CLEANUP         Status: Ended      Object: TRAFODION.HBASE.CUSTOMER_DEMOGRAPHICS
 Task:  PREPARATION     Status: Started    Object: TRAFODION.HBASE.CUSTOMER_DEMOGRAPHICS
        Rows Processed: 20000 
-Task:  PREPARATION     Status: Ended      ET: 00:00:11.448
+Task:  PREPARATION     Status: Ended      ET: 00:00:10.758
 Task:  COMPLETION      Status: Started    Object: TRAFODION.HBASE.CUSTOMER_DEMOGRAPHICS
-Task:  COMPLETION      Status: Ended      ET: 00:00:00.275
+Task:  COMPLETION      Status: Ended      ET: 00:00:00.234
 
 --- 20000 row(s) loaded.
 >>--
@@ -213,9 +213,9 @@ Task:  CLEANUP         Status: Started    Object: TRAFODION.HBASE.CUSTOMER_DEMOG
 Task:  CLEANUP         Status: Ended      Object: TRAFODION.HBASE.CUSTOMER_DEMOGRAPHICS_SALT
 Task:  PREPARATION     Status: Started    Object: TRAFODION.HBASE.CUSTOMER_DEMOGRAPHICS_SALT
        Rows Processed: 20000 
-Task:  PREPARATION     Status: Ended      ET: 00:00:07.319
+Task:  PREPARATION     Status: Ended      ET: 00:00:09.668
 Task:  COMPLETION      Status: Started    Object: TRAFODION.HBASE.CUSTOMER_DEMOGRAPHICS_SALT
-Task:  COMPLETION      Status: Ended      ET: 00:00:00.198
+Task:  COMPLETION      Status: Ended      ET: 00:00:00.208
 
 --- 20000 row(s) loaded.
 >>--                                                                              
@@ -235,9 +235,9 @@ Task:  CLEANUP         Status: Started    Object: TRAFODION.HBASE.CUSTOMER_SALT
 Task:  CLEANUP         Status: Ended      Object: TRAFODION.HBASE.CUSTOMER_SALT
 Task:  PREPARATION     Status: Started    Object: TRAFODION.HBASE.CUSTOMER_SALT
        Rows Processed: 100000 
-Task:  PREPARATION     Status: Ended      ET: 00:00:08.340
+Task:  PREPARATION     Status: Ended      ET: 00:00:07.549
 Task:  COMPLETION      Status: Started    Object: TRAFODION.HBASE.CUSTOMER_SALT
-Task:  COMPLETION      Status: Ended      ET: 00:00:00.212
+Task:  COMPLETION      Status: Ended      ET: 00:00:00.167
 
 --- 100000 row(s) loaded.
 >>--
@@ -266,9 +266,9 @@ Task:  CLEANUP         Status: Started    Object: TRAFODION.HBASE.STORE_SALES_SA
 Task:  CLEANUP         Status: Ended      Object: TRAFODION.HBASE.STORE_SALES_SALT
 Task:  PREPARATION     Status: Started    Object: TRAFODION.HBASE.STORE_SALES_SALT
        Rows Processed: 160756 
-Task:  PREPARATION     Status: Ended      ET: 00:00:10.675
+Task:  PREPARATION     Status: Ended      ET: 00:00:11.382
 Task:  COMPLETION      Status: Started    Object: TRAFODION.HBASE.STORE_SALES_SALT
-Task:  COMPLETION      Status: Ended      ET: 00:00:00.256
+Task:  COMPLETION      Status: Ended      ET: 00:00:00.180
 
 --- 160756 row(s) loaded.
 >>--
@@ -288,6 +288,10 @@ Task:  COMPLETION      Status: Ended      ET: 00:00:00.256
 >>-- using insert
 >>insert overwrite table hive.hive.null_format_default select * from null_format_src;
 
+*** WARNING[8597] Statement was automatically retried 1 time(s). Delay before each retry was 0 seconds. See next entry for the error that caused this retry.
+
+*** WARNING[8436] Mismatch detected between compiletime and runtime hive table definitions.
+
 --- 10 row(s) inserted.
 >>select * from hive.hive.null_format_default;
 
@@ -309,6 +313,10 @@ a
 >>
 >>insert overwrite table hive.hive.null_format_empty select * from null_format_src;
 
+*** WARNING[8597] Statement was automatically retried 1 time(s). Delay before each retry was 0 seconds. See next entry for the error that caused this retry.
+
+*** WARNING[8436] Mismatch detected between compiletime and runtime hive table definitions.
+
 --- 10 row(s) inserted.
 >>select * from hive.hive.null_format_empty;
 
@@ -330,6 +338,10 @@ a                                                             ?
 >>
 >>insert overwrite table hive.hive.null_format_colon select * from null_format_src;
 
+*** WARNING[8597] Statement was automatically retried 1 time(s). Delay before each retry was 0 seconds. See next entry for the error that caused this retry.
+
+*** WARNING[8436] Mismatch detected between compiletime and runtime hive table definitions.
+
 --- 10 row(s) inserted.
 >>select * from hive.hive.null_format_colon;
 
@@ -356,10 +368,10 @@ a
 +>   select * from null_format_src;
 Task: UNLOAD           Status: Started
 Task:  EMPTY TARGET    Status: Started
-Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.006
+Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.005
 Task:  EXTRACT         Status: Started
        Rows Processed: 10 
-Task:  EXTRACT         Status: Ended      ET: 00:00:00.238
+Task:  EXTRACT         Status: Ended      ET: 00:00:00.232
 
 --- 10 row(s) unloaded.
 >>select * from hive.hive.null_format_default;
@@ -385,10 +397,10 @@ a
 +>   select * from null_format_src;
 Task: UNLOAD           Status: Started
 Task:  EMPTY TARGET    Status: Started
-Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.006
+Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.005
 Task:  EXTRACT         Status: Started
        Rows Processed: 10 
-Task:  EXTRACT         Status: Ended      ET: 00:00:00.246
+Task:  EXTRACT         Status: Ended      ET: 00:00:00.218
 
 --- 10 row(s) unloaded.
 >>select * from hive.hive.null_format_empty;
@@ -414,10 +426,10 @@ a                                                             ?
 +>   select * from null_format_src;
 Task: UNLOAD           Status: Started
 Task:  EMPTY TARGET    Status: Started
-Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.009
+Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.006
 Task:  EXTRACT         Status: Started
        Rows Processed: 10 
-Task:  EXTRACT         Status: Ended      ET: 00:00:00.222
+Task:  EXTRACT         Status: Ended      ET: 00:00:00.213
 
 --- 10 row(s) unloaded.
 >>select * from hive.hive.null_format_colon;
@@ -471,12 +483,12 @@ LC   RC   OP   OPERATOR              OPT       DESCRIPTION           CARD
 +>;
 Task: UNLOAD           Status: Started
 Task:  EMPTY TARGET    Status: Started
-Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.011
+Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.007
 Task:  EXTRACT         Status: Started
        Rows Processed: 50000 
-Task:  EXTRACT         Status: Ended      ET: 00:00:02.786
+Task:  EXTRACT         Status: Ended      ET: 00:00:03.805
 Task:  MERGE FILES     Status: Started
-Task:  MERGE FILES     Status: Ended      ET: 00:00:00.036
+Task:  MERGE FILES     Status: Ended      ET: 00:00:00.034
 
 --- 50000 row(s) unloaded.
 >>log;
@@ -509,12 +521,12 @@ LC   RC   OP   OPERATOR              OPT       DESCRIPTION           CARD
 +><<+ cardinality 10e10 >>;
 Task: UNLOAD           Status: Started
 Task:  EMPTY TARGET    Status: Started
-Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.010
+Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.005
 Task:  EXTRACT         Status: Started
        Rows Processed: 20000 
-Task:  EXTRACT         Status: Ended      ET: 00:00:00.868
+Task:  EXTRACT         Status: Ended      ET: 00:00:00.865
 Task:  MERGE FILES     Status: Started
-Task:  MERGE FILES     Status: Ended      ET: 00:00:00.031
+Task:  MERGE FILES     Status: Ended      ET: 00:00:00.023
 
 --- 20000 row(s) unloaded.
 >>log;
@@ -533,12 +545,12 @@ cat /tmp/merged_customer_demogs | wc -l
 +><<+ cardinality 10e10 >>;
 Task: UNLOAD           Status: Started
 Task:  EMPTY TARGET    Status: Started
-Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.008
+Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.006
 Task:  EXTRACT         Status: Started
        Rows Processed: 20000 
-Task:  EXTRACT         Status: Ended      ET: 00:00:00.837
+Task:  EXTRACT         Status: Ended      ET: 00:00:00.766
 Task:  MERGE FILES     Status: Started
-Task:  MERGE FILES     Status: Ended      ET: 00:00:00.029
+Task:  MERGE FILES     Status: Ended      ET: 00:00:00.023
 
 --- 20000 row(s) unloaded.
 >>log;
@@ -570,12 +582,12 @@ LC   RC   OP   OPERATOR              OPT       DESCRIPTION           CARD
 +><<+ cardinality 10e10 >>;
 Task: UNLOAD           Status: Started
 Task:  EMPTY TARGET    Status: Started
-Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.007
+Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.005
 Task:  EXTRACT         Status: Started
        Rows Processed: 20000 
-Task:  EXTRACT         Status: Ended      ET: 00:00:01.024
+Task:  EXTRACT         Status: Ended      ET: 00:00:00.855
 Task:  MERGE FILES     Status: Started
-Task:  MERGE FILES     Status: Ended      ET: 00:00:00.041
+Task:  MERGE FILES     Status: Ended      ET: 00:00:00.032
 
 --- 20000 row(s) unloaded.
 >>
@@ -593,12 +605,12 @@ regrhadoop.ksh fs -du -s /bulkload/customer_demographics_salt/merged_customer_de
 +><<+ cardinality 10e10 >>;
 Task: UNLOAD           Status: Started
 Task:  EMPTY TARGET    Status: Started
-Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.005
+Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.004
 Task:  EXTRACT         Status: Started
        Rows Processed: 20000 
-Task:  EXTRACT         Status: Ended      ET: 00:00:00.722
+Task:  EXTRACT         Status: Ended      ET: 00:00:00.692
 Task:  MERGE FILES     Status: Started
-Task:  MERGE FILES     Status: Ended      ET: 00:00:00.048
+Task:  MERGE FILES     Status: Ended      ET: 00:00:00.031
 
 --- 20000 row(s) unloaded.
 >>
@@ -631,10 +643,10 @@ LC   RC   OP   OPERATOR              OPT       DESCRIPTION           CARD
 +><<+ cardinality 10e10 >>;
 Task: UNLOAD           Status: Started
 Task:  EMPTY TARGET    Status: Started
-Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.013
+Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.005
 Task:  EXTRACT         Status: Started
        Rows Processed: 20000 
-Task:  EXTRACT         Status: Ended      ET: 00:00:00.591
+Task:  EXTRACT         Status: Ended      ET: 00:00:00.484
 
 --- 20000 row(s) unloaded.
 >>
@@ -654,12 +666,12 @@ regrhadoop.ksh fs -ls /bulkload/customer_demographics_salt/file* |  grep file |
 +><<+ cardinality 10e10 >>;
 Task: UNLOAD           Status: Started
 Task:  EMPTY TARGET    Status: Started
-Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.012
+Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.010
 Task:  EXTRACT         Status: Started
        Rows Processed: 20000 
-Task:  EXTRACT         Status: Ended      ET: 00:00:00.672
+Task:  EXTRACT         Status: Ended      ET: 00:00:00.580
 Task:  MERGE FILES     Status: Started
-Task:  MERGE FILES     Status: Ended      ET: 00:00:00.040
+Task:  MERGE FILES     Status: Ended      ET: 00:00:00.038
 
 --- 20000 row(s) unloaded.
 >>
@@ -792,12 +804,12 @@ CD_DEMO_SK   CD_GENDER
 +><<+ cardinality 10e10 >>;
 Task: UNLOAD           Status: Started
 Task:  EMPTY TARGET    Status: Started
-Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.005
+Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.004
 Task:  EXTRACT         Status: Started
        Rows Processed: 20000 
-Task:  EXTRACT         Status: Ended      ET: 00:00:00.616
+Task:  EXTRACT         Status: Ended      ET: 00:00:00.487
 Task:  MERGE FILES     Status: Started
-Task:  MERGE FILES     Status: Ended      ET: 00:00:00.055
+Task:  MERGE FILES     Status: Ended      ET: 00:00:00.030
 
 --- 20000 row(s) unloaded.
 >>log;
@@ -835,9 +847,9 @@ Task:  EMPTY TARGET    Status: Started
 Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.005
 Task:  EXTRACT         Status: Started
        Rows Processed: 20000 
-Task:  EXTRACT         Status: Ended      ET: 00:00:00.562
+Task:  EXTRACT         Status: Ended      ET: 00:00:00.492
 Task:  MERGE FILES     Status: Started
-Task:  MERGE FILES     Status: Ended      ET: 00:00:00.046
+Task:  MERGE FILES     Status: Ended      ET: 00:00:00.033
 
 --- 20000 row(s) unloaded.
 >>
@@ -887,15 +899,19 @@ CD_DEMO_SK   CD_GENDER
 +><<+ cardinality 10e10 >>;
 Task: UNLOAD           Status: Started
 Task:  EMPTY TARGET    Status: Started
-Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.005
+Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.004
 Task:  EXTRACT         Status: Started
        Rows Processed: 20000 
-Task:  EXTRACT         Status: Ended      ET: 00:00:00.480
+Task:  EXTRACT         Status: Ended      ET: 00:00:00.980
 
 --- 20000 row(s) unloaded.
 >>
 >>select count(*) from hive.hive.unload_customer_demographics;
 
+*** WARNING[8597] Statement was automatically retried 1 time(s). Delay before each retry was 0 seconds. See next entry for the error that caused this retry.
+
+*** WARNING[8436] Mismatch detected between compiletime and runtime hive table definitions.
+
 (EXPR)              
 --------------------
 
@@ -937,10 +953,10 @@ CD_DEMO_SK   CD_GENDER
 +>select * from trafodion.hbase.customer_address ;
 Task: UNLOAD           Status: Started
 Task:  EMPTY TARGET    Status: Started
-Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.005
+Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.007
 Task:  EXTRACT         Status: Started
        Rows Processed: 50000 
-Task:  EXTRACT         Status: Ended      ET: 00:00:01.946
+Task:  EXTRACT         Status: Ended      ET: 00:00:02.111
 
 --- 50000 row(s) unloaded.
 >>
@@ -990,15 +1006,19 @@ CA_ADDRESS_SK  CA_ADDRESS_ID
 +>select * from trafodion.hbase.customer_address ;
 Task: UNLOAD           Status: Started
 Task:  EMPTY TARGET    Status: Started
-Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.006
+Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.003
 Task:  EXTRACT         Status: Started
        Rows Processed: 50000 
-Task:  EXTRACT         Status: Ended      ET: 00:00:01.891
+Task:  EXTRACT         Status: Ended      ET: 00:00:02.075
 
 --- 50000 row(s) unloaded.
 >>
 >>select count(*) from hive.hive.unload_customer_address;
 
+*** WARNING[8597] Statement was automatically retried 1 time(s). Delay before each retry was 0 seconds. See next entry for the error that caused this retry.
+
+*** WARNING[8436] Mismatch detected between compiletime and runtime hive table definitions.
+
 (EXPR)              
 --------------------
 
@@ -1052,10 +1072,10 @@ CA_ADDRESS_SK  CA_ADDRESS_ID
 +>select * from trafodion.hbase.customer_salt;
 Task: UNLOAD           Status: Started
 Task:  EMPTY TARGET    Status: Started
-Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.006
+Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.005
 Task:  EXTRACT         Status: Started
        Rows Processed: 100000 
-Task:  EXTRACT         Status: Ended      ET: 00:00:06.124
+Task:  EXTRACT         Status: Ended      ET: 00:00:06.402
 
 --- 100000 row(s) unloaded.
 >>select count(*) from hive.hive.unload_customer;
@@ -1105,10 +1125,10 @@ C_CUSTOMER_SK  C_CUSTOMER_ID
 +>select * from trafodion.hbase.customer_demographics_salt;
 Task: UNLOAD           Status: Started
 Task:  EMPTY TARGET    Status: Started
-Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.144
+Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.008
 Task:  EXTRACT         Status: Started
        Rows Processed: 20000 
-Task:  EXTRACT         Status: Ended      ET: 00:00:00.930
+Task:  EXTRACT         Status: Ended      ET: 00:00:00.985
 
 --- 20000 row(s) unloaded.
 >>
@@ -1158,12 +1178,12 @@ CD_DEMO_SK   CD_GENDER
 +>select * from trafodion.hbase.customer_address where ca_address_sk < 100;
 Task: UNLOAD           Status: Started
 Task:  EMPTY TARGET    Status: Started
-Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.005
+Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.004
 Task:  EXTRACT         Status: Started
        Rows Processed: 99 
-Task:  EXTRACT         Status: Ended      ET: 00:00:00.207
+Task:  EXTRACT         Status: Ended      ET: 00:00:00.203
 Task:  MERGE FILES     Status: Started
-Task:  MERGE FILES     Status: Ended      ET: 00:00:00.029
+Task:  MERGE FILES     Status: Ended      ET: 00:00:00.021
 
 --- 99 row(s) unloaded.
 >>
@@ -1199,10 +1219,10 @@ regrhadoop.ksh fs -rm /user/hive/exttables/unload_customer_demographics/*
 +>select ss_sold_date_sk,ss_store_sk, sum (ss_quantity) from store_sales_salt group by  ss_sold_date_sk ,ss_store_sk;
 Task: UNLOAD           Status: Started
 Task:  EMPTY TARGET    Status: Started
-Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.005
+Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.006
 Task:  EXTRACT         Status: Started
        Rows Processed: 12349 
-Task:  EXTRACT         Status: Ended      ET: 00:00:06.278
+Task:  EXTRACT         Status: Ended      ET: 00:00:06.351
 
 --- 12349 row(s) unloaded.
 >>
@@ -1321,10 +1341,10 @@ SS_SOLD_DATE_SK  SS_STORE_SK  SS_QUANTITY
 +>select * from trafodion.hbase.customer_salt c join trafodion.hbase.customer_address ca on c.c_current_addr_sk = ca.ca_address_sk ;
 Task: UNLOAD           Status: Started
 Task:  EMPTY TARGET    Status: Started
-Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.004
+Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.005
 Task:  EXTRACT         Status: Started
        Rows Processed: 100000 
-Task:  EXTRACT         Status: Ended      ET: 00:00:07.279
+Task:  EXTRACT         Status: Ended      ET: 00:00:07.648
 
 --- 100000 row(s) unloaded.
 >>
@@ -1372,10 +1392,10 @@ C_CUSTOMER_SK  C_CUSTOMER_ID
 +>select * from customer_address where ca_address_sk < 1000 union select * from customer_address where ca_address_sk > 40000  and ca_address_sk < 41000;
 Task: UNLOAD           Status: Started
 Task:  EMPTY TARGET    Status: Started
-Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.141
+Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.008
 Task:  EXTRACT         Status: Started
        Rows Processed: 1998 
-Task:  EXTRACT         Status: Ended      ET: 00:00:00.957
+Task:  EXTRACT         Status: Ended      ET: 00:00:00.490
 
 --- 1998 row(s) unloaded.
 >>
@@ -1486,7 +1506,7 @@ ESP_EXCHANGE ==============================  SEQ_NO 3        ONLY CHILD 2
   use_snapshot_scan ...... TRUE
   full_table_name ........ TRAFODION.HBASE.CUSTOMER_DEMOGRAPHICS_SALT
   snapshot_name .......... TRAFODION.HBASE.CUSTOMER_DEMOGRAPHICS_SALT_SNAP111
-  snapshot_temp_location   /bulkload/20160526204449/
+  snapshot_temp_location   /bulkload/20160530055515/
 grep -i -e 'explain reg' -e snapshot -e full_table_name  -e esp_exchange  LOG018_REGULAR_SCAN_PLAN.TXT | grep -v snapshot_scan_run_id
 >>--no snapshot
 >>explain reg;
@@ -1566,7 +1586,7 @@ grep -i -e 'explain snp' -e snapshot -e full_table_name -e esp_exchange LOG018_S
   use_snapshot_scan ...... TRUE
   full_table_name ........ TRAFODION.HBASE.CUSTOMER_ADDRESS
   snapshot_name .......... TRAFODION.HBASE.CUSTOMER_ADDRESS_SNAP111
-  snapshot_temp_location   /bulkload/20160526204456/
+  snapshot_temp_location   /bulkload/20160530055522/
 grep -i -e 'explain reg' -e snapshot -e full_table_name  -e esp_exchange  LOG018_REGULAR_SCAN_PLAN.TXT | grep -v snapshot_scan_run_id
 >>--no snapshot
 >>explain reg;
@@ -1648,11 +1668,11 @@ grep -i -e 'explain snp' -e snapshot -e full_table_name -e esp_exchange LOG018_S
   use_snapshot_scan ...... TRUE
   full_table_name ........ TRAFODION.HBASE.CUSTOMER_SALT
   snapshot_name .......... TRAFODION.HBASE.CUSTOMER_SALT_SNAP111
-  snapshot_temp_location   /bulkload/20160526204518/
+  snapshot_temp_location   /bulkload/20160530055545/
   use_snapshot_scan ...... TRUE
   full_table_name ........ TRAFODION.HBASE.CUSTOMER_ADDRESS
   snapshot_name .......... TRAFODION.HBASE.CUSTOMER_ADDRESS_SNAP111
-  snapshot_temp_location   /bulkload/20160526204518/
+  snapshot_temp_location   /bulkload/20160530055545/
 grep -i -e 'explain reg' -e snapshot -e full_table_name  -e esp_exchange  LOG018_REGULAR_SCAN_PLAN.TXT | grep -v snapshot_scan_run_id
 >>--no snapshot
 >>explain reg;
@@ -1765,13 +1785,13 @@ LC   RC   OP   OPERATOR              OPT       DESCRIPTION           CARD
 +><<+ cardinality 10e10 >>;
 Task: UNLOAD           Status: Started
 Task:  EMPTY TARGET    Status: Started
-Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.004
+Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.006
 Task:  VERIFY SNAPSHO  Status: Started
        Snapshots verified: 1 
-Task:  VERIFY SNAPSHO  Status: Ended      ET: 00:00:00.288
+Task:  VERIFY SNAPSHO  Status: Ended      ET: 00:00:00.294
 Task:  EXTRACT         Status: Started
        Rows Processed: 50000 
-Task:  EXTRACT         Status: Ended      ET: 00:00:02.176
+Task:  EXTRACT         Status: Ended      ET: 00:00:02.144
 
 --- 50000 row(s) unloaded.
 >>
@@ -1843,13 +1863,13 @@ LC   RC   OP   OPERATOR              OPT       DESCRIPTION           CARD
 +>select * from trafodion.hbase.customer_demographics_salt <<+ cardinality 10e10 >>;
 Task: UNLOAD           Status: Started
 Task:  EMPTY TARGET    Status: Started
-Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.002
+Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.003
 Task:  VERIFY SNAPSHO  Status: Started
        Snapshots verified: 1 
-Task:  VERIFY SNAPSHO  Status: Ended      ET: 00:00:00.337
+Task:  VERIFY SNAPSHO  Status: Ended      ET: 00:00:00.311
 Task:  EXTRACT         Status: Started
        Rows Processed: 20000 
-Task:  EXTRACT         Status: Ended      ET: 00:00:01.975
+Task:  EXTRACT         Status: Ended      ET: 00:00:02.081
 
 --- 20000 row(s) unloaded.
 >>
@@ -1900,18 +1920,22 @@ Task:  EMPTY TARGET    Status: Started
 Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.010
 Task:  CREATE SNAPSHO  Status: Started
        Snapshots created: 1 
-Task:  CREATE SNAPSHO  Status: Ended      ET: 00:00:00.474
+Task:  CREATE SNAPSHO  Status: Ended      ET: 00:00:01.416
 Task:  EXTRACT         Status: Started
        Rows Processed: 20000 
-Task:  EXTRACT         Status: Ended      ET: 00:00:01.272
+Task:  EXTRACT         Status: Ended      ET: 00:00:01.191
 Task:  DELETE SNAPSHO  Status: Started
        Snapshots deleted: 1 
-Task:  DELETE SNAPSHO  Status: Ended      ET: 00:00:00.014
+Task:  DELETE SNAPSHO  Status: Ended      ET: 00:00:00.010
 
 --- 20000 row(s) unloaded.
 >>
 >>select count(*) from hive.hive.unload_customer_demographics;
 
+*** WARNING[8597] Statement was automatically retried 1 time(s). Delay before each retry was 0 seconds. See next entry for the error that caused this retry.
+
+*** WARNING[8436] Mismatch detected between compiletime and runtime hive table definitions.
+
 (EXPR)              
 --------------------
 
@@ -1954,16 +1978,16 @@ CD_DEMO_SK   CD_GENDER
 +>select * from trafodion.hbase.customer_demographics_salt <<+ cardinality 10e10 >>;
 Task: UNLOAD           Status: Started
 Task:  EMPTY TARGET    Status: Started
-Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.020
+Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.009
 Task:  CREATE SNAPSHO  Status: Started
        Snapshots created: 1 
-Task:  CREATE SNAPSHO  Status: Ended      ET: 00:00:00.597
+Task:  CREATE SNAPSHO  Status: Ended      ET: 00:00:00.602
 Task:  EXTRACT         Status: Started
        Rows Processed: 20000 
-Task:  EXTRACT         Status: Ended      ET: 00:00:01.095
+Task:  EXTRACT         Status: Ended      ET: 00:00:01.481
 Task:  DELETE SNAPSHO  Status: Started
        Snapshots deleted: 1 
-Task:  DELETE SNAPSHO  Status: Ended      ET: 00:00:00.006
+Task:  DELETE SNAPSHO  Status: Ended      ET: 00:00:00.005
 
 --- 20000 row(s) unloaded.
 >>
@@ -1977,6 +2001,10 @@ Task:  DELETE SNAPSHO  Status: Ended      ET: 00:00:00.006
 --- 1 row(s) selected.
 >>select [first 20] * from hive.hive.unload_customer_demographics where cd_demo_sk < 100 order by cd_demo_sk;
 
+*** WARNING[8597] Statement was automatically retried 1 time(s). Delay before each retry was 0 seconds. See next entry for the error that caused this retry.
+
+*** WARNING[8436] Mismatch detected between compiletime and runtime hive table definitions.
+
 CD_DEMO_SK   CD_GENDER                                                                                             CD_MARITAL_STATUS                                                                                     CD_EDUCATION_STATUS                                                                                   CD_PURCHASE_ESTIMATE  CD_CREDIT_RATING                                                                                      CD_DEP_COUNT  CD_DEP_EMPLOYED_COUNT  CD_DEP_COLLEGE_COUNT
 -----------  ----------------------------------------------------------------------------------------------------  ----------------------------------------------------------------------------------------------------  ----------------------------------------------------------------------------------------------------  --------------------  ----------------------------------------------------------------------------------------------------  ------------  ---------------------  --------------------
 
@@ -2012,16 +2040,16 @@ CD_DEMO_SK   CD_GENDER
 +>select * from customer_address where ca_address_sk < 1000 union select * from customer_address where ca_address_sk > 40000  and ca_address_sk < 41000;
 Task: UNLOAD           Status: Started
 Task:  EMPTY TARGET    Status: Started
-Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.005
+Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.004
 Task:  CREATE SNAPSHO  Status: Started
        Snapshots created: 1 
-Task:  CREATE SNAPSHO  Status: Ended      ET: 00:00:01.048
+Task:  CREATE SNAPSHO  Status: Ended      ET: 00:00:00.627
 Task:  EXTRACT         Status: Started
        Rows Processed: 1998 
-Task:  EXTRACT         Status: Ended      ET: 00:00:00.662
+Task:  EXTRACT         Status: Ended      ET: 00:00:00.648
 Task:  DELETE SNAPSHO  Status: Started
        Snapshots deleted: 1 
-Task:  DELETE SNAPSHO  Status: Ended      ET: 00:00:00.003
+Task:  DELETE SNAPSHO  Status: Ended      ET: 00:00:00.004
 
 --- 1998 row(s) unloaded.
 >>
@@ -2096,13 +2124,13 @@ CA_ADDRESS_SK  CA_ADDRESS_ID
 +>select * from trafodion.hbase.customer_salt c join trafodion.hbase.customer_address ca on c.c_current_addr_sk = ca.ca_address_sk ;
 Task: UNLOAD           Status: Started
 Task:  EMPTY TARGET    Status: Started
-Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.007
+Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.005
 Task:  CREATE SNAPSHO  Status: Started
        Snapshots created: 2 
-Task:  CREATE SNAPSHO  Status: Ended      ET: 00:00:02.782
+Task:  CREATE SNAPSHO  Status: Ended      ET: 00:00:02.377
 Task:  EXTRACT         Status: Started
        Rows Processed: 100000 
-Task:  EXTRACT         Status: Ended      ET: 00:00:08.942
+Task:  EXTRACT         Status: Ended      ET: 00:00:08.169
 Task:  DELETE SNAPSHO  Status: Started
        Snapshots deleted: 2 
 Task:  DELETE SNAPSHO  Status: Ended      ET: 00:00:00.007
@@ -2173,16 +2201,16 @@ LC   RC   OP   OPERATOR              OPT       DESCRIPTION           CARD
 +>select c_first_name,c_last_name from trafodion.hbase.customer_salt;
 Task: UNLOAD           Status: Started
 Task:  EMPTY TARGET    Status: Started
-Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.008
+Task:  EMPTY TARGET    Status: Ended      ET: 00:00:00.007
 Task:  CREATE SNAPSHO  Status: Started
        Snapshots created: 1 
-Task:  CREATE SNAPSHO  Status: Ended      ET: 00:00:00.365
+Task:  CREATE SNAPSHO  Status: Ended      ET: 00:00:00.420
 Task:  EXTRACT         Status: Started
        Rows Processed: 100000 
-Task:  EXTRACT         Status: Ended      ET: 00:00:01.183
+Task:  EXTRACT         Status: Ended      ET: 00:00:01.144
 Task:  DELETE SNAPSHO  Status: Started
        Snapshots deleted: 1 
-Task:  DELETE SNAPSHO  Status: Ended      ET: 00:00:00.003
+Task:  DELETE SNAPSHO  Status: Ended      ET: 00:00:00.004
 
 --- 100000 row(s) unloaded.
 >>
@@ -2255,7 +2283,7 @@ unload with delimiter 0 into '/bulkload/test' select * from CUSTOMER_ADDRESS;
 Task: UNLOAD           Status: Started
 Task:  EXTRACT         Status: Started
        Rows Processed: 50000 
-Task:  EXTRACT         Status: Ended      ET: 00:00:01.997
+Task:  EXTRACT         Status: Ended      ET: 00:00:01.974
 
 --- 50000 row(s) unloaded.
 >>--unload  24 -- should give an error
@@ -2320,7 +2348,7 @@ regrhadoop.ksh fs -rm /user/hive/exttables/unload_customer_demographics/*
 Task: UNLOAD           Status: Started
 Task:  EXTRACT         Status: Started
        Rows Processed but NOT Written to Disk: 20000 
-Task:  EXTRACT         Status: Ended      ET: 00:00:00.709
+Task:  EXTRACT         Status: Ended      ET: 00:00:00.690
 
 --- 20000 row(s) unloaded.
 >>select count(*) from hive.hive.unload_customer_demographics;

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/b1a8f024/core/sql/regress/hive/TEST003
----------------------------------------------------------------------
diff --git a/core/sql/regress/hive/TEST003 b/core/sql/regress/hive/TEST003
index ada9c87..7e3641b 100644
--- a/core/sql/regress/hive/TEST003
+++ b/core/sql/regress/hive/TEST003
@@ -67,7 +67,6 @@ log LOG003 clear;
 
 set schema hive.hive;
 cqd attempt_esp_parallelism 'off';
-cqd AUTO_QUERY_RETRY 'OFF';
 cqd hive_max_esps  '1';
 cqd PARALLEL_NUM_ESPS '1';
 cqd HIVE_MAX_STRING_LENGTH '25' ;
@@ -101,7 +100,6 @@ insert into hive.ins_time_dim values ('a', 2, 3, 4, 5, 6, 'c', 'd', 'e', 'f');
 
 --try new HIVE SYNTAX
 --------------
-cqd query_cache '0';
 insert into TABLE hive.ins_customer_address select * from hive.customer_address;
 
 select count(*) from hive.customer_address;
@@ -168,7 +166,6 @@ cqd PARALLEL_NUM_ESPS '2';
 set schema hive;
 cqd hive_max_esps '2';
 cqd HIVE_MAX_STRING_LENGTH '25' ;
-cqd query_cache '0';
 control query shape esp_exchange(cut);
 prepare s from insert into table ins_store_sales_summary select ss_sold_date_sk,ss_store_sk, sum (ss_quantity) from store_sales group by  ss_sold_date_sk ,ss_store_sk; 
 explain options 'f' s;

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/b1a8f024/core/sql/regress/hive/TEST006
----------------------------------------------------------------------
diff --git a/core/sql/regress/hive/TEST006 b/core/sql/regress/hive/TEST006
index 10e3d55..37a2c8b 100644
--- a/core/sql/regress/hive/TEST006
+++ b/core/sql/regress/hive/TEST006
@@ -33,7 +33,6 @@ log LOG006 clear;
 set schema hive.hive;
 set terminal_charset utf8;
 
-cqd AUTO_QUERY_RETRY 'OFF';
 cqd HIVE_MAX_STRING_LENGTH '25' ;
 cqd HIST_ROWCOUNT_REQUIRING_STATS '50000';
 cqd mode_seahive 'ON';

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/b1a8f024/core/sql/regress/hive/TEST018
----------------------------------------------------------------------
diff --git a/core/sql/regress/hive/TEST018 b/core/sql/regress/hive/TEST018
index 034a2fb..86758e6 100644
--- a/core/sql/regress/hive/TEST018
+++ b/core/sql/regress/hive/TEST018
@@ -27,13 +27,11 @@ set schema trafodion.hbase;
 cqd comp_bool_226 'on';
 cqd TRAF_TABLE_SNAPSHOT_SCAN_TABLE_SIZE_THRESHOLD  '0';
 cqd hive_max_string_length '60';
-cqd query_cache '0';
 cqd HIST_ROWCOUNT_REQUIRING_STATS '50000';
+cqd AUTO_QUERY_RETRY_WARNINGS 'ON';
 
 obey TEST018(clean_up);
 
-
-
 log LOG018 clear;
 
 sh regrhive.ksh -v -f $REGRTSTDIR/TEST018_create_hive_tables.hive &> $REGRRUNDIR/LOG018_create_hive_tables.log ;


[3/8] incubator-trafodion git commit: Merge remote branch 'origin/master' into ansharma_hivets_br

Posted by an...@apache.org.
Merge remote branch 'origin/master' into ansharma_hivets_br


Project: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/commit/772b4a34
Tree: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/tree/772b4a34
Diff: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/diff/772b4a34

Branch: refs/heads/master
Commit: 772b4a3410821f8dda0ce2cad10cc81c31ff6f1d
Parents: cb6a75c 7862d94
Author: Anoop Sharma <an...@esgyn.com>
Authored: Fri May 27 14:01:09 2016 +0000
Committer: Anoop Sharma <an...@esgyn.com>
Committed: Fri May 27 14:01:09 2016 +0000

----------------------------------------------------------------------
 .gitattributes                                  |    6 -
 .gitignore                                      |    1 +
 .rat-excludes                                   |    2 +-
 core/Makefile                                   |    2 +-
 core/conn/Makefile                              |    5 +-
 core/conn/jdbcT4/Makefile                       |    1 +
 .../jdbcT4/src/main/java/T4Messages.properties  |    6 +-
 .../java/org/trafodion/jdbc/t4/Address.java     |    2 +-
 .../jdbc/t4/HPT4ConnectionPoolDataSource.java   |  346 ----
 .../t4/HPT4ConnectionPoolDataSourceFactory.java |   68 -
 .../org/trafodion/jdbc/t4/HPT4DataSource.java   |  366 ----
 .../jdbc/t4/HPT4DataSourceFactory.java          |   62 -
 .../java/org/trafodion/jdbc/t4/HPT4Desc.java    |  542 ------
 .../org/trafodion/jdbc/t4/HPT4Exception.java    |   38 -
 .../java/org/trafodion/jdbc/t4/HPT4Handle.java  |   84 -
 .../org/trafodion/jdbc/t4/HPT4Messages.java     |  324 ----
 .../jdbc/t4/HPT4ParameterMetaData.java          |  243 ---
 .../trafodion/jdbc/t4/HPT4PooledConnection.java |  238 ---
 .../jdbc/t4/HPT4PooledConnectionManager.java    |  385 ----
 .../jdbc/t4/HPT4ResultSetMetaData.java          |  309 ---
 .../jdbc/t4/InitializeDialogueReply.java        |    6 +-
 .../java/org/trafodion/jdbc/t4/InputOutput.java |   50 +-
 .../trafodion/jdbc/t4/InterfaceConnection.java  |   52 +-
 .../trafodion/jdbc/t4/InterfaceResultSet.java   |   34 +-
 .../trafodion/jdbc/t4/InterfaceStatement.java   |  148 +-
 .../java/org/trafodion/jdbc/t4/NCSAddress.java  |   22 +-
 .../jdbc/t4/PreparedStatementManager.java       |    2 +-
 .../java/org/trafodion/jdbc/t4/T4Address.java   |   24 +-
 .../org/trafodion/jdbc/t4/T4Connection.java     |   36 +-
 .../org/trafodion/jdbc/t4/T4DSProperties.java   |    6 +-
 .../trafodion/jdbc/t4/T4DatabaseMetaData.java   |  132 +-
 .../java/org/trafodion/jdbc/t4/T4Driver.java    |   14 +-
 .../org/trafodion/jdbc/t4/T4Properties.java     |   20 +-
 .../java/org/trafodion/jdbc/t4/T4ResultSet.java |   12 +-
 .../java/org/trafodion/jdbc/t4/T4Statement.java |   20 +-
 .../org/trafodion/jdbc/t4/T4_Dcs_Cancel.java    |    2 +-
 .../org/trafodion/jdbc/t4/T4_Dcs_Connect.java   |   10 +-
 .../jdbc/t4/TrafT4CallableStatement.java        |   58 +-
 .../org/trafodion/jdbc/t4/TrafT4Connection.java |  108 +-
 .../jdbc/t4/TrafT4ConnectionPoolDataSource.java |  346 ++++
 .../TrafT4ConnectionPoolDataSourceFactory.java  |   68 +
 .../org/trafodion/jdbc/t4/TrafT4DataSource.java |  366 ++++
 .../jdbc/t4/TrafT4DataSourceFactory.java        |   62 +
 .../java/org/trafodion/jdbc/t4/TrafT4Desc.java  |  542 ++++++
 .../org/trafodion/jdbc/t4/TrafT4Exception.java  |   38 +
 .../org/trafodion/jdbc/t4/TrafT4Handle.java     |   84 +
 .../org/trafodion/jdbc/t4/TrafT4Messages.java   |  324 ++++
 .../jdbc/t4/TrafT4ParameterMetaData.java        |  243 +++
 .../jdbc/t4/TrafT4PooledConnection.java         |  238 +++
 .../jdbc/t4/TrafT4PooledConnectionManager.java  |  385 ++++
 .../jdbc/t4/TrafT4PreparedStatement.java        |  118 +-
 .../org/trafodion/jdbc/t4/TrafT4ResultSet.java  |  296 +--
 .../jdbc/t4/TrafT4ResultSetMetaData.java        |  309 +++
 .../org/trafodion/jdbc/t4/TrafT4Statement.java  |   70 +-
 .../java/org/trafodion/jdbc/t4/Utility.java     |   24 +-
 .../jdbc/t4/odbc_Dcs_GetObjRefHdl_exc_.java     |   16 +-
 .../jdbc/t4/odbc_Dcs_StopSrvr_exc_.java         |    8 +-
 .../jdbc/t4/odbc_SQLSvc_Close_exc_.java         |    8 +-
 .../t4/odbc_SQLSvc_EndTransaction_exc_.java     |    8 +-
 .../jdbc/t4/odbc_SQLSvc_Execute_exc_.java       |   10 +-
 .../jdbc/t4/odbc_SQLSvc_Fetch_exc_.java         |   10 +-
 .../t4/odbc_SQLSvc_GetSQLCatalogs_exc_.java     |    6 +-
 .../t4/odbc_SQLSvc_InitializeDialogue_exc_.java |    6 +-
 .../jdbc/t4/odbc_SQLSvc_Prepare_exc_.java       |   10 +-
 .../odbc_SQLSvc_SetConnectionOption_exc_.java   |   12 +-
 .../t4/odbc_SQLSvc_TerminateDialogue_exc_.java  |    8 +-
 .../java/org/trafodion/jdbc/t4/RunAllTests.java |    2 +
 core/conn/odb/build.bat                         |   56 +
 core/conn/odb/odb/odb.vcxproj                   |    2 +
 core/rest/Makefile                              |   16 +-
 core/rest/pom.xml                               |   40 +-
 core/rest/src/assembly/all.xml                  |   11 +-
 .../org/trafodion/rest/util/JdbcT4Util.java     |    6 +-
 core/sqf/.gitignore                             |    4 +
 core/sqf/Makefile                               |   20 +-
 core/sqf/sql/scripts/install_traf_components    |   50 +-
 .../TransactionalScanner.java.tmpl              |    2 +-
 .../transactional/SplitBalanceHelper.java       |   51 +-
 .../transactional/TrxRegionObserver.java.tmpl   |    4 +-
 core/sql/bin/SqlciErrors.txt                    |    1 +
 core/sql/executor/ExExeUtilLoad.cpp             |    2 +-
 core/sql/executor/ExFastTransport.cpp           |    8 +-
 core/sql/generator/GenPreCode.cpp               |    1 -
 core/sql/regress/executor/EXPECTED013.SB        |   42 +-
 core/sql/regress/executor/TEST013               |    4 -
 core/sql/regress/hive/EXPECTED018               |  191 +-
 core/sql/regress/hive/TEST018                   |    6 +
 core/sql/regress/tools/runregr_executor.ksh     |    2 +-
 core/sql/sqlcomp/CmpSeabaseDDLschema.cpp        |    2 +-
 core/sql/sqlcomp/NADefaults.h                   |    6 +
 core/sql/sqlcomp/nadefaults.cpp                 |   43 +-
 .../java/org/trafodion/sql/HTableClient.java    |    5 +-
 core/sql/ustat/hs_const.h                       |    1 +
 core/sql/ustat/hs_parser.cpp                    |   13 +
 dcs/Makefile                                    |   17 +-
 dcs/pom.xml                                     |   38 +-
 dcs/src/assembly/all.xml                        |    8 +-
 .../java/org/trafodion/dcs/util/JdbcT4Util.java |   24 +-
 .../src/asciidoc/_chapters/jdbct4.adoc          |    9 +-
 .../src/asciidoc/_chapters/odbc_windows.adoc    |    6 +-
 .../src/asciidoc/_chapters/commands.adoc        |    4 +-
 .../src/asciidoc/_chapters/activate.adoc        |    2 +-
 .../src/asciidoc/_chapters/enable_security.adoc |   12 +-
 .../src/asciidoc/_chapters/introduction.adoc    |   26 +-
 .../src/asciidoc/_chapters/prepare.adoc         |   12 +-
 .../src/asciidoc/_chapters/requirements.adoc    |    6 +-
 .../src/asciidoc/_chapters/script_install.adoc  |   38 +-
 .../src/asciidoc/_chapters/script_remove.adoc   |    8 +-
 .../src/asciidoc/_chapters/script_upgrade.adoc  |   26 +-
 .../src/asciidoc/_chapters/sql_statements.adoc  |    9 +-
 docs/src/site/markdown/documentation.md         |    2 +
 docs/src/site/markdown/download.md              |    6 +-
 docs/src/site/markdown/index.md                 |   16 +-
 docs/src/site/resources/css/site.css            |    2 +-
 docs/src/site/site.xml                          |    3 +-
 install/installer/dcs_installer                 |   19 +-
 install/installer/rest_installer                |   21 +-
 .../installer/traf_apache_hadoop_config_setup   |  842 --------
 install/installer/traf_cloudera_mods            |    2 +-
 install/installer/traf_config                   |   62 +-
 install/installer/traf_config_check             |   44 +-
 install/installer/traf_config_setup             |   69 +-
 install/installer/traf_hortonworks_mods         |    4 +-
 install/installer/traf_package_setup            |    2 +
 .../installer/trafodion_apache_hadoop_install   |  763 --------
 install/installer/trafodion_config_default      |   14 +-
 install/installer/trafodion_install             |  148 +-
 install/traf_tools_setup.sh                     |    1 +
 licenses/Makefile                               |   15 +-
 licenses/lic-dcs-bin                            |  652 -------
 licenses/lic-dcs-src                            |   69 -
 licenses/lic-rest-bin                           | 1416 --------------
 licenses/lic-server-bin                         | 1834 ++++++++++++++++++
 licenses/lic-server-src                         |   69 +
 licenses/not-dcs-bin                            |   20 -
 licenses/not-rest-bin                           |   20 -
 licenses/note-server-bin                        |   20 +
 pom.xml                                         |    1 +
 138 files changed, 6259 insertions(+), 8114 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/772b4a34/core/sql/bin/SqlciErrors.txt
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/772b4a34/core/sql/sqlcomp/nadefaults.cpp
----------------------------------------------------------------------


[8/8] incubator-trafodion git commit: Merge [TRAFODION-2006] PR-511 Handling of hive data and structure modification

Posted by an...@apache.org.
Merge [TRAFODION-2006] PR-511 Handling of hive data and structure modification


Project: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/commit/c39d3abf
Tree: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/tree/c39d3abf
Diff: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/diff/c39d3abf

Branch: refs/heads/master
Commit: c39d3abf60ecb3734beb8fa83962016c01156057
Parents: d199362 51a9c73
Author: Anoop Sharma <an...@edev06.novalocal>
Authored: Tue May 31 22:51:56 2016 +0000
Committer: Anoop Sharma <an...@edev06.novalocal>
Committed: Tue May 31 22:51:56 2016 +0000

----------------------------------------------------------------------
 core/sql/bin/SqlciErrors.txt            |   2 +
 core/sql/cli/SessionDefaults.cpp        |   6 +
 core/sql/comexe/ComTdbExeUtil.cpp       |   6 +-
 core/sql/comexe/ComTdbExeUtil.h         |  12 +-
 core/sql/comexe/ComTdbFastTransport.cpp |   3 +-
 core/sql/comexe/ComTdbFastTransport.h   |   5 +-
 core/sql/comexe/ComTdbHdfsScan.cpp      |  49 ++++++-
 core/sql/comexe/ComTdbHdfsScan.h        |  28 +++-
 core/sql/executor/ExExeUtil.h           |   9 +-
 core/sql/executor/ExExeUtilMisc.cpp     |  95 +++++++++---
 core/sql/executor/ExFastTransport.cpp   |  87 +++++++++--
 core/sql/executor/ExFastTransport.h     |   5 +
 core/sql/executor/ExHdfsScan.cpp        |  81 ++++++++++-
 core/sql/executor/ExHdfsScan.h          |  13 +-
 core/sql/exp/ExpErrorEnums.h            |   1 +
 core/sql/exp/ExpLOBaccess.cpp           | 158 +++++++++++++++++---
 core/sql/exp/ExpLOBaccess.h             |  81 ++++++-----
 core/sql/exp/ExpLOBenums.h              |   3 +
 core/sql/exp/ExpLOBinterface.cpp        |  46 +++++-
 core/sql/exp/ExpLOBinterface.h          |  10 ++
 core/sql/generator/GenFastTransport.cpp |  62 ++++----
 core/sql/generator/GenRelExeUtil.cpp    |   5 +-
 core/sql/generator/GenRelScan.cpp       |  57 +++++---
 core/sql/optimizer/BindRelExpr.cpp      |   3 +-
 core/sql/optimizer/HDFSHook.cpp         |   5 +
 core/sql/optimizer/HDFSHook.h           |   7 +
 core/sql/optimizer/NATable.cpp          |  13 +-
 core/sql/optimizer/RelExeUtil.h         |   9 +-
 core/sql/optimizer/RelFastTransport.cpp |   1 +
 core/sql/regress/executor/EXPECTED020   |   4 +
 core/sql/regress/hive/EXPECTED003       |  46 +++---
 core/sql/regress/hive/EXPECTED005       | 117 ++++++++++++---
 core/sql/regress/hive/EXPECTED006       |   3 -
 core/sql/regress/hive/EXPECTED015       |  59 ++++----
 core/sql/regress/hive/EXPECTED018       | 208 +++++++++++++++------------
 core/sql/regress/hive/TEST003           |  12 +-
 core/sql/regress/hive/TEST005           |  38 ++++-
 core/sql/regress/hive/TEST006           |   1 -
 core/sql/regress/hive/TEST015           |   3 +-
 core/sql/regress/hive/TEST018           |   4 +-
 core/sql/sqlcomp/DefaultConstants.h     |   4 +
 core/sql/sqlcomp/nadefaults.cpp         |   2 +
 42 files changed, 1014 insertions(+), 349 deletions(-)
----------------------------------------------------------------------



[7/8] incubator-trafodion git commit: hive data modification detection: commit #4

Posted by an...@apache.org.
hive data modification detection: commit #4


Project: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/commit/51a9c73e
Tree: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/tree/51a9c73e
Diff: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/diff/51a9c73e

Branch: refs/heads/master
Commit: 51a9c73ed37c3711178483c4a200f17a92d1c08d
Parents: b1a8f02
Author: Anoop Sharma <an...@esgyn.com>
Authored: Tue May 31 18:03:40 2016 +0000
Committer: Anoop Sharma <an...@esgyn.com>
Committed: Tue May 31 18:03:40 2016 +0000

----------------------------------------------------------------------
 core/sql/cli/SessionDefaults.cpp        |   2 +
 core/sql/comexe/ComTdbExeUtil.cpp       |   6 +-
 core/sql/comexe/ComTdbExeUtil.h         |  12 ++-
 core/sql/comexe/ComTdbHdfsScan.h        |   4 +-
 core/sql/executor/ExExeUtil.h           |   9 +-
 core/sql/executor/ExExeUtilMisc.cpp     |  95 ++++++++++++++-----
 core/sql/executor/ExFastTransport.cpp   |   2 +-
 core/sql/executor/ExHdfsScan.cpp        |   2 +-
 core/sql/exp/ExpErrorEnums.h            |   1 +
 core/sql/exp/ExpLOBaccess.cpp           | 133 ++++-----------------------
 core/sql/exp/ExpLOBaccess.h             |   5 +-
 core/sql/exp/ExpLOBinterface.h          |   5 +-
 core/sql/generator/GenFastTransport.cpp |   2 +-
 core/sql/generator/GenRelExeUtil.cpp    |   5 +-
 core/sql/optimizer/BindRelExpr.cpp      |   3 +-
 core/sql/optimizer/HDFSHook.cpp         |  50 ++++------
 core/sql/optimizer/NATable.cpp          |   3 +
 core/sql/optimizer/RelExeUtil.h         |   9 +-
 core/sql/optimizer/RelFastTransport.cpp |   1 +
 core/sql/regress/hive/EXPECTED003       |  37 ++++++--
 core/sql/regress/hive/EXPECTED005       |  36 +++++---
 core/sql/regress/hive/EXPECTED015       |  59 ++++++------
 core/sql/regress/hive/TEST003           |   9 +-
 core/sql/regress/hive/TEST005           |  11 +--
 core/sql/regress/hive/TEST015           |   3 +-
 25 files changed, 240 insertions(+), 264 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/51a9c73e/core/sql/cli/SessionDefaults.cpp
----------------------------------------------------------------------
diff --git a/core/sql/cli/SessionDefaults.cpp b/core/sql/cli/SessionDefaults.cpp
index 024754b..5e79138 100644
--- a/core/sql/cli/SessionDefaults.cpp
+++ b/core/sql/cli/SessionDefaults.cpp
@@ -782,6 +782,8 @@ static const AQRInfo::AQRErrorMap aqrErrorMap[] =
   // parallel purgedata failed
   AQREntry(   8022,      0,      3,    60,      0,   0, "",    0,     1),
 
+  // hive data modification timestamp mismatch.
+  // query will be AQR'd and hive metadata will be reloaded.
   AQREntry(   8436,      0,      1,     0,      0,   2, "04:05",  0,     0),
 
   // FS memory errors

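The new AQREntry row registers SQLCODE 8436 with Auto Query Retry so the failed statement is recompiled with refreshed Hive metadata instead of surfacing the error. The meaning of each AQREntry column is not spelled out in this hunk, so the sketch below is only a generic illustration of an error-to-retry-rule map: the column interpretations (retry count, delay) are assumptions based on the neighboring 8022 entry, and the structure is not the real AQRInfo::AQRErrorMap.

  // aqr_map_sketch.cpp -- hypothetical retry map keyed by SQLCODE; the real
  // AQRInfo::AQRErrorMap carries more columns and different semantics.
  #include <cstdlib>
  #include <iostream>
  #include <map>

  struct RetryRule {
    int  numRetries;        // automatic retries to attempt (assumed meaning)
    int  delaySeconds;      // pause between retries (assumed meaning)
    bool refreshMetadata;   // e.g. reload hive metadata before recompiling
  };

  static const std::map<int, RetryRule> retryMap = {
    {8436, {1, 0,  true}},  // hive compile/runtime timestamp mismatch
    {8022, {3, 60, false}}, // parallel purgedata failed
  };

  int main(int argc, char **argv)
  {
    int sqlcode = (argc > 1) ? std::atoi(argv[1]) : 8436;
    std::map<int, RetryRule>::const_iterator it = retryMap.find(sqlcode);
    if (it == retryMap.end())
      std::cout << "SQLCODE " << sqlcode << ": not retryable\n";
    else
      std::cout << "SQLCODE " << sqlcode << ": retry " << it->second.numRetries
                << " time(s), delay " << it->second.delaySeconds
                << "s, refresh metadata = " << std::boolalpha
                << it->second.refreshMetadata << "\n";
    return 0;
  }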
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/51a9c73e/core/sql/comexe/ComTdbExeUtil.cpp
----------------------------------------------------------------------
diff --git a/core/sql/comexe/ComTdbExeUtil.cpp b/core/sql/comexe/ComTdbExeUtil.cpp
index 3442714..e3dd630 100644
--- a/core/sql/comexe/ComTdbExeUtil.cpp
+++ b/core/sql/comexe/ComTdbExeUtil.cpp
@@ -1189,7 +1189,8 @@ ComTdbExeUtilFastDelete::ComTdbExeUtilFastDelete(
      NABoolean isHiveTruncate,
      char * hiveTableLocation,
      char * hiveHostName,
-     Lng32 hivePortNum)
+     Lng32 hivePortNum,
+     Int64 hiveModTS)
      : ComTdbExeUtil(ComTdbExeUtil::FAST_DELETE_,
 		     NULL, 0, (Int16)SQLCHARSETCODE_UNKNOWN,
 		     tableName, tableNameLen,
@@ -1211,7 +1212,8 @@ ComTdbExeUtilFastDelete::ComTdbExeUtilFastDelete(
        lobNumArray_(lobNumArray),
        hiveTableLocation_(hiveTableLocation),
        hiveHdfsHost_(hiveHostName),
-       hiveHdfsPort_(hivePortNum)
+       hiveHdfsPort_(hivePortNum),
+       hiveModTS_(hiveModTS)
 {
   setIsHiveTruncate(isHiveTruncate);
   setNodeType(ComTdb::ex_FAST_DELETE);

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/51a9c73e/core/sql/comexe/ComTdbExeUtil.h
----------------------------------------------------------------------
diff --git a/core/sql/comexe/ComTdbExeUtil.h b/core/sql/comexe/ComTdbExeUtil.h
index ea3ef69..eb70eac 100644
--- a/core/sql/comexe/ComTdbExeUtil.h
+++ b/core/sql/comexe/ComTdbExeUtil.h
@@ -1518,7 +1518,8 @@ public:
 			  NABoolean ishiveTruncate = FALSE,
 			  char * hiveTableLocation = NULL,
                           char * hiveHostName = NULL,
-                          Lng32 hivePortNum = 0
+                          Lng32 hivePortNum = 0,
+                          Int64 hiveModTS = -1
 			  );
 
   Long pack(void *);
@@ -1569,6 +1570,11 @@ public:
     return hiveHdfsPort_;
   }
 
+  Int64 getHiveModTS() const
+  {
+    return hiveModTS_;
+  }
+
   // ---------------------------------------------------------------------
   // Used by the internal SHOWPLAN command to get attributes of a TDB.
   // ---------------------------------------------------------------------
@@ -1645,7 +1651,9 @@ private:
   NABasicPtr  hiveTableLocation_;                    // 56-63
   NABasicPtr hiveHdfsHost_;                          // 64-71
   Int32 hiveHdfsPort_;                               // 72-75
-  char fillersComTdbExeUtilFastDelete_[52];          // 76-127
+  char fillers1_[4];                                 // 76-79
+  Int64 hiveModTS_;                                  // 80-87
+  char fillersComTdbExeUtilFastDelete_[40];          // 88-127
 };
 
 class ComTdbExeUtilGetStatistics : public ComTdbExeUtil

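For readers unfamiliar with the TDB layout convention shown in the hunk above, here is a sketch of the filler-accounting pattern (a hypothetical struct, not the real class): the byte-offset comments and filler arrays keep the new Int64 member 8-byte aligned and keep the overall packed size unchanged, so the IPC/plan layout stays compatible.

  // Hypothetical model of the commented tail of the TDB (bytes 56-127).
  #include <cstdint>

  struct FastDeleteTdbTail {
    int64_t hiveTableLocation_;   // 56-63 (pointer packed as a 64-bit offset)
    int64_t hiveHdfsHost_;        // 64-71
    int32_t hiveHdfsPort_;        // 72-75
    char    fillers1_[4];         // 76-79  pad to an 8-byte boundary
    int64_t hiveModTS_;           // 80-87  new member added by this change
    char    fillers2_[40];        // 88-127 reserved for future members
  };

  static_assert(sizeof(FastDeleteTdbTail) == 128 - 56,
                "adding a member must not change the packed TDB size");
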
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/51a9c73e/core/sql/comexe/ComTdbHdfsScan.h
----------------------------------------------------------------------
diff --git a/core/sql/comexe/ComTdbHdfsScan.h b/core/sql/comexe/ComTdbHdfsScan.h
index ac83311..392f7cf 100755
--- a/core/sql/comexe/ComTdbHdfsScan.h
+++ b/core/sql/comexe/ComTdbHdfsScan.h
@@ -135,7 +135,7 @@ class ComTdbHdfsScan : public ComTdb
   char fillersComTdbHdfsScan1_[2];                            // 190 - 191
   NABasicPtr nullFormat_;                                     // 192 - 199
 
-  // next 3 params used to check if data under hdfsFileDir
+  // next 4 params are used to check if data under hdfsFileDir
   // was modified after query was compiled.
   NABasicPtr hdfsRootDir_;                                     // 200 - 207
   Int64  modTSforDir_;                                         // 208 - 215
@@ -196,7 +196,7 @@ public:
                  char * loggingLocation = NULL,
                  char * errCountId = NULL,
 
-                 // next 3 params used to check if data under hdfsFileDir
+                 // next 4 params are used to check if data under hdfsFileDir
                  // was modified after query was compiled.
                  char * hdfsRootDir  = NULL,
                  Int64  modTSforDir   = -1,

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/51a9c73e/core/sql/executor/ExExeUtil.h
----------------------------------------------------------------------
diff --git a/core/sql/executor/ExExeUtil.h b/core/sql/executor/ExExeUtil.h
index 6ed29c1..062362b 100755
--- a/core/sql/executor/ExExeUtil.h
+++ b/core/sql/executor/ExExeUtil.h
@@ -3299,6 +3299,7 @@ class ExExeUtilHiveTruncateTcb : public ExExeUtilTcb
     {
       INITIAL_,
       ERROR_,
+      DATA_MOD_CHECK_,
       EMPTY_DIRECTORY_,
       DONE_
     };
@@ -3306,18 +3307,10 @@ class ExExeUtilHiveTruncateTcb : public ExExeUtilTcb
   ExExeUtilFastDeleteTdb & fdTdb() const
     {return (ExExeUtilFastDeleteTdb &) tdb;};
 
-
-//  short doHiveTruncate(char * objectName,
-//                     NABoolean isIndex,
-//                     NABoolean fastDelUsingResetEOF);
-
   short injectError(const char * val);
 
   Step step_;
 
-  char  hdfsHost_[500];
-  int  hdfsPort_;
-  char  hiveTableLocation_[513];
   int   numExistingFiles_;
   void * lobGlob_;
 };

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/51a9c73e/core/sql/executor/ExExeUtilMisc.cpp
----------------------------------------------------------------------
diff --git a/core/sql/executor/ExExeUtilMisc.cpp b/core/sql/executor/ExExeUtilMisc.cpp
index 1aea6c1..267d22e 100644
--- a/core/sql/executor/ExExeUtilMisc.cpp
+++ b/core/sql/executor/ExExeUtilMisc.cpp
@@ -2240,13 +2240,7 @@ ExExeUtilHiveTruncateTcb::ExExeUtilHiveTruncateTcb(
   qparent_.down->allocatePstate(this);
 
   numExistingFiles_ = 0;
-  memset (hdfsHost_, '\0', sizeof(hdfsHost_));
-  memset (hiveTableLocation_, '\0', sizeof(hiveTableLocation_));
 
-  strncpy(hdfsHost_, fdTdb().getHiveHdfsHost(), sizeof(hdfsHost_));
-  hdfsPort_ = fdTdb().getHiveHdfsPort();
-  char * outputPath =  fdTdb().getHiveTableLocation();
-  strncpy(hiveTableLocation_, outputPath, 512);
   step_ = INITIAL_;
 }
 
@@ -2290,26 +2284,84 @@ short ExExeUtilHiveTruncateTcb::work()
       case INITIAL_:
       {
 
-        //nothing for now --
-        // more stuff later
+        if (fdTdb().getHiveModTS() > 0)
+          step_ = DATA_MOD_CHECK_;
+        else
+          step_ = EMPTY_DIRECTORY_;
+      }
+      break;
+
+      case DATA_MOD_CHECK_:
+      {
+        cliRC = ExpLOBinterfaceDataModCheck
+          (lobGlob_,
+           fdTdb().getHiveTableLocation(),
+           fdTdb().getHiveHdfsHost(),
+           fdTdb().getHiveHdfsPort(),
+           fdTdb().getHiveModTS(),
+           0);
+
+        if (cliRC < 0)
+        {
+          Lng32 cliError = 0;
+          
+          Lng32 intParam1 = -cliRC;
+          ComDiagsArea * diagsArea = NULL;
+          ExRaiseSqlError(getHeap(), &diagsArea, 
+                          (ExeErrorCode)(EXE_ERROR_FROM_LOB_INTERFACE),
+                          NULL, &intParam1, 
+                          &cliError, 
+                          NULL, 
+                          "HDFS",
+                          (char*)"ExpLOBInterfaceEmptyDirectory",
+                          getLobErrStr(intParam1));
+          pentry_down->setDiagsArea(diagsArea);
+          step_ = ERROR_;
+          break;
+        }
+
+        if (cliRC == 1) // data mod check failed
+        {
+          ComDiagsArea * diagsArea = NULL;
+          ExRaiseSqlError(getHeap(), &diagsArea, 
+                          (ExeErrorCode)(EXE_HIVE_DATA_MOD_CHECK_ERROR));
+          pentry_down->setDiagsArea(diagsArea);
+          
+          step_ = ERROR_;
+          break;
+        }
+   
         step_ = EMPTY_DIRECTORY_;
       }
-        break;
+      break;
 
       case EMPTY_DIRECTORY_:
       {
-        Lng32 retCode= ExpLOBinterfaceEmptyDirectory(
-                                    lobGlob_,
-                                    (char*)"",                  //name is empty
-                                    hiveTableLocation_,
-                                    Lob_HDFS_File,
-                                    hdfsHost_,
-                                    hdfsPort_,
-                                    0 ,
-                                    1 ,
-                                    0);
-        if (retCode != 0)
+        cliRC = ExpLOBinterfaceEmptyDirectory(
+             lobGlob_,
+             (char*)"",                  //name is empty
+             fdTdb().getHiveTableLocation(),
+             Lob_HDFS_File,
+             fdTdb().getHiveHdfsHost(),
+             fdTdb().getHiveHdfsPort(),
+             0 ,
+             1 ,
+             0);
+        if (cliRC != 0)
         {
+          Lng32 cliError = 0;
+          
+          Lng32 intParam1 = -cliRC;
+          ComDiagsArea * diagsArea = NULL;
+          ExRaiseSqlError(getHeap(), &diagsArea, 
+                          (ExeErrorCode)(EXE_ERROR_FROM_LOB_INTERFACE),
+                          NULL, &intParam1, 
+                          &cliError, 
+                          NULL, 
+                          "HDFS",
+                          (char*)"ExpLOBInterfaceEmptyDirectory",
+                          getLobErrStr(intParam1));
+          pentry_down->setDiagsArea(diagsArea);
           step_ = ERROR_;
         }
         else
@@ -2318,6 +2370,7 @@ short ExExeUtilHiveTruncateTcb::work()
         }
       }
       break;
+
       case ERROR_:
       {
         if (qparent_.up->isFull())
@@ -2350,7 +2403,7 @@ short ExExeUtilHiveTruncateTcb::work()
 
         step_ = DONE_;
       }
-        break;
+      break;
 
       case DONE_:
       {

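A condensed sketch of the state machine this hunk adds to the hive-truncate TCB (illustrative enum and callbacks; the real work() method also manages queue entries and diagnostics areas):

  enum Step { INITIAL_, DATA_MOD_CHECK_, EMPTY_DIRECTORY_, ERROR_, DONE_ };

  // checkUnchanged() and emptyDir() stand in for ExpLOBinterfaceDataModCheck
  // and ExpLOBinterfaceEmptyDirectory.
  Step nextStep(Step s, long long compileTimeModTS,
                bool (*checkUnchanged)(), bool (*emptyDir)()) {
    switch (s) {
      case INITIAL_:
        // only pay for the HDFS round trip if a compile-time timestamp exists
        return (compileTimeModTS > 0) ? DATA_MOD_CHECK_ : EMPTY_DIRECTORY_;
      case DATA_MOD_CHECK_:
        // mismatch raises 8436 (EXE_HIVE_DATA_MOD_CHECK_ERROR) and goes to ERROR_
        return checkUnchanged() ? EMPTY_DIRECTORY_ : ERROR_;
      case EMPTY_DIRECTORY_:
        return emptyDir() ? DONE_ : ERROR_;
      default:
        return DONE_;
    }
  }
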
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/51a9c73e/core/sql/executor/ExFastTransport.cpp
----------------------------------------------------------------------
diff --git a/core/sql/executor/ExFastTransport.cpp b/core/sql/executor/ExFastTransport.cpp
index 45c3959..bb8d4dc 100644
--- a/core/sql/executor/ExFastTransport.cpp
+++ b/core/sql/executor/ExFastTransport.cpp
@@ -738,7 +738,7 @@ ExWorkProcRetcode ExHdfsFastExtractTcb::work()
       {
         ComDiagsArea * diagsArea = NULL;
         ExRaiseSqlError(getHeap(), &diagsArea, 
-                        (ExeErrorCode)(8436));
+                        (ExeErrorCode)(EXE_HIVE_DATA_MOD_CHECK_ERROR));
         pentry_down->setDiagsArea(diagsArea);
         pstate.step_ = EXTRACT_ERROR;
         break;

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/51a9c73e/core/sql/executor/ExHdfsScan.cpp
----------------------------------------------------------------------
diff --git a/core/sql/executor/ExHdfsScan.cpp b/core/sql/executor/ExHdfsScan.cpp
index b204fb2..82e01b3 100644
--- a/core/sql/executor/ExHdfsScan.cpp
+++ b/core/sql/executor/ExHdfsScan.cpp
@@ -468,7 +468,7 @@ ExWorkProcRetcode ExHdfsScanTcb::work()
                   {
                     ComDiagsArea * diagsArea = NULL;
                     ExRaiseSqlError(getHeap(), &diagsArea, 
-                                    (ExeErrorCode)(8436));
+                                    (ExeErrorCode)(EXE_HIVE_DATA_MOD_CHECK_ERROR));
                     pentry_down->setDiagsArea(diagsArea);
                     step_ = HANDLE_ERROR_AND_DONE;
                     break;

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/51a9c73e/core/sql/exp/ExpErrorEnums.h
----------------------------------------------------------------------
diff --git a/core/sql/exp/ExpErrorEnums.h b/core/sql/exp/ExpErrorEnums.h
index de4d00f..987fe7e 100644
--- a/core/sql/exp/ExpErrorEnums.h
+++ b/core/sql/exp/ExpErrorEnums.h
@@ -153,6 +153,7 @@ enum ExeErrorCode
   EXE_IS_BITWISE_AND_ERROR		= 8431,
   EXE_UNSIGNED_OVERFLOW                 = 8432,
   EXE_INVALID_CHARACTER                 = 8433,
+  EXE_HIVE_DATA_MOD_CHECK_ERROR         = 8436,
   EXE_HISTORY_BUFFER_TOO_SMALL		= 8440,
   EXE_OLAP_OVERFLOW_NOT_SUPPORTED       = 8441,
   EXE_ERROR_FROM_LOB_INTERFACE          = 8442,

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/51a9c73e/core/sql/exp/ExpLOBaccess.cpp
----------------------------------------------------------------------
diff --git a/core/sql/exp/ExpLOBaccess.cpp b/core/sql/exp/ExpLOBaccess.cpp
index 7db4a40..ad127ea 100644
--- a/core/sql/exp/ExpLOBaccess.cpp
+++ b/core/sql/exp/ExpLOBaccess.cpp
@@ -110,104 +110,6 @@ ExLob::~ExLob()
    
 }
 
-#ifdef __ignore
-Ex_Lob_Error ExLob::initialize(char *lobFile, Ex_Lob_Mode mode, 
-                               char *dir, 
-			       LobsStorage storage,
-                               char *hdfsServer, Int64 hdfsPort,
-                               char *lobLocation,
-                               int bufferSize , short replication ,
-                               int blockSize, Int64 lobMaxSize, 
-                               ExLobGlobals *lobGlobals)
-{
-  int openFlags;
-  mode_t filePerms = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH;
-  struct timespec startTime;
-  struct timespec endTime;
-  Int64 secs, nsecs, totalnsecs;
- 
-  if (dir) 
-    {
-      if (dir_.empty()) 
-	{
-	  dir_ = string(dir);
-	}
-
-      if (lobFile)
-        snprintf(lobDataFile_, MAX_LOB_FILE_NAME_LEN, "%s/%s", dir_.c_str(), lobFile);
-      
-    } 
-  else if (lobFile)
-    { 
-      snprintf(lobDataFile_, MAX_LOB_FILE_NAME_LEN, "%s", lobFile);
-      
-    }
-
-  hdfsServer_ = hdfsServer;
-  hdfsPort_ = hdfsPort;
-
-  if (fs_ == NULL)
-    {
-      fs_ = hdfsConnect(hdfsServer_, hdfsPort_);
-      if (fs_ == NULL)
-        return LOB_HDFS_CONNECT_ERROR;
-    }
-
-  if (lobGlobals)
-    lobGlobals->setHdfsFs(fs_);
-  
-  if (storage_ != Lob_Invalid_Storage)
-    {
-      return LOB_INIT_ERROR;
-    } 
-  else 
-    {
-      storage_ = storage;
-    }
-
-  stats_.init(); 
-
-  if (lobLocation)
-    lobLocation_ = lobLocation;
-  clock_gettime(CLOCK_MONOTONIC, &startTime);
-
-  clock_gettime(CLOCK_MONOTONIC, &endTime);
-
-  secs = endTime.tv_sec - startTime.tv_sec;
-  nsecs = endTime.tv_nsec - startTime.tv_nsec;
-  if (nsecs < 0) 
-    {
-      secs--;
-      nsecs += NUM_NSECS_IN_SEC;
-    }
-  totalnsecs = (secs * NUM_NSECS_IN_SEC) + nsecs;
-  stats_.hdfsConnectionTime += totalnsecs;
-    
-  if (mode == EX_LOB_CREATE) 
-    { 
-      // check if file is already created
-      hdfsFileInfo *fInfo = hdfsGetPathInfo(fs_, lobDataFile_);
-      if (fInfo != NULL) 
-	{
-	  hdfsFreeFileInfo(fInfo, 1);
-	  return LOB_DATA_FILE_CREATE_ERROR;
-	} 
-      openFlags = O_WRONLY | O_CREAT;   
-      fdData_ = hdfsOpenFile(fs_, lobDataFile_, openFlags, bufferSize, replication, blockSize);
-      if (!fdData_) 
-	{
-          return LOB_DATA_FILE_CREATE_ERROR;
-	}
-      hdfsCloseFile(fs_, fdData_);
-      fdData_ = NULL;
-     
-    }
-  lobGlobalHeap_ = lobGlobals->getHeap();    
-  return LOB_OPER_OK;
-    
-}
-#endif
-
 Ex_Lob_Error ExLob::initialize(char *lobFile, Ex_Lob_Mode mode, 
                                char *dir, 
 			       LobsStorage storage,
@@ -573,34 +475,34 @@ Ex_Lob_Error ExLob::emptyDirectory()
     int numExistingFiles=0;
     hdfsFileInfo *fileInfos = hdfsGetPathInfo(fs_, lobDataFile_);
     if (fileInfos == NULL)
-      {
-        return LOB_DATA_FILE_NOT_FOUND_ERROR; //here a directory
-      }
+    {
+      return LOB_DIR_NAME_ERROR;
+    }
 
     fileInfos = hdfsListDirectory(fs_, lobDataFile_, &numExistingFiles);
-    if (fileInfos == NULL)
-      {
-        return LOB_OPER_OK;
-      }
-    
-    for (int i = 0; i < numExistingFiles; i++) 
+    if (fileInfos == NULL) // empty directory
     {
-#ifdef USE_HADOOP_1
-      int retCode = hdfsDelete(fs_, fileInfos[i].mName);
-#else
-      int retCode = hdfsDelete(fs_, fileInfos[i].mName, 0);
-#endif
+      return LOB_OPER_OK;
+    }
+
+    NABoolean error = FALSE;
+    for (int i = 0; ((NOT error) && (i < numExistingFiles)); i++) 
+    {
+      // if dir, recursively delete it and everything under it
+      int retCode = hdfsDelete(fs_, fileInfos[i].mName, 1);
       if (retCode !=0)
       {
-        //ex_assert(retCode == 0, "delete returned error");
-        return LOB_DATA_FILE_DELETE_ERROR;
+        error = TRUE;
       }
     }
+
     if (fileInfos)
     {
       hdfsFreeFileInfo(fileInfos, numExistingFiles);
     }
-    
+
+    if (error)
+      return LOB_DATA_FILE_DELETE_ERROR;
 
     return LOB_OPER_OK;
 }
@@ -2274,6 +2176,7 @@ Ex_Lob_Error ExLobsOper (
   if (globPtr == NULL)
     {
       if ((operation == Lob_Init) ||
+          (operation == Lob_Empty_Directory) ||
           (operation == Lob_Data_Mod_Check))
 	{
 	  globPtr = (void *) new ExLobGlobals();

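A standalone sketch of the emptyDirectory() logic above, using libhdfs directly (error handling reduced to a bool; the real method returns Ex_Lob_Error codes such as LOB_DIR_NAME_ERROR and LOB_DATA_FILE_DELETE_ERROR):

  #include <hdfs.h>

  bool emptyHdfsDirectory(hdfsFS fs, const char *dirPath) {
    hdfsFileInfo *info = hdfsGetPathInfo(fs, dirPath);
    if (info == NULL)
      return false;                       // directory itself is missing
    hdfsFreeFileInfo(info, 1);

    int numEntries = 0;
    hdfsFileInfo *entries = hdfsListDirectory(fs, dirPath, &numEntries);
    if (entries == NULL)
      return true;                        // already empty

    bool ok = true;
    for (int i = 0; ok && i < numEntries; i++)
      // recursive delete (3rd arg = 1) removes sub-directories and their contents
      ok = (hdfsDelete(fs, entries[i].mName, 1) == 0);

    hdfsFreeFileInfo(entries, numEntries);
    return ok;
  }
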
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/51a9c73e/core/sql/exp/ExpLOBaccess.h
----------------------------------------------------------------------
diff --git a/core/sql/exp/ExpLOBaccess.h b/core/sql/exp/ExpLOBaccess.h
index 416529d..2cce09e 100644
--- a/core/sql/exp/ExpLOBaccess.h
+++ b/core/sql/exp/ExpLOBaccess.h
@@ -481,10 +481,7 @@ class ExLob
 
   // dirPath: path to needed directory (includes directory name)
   // modTS is the latest timestamp on any file/dir under dirPath.
-  // numFilesInDir is the total number of files under dirPath.
-  // This method validates that current modTS is not greater then input modTS
-  // and current number of files in dirPath are the same as input numFilesInDir.
-  // If either condition is not true, then check fails.
+  // This method validates that current modTS is not greater than input modTS.
   // Return: LOB_OPER_OK, if passes. LOB_DATA_MOD_CHECK_ERROR, if fails.
   Ex_Lob_Error dataModCheck(
        char * dirPath, 

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/51a9c73e/core/sql/exp/ExpLOBinterface.h
----------------------------------------------------------------------
diff --git a/core/sql/exp/ExpLOBinterface.h b/core/sql/exp/ExpLOBinterface.h
index 689e422..859417e 100644
--- a/core/sql/exp/ExpLOBinterface.h
+++ b/core/sql/exp/ExpLOBinterface.h
@@ -293,10 +293,7 @@ Lng32 ExpLOBinterfacePurgeBackupLobDataFile(void *& lobGlob,  char *hdfsServer,
 
 // dirPath: path to needed directory (includes directory name)
 // modTS is the latest timestamp on any file/dir under dirPath.
-// numFilesInDir is the total number of files under dirPath.
-// This method validates that current modTS is not greater then input modTS
-// and current number of files in dirPath are the same as input numFilesInDir.
-// If either condition is not true, then check fails.
+// This method validates that current modTS is not greater than input modTS.
 // Return: 1, if check fails. 0, if passes. -1, if error.
 Lng32 ExpLOBinterfaceDataModCheck(void * lobGlob,
                                   char * dirPath,

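A hedged sketch of how callers in this change interpret that three-way return code (the enum and helper are illustrative, not part of the interface):

  enum class ModCheck { Unchanged, Changed, HdfsError };

  ModCheck classifyDataModCheck(long rc) {
    if (rc < 0)  return ModCheck::HdfsError;   // raise 8442 (EXE_ERROR_FROM_LOB_INTERFACE)
    if (rc == 1) return ModCheck::Changed;     // raise 8436, eligible for AQR
    return ModCheck::Unchanged;                // timestamps match; keep the compiled plan
  }
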
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/51a9c73e/core/sql/generator/GenFastTransport.cpp
----------------------------------------------------------------------
diff --git a/core/sql/generator/GenFastTransport.cpp b/core/sql/generator/GenFastTransport.cpp
index 7e1ee3e..6a4b4e7 100644
--- a/core/sql/generator/GenFastTransport.cpp
+++ b/core/sql/generator/GenFastTransport.cpp
@@ -624,7 +624,7 @@ PhysicalFastExtract::codeGen(Generator *generator)
   {
     newTdb->setIsHiveInsert(1);
     newTdb->setIncludeHeader(0);
-    setOverwriteHiveTable( getOverwriteHiveTable());
+    newTdb->setOverwriteHiveTable( getOverwriteHiveTable());
   }
   else
   {

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/51a9c73e/core/sql/generator/GenRelExeUtil.cpp
----------------------------------------------------------------------
diff --git a/core/sql/generator/GenRelExeUtil.cpp b/core/sql/generator/GenRelExeUtil.cpp
index 55eba37..7629657 100644
--- a/core/sql/generator/GenRelExeUtil.cpp
+++ b/core/sql/generator/GenRelExeUtil.cpp
@@ -3237,12 +3237,11 @@ short ExeUtilFastDelete::codeGen(Generator * generator)
 			    (ex_cri_desc *)(generator->getCriDesc(Generator::DOWN)),
 			    (queue_index)getDefault(GEN_DDL_SIZE_DOWN),
 			    (queue_index)getDefault(GEN_DDL_SIZE_UP),
-#pragma nowarn(1506)   // warning elimination 
 			    getDefault(GEN_DDL_NUM_BUFFERS),
 			    getDefault(GEN_DDL_BUFFER_SIZE),
 			    isHiveTable(),
-			    hiveTableLocation, hiveHdfsHost, hiveHdfsPort);
-#pragma warn(1506)  // warning elimination 
+			    hiveTableLocation, hiveHdfsHost, hiveHdfsPort,
+                            hiveModTS_);
 
   if (doPurgedataCat_)
     exe_util_tdb->setDoPurgedataCat(TRUE);

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/51a9c73e/core/sql/optimizer/BindRelExpr.cpp
----------------------------------------------------------------------
diff --git a/core/sql/optimizer/BindRelExpr.cpp b/core/sql/optimizer/BindRelExpr.cpp
index b81fdcf..9c3f0c2 100644
--- a/core/sql/optimizer/BindRelExpr.cpp
+++ b/core/sql/optimizer/BindRelExpr.cpp
@@ -9195,7 +9195,8 @@ RelExpr *Insert::bindNode(BindWA *bindWA)
                           TRUE,
                           new (bindWA->wHeap()) NAString(tableDir),
                           new (bindWA->wHeap()) NAString(hostName),
-                          hdfsPort);
+                          hdfsPort,
+                          hTabStats->getModificationTS());
 
       //new root to prevent  error 4056 when binding
       newRelExpr = new (bindWA->wHeap()) RelRoot(newRelExpr);

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/51a9c73e/core/sql/optimizer/HDFSHook.cpp
----------------------------------------------------------------------
diff --git a/core/sql/optimizer/HDFSHook.cpp b/core/sql/optimizer/HDFSHook.cpp
index fda6611..0d6fd34 100644
--- a/core/sql/optimizer/HDFSHook.cpp
+++ b/core/sql/optimizer/HDFSHook.cpp
@@ -1070,59 +1070,41 @@ void HHDFSTableStats::print(FILE *ofd)
   fprintf(ofd,"====================================================================\n");
 }
 
-//extern __thread hdfsFS *globalFS;
-hdfsFS *globalFS;
-
 NABoolean HHDFSTableStats::connectHDFS(const NAString &host, Int32 port)
 {
   NABoolean result = TRUE;
 
   // establish connection to HDFS if needed
-  if (globalFS == NULL ||
-      *globalFS == NULL ||
+  if (fs_ == NULL ||
       currHdfsHost_ != host ||
       currHdfsPort_ != port)
     {
-      if (globalFS && *globalFS)
-        disconnectHDFS();
-
-      if (globalFS == NULL)
+      if (fs_)
         {
-          globalFS = new hdfsFS;
-          *globalFS = NULL;
+          hdfsDisconnect(fs_);
+          fs_ = NULL;
         }
-
-      if (*globalFS == NULL)
+      fs_ = hdfsConnect(host, port);
+      
+      if (fs_ == NULL)
         {
-          *globalFS = hdfsConnect(host, port);
+          NAString errMsg("hdfsConnect to ");
+          errMsg += host;
+          errMsg += ":";
+          errMsg += port;
+          errMsg += " failed";
+          diags_.recordError(errMsg, "HHDFSTableStats::connectHDFS");
+          result = FALSE;
         }
-
       currHdfsHost_ = host;
       currHdfsPort_ = port;
     }
-
-  fs_ = *globalFS;
-  if (fs_ == NULL)
-    {
-      NAString errMsg("hdfsConnect to ");
-      errMsg += host;
-      errMsg += ":";
-      errMsg += port;
-      errMsg += " failed";
-      diags_.recordError(errMsg, "HHDFSTableStats::connectHDFS");
-      result = FALSE;
-    }
-  
   return result;
 }
 
 void HHDFSTableStats::disconnectHDFS()
 {
-  if (globalFS && *globalFS)
-    {
-      hdfsDisconnect(*globalFS);
-      *globalFS = NULL;
-    }
-
+  if (fs_)
+    hdfsDisconnect(fs_);
   fs_ = NULL;
 }

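A simplified sketch of the per-object connection caching introduced above (illustrative class; the real HHDFSTableStats also records a diagnostics entry when hdfsConnect fails):

  #include <hdfs.h>
  #include <string>

  class HdfsConn {
    hdfsFS      fs_   = nullptr;
    std::string host_;
    int         port_ = -1;
  public:
    // Reuse the handle while host/port are unchanged; otherwise reconnect.
    bool connect(const std::string &host, int port) {
      if (fs_ && host_ == host && port_ == port)
        return true;
      disconnect();
      fs_ = hdfsConnect(host.c_str(), static_cast<tPort>(port));
      host_ = host;
      port_ = port;
      return fs_ != nullptr;
    }
    void disconnect() {
      if (fs_) hdfsDisconnect(fs_);
      fs_ = nullptr;
    }
    ~HdfsConn() { disconnect(); }
  };
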
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/51a9c73e/core/sql/optimizer/NATable.cpp
----------------------------------------------------------------------
diff --git a/core/sql/optimizer/NATable.cpp b/core/sql/optimizer/NATable.cpp
index 81eb5b0..66d18b6 100644
--- a/core/sql/optimizer/NATable.cpp
+++ b/core/sql/optimizer/NATable.cpp
@@ -7308,6 +7308,9 @@ NATable * NATableDB::get(const ExtendedQualName* key, BindWA* bindWA, NABoolean
      }
   }
 
+  // the reload cqd will be set during AQR after a compile-time/runtime
+  // timestamp mismatch is detected.
+  // If set, reload hive metadata.
   if ((cachedNATable->isHiveTable()) &&
       (CmpCommon::getDefault(HIVE_DATA_MOD_CHECK) == DF_ON) &&
       (CmpCommon::getDefault(TRAF_RELOAD_NATABLE_CACHE) == DF_ON))

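A small sketch of the decision the comment above describes (booleans stand in for the CmpCommon::getDefault() lookups; the helper name is illustrative):

  bool shouldReloadHiveMetadata(bool isHiveTable,
                                bool hiveDataModCheckOn,
                                bool trafReloadNATableCacheOn) {
    // TRAF_RELOAD_NATABLE_CACHE is set by AQR after a timestamp mismatch (8436),
    // so the retried compilation rebuilds the cached NATable from current
    // hive metadata instead of reusing the stale cached definition.
    return isHiveTable && hiveDataModCheckOn && trafReloadNATableCacheOn;
  }
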
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/51a9c73e/core/sql/optimizer/RelExeUtil.h
----------------------------------------------------------------------
diff --git a/core/sql/optimizer/RelExeUtil.h b/core/sql/optimizer/RelExeUtil.h
index 5d0150b..5c782e9 100644
--- a/core/sql/optimizer/RelExeUtil.h
+++ b/core/sql/optimizer/RelExeUtil.h
@@ -1034,7 +1034,8 @@ public:
 		    NABoolean isHiveTable = FALSE,
 		    NAString * hiveTableLocation = NULL,
                     NAString * hiveHostName = NULL,
-                    Int32 hiveHdfsPort = 0)
+                    Int32 hiveHdfsPort = 0,
+                    Int64 hiveModTS = -1)
        : ExeUtilExpr(FAST_DELETE_, name, exprNode, NULL, stmtText, stmtTextCharSet, oHeap),
          doPurgedataCat_(doPurgedataCat),
          noLog_(noLog), ignoreTrigger_(ignoreTrigger),
@@ -1044,7 +1045,8 @@ public:
          offlineTable_(FALSE),
          doLabelPurgedata_(FALSE),
          numLOBs_(0),
-         isHiveTable_(isHiveTable)
+         isHiveTable_(isHiveTable),
+         hiveModTS_(hiveModTS)
   {
     if (isHiveTable )
       {
@@ -1123,6 +1125,9 @@ private:
   NAString  hiveTableLocation_;
   NAString hiveHostName_;
   Int32 hiveHdfsPort_;
+
+  // timestamp of hiveTableLocation. 
+  Int64 hiveModTS_;
 };
 
 class ExeUtilMaintainObject : public ExeUtilExpr

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/51a9c73e/core/sql/optimizer/RelFastTransport.cpp
----------------------------------------------------------------------
diff --git a/core/sql/optimizer/RelFastTransport.cpp b/core/sql/optimizer/RelFastTransport.cpp
index 45d4168..4734d2a 100644
--- a/core/sql/optimizer/RelFastTransport.cpp
+++ b/core/sql/optimizer/RelFastTransport.cpp
@@ -98,6 +98,7 @@ RelExpr * FastExtract::copyTopNode(RelExpr *derivedNode,
   result->recordSeparator_ = recordSeparator_ ;
   result->selectList_ = selectList_;
   result->isSequenceFile_ = isSequenceFile_;
+  result->overwriteHiveTable_ = overwriteHiveTable_;
 
   return RelExpr::copyTopNode(result, outHeap);
 }

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/51a9c73e/core/sql/regress/hive/EXPECTED003
----------------------------------------------------------------------
diff --git a/core/sql/regress/hive/EXPECTED003 b/core/sql/regress/hive/EXPECTED003
index 79cdb5a..0d0c3d0 100644
--- a/core/sql/regress/hive/EXPECTED003
+++ b/core/sql/regress/hive/EXPECTED003
@@ -2,21 +2,18 @@
 >>set schema hive.hive;
 
 --- SQL operation complete.
->>cqd attempt_esp_parallelism 'off';
-
---- SQL operation complete.
->>cqd hive_max_esps  '1';
-
---- SQL operation complete.
->>cqd PARALLEL_NUM_ESPS '1';
-
---- SQL operation complete.
+>>--cqd attempt_esp_parallelism 'off';
+>>--cqd hive_max_esps  '1';
+>>--cqd PARALLEL_NUM_ESPS '1';
 >>cqd HIVE_MAX_STRING_LENGTH '25' ;
 
 --- SQL operation complete.
 >>cqd mode_seahive 'ON';
 
 --- SQL operation complete.
+>>cqd auto_query_retry_warnings 'ON';
+
+--- SQL operation complete.
 >>
 >>prepare s from insert into hive.ins_customer select * from hive.customer;
 
@@ -135,6 +132,10 @@ P_PROMO_SK   P_PROMO_ID                 P_START_DATE_SK  P_END_DATE_SK  P_ITEM_S
 
 *** ERROR[8822] The statement was not prepared.
 
+*** WARNING[8597] Statement was automatically retried 1 time(s). Delay before each retry was 0 seconds. See next entry for the error that caused this retry.
+
+*** WARNING[4023] The degree of each row value constructor (20) must equal the degree of the target table column list (19).
+
 >>-- number of columns doesn't match
 >>
 >>prepare s from
@@ -144,6 +145,10 @@ P_PROMO_SK   P_PROMO_ID                 P_START_DATE_SK  P_END_DATE_SK  P_ITEM_S
 
 *** ERROR[8822] The statement was not prepared.
 
+*** WARNING[8597] Statement was automatically retried 1 time(s). Delay before each retry was 0 seconds. See next entry for the error that caused this retry.
+
+*** WARNING[4039] Column T_TIME_SK is of type INTEGER, incompatible with the value's type, CHAR(1).
+
 >>-- wrong data types
 >>
 >>
@@ -172,6 +177,10 @@ P_PROMO_SK   P_PROMO_ID                 P_START_DATE_SK  P_END_DATE_SK  P_ITEM_S
 >>
 >>insert OVERWRITE TABLE hive.ins_customer_address select * from hive.customer_address;
 
+*** WARNING[8597] Statement was automatically retried 1 time(s). Delay before each retry was 0 seconds. See next entry for the error that caused this retry.
+
+*** WARNING[8436] Mismatch detected between compiletime and runtime hive table definitions.
+
 --- 50000 row(s) inserted.
 >>
 >>select count(*) from hive.customer_address;
@@ -193,6 +202,10 @@ P_PROMO_SK   P_PROMO_ID                 P_START_DATE_SK  P_END_DATE_SK  P_ITEM_S
 >>--execute  again
 >>insert OVERWRITE TABLE hive.ins_customer_address select * from hive.customer_address;
 
+*** WARNING[8597] Statement was automatically retried 1 time(s). Delay before each retry was 0 seconds. See next entry for the error that caused this retry.
+
+*** WARNING[8436] Mismatch detected between compiletime and runtime hive table definitions.
+
 --- 50000 row(s) inserted.
 >>
 >>select count(*) from hive.customer_address;
@@ -595,7 +608,7 @@ test lp bug # 1355477
 > SS_SOLD_DATE_SK  SS_STORE_SK  SS_QUANTITY
 > ---------------  -----------  -----------
 >>--execute again --overwrite should get rid og existing data from previous run
->>control query shape union(cut,esp_exchange(cut));
+>>control query shape union(cut, esp_exchange(cut));
 
 --- SQL operation complete.
 >>prepare s from insert overwrite table ins_store_sales_summary select ss_sold_date_sk,ss_store_sk, sum (ss_quantity) from store_sales group by  ss_sold_date_sk ,ss_store_sk;
@@ -624,6 +637,10 @@ LC   RC   OP   OPERATOR              OPT       DESCRIPTION           CARD
 --- SQL operation complete.
 >>execute s;
 
+*** WARNING[8597] Statement was automatically retried 1 time(s). Delay before each retry was 0 seconds. See next entry for the error that caused this retry.
+
+*** WARNING[8436] Mismatch detected between compiletime and runtime hive table definitions.
+
 --- 12768 row(s) inserted.
 >>control query shape cut;
 

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/51a9c73e/core/sql/regress/hive/EXPECTED005
----------------------------------------------------------------------
diff --git a/core/sql/regress/hive/EXPECTED005 b/core/sql/regress/hive/EXPECTED005
index 8e26184..eaa414e 100644
--- a/core/sql/regress/hive/EXPECTED005
+++ b/core/sql/regress/hive/EXPECTED005
@@ -161,11 +161,12 @@ Y                                          9525
 >>insert into newtable values ('abc');
 
 --- 1 row(s) inserted.
->>cqd query_cache '0';
-
---- SQL operation complete.
 >>select * from newtable;
 
+*** WARNING[8597] Statement was automatically retried 1 time(s). Delay before each retry was 0 seconds. See next entry for the error that caused this retry. 
+
+*** WARNING[8436] Mismatch detected between compiletime and runtime hive table definitions.
+
 A                        
 -------------------------
 
@@ -173,9 +174,6 @@ abc
 
 --- 1 row(s) selected.
 >>-- expect to see the row, but only because query cache is off
->>cqd query_cache reset;
-
---- SQL operation complete.
 >>
 >>insert into hiveregr5.newtable2 values ('xyz');
 
@@ -289,9 +287,6 @@ A            B
 >>-- overwrite the table with auto-generated partitions
 >>sh regrhive.ksh -v -f $REGRTSTDIR/TEST005_d.hive.sql;
 >>
->>cqd query_cache '0';
-
---- SQL operation complete.
 >>prepare s4 from 
 +>  select c_preferred_cust_flag,
 +>         count(*) 
@@ -652,14 +647,18 @@ C1           C2                    C3                         C4               C
 >>sh echo "create table thive(a int);" > TEST005_junk;
 >>sh regrhive.ksh -f TEST005_junk;
 >>
->>select * from hive.hive.thive;
+>>select a from hive.hive.thive;
 
 --- 0 row(s) selected.
 >>
 >>sh echo "insert into thive values (1);" > TEST005_junk;
 >>sh regrhive.ksh -f TEST005_junk;
 >>
->>select * from hive.hive.thive;
+>>select a from hive.hive.thive;
+
+*** WARNING[8597] Statement was automatically retried 1 time(s). Delay before each retry was 0 seconds. See next entry for the error that caused this retry. 
+
+*** WARNING[8436] Mismatch detected between compiletime and runtime hive table definitions.
 
 A          
 -----------
@@ -670,7 +669,7 @@ A
 >>insert into hive.hive.thive values (2);
 
 --- 1 row(s) inserted.
->>select * from hive.hive.thive;
+>>select a from hive.hive.thive;
 
 A          
 -----------
@@ -689,6 +688,19 @@ A
 >>sh echo "insert into thive values (1,2);" > TEST005_junk;
 >>sh regrhive.ksh -f TEST005_junk;
 >>
+>>select a from hive.hive.thive;
+
+*** WARNING[8597] Statement was automatically retried 1 time(s). Delay before each retry was 0 seconds. See next entry for the error that caused this retry. 
+
+*** WARNING[8436] Mismatch detected between compiletime and runtime hive table definitions.
+
+A          
+-----------
+
+          1
+
+--- 1 row(s) selected.
+>>
 >>select * from hive.hive.thive;
 
 A            B          

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/51a9c73e/core/sql/regress/hive/EXPECTED015
----------------------------------------------------------------------
diff --git a/core/sql/regress/hive/EXPECTED015 b/core/sql/regress/hive/EXPECTED015
index f1d6067..162a3bc 100644
--- a/core/sql/regress/hive/EXPECTED015
+++ b/core/sql/regress/hive/EXPECTED015
@@ -115,10 +115,11 @@
 >>cqd COMPRESSED_INTERNAL_FORMAT_DEFRAG_RATIO '100';
 
 --- SQL operation complete.
->>cqd query_cache '0';
+>>cqd HIVE_NUM_ESPS_PER_DATANODE '3';
 
 --- SQL operation complete.
->>cqd HIVE_NUM_ESPS_PER_DATANODE '3';
+>>
+>>cqd auto_query_retry_warnings 'ON';
 
 --- SQL operation complete.
 >>
@@ -209,9 +210,9 @@ Task:  CLEANUP         Status: Started    Object: TRAFODION.HBASE.T015T2
 Task:  CLEANUP         Status: Ended      Object: TRAFODION.HBASE.T015T2
 Task:  PREPARATION     Status: Started    Object: TRAFODION.HBASE.T015T2
        Rows Processed: 5 
-Task:  PREPARATION     Status: Ended      ET: 00:00:00.184
+Task:  PREPARATION     Status: Ended      ET: 00:00:00.051
 Task:  COMPLETION      Status: Started    Object: TRAFODION.HBASE.T015T2
-Task:  COMPLETION      Status: Ended      ET: 00:00:00.359
+Task:  COMPLETION      Status: Ended      ET: 00:00:00.781
 
 --- 5 row(s) loaded.
 >>
@@ -233,9 +234,9 @@ Task:  CLEANUP         Status: Started    Object: TRAFODION.HBASE.T015T2
 Task:  CLEANUP         Status: Ended      Object: TRAFODION.HBASE.T015T2
 Task:  PREPARATION     Status: Started    Object: TRAFODION.HBASE.T015T2
        Rows Processed: 5 
-Task:  PREPARATION     Status: Ended      ET: 00:00:00.176
+Task:  PREPARATION     Status: Ended      ET: 00:00:00.162
 Task:  COMPLETION      Status: Started    Object: TRAFODION.HBASE.T015T2
-Task:  COMPLETION      Status: Ended      ET: 00:00:00.203
+Task:  COMPLETION      Status: Ended      ET: 00:00:00.183
 
 --- 5 row(s) loaded.
 >>
@@ -258,9 +259,9 @@ Task:  CLEANUP         Status: Started    Object: TRAFODION.HBASE.T015T2
 Task:  CLEANUP         Status: Ended      Object: TRAFODION.HBASE.T015T2
 Task:  PREPARATION     Status: Started    Object: TRAFODION.HBASE.T015T2
        Rows Processed: 5 
-Task:  PREPARATION     Status: Ended      ET: 00:00:00.195
+Task:  PREPARATION     Status: Ended      ET: 00:00:00.064
 Task:  COMPLETION      Status: Started    Object: TRAFODION.HBASE.T015T2
-Task:  COMPLETION      Status: Ended      ET: 00:00:00.318
+Task:  COMPLETION      Status: Ended      ET: 00:00:00.327
 
 --- 5 row(s) loaded.
 >>
@@ -288,9 +289,9 @@ Task:  CLEANUP         Status: Started    Object: TRAFODION.HBASE.T015T2
 Task:  CLEANUP         Status: Ended      Object: TRAFODION.HBASE.T015T2
 Task:  PREPARATION     Status: Started    Object: TRAFODION.HBASE.T015T2
        Rows Processed: 5 
-Task:  PREPARATION     Status: Ended      ET: 00:00:00.179
+Task:  PREPARATION     Status: Ended      ET: 00:00:00.164
 Task:  COMPLETION      Status: Started    Object: TRAFODION.HBASE.T015T2
-Task:  COMPLETION      Status: Ended      ET: 00:00:00.380
+Task:  COMPLETION      Status: Ended      ET: 00:00:00.268
 
 --- 5 row(s) loaded.
 >>
@@ -461,9 +462,9 @@ Task:  CLEANUP         Status: Started    Object: TRAFODION.HBASE.CUSTOMER_ADDRE
 Task:  CLEANUP         Status: Ended      Object: TRAFODION.HBASE.CUSTOMER_ADDRESS
 Task:  PREPARATION     Status: Started    Object: TRAFODION.HBASE.CUSTOMER_ADDRESS
        Rows Processed: 5000 
-Task:  PREPARATION     Status: Ended      ET: 00:00:08.673
+Task:  PREPARATION     Status: Ended      ET: 00:00:08.885
 Task:  COMPLETION      Status: Started    Object: TRAFODION.HBASE.CUSTOMER_ADDRESS
-Task:  COMPLETION      Status: Ended      ET: 00:00:00.402
+Task:  COMPLETION      Status: Ended      ET: 00:00:01.379
 
 --- 5000 row(s) loaded.
 >>
@@ -543,9 +544,9 @@ Task:  CLEANUP         Status: Started    Object: TRAFODION.HBASE.CUSTOMER_ADDRE
 Task:  CLEANUP         Status: Ended      Object: TRAFODION.HBASE.CUSTOMER_ADDRESS_NOPK
 Task:  PREPARATION     Status: Started    Object: TRAFODION.HBASE.CUSTOMER_ADDRESS_NOPK
        Rows Processed: 5000 
-Task:  PREPARATION     Status: Ended      ET: 00:00:01.062
+Task:  PREPARATION     Status: Ended      ET: 00:00:01.054
 Task:  COMPLETION      Status: Started    Object: TRAFODION.HBASE.CUSTOMER_ADDRESS_NOPK
-Task:  COMPLETION      Status: Ended      ET: 00:00:00.294
+Task:  COMPLETION      Status: Ended      ET: 00:00:00.820
 
 --- 5000 row(s) loaded.
 >>
@@ -644,9 +645,9 @@ Task:  CLEANUP         Status: Started    Object: TRAFODION.HBASE.CUSTOMER_DEMOG
 Task:  CLEANUP         Status: Ended      Object: TRAFODION.HBASE.CUSTOMER_DEMOGRAPHICS
 Task:  PREPARATION     Status: Started    Object: TRAFODION.HBASE.CUSTOMER_DEMOGRAPHICS
        Rows Processed: 5000 
-Task:  PREPARATION     Status: Ended      ET: 00:00:04.130
+Task:  PREPARATION     Status: Ended      ET: 00:00:03.594
 Task:  COMPLETION      Status: Started    Object: TRAFODION.HBASE.CUSTOMER_DEMOGRAPHICS
-Task:  COMPLETION      Status: Ended      ET: 00:00:00.631
+Task:  COMPLETION      Status: Ended      ET: 00:00:00.323
 
 --- 5000 row(s) loaded.
 >>
@@ -691,8 +692,6 @@ CD_DEMO_SK   CD_GENDER  CD_MARITAL_STATUS  CD_EDUCATION_STATUS   CD_PURCHASE_EST
 >>---------------------
 >>select count(*) from hive.hive.customer_demographics where cd_demo_sk <= 5000;
 
-*** WARNING[6008] Statistics for column (CD_DEMO_SK) from table HIVE.HIVE.CUSTOMER_DEMOGRAPHICS were not available. As a result, the access path chosen might not be the best possible.
-
 (EXPR)              
 --------------------
 
@@ -750,9 +749,9 @@ Task:  CLEANUP         Status: Started    Object: TRAFODION.HBASE.CUSTOMER_DEMOG
 Task:  CLEANUP         Status: Ended      Object: TRAFODION.HBASE.CUSTOMER_DEMOGRAPHICS_SALT
 Task:  PREPARATION     Status: Started    Object: TRAFODION.HBASE.CUSTOMER_DEMOGRAPHICS_SALT
        Rows Processed: 5000 
-Task:  PREPARATION     Status: Ended      ET: 00:00:06.200
+Task:  PREPARATION     Status: Ended      ET: 00:00:05.596
 Task:  COMPLETION      Status: Started    Object: TRAFODION.HBASE.CUSTOMER_DEMOGRAPHICS_SALT
-Task:  COMPLETION      Status: Ended      ET: 00:00:01.735
+Task:  COMPLETION      Status: Ended      ET: 00:00:01.203
 
 --- 5000 row(s) loaded.
 >>
@@ -1262,9 +1261,9 @@ Task:  CLEANUP         Status: Started    Object: TRAFODION.HBASE.CUSTOMER_DEMOG
 Task:  CLEANUP         Status: Ended      Object: TRAFODION.HBASE.CUSTOMER_DEMOGRAPHICS
 Task:  PREPARATION     Status: Started    Object: TRAFODION.HBASE.CUSTOMER_DEMOGRAPHICS
        Rows Processed: 5000 
-Task:  PREPARATION     Status: Ended      ET: 00:00:06.517
+Task:  PREPARATION     Status: Ended      ET: 00:00:04.891
 Task:  COMPLETION      Status: Started    Object: TRAFODION.HBASE.CUSTOMER_DEMOGRAPHICS
-Task:  COMPLETION      Status: Ended      ET: 00:00:02.242
+Task:  COMPLETION      Status: Ended      ET: 00:00:02.090
 
 --- 5000 row(s) loaded.
 >>
@@ -1305,11 +1304,11 @@ Task:  DISABLE INDEXE  Status: Started    Object: TRAFODION.HBASE.CUSTOMER_DEMOG
 Task:  DISABLE INDEXE  Status: Ended      Object: TRAFODION.HBASE.CUSTOMER_DEMOGRAPHICS
 Task:  PREPARATION     Status: Started    Object: TRAFODION.HBASE.CUSTOMER_DEMOGRAPHICS
        Rows Processed: 1000 
-Task:  PREPARATION     Status: Ended      ET: 00:00:04.908
+Task:  PREPARATION     Status: Ended      ET: 00:00:04.588
 Task:  COMPLETION      Status: Started    Object: TRAFODION.HBASE.CUSTOMER_DEMOGRAPHICS
-Task:  COMPLETION      Status: Ended      ET: 00:00:01.357
+Task:  COMPLETION      Status: Ended      ET: 00:00:01.075
 Task:  POPULATE INDEX  Status: Started    Object: TRAFODION.HBASE.CUSTOMER_DEMOGRAPHICS
-Task:  POPULATE INDEX  Status: Ended      ET: 00:00:12.289
+Task:  POPULATE INDEX  Status: Ended      ET: 00:00:11.865
 
 --- 1000 row(s) loaded.
 >>
@@ -1354,11 +1353,11 @@ Task:  DISABLE INDEXE  Status: Started    Object: TRAFODION.HBASE.CUSTOMER_DEMOG
 Task:  DISABLE INDEXE  Status: Ended      Object: TRAFODION.HBASE.CUSTOMER_DEMOGRAPHICS_SALT
 Task:  PREPARATION     Status: Started    Object: TRAFODION.HBASE.CUSTOMER_DEMOGRAPHICS_SALT
        Rows Processed: 5000 
-Task:  PREPARATION     Status: Ended      ET: 00:00:05.592
+Task:  PREPARATION     Status: Ended      ET: 00:00:05.812
 Task:  COMPLETION      Status: Started    Object: TRAFODION.HBASE.CUSTOMER_DEMOGRAPHICS_SALT
-Task:  COMPLETION      Status: Ended      ET: 00:00:02.018
+Task:  COMPLETION      Status: Ended      ET: 00:00:02.106
 Task:  POPULATE INDEX  Status: Started    Object: TRAFODION.HBASE.CUSTOMER_DEMOGRAPHICS_SALT
-Task:  POPULATE INDEX  Status: Ended      ET: 00:00:14.563
+Task:  POPULATE INDEX  Status: Ended      ET: 00:00:12.813
 
 --- 5000 row(s) loaded.
 >>
@@ -1532,9 +1531,9 @@ Task:  CLEANUP         Status: Started    Object: TRAFODION.HBASE."customer_addr
 Task:  CLEANUP         Status: Ended      Object: TRAFODION.HBASE."customer_address_delim"
 Task:  PREPARATION     Status: Started    Object: TRAFODION.HBASE."customer_address_delim"
        Rows Processed: 5000 
-Task:  PREPARATION     Status: Ended      ET: 00:00:04.154
+Task:  PREPARATION     Status: Ended      ET: 00:00:03.585
 Task:  COMPLETION      Status: Started    Object: TRAFODION.HBASE."customer_address_delim"
-Task:  COMPLETION      Status: Ended      ET: 00:00:01.729
+Task:  COMPLETION      Status: Ended      ET: 00:00:01.575
 
 --- 5000 row(s) loaded.
 >>

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/51a9c73e/core/sql/regress/hive/TEST003
----------------------------------------------------------------------
diff --git a/core/sql/regress/hive/TEST003 b/core/sql/regress/hive/TEST003
index 7e3641b..3761906 100644
--- a/core/sql/regress/hive/TEST003
+++ b/core/sql/regress/hive/TEST003
@@ -66,11 +66,12 @@ order by s desc;
 log LOG003 clear;
 
 set schema hive.hive;
-cqd attempt_esp_parallelism 'off';
-cqd hive_max_esps  '1';
-cqd PARALLEL_NUM_ESPS '1';
+--cqd attempt_esp_parallelism 'off';
+--cqd hive_max_esps  '1';
+--cqd PARALLEL_NUM_ESPS '1';
 cqd HIVE_MAX_STRING_LENGTH '25' ;
 cqd mode_seahive 'ON';
+cqd auto_query_retry_warnings 'ON';
 
 prepare s from insert into hive.ins_customer select * from hive.customer;
 execute s;
@@ -187,7 +188,7 @@ sh diff -b LOG003_ORIG_STORE_SALES_SUMMARY.DAT LOG003_INS_STORE_SALES_SUMMARY.DA
 
 log LOG003;
 --execute again --overwrite should get rid og existing data from previous run
-control query shape union(cut,esp_exchange(cut));
+control query shape union(cut, esp_exchange(cut));
 prepare s from insert overwrite table ins_store_sales_summary select ss_sold_date_sk,ss_store_sk, sum (ss_quantity) from store_sales group by  ss_sold_date_sk ,ss_store_sk; 
 explain options 'f' s;
 execute s;

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/51a9c73e/core/sql/regress/hive/TEST005
----------------------------------------------------------------------
diff --git a/core/sql/regress/hive/TEST005 b/core/sql/regress/hive/TEST005
index e44b633..cd3fa56 100644
--- a/core/sql/regress/hive/TEST005
+++ b/core/sql/regress/hive/TEST005
@@ -144,10 +144,8 @@ where c_customer_sk between 20000 and 39999;
 select * from newtable;
 -- no rows, but should know the new table
 insert into newtable values ('abc');
-cqd query_cache '0';
 select * from newtable;
 -- expect to see the row, but only because query cache is off
-cqd query_cache reset;
 
 insert into hiveregr5.newtable2 values ('xyz');
 select * from hiveregr5.newtable2;
@@ -190,7 +188,6 @@ select a,b from newtable;
 -- overwrite the table with auto-generated partitions
 sh regrhive.ksh -v -f $REGRTSTDIR/TEST005_d.hive.sql;
 
-cqd query_cache '0';
 prepare s4 from 
   select c_preferred_cust_flag,
          count(*) 
@@ -292,14 +289,14 @@ sh regrhive.ksh -f TEST005_junk;
 sh echo "create table thive(a int);" > TEST005_junk;
 sh regrhive.ksh -f TEST005_junk;
 
-select * from hive.hive.thive;
+select a from hive.hive.thive;
 
 sh echo "insert into thive values (1);" > TEST005_junk;
 sh regrhive.ksh -f TEST005_junk;
 
-select * from hive.hive.thive;
+select a from hive.hive.thive;
 insert into hive.hive.thive values (2);
-select * from hive.hive.thive;
+select a from hive.hive.thive;
 
 sh echo "drop table thive;" > TEST005_junk;
 sh regrhive.ksh -f TEST005_junk;
@@ -310,6 +307,8 @@ sh regrhive.ksh -f TEST005_junk;
 sh echo "insert into thive values (1,2);" > TEST005_junk;
 sh regrhive.ksh -f TEST005_junk;
 
+select a from hive.hive.thive;
+
 select * from hive.hive.thive;
 
 log;

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/51a9c73e/core/sql/regress/hive/TEST015
----------------------------------------------------------------------
diff --git a/core/sql/regress/hive/TEST015 b/core/sql/regress/hive/TEST015
index ed0dd12..dfac48f 100644
--- a/core/sql/regress/hive/TEST015
+++ b/core/sql/regress/hive/TEST015
@@ -39,9 +39,10 @@ obey TEST015(setup);
 cqd COMPRESSED_INTERNAL_FORMAT 'ON';
 cqd COMPRESSED_INTERNAL_FORMAT_BMO 'ON';
 cqd COMPRESSED_INTERNAL_FORMAT_DEFRAG_RATIO '100';
-cqd query_cache '0';
 cqd HIVE_NUM_ESPS_PER_DATANODE '3';
 
+cqd auto_query_retry_warnings 'ON';
+
 obey TEST015(test_bulk_load_simple);
 
 log;


[4/8] incubator-trafodion git commit: hive data modification detection: commit #2

Posted by an...@apache.org.
hive data modification detection: commit #2


Project: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/commit/1820da1c
Tree: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/tree/1820da1c
Diff: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/diff/1820da1c

Branch: refs/heads/master
Commit: 1820da1cf3776a318951c241814e92a4980ccd34
Parents: 772b4a3
Author: Anoop Sharma <an...@esgyn.com>
Authored: Sat May 28 01:10:23 2016 +0000
Committer: Anoop Sharma <an...@esgyn.com>
Committed: Sat May 28 01:10:23 2016 +0000

----------------------------------------------------------------------
 core/sql/cli/SessionDefaults.cpp   |   3 +-
 core/sql/comexe/ComTdbHdfsScan.cpp |  36 ++++++---
 core/sql/comexe/ComTdbHdfsScan.h   |  15 ++--
 core/sql/executor/ExHdfsScan.cpp   |  11 ++-
 core/sql/exp/ExpLOBaccess.cpp      | 134 +++++++++++++++++++-------------
 core/sql/exp/ExpLOBaccess.h        |   7 +-
 core/sql/exp/ExpLOBinterface.cpp   |   6 +-
 core/sql/exp/ExpLOBinterface.h     |   2 +-
 core/sql/generator/GenRelScan.cpp  |  24 ++++--
 core/sql/optimizer/HDFSHook.cpp    |  49 ++++++++----
 core/sql/optimizer/HDFSHook.h      |   3 +
 core/sql/optimizer/NATable.cpp     |  10 ++-
 core/sql/regress/hive/EXPECTED005  | 133 +++++++++++++++++++++++--------
 core/sql/regress/hive/TEST005      |  33 +++++++-
 14 files changed, 328 insertions(+), 138 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/1820da1c/core/sql/cli/SessionDefaults.cpp
----------------------------------------------------------------------
diff --git a/core/sql/cli/SessionDefaults.cpp b/core/sql/cli/SessionDefaults.cpp
index 593804c..024754b 100644
--- a/core/sql/cli/SessionDefaults.cpp
+++ b/core/sql/cli/SessionDefaults.cpp
@@ -750,6 +750,7 @@ static const QueryString cqdInfo[] =
 , {"transform_to_sidetree_insert"}, {"OFF"}
 , {"METADATA_CACHE_SIZE"}, {"0"}
 , {"QUERY_CACHE"}, {"0"}
+, {"TRAF_RELOAD_NATABLE_CACHE"}, {"ON"}
 };
 
 static const AQRInfo::AQRErrorMap aqrErrorMap[] = 
@@ -781,7 +782,7 @@ static const AQRInfo::AQRErrorMap aqrErrorMap[] =
   // parallel purgedata failed
   AQREntry(   8022,      0,      3,    60,      0,   0, "",    0,     1),
 
-  AQREntry(   8436,      0,      1,     0,      0,   1, "04",  0,     0),
+  AQREntry(   8436,      0,      1,     0,      0,   2, "04:05",  0,     0),
 
   // FS memory errors
   AQREntry(   8550,     30,      1,    60,      0,   0, "",    0,     0),

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/1820da1c/core/sql/comexe/ComTdbHdfsScan.cpp
----------------------------------------------------------------------
diff --git a/core/sql/comexe/ComTdbHdfsScan.cpp b/core/sql/comexe/ComTdbHdfsScan.cpp
index a6aac21..91d7468 100755
--- a/core/sql/comexe/ComTdbHdfsScan.cpp
+++ b/core/sql/comexe/ComTdbHdfsScan.cpp
@@ -69,9 +69,10 @@ ComTdbHdfsScan::ComTdbHdfsScan(
                                char * loggingLocation,
                                char * errCountId,
 
-                               char * hdfsFilesDir,
+                               char * hdfsRootDir,
                                Int64  modTSforDir,
-                               Lng32  numFilesInDir
+                               Lng32  numOfPartCols,
+                               Queue * hdfsDirsToCheck
 
                                )
 : ComTdb( ComTdb::ex_HDFS_SCAN,
@@ -113,9 +114,10 @@ ComTdbHdfsScan::ComTdbHdfsScan(
   errCountTable_(errCountTable),
   loggingLocation_(loggingLocation),
   errCountRowId_(errCountId),
-  hdfsFilesDir_(hdfsFilesDir),
+  hdfsRootDir_(hdfsRootDir),
   modTSforDir_(modTSforDir),
-  numFilesInDir_(numFilesInDir)
+  numOfPartCols_(numOfPartCols),
+  hdfsDirsToCheck_(hdfsDirsToCheck)
 {};
 
 ComTdbHdfsScan::~ComTdbHdfsScan()
@@ -151,7 +153,8 @@ Long ComTdbHdfsScan::pack(void * space)
   loggingLocation_.pack(space);
   errCountRowId_.pack(space);
 
-  hdfsFilesDir_.pack(space);
+  hdfsRootDir_.pack(space);
+  hdfsDirsToCheck_.pack(space);
 
   return ComTdb::pack(space);
 }
@@ -185,7 +188,8 @@ Lng32 ComTdbHdfsScan::unpack(void * base, void * reallocator)
   if (loggingLocation_.unpack(base)) return -1;
   if (errCountRowId_.unpack(base)) return -1;
 
-  if (hdfsFilesDir_.unpack(base)) return -1;
+  if (hdfsRootDir_.unpack(base)) return -1;
+  if (hdfsDirsToCheck_.unpack(base, reallocator)) return -1;
 
   return ComTdb::unpack(base, reallocator);
 }
@@ -434,14 +438,26 @@ void ComTdbHdfsScan::displayContents(Space * space,ULng32 flag)
             }
         }
 
-      if (hdfsFilesDir_)
+      if (hdfsRootDir_)
         {
-          str_sprintf(buf, "hdfsDir: %s", hdfsFilesDir_);
+          str_sprintf(buf, "hdfsRootDir: %s", hdfsRootDir_);
           space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short));
 
-          str_sprintf(buf, "modTSforDir_ = %Ld, numFilesInDir_ = %d",
-                      modTSforDir_, numFilesInDir_);
+          str_sprintf(buf, "modTSforDir_ = %Ld, numOfPartCols_ = %d",
+                      modTSforDir_, numOfPartCols_);
           space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short));
+
+          if (hdfsDirsToCheck())
+            {
+              hdfsDirsToCheck()->position();
+              char * dir = NULL;
+              while ((dir = (char*)hdfsDirsToCheck()->getNext()) != NULL)
+                {
+                  str_sprintf(buf, "Dir Name: %s", dir);
+                  space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short));
+                }
+            }
+
         }
 
     }

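The displayContents() hunk above walks the new queue with the position()/getNext() idiom; a minimal stand-in showing the same pattern (Trafodion's actual Queue class is defined elsewhere in the source tree):

  #include <cstdio>
  #include <string>
  #include <vector>

  class StringQueue {
    std::vector<std::string> entries_;
    size_t cursor_ = 0;
  public:
    void insert(const std::string &s) { entries_.push_back(s); }
    void position()                   { cursor_ = 0; }          // reset cursor
    const char *getNext() {                                     // NULL at end
      return cursor_ < entries_.size() ? entries_[cursor_++].c_str() : nullptr;
    }
  };

  void printDirsToCheck(StringQueue &dirs) {
    dirs.position();
    for (const char *dir = dirs.getNext(); dir; dir = dirs.getNext())
      std::printf("Dir Name: %s\n", dir);
  }
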
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/1820da1c/core/sql/comexe/ComTdbHdfsScan.h
----------------------------------------------------------------------
diff --git a/core/sql/comexe/ComTdbHdfsScan.h b/core/sql/comexe/ComTdbHdfsScan.h
index 0b17947..c60b192 100755
--- a/core/sql/comexe/ComTdbHdfsScan.h
+++ b/core/sql/comexe/ComTdbHdfsScan.h
@@ -136,11 +136,12 @@ class ComTdbHdfsScan : public ComTdb
 
   // next 3 params used to check if data under hdfsFileDir
   // was modified after query was compiled.
-  NABasicPtr hdfsFilesDir_;                                    // 192 - 199
+  NABasicPtr hdfsRootDir_;                                     // 192 - 199
   Int64  modTSforDir_;                                         // 200 - 207
-  Lng32  numFilesInDir_;                                       // 208 - 211
+  Lng32  numOfPartCols_;                                       // 208 - 211
+  QueuePtr hdfsDirsToCheck_;                                   // 212 - 219
 
-  char fillersComTdbHdfsScan2_[12];                           // 212 - 223
+  char fillersComTdbHdfsScan2_[4];                             // 220 - 223
     
 public:
   enum HDFSFileType
@@ -195,9 +196,10 @@ public:
 
                  // next 3 params used to check if data under hdfsFileDir
                  // was modified after query was compiled.
-                 char * hdfsFilesDir  = NULL,
+                 char * hdfsRootDir  = NULL,
                  Int64  modTSforDir   = -1,
-                 Lng32  numFilesInDir = -1
+                 Lng32  numOfPartCols = -1,
+                 Queue * hdfsDirsToCheck = NULL
                  );
 
   ~ComTdbHdfsScan();
@@ -329,7 +331,8 @@ public:
   {
     return workCriDesc_->getTupleDescriptor(moveExprColsTuppIndex_);
   }
-  
+
+  Queue * hdfsDirsToCheck() { return hdfsDirsToCheck_; }
 };
 
 inline ComTdb * ComTdbHdfsScan::getChildTdb()

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/1820da1c/core/sql/executor/ExHdfsScan.cpp
----------------------------------------------------------------------
diff --git a/core/sql/executor/ExHdfsScan.cpp b/core/sql/executor/ExHdfsScan.cpp
index dbb5e7c..9010c83 100644
--- a/core/sql/executor/ExHdfsScan.cpp
+++ b/core/sql/executor/ExHdfsScan.cpp
@@ -423,14 +423,19 @@ ExWorkProcRetcode ExHdfsScanTcb::work()
         case CHECK_FOR_DATA_MOD:
         case CHECK_FOR_DATA_MOD_AND_DONE:
           {
-            char * dirPath = hdfsScanTdb().hdfsFilesDir_;
+            char * dirPath = hdfsScanTdb().hdfsRootDir_;
             if (! dirPath)
               dataModCheckDone_ = TRUE;
 
             if (NOT dataModCheckDone_)
               {
                 Int64 modTS = hdfsScanTdb().modTSforDir_;
-                Lng32 numFilesInDir = hdfsScanTdb().numFilesInDir_;
+                Lng32 numOfPartLevels = hdfsScanTdb().numOfPartCols_;
+
+                if (hdfsScanTdb().hdfsDirsToCheck())
+                  {
+                    // TBD
+                  }
 
                 retcode = ExpLOBinterfaceDataModCheck
                   (lobGlob_,
@@ -438,7 +443,7 @@ ExWorkProcRetcode ExHdfsScanTcb::work()
                    hdfsScanTdb().hostName_,
                    hdfsScanTdb().port_,
                    modTS,
-                   numFilesInDir);
+                   numOfPartLevels);
                 
                 if (retcode < 0)
                   {

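The scan TCB above latches the check so the HDFS round trip happens at most once per statement; a sketch of that pattern (member and callback names are illustrative):

  struct ScanState {
    bool dataModCheckDone_ = false;

    bool dataModCheck(bool (*doHdfsCheck)()) {
      if (dataModCheckDone_)
        return true;               // already verified for this statement
      bool ok = doHdfsCheck();     // stands in for ExpLOBinterfaceDataModCheck
      dataModCheckDone_ = true;
      return ok;
    }
  };
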
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/1820da1c/core/sql/exp/ExpLOBaccess.cpp
----------------------------------------------------------------------
diff --git a/core/sql/exp/ExpLOBaccess.cpp b/core/sql/exp/ExpLOBaccess.cpp
index 5c1d2fa..3ac537b 100644
--- a/core/sql/exp/ExpLOBaccess.cpp
+++ b/core/sql/exp/ExpLOBaccess.cpp
@@ -110,13 +110,16 @@ ExLob::~ExLob()
    
 }
 
+__thread hdfsFS *globalFS = NULL;
+ 
 Ex_Lob_Error ExLob::initialize(char *lobFile, Ex_Lob_Mode mode, 
                                char *dir, 
 			       LobsStorage storage,
                                char *hdfsServer, Int64 hdfsPort,
                                char *lobLocation,
                                int bufferSize , short replication ,
-                               int blockSize, Int64 lobMaxSize, ExLobGlobals *lobGlobals)
+                               int blockSize, Int64 lobMaxSize, 
+                               ExLobGlobals *lobGlobals)
 {
   int openFlags;
   mode_t filePerms = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH;
@@ -141,34 +144,41 @@ Ex_Lob_Error ExLob::initialize(char *lobFile, Ex_Lob_Mode mode,
       
     }
 
-  if (storage_ != Lob_Invalid_Storage) 
+  hdfsServer_ = hdfsServer;
+  hdfsPort_ = hdfsPort;
+
+  if (globalFS == NULL)
+    {
+      globalFS = new hdfsFS;
+      *globalFS = NULL;
+    }
+  
+  if (*globalFS == NULL)
+    {
+      *globalFS = hdfsConnect(hdfsServer_, hdfsPort_);
+      if (*globalFS == NULL)
+        return LOB_HDFS_CONNECT_ERROR;
+    }
+
+  fs_ = *globalFS;
+  if (lobGlobals)
+    lobGlobals->setHdfsFs(fs_);
+  
+  if (storage_ != Lob_Invalid_Storage)
     {
       return LOB_INIT_ERROR;
-    } else 
+    } 
+  else 
     {
       storage_ = storage;
     }
 
   stats_.init(); 
 
-  hdfsServer_ = hdfsServer;
-  hdfsPort_ = hdfsPort;
   if (lobLocation)
     lobLocation_ = lobLocation;
   clock_gettime(CLOCK_MONOTONIC, &startTime);
 
-  if (lobGlobals->getHdfsFs() == NULL)
-    {
-      fs_ = hdfsConnect(hdfsServer_, hdfsPort_);
-      if (fs_ == NULL) 
-	return LOB_HDFS_CONNECT_ERROR;
-      lobGlobals->setHdfsFs(fs_);
-    } 
-  else 
-    {
-      fs_ = lobGlobals->getHdfsFs();
-    }
-
   clock_gettime(CLOCK_MONOTONIC, &endTime);
 
   secs = endTime.tv_sec - startTime.tv_sec;
@@ -379,28 +389,17 @@ Ex_Lob_Error ExLob::writeDataSimple(char *data, Int64 size, LobsSubOper subOpera
     return LOB_OPER_OK;
 }
 
-Ex_Lob_Error ExLob::dataModCheck(
+Ex_Lob_Error ExLob::dataModCheck2(
        char * dirPath, 
        Int64  inputModTS,
-       Lng32  inputNumFilesInDir,
-       Lng32  &numFilesInDir)
+       Lng32  numOfPartLevels)
 {
-  // find mod time of dir
-  hdfsFileInfo *fileInfos = hdfsGetPathInfo(fs_, dirPath);
-  if (fileInfos == NULL)
-    {
-      return LOB_DATA_FILE_NOT_FOUND_ERROR;
-    }
-
-  Int64 currModTS = fileInfos[0].mLastMod;
-  hdfsFreeFileInfo(fileInfos, 1);
-  if ((inputModTS > 0) &&
-      (currModTS > inputModTS))
-    return LOB_DATA_MOD_CHECK_ERROR;
+  if (numOfPartLevels == 0)
+    return LOB_OPER_OK;
 
-  // find number of files in dirPath.
   Lng32 currNumFilesInDir = 0;
-  fileInfos = hdfsListDirectory(fs_, dirPath, &currNumFilesInDir);
+  hdfsFileInfo * fileInfos = 
+    hdfsListDirectory(fs_, dirPath, &currNumFilesInDir);
   if ((currNumFilesInDir > 0) && (fileInfos == NULL))
     {
       return LOB_DATA_FILE_NOT_FOUND_ERROR;
@@ -412,17 +411,9 @@ Ex_Lob_Error ExLob::dataModCheck(
       hdfsFileInfo &fileInfo = fileInfos[i];
       if (fileInfo.mKind == kObjectKindDirectory)
         {
-          if (dataModCheck(fileInfo.mName, inputModTS, 
-                           inputNumFilesInDir, numFilesInDir) ==
-              LOB_DATA_MOD_CHECK_ERROR)
-            {
-              failed = TRUE;
-            }
-        }
-      else if (fileInfo.mKind == kObjectKindFile)
-        {
-          numFilesInDir++;
-          if (numFilesInDir > inputNumFilesInDir)
+          Int64 currModTS = fileInfo.mLastMod;
+          if ((inputModTS > 0) &&
+              (currModTS > inputModTS))
             failed = TRUE;
         }
     }
@@ -431,6 +422,47 @@ Ex_Lob_Error ExLob::dataModCheck(
   if (failed)
     return LOB_DATA_MOD_CHECK_ERROR;
 
+  numOfPartLevels--;
+  Ex_Lob_Error err = LOB_OPER_OK;
+  if (numOfPartLevels > 0)
+    {
+      for (Lng32 i = 0; ((NOT failed) && (i < currNumFilesInDir)); i++)
+        {
+          hdfsFileInfo &fileInfo = fileInfos[i];
+          err = dataModCheck2(fileInfo.mName, inputModTS, numOfPartLevels);
+          if (err != LOB_OPER_OK)
+            return err;
+        }
+    }
+
+  return LOB_OPER_OK;
+}
+
+// numOfPartLevels: 0, if not partitioned
+//                  N, number of partitioning cols
+Ex_Lob_Error ExLob::dataModCheck(
+       char * dirPath, 
+       Int64  inputModTS,
+       Lng32  numOfPartLevels)
+{
+  // find mod time of root dir
+  hdfsFileInfo *fileInfos = hdfsGetPathInfo(fs_, dirPath);
+  if (fileInfos == NULL)
+    {
+      return LOB_DATA_FILE_NOT_FOUND_ERROR;
+    }
+
+  Int64 currModTS = fileInfos[0].mLastMod;
+  hdfsFreeFileInfo(fileInfos, 1);
+  if ((inputModTS > 0) &&
+      (currModTS > inputModTS))
+    return LOB_DATA_MOD_CHECK_ERROR;
+
+  if (numOfPartLevels > 0)
+    {
+      return dataModCheck2(dirPath, inputModTS, numOfPartLevels);
+    }
+
   return LOB_OPER_OK;
 }
 
@@ -2387,17 +2419,13 @@ Ex_Lob_Error ExLobsOper (
       {
         lobPtr->initialize(NULL, EX_LOB_RW,
                            NULL, storage, hdfsServer, hdfsPort, NULL, 
-                           bufferSize, replication, blockSize);
+                           bufferSize, replication, blockSize, lobMaxSize, 
+                           lobGlobals);
 
         Int64 inputModTS = *(Int64*)blackBox;
-        Int32 inputNumFilesInDir = 
+        Int32 inputNumOfPartLevels = 
           *(Lng32*)&((char*)blackBox)[sizeof(inputModTS)];
-        Int32 numFilesInDir = 0;
-        err = lobPtr->dataModCheck(dir, inputModTS, 
-                                   inputNumFilesInDir, numFilesInDir);
-        if ((err == LOB_OPER_OK) &&
-            (numFilesInDir != inputNumFilesInDir))
-          err = LOB_DATA_MOD_CHECK_ERROR;
+        err = lobPtr->dataModCheck(dir, inputModTS, inputNumOfPartLevels);
       }
       break;
 

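The hunks above replace the old file-count comparison with a timestamp-only walk: dataModCheck() compares the table root directory's HDFS modification time against the compile-time value, and dataModCheck2() repeats that comparison one level per partitioning column. Below is a minimal standalone sketch of the same walk against the libhdfs C API; it is not the Trafodion source, the helper names checkDirModTime()/checkPartitionDirs() are illustrative, and the Ex_Lob_Error codes are collapsed into a bool (true means "unchanged since compile time").

// Sketch only: mirrors the dataModCheck/dataModCheck2 logic in the patch.
#include <hdfs.h>
#include <cstdint>

static bool checkPartitionDirs(hdfsFS fs, const char *dirPath,
                               int64_t inputModTS, int numOfPartLevels)
{
  if (numOfPartLevels == 0)
    return true;                            // nothing below this level

  int numEntries = 0;
  hdfsFileInfo *entries = hdfsListDirectory(fs, dirPath, &numEntries);
  if (numEntries > 0 && entries == NULL)
    return false;                           // directory no longer readable

  bool unchanged = true;
  for (int i = 0; unchanged && i < numEntries; i++)
    {
      if (entries[i].mKind != kObjectKindDirectory)
        continue;                           // only partition dirs matter here
      if (inputModTS > 0 && entries[i].mLastMod > inputModTS)
        unchanged = false;                  // partition modified after compile
      else if (numOfPartLevels > 1)         // descend one partitioning level
        unchanged = checkPartitionDirs(fs, entries[i].mName,
                                       inputModTS, numOfPartLevels - 1);
    }

  if (entries)
    hdfsFreeFileInfo(entries, numEntries);
  return unchanged;
}

// numOfPartLevels: 0 if the table is not partitioned,
//                  N = number of partitioning columns otherwise.
static bool checkDirModTime(hdfsFS fs, const char *rootDir,
                            int64_t inputModTS, int numOfPartLevels)
{
  hdfsFileInfo *info = hdfsGetPathInfo(fs, rootDir);
  if (info == NULL)
    return false;                           // root dir not found

  bool unchanged = !(inputModTS > 0 && info->mLastMod > inputModTS);
  hdfsFreeFileInfo(info, 1);

  if (unchanged && numOfPartLevels > 0)
    unchanged = checkPartitionDirs(fs, rootDir, inputModTS, numOfPartLevels);
  return unchanged;
}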
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/1820da1c/core/sql/exp/ExpLOBaccess.h
----------------------------------------------------------------------
diff --git a/core/sql/exp/ExpLOBaccess.h b/core/sql/exp/ExpLOBaccess.h
index 138116c..518fbd7 100644
--- a/core/sql/exp/ExpLOBaccess.h
+++ b/core/sql/exp/ExpLOBaccess.h
@@ -489,8 +489,11 @@ class ExLob
   Ex_Lob_Error dataModCheck(
        char * dirPath, 
        Int64  modTS,
-       Lng32  inputNumFilesInDir,
-       Lng32  &numFilesInDir);
+       Lng32  numOfPartLevels);
+  Ex_Lob_Error dataModCheck2(
+       char * dirPath, 
+       Int64  modTS,
+       Lng32  numOfPartLevels);
 
   Ex_Lob_Error emptyDirectory();
   ExLobStats *getStats() { return &stats_; }

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/1820da1c/core/sql/exp/ExpLOBinterface.cpp
----------------------------------------------------------------------
diff --git a/core/sql/exp/ExpLOBinterface.cpp b/core/sql/exp/ExpLOBinterface.cpp
index a984635..6fe6fa9 100644
--- a/core/sql/exp/ExpLOBinterface.cpp
+++ b/core/sql/exp/ExpLOBinterface.cpp
@@ -236,7 +236,7 @@ Lng32 ExpLOBinterfaceDataModCheck(void * lobGlob,
                                   char * lobHdfsServer,
                                   Lng32  lobHdfsPort,
                                   Int64  modTS,
-                                  Lng32  numFilesInDir)
+                                  Lng32  numOfPartLevels)
 {
   Ex_Lob_Error err;
 
@@ -247,8 +247,8 @@ Lng32 ExpLOBinterfaceDataModCheck(void * lobGlob,
 
   char dirInfoBuf[100];
   *(Int64*)dirInfoBuf = modTS;
-  *(Lng32*)&dirInfoBuf[sizeof(modTS)] = numFilesInDir;
-  Lng32 dirInfoBufLen = sizeof(modTS) + sizeof(numFilesInDir);
+  *(Lng32*)&dirInfoBuf[sizeof(modTS)] = numOfPartLevels;
+  Lng32 dirInfoBufLen = sizeof(modTS) + sizeof(numOfPartLevels);
   err = ExLobsOper((char*)"",
                    NULL, 0,
                    lobHdfsServer, lobHdfsPort,

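The dirInfoBuf assembled above is the "black box" that ExpLOBinterfaceDataModCheck() hands to ExLobsOper(); the branch shown earlier in the ExpLOBaccess.cpp hunk reads the same two values back out. Here is a hedged sketch of that 12-byte layout, using std::memcpy instead of the patch's pointer casts; packDirInfo()/unpackDirInfo() are made-up helper names, not part of the Trafodion interface.

#include <cstdint>
#include <cstring>

// Layout: 8-byte modification timestamp followed by a 4-byte partition-level
// count, written into a caller-supplied buffer (dirInfoBuf in the patch).
static size_t packDirInfo(char *buf, int64_t modTS, int32_t numOfPartLevels)
{
  std::memcpy(buf, &modTS, sizeof(modTS));
  std::memcpy(buf + sizeof(modTS), &numOfPartLevels, sizeof(numOfPartLevels));
  return sizeof(modTS) + sizeof(numOfPartLevels);     // 12 bytes used
}

static void unpackDirInfo(const char *buf,
                          int64_t &modTS, int32_t &numOfPartLevels)
{
  std::memcpy(&modTS, buf, sizeof(modTS));
  std::memcpy(&numOfPartLevels, buf + sizeof(modTS), sizeof(numOfPartLevels));
}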
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/1820da1c/core/sql/exp/ExpLOBinterface.h
----------------------------------------------------------------------
diff --git a/core/sql/exp/ExpLOBinterface.h b/core/sql/exp/ExpLOBinterface.h
index a9b7597..689e422 100644
--- a/core/sql/exp/ExpLOBinterface.h
+++ b/core/sql/exp/ExpLOBinterface.h
@@ -303,7 +303,7 @@ Lng32 ExpLOBinterfaceDataModCheck(void * lobGlob,
                                   char * lobHdfsServer,
                                   Lng32  lobHdfsPort,
                                   Int64  modTS,
-                                  Lng32  numFilesInDir);
+                                  Lng32  numOfPartLevels);
 
 Lng32 ExpLOBinterfaceEmptyDirectory(void * lobGlob,
                             char * lobName,

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/1820da1c/core/sql/generator/GenRelScan.cpp
----------------------------------------------------------------------
diff --git a/core/sql/generator/GenRelScan.cpp b/core/sql/generator/GenRelScan.cpp
index a781815..5474d94 100644
--- a/core/sql/generator/GenRelScan.cpp
+++ b/core/sql/generator/GenRelScan.cpp
@@ -1147,19 +1147,29 @@ if (hTabStats->isOrcFile())
     space->AllocateAndCopyToAlignedSpace(GenGetQualifiedName(getIndexDesc()->getNAFileSet()->getFileSetName()), 0);
 
   // info needed to validate hdfs file structs
-  //  const HHDFSTableStats* hTabStats = 
-  //    getIndexDesc()->getNAFileSet()->getHHDFSTableStats();
-  char * hdfsDir = NULL;
+  char * hdfsRootDir = NULL;
   Int64 modTS = -1;
-  Lng32 numFilesInDir = -1;
+  Lng32 numOfPartLevels = -1;
+  Queue * hdfsDirsToCheck = NULL;
   if (CmpCommon::getDefault(HIVE_DATA_MOD_CHECK) == DF_ON)
     {
-      hdfsDir =
+      hdfsRootDir =
         space->allocateAndCopyToAlignedSpace(hTabStats->tableDir().data(),
                                              hTabStats->tableDir().length(),
                                              0);
       modTS = hTabStats->getModificationTS();
-      numFilesInDir =  hTabStats->getNumFiles();
+      numOfPartLevels = hTabStats->numOfPartCols();
+
+      // if specific directories are to be checked based on the query struct
+      // (for example, when certain partitions are explicitly specified), 
+      // add them to hdfsDirsToCheck.
+      // At runtime, only these dirs will be checked for data modification.
+      // ** TBD **
+
+      // Right now, timestamp info is not being generated correctly for
+      // partitioned files. Skip data mod check for them.
+      if (numOfPartLevels > 0)
+        hdfsRootDir = NULL;
     }
 
   // create hdfsscan_tdb
@@ -1202,7 +1212,7 @@ if (hTabStats->isOrcFile())
 		   logLocation,
 		   errCountRowId,
 
-                   hdfsDir, modTS, numFilesInDir
+                   hdfsRootDir, modTS, numOfPartLevels, hdfsDirsToCheck
 		   );
 
   generator->initTdbFields(hdfsscan_tdb);

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/1820da1c/core/sql/optimizer/HDFSHook.cpp
----------------------------------------------------------------------
diff --git a/core/sql/optimizer/HDFSHook.cpp b/core/sql/optimizer/HDFSHook.cpp
index 90df234..a165b51 100644
--- a/core/sql/optimizer/HDFSHook.cpp
+++ b/core/sql/optimizer/HDFSHook.cpp
@@ -1069,41 +1069,58 @@ void HHDFSTableStats::print(FILE *ofd)
   fprintf(ofd,"====================================================================\n");
 }
 
+extern __thread hdfsFS *globalFS;
+
 NABoolean HHDFSTableStats::connectHDFS(const NAString &host, Int32 port)
 {
   NABoolean result = TRUE;
 
   // establish connection to HDFS if needed
-  if (fs_ == NULL ||
+  if (globalFS == NULL ||
+      *globalFS == NULL ||
       currHdfsHost_ != host ||
       currHdfsPort_ != port)
     {
-      if (fs_)
+      if (globalFS && *globalFS)
+        disconnectHDFS();
+
+      if (globalFS == NULL)
         {
-          hdfsDisconnect(fs_);
-          fs_ = NULL;
+          globalFS = new hdfsFS;
+          *globalFS = NULL;
         }
-      fs_ = hdfsConnect(host, port);
-      
-      if (fs_ == NULL)
+
+      if (*globalFS == NULL)
         {
-          NAString errMsg("hdfsConnect to ");
-          errMsg += host;
-          errMsg += ":";
-          errMsg += port;
-          errMsg += " failed";
-          diags_.recordError(errMsg, "HHDFSTableStats::connectHDFS");
-          result = FALSE;
+          *globalFS = hdfsConnect(host, port);
         }
+
       currHdfsHost_ = host;
       currHdfsPort_ = port;
     }
+
+  fs_ = *globalFS;
+  if (fs_ == NULL)
+    {
+      NAString errMsg("hdfsConnect to ");
+      errMsg += host;
+      errMsg += ":";
+      errMsg += port;
+      errMsg += " failed";
+      diags_.recordError(errMsg, "HHDFSTableStats::connectHDFS");
+      result = FALSE;
+    }
+  
   return result;
 }
 
 void HHDFSTableStats::disconnectHDFS()
 {
-  if (fs_)
-    hdfsDisconnect(fs_);
+  if (globalFS && *globalFS)
+    {
+      hdfsDisconnect(*globalFS);
+      *globalFS = NULL;
+    }
+
   fs_ = NULL;
 }

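The compiler-side change above matches the executor-side change in ExpLOBaccess.cpp: both now route HDFS access through a single per-thread connection held in the thread-local globalFS pointer, created lazily and cleared in disconnectHDFS(). A minimal sketch of that pattern follows; getSharedFS()/releaseSharedFS() are hypothetical names, and the host/port change detection that connectHDFS() also performs is omitted.

#include <hdfs.h>

static __thread hdfsFS *sharedFS = NULL;     // one connection slot per thread

static hdfsFS getSharedFS(const char *host, tPort port)
{
  if (sharedFS == NULL)
    {                                        // first use on this thread
      sharedFS = new hdfsFS;
      *sharedFS = NULL;
    }
  if (*sharedFS == NULL)
    *sharedFS = hdfsConnect(host, port);     // NULL on failure; caller checks
  return *sharedFS;
}

static void releaseSharedFS()
{
  if (sharedFS && *sharedFS)
    {
      hdfsDisconnect(*sharedFS);             // drop the cached connection
      *sharedFS = NULL;
    }
}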
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/1820da1c/core/sql/optimizer/HDFSHook.h
----------------------------------------------------------------------
diff --git a/core/sql/optimizer/HDFSHook.h b/core/sql/optimizer/HDFSHook.h
index 1ab474c..4f80904 100644
--- a/core/sql/optimizer/HDFSHook.h
+++ b/core/sql/optimizer/HDFSHook.h
@@ -330,6 +330,9 @@ public:
 
   const NAString &tableDir() const { return tableDir_; }
 
+  const Lng32 numOfPartCols() const { return numOfPartCols_; }
+  const Lng32 totalNumPartitions() const { return totalNumPartitions_; }
+
 private:
   enum FileType
   {

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/1820da1c/core/sql/optimizer/NATable.cpp
----------------------------------------------------------------------
diff --git a/core/sql/optimizer/NATable.cpp b/core/sql/optimizer/NATable.cpp
index 9f46209..81eb5b0 100644
--- a/core/sql/optimizer/NATable.cpp
+++ b/core/sql/optimizer/NATable.cpp
@@ -7308,6 +7308,13 @@ NATable * NATableDB::get(const ExtendedQualName* key, BindWA* bindWA, NABoolean
      }
   }
 
+  if ((cachedNATable->isHiveTable()) &&
+      (CmpCommon::getDefault(HIVE_DATA_MOD_CHECK) == DF_ON) &&
+      (CmpCommon::getDefault(TRAF_RELOAD_NATABLE_CACHE) == DF_ON))
+    {
+      removeEntry = TRUE;
+    }
+
   //Found in cache.  If that's all the caller wanted, return now.
   if ( !removeEntry && findInCacheOnly )
      return cachedNATable;
@@ -7896,7 +7903,8 @@ NATable * NATableDB::get(CorrName& corrName, BindWA * bindWA,
       table = NULL;
     }
 
-  if (table && ((table->isHbaseTable() || table->isSeabaseTable()) && !(table->isSeabaseMDTable())))
+  if (table && ((table->isHbaseTable() || table->isSeabaseTable()) && 
+                !(table->isSeabaseMDTable())))
     {
       if ((CmpCommon::getDefault(TRAF_RELOAD_NATABLE_CACHE) == DF_ON))
 	{

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/1820da1c/core/sql/regress/hive/EXPECTED005
----------------------------------------------------------------------
diff --git a/core/sql/regress/hive/EXPECTED005 b/core/sql/regress/hive/EXPECTED005
index eff0c42..6118229 100644
--- a/core/sql/regress/hive/EXPECTED005
+++ b/core/sql/regress/hive/EXPECTED005
@@ -4,7 +4,7 @@
 --- SQL operation complete.
 >>set terminal_charset utf8;
 >>
->>cqd AUTO_QUERY_RETRY 'OFF';
+>>cqd AUTO_QUERY_RETRY_WARNINGS 'ON';
 
 --- SQL operation complete.
 >>cqd HIVE_MAX_STRING_LENGTH '25' ;
@@ -13,9 +13,6 @@
 >>cqd mode_seahive 'ON';
 
 --- SQL operation complete.
->>cqd CALL_EMBEDDED_ARKCMP 'OFF';
-
---- SQL operation complete.
 >>cqd HIST_ROWCOUNT_REQUIRING_STATS '50000';
 
 --- SQL operation complete.
@@ -72,6 +69,10 @@
 +>  order by 1
 +>  ;
 
+*** WARNING[8597] Statement was automatically retried 1 time(s). Delay before each retry was 0 seconds. See next entry for the error that caused this retry. 
+
+*** WARNING[8436] Mismatch detected between compiletime and runtime hive table definitions.
+
 C_PREFERRED_CUST_FLAG      (EXPR)              
 -------------------------  --------------------
 
@@ -218,12 +219,16 @@ xyz
 >>-- s1 should still return 0 rows - for now
 >>execute s2;
 
+*** WARNING[8597] Statement was automatically retried 1 time(s). Delay before each retry was 0 seconds. See next entry for the error that caused this retry. 
+
+*** WARNING[8436] Mismatch detected between compiletime and runtime hive table definitions.
+
 C_PREFERRED_CUST_FLAG      (EXPR)              
 -------------------------  --------------------
 
-N                                          9789
-Y                                          9525
-?                                           685
+N                                         19631
+Y                                         18984
+?                                          1384
 
 --- 3 row(s) selected.
 >>execute s3;
@@ -270,6 +275,10 @@ Y                                          9525
 --- 1 row(s) inserted.
 >>select a,b from newtable;
 
+*** WARNING[8597] Statement was automatically retried 1 time(s). Delay before each retry was 0 seconds. See next entry for the error that caused this retry. 
+
+*** WARNING[8436] Mismatch detected between compiletime and runtime hive table definitions.
+
 A            B                        
 -----------  -------------------------
 
@@ -307,9 +316,9 @@ A            B
 C_PREFERRED_CUST_FLAG      (EXPR)              
 -------------------------  --------------------
 
-N                                          9789
-Y                                          9525
-?                                           685
+N                                         19631
+Y                                         18984
+?                                          1384
 
 --- 3 row(s) selected.
 >>execute s4;
@@ -453,21 +462,21 @@ TINT    SM      I            BIG                   STR                        F
 --- SQL operation complete.
 >>select c1, CONVERTTOHEX(c2) from tbl_gbk;
 
-C1           (EXPR)
+C1           (EXPR)                                            
 -----------  --------------------------------------------------
 
-          3  EC8B90EC978E
-          5  EC8B90EC978E
-          2  EC8B90EC978E
-          4  EC8B90EC978E
-          6  EC8B90EC978E
-          7  EC8B90EC978E
-          8  EC8B90EC978E
-          3  ECBB93EB9F8FECAB97EB9B91
-          2  ECBB93EB9F8FECAB97EB9B91
-          6  ECBB93EB9F8FECAB97EB9B91
-         19  ECBB93EB9F8FECAB97EB9B91
-          8  ECBB93EB9F8FECAB97EB9B91
+          3  EC8B90EC978E                                      
+          5  EC8B90EC978E                                      
+          2  EC8B90EC978E                                      
+          4  EC8B90EC978E                                      
+          6  EC8B90EC978E                                      
+          7  EC8B90EC978E                                      
+          8  EC8B90EC978E                                      
+          3  ECBB93EB9F8FECAB97EB9B91                          
+          2  ECBB93EB9F8FECAB97EB9B91                          
+          6  ECBB93EB9F8FECAB97EB9B91                          
+         19  ECBB93EB9F8FECAB97EB9B91                          
+          8  ECBB93EB9F8FECAB97EB9B91                          
 
 --- 12 row(s) selected.
 >>cqd HIVE_FILE_CHARSET reset;
@@ -516,7 +525,7 @@ C1           C2           C3           C4
 --- 10 row(s) loaded.
 >>select * from trafodion.seabase.tbl_dos_num;
 
-C1           C2
+C1           C2         
 -----------  -----------
 
           0        39478
@@ -544,16 +553,16 @@ C1           C2
 --- SQL operation complete.
 >>select * from tbl_bad;
 
-C1           C2                    C3                         C4               C5      C6                          C7                                                 C8
------------  --------------------  -------------------------  ---------------  ------  --------------------------  -------------------------                          ------
+C1           C2                    C3                         C4               C5      C6                          C7                         C8
+-----------  --------------------  -------------------------  ---------------  ------  --------------------------  -------------------------  ------
 
-          ?                     ?  c                                        ?       ?  ?                                                   ?                               ?
-          ?                     ?  c                                        ?       ?  2017-01-01 10:10:10.000000   1.01000000000000000E+000                               1
-          ?                     ?                                           ?       ?  ?                                                   ?                               ?
-          1                     1  averylongstring            -1.0000000E+000       0  2017-01-01 10:10:10.000000   1.00010000000000000E+002                               1
-          2                     2  good                        1.1000000E+000       2  2017-01-01 10:10:10.000000   2.00000000000000000E+002                            1000
-          3                     3  good                        1.0000000E+000       2  2017-01-01 10:10:10.000000   2.10000000000000000E+002                              10
-          ?            4294967295  good                        3.3999999E+038       ?  2017-01-01 10:10:10.000000   1.69999999999999968E+308                              10
+          ?                     ?  c                                        ?       ?  ?                                                   ?       ?
+          ?                     ?  c                                        ?       ?  2017-01-01 10:10:10.000000   1.01000000000000000E+000       1
+          ?                     ?                                           ?       ?  ?                                                   ?       ?
+          1                     1  averylongstring            -1.0000000E+000       0  2017-01-01 10:10:10.000000   1.00010000000000000E+002       1
+          2                     2  good                        1.1000000E+000       2  2017-01-01 10:10:10.000000   2.00000000000000000E+002    1000
+          3                     3  good                        1.0000000E+000       2  2017-01-01 10:10:10.000000   2.10000000000000000E+002      10
+          ?            4294967295  good                        3.3999999E+038       ?  2017-01-01 10:10:10.000000   1.69999999999999968E+308      10
           0            9999999999  bad                                      ?       ?  ?                                                   ?       ?
 
 --- 8 row(s) selected.
@@ -631,4 +640,62 @@ C1           C2                    C3                         C4               C
 >>cqd HIVE_SCAN_SPECIAL_MODE reset;
 
 --- SQL operation complete.
+>>
+>>-- tests for hive timestamp mismatch check
+>>cqd auto_query_retry_warnings 'ON';
+
+--- SQL operation complete.
+>>
+>>sh echo "drop table thive;" > TEST005_junk;
+>>sh regrhive.ksh -f TEST005_junk;
+>>
+>>sh echo "create table thive(a int);" > TEST005_junk;
+>>sh regrhive.ksh -f TEST005_junk;
+>>
+>>select * from hive.hive.thive;
+
+--- 0 row(s) selected.
+>>
+>>sh echo "insert into thive values (1);" > TEST005_junk;
+>>sh regrhive.ksh -f TEST005_junk;
+>>
+>>select * from hive.hive.thive;
+
+A          
+-----------
+
+          1
+
+--- 1 row(s) selected.
+>>insert into hive.hive.thive values (2);
+
+--- 1 row(s) inserted.
+>>select * from hive.hive.thive;
+
+A          
+-----------
+
+          1
+          2
+
+--- 2 row(s) selected.
+>>
+>>sh echo "drop table thive;" > TEST005_junk;
+>>sh regrhive.ksh -f TEST005_junk;
+>>
+>>sh echo "create table thive(a int, b int);" > TEST005_junk;
+>>sh regrhive.ksh -f TEST005_junk;
+>>
+>>sh echo "insert into thive values (1,2);" > TEST005_junk;
+>>sh regrhive.ksh -f TEST005_junk;
+>>
+>>select * from hive.hive.thive;
+
+A            B          
+-----------  -----------
+
+          1            2
+
+--- 1 row(s) selected.
+>>
 >>log;

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/1820da1c/core/sql/regress/hive/TEST005
----------------------------------------------------------------------
diff --git a/core/sql/regress/hive/TEST005 b/core/sql/regress/hive/TEST005
index ad7cf0e..e44b633 100644
--- a/core/sql/regress/hive/TEST005
+++ b/core/sql/regress/hive/TEST005
@@ -58,10 +58,9 @@ log LOG005 clear;
 set schema hive.hive;
 set terminal_charset utf8;
 
-cqd AUTO_QUERY_RETRY 'OFF';
+cqd AUTO_QUERY_RETRY_WARNINGS 'ON';
 cqd HIVE_MAX_STRING_LENGTH '25' ;
 cqd mode_seahive 'ON';
-cqd CALL_EMBEDDED_ARKCMP 'OFF';
 cqd HIST_ROWCOUNT_REQUIRING_STATS '50000';
 ------------------------------------------------------------
 -- Testing query plan invalidation in the compiler, but
@@ -283,4 +282,34 @@ c8 smallint
 cqd HIVE_SCAN_SPECIAL_MODE '2';
 insert into trafodion.seabase.traf_tbl_bad select * from tbl_bad;
 cqd HIVE_SCAN_SPECIAL_MODE reset;
+
+-- tests for hive timestamp mismatch check
+cqd auto_query_retry_warnings 'ON';
+
+sh echo "drop table thive;" > TEST005_junk;
+sh regrhive.ksh -f TEST005_junk;
+
+sh echo "create table thive(a int);" > TEST005_junk;
+sh regrhive.ksh -f TEST005_junk;
+
+select * from hive.hive.thive;
+
+sh echo "insert into thive values (1);" > TEST005_junk;
+sh regrhive.ksh -f TEST005_junk;
+
+select * from hive.hive.thive;
+insert into hive.hive.thive values (2);
+select * from hive.hive.thive;
+
+sh echo "drop table thive;" > TEST005_junk;
+sh regrhive.ksh -f TEST005_junk;
+
+sh echo "create table thive(a int, b int);" > TEST005_junk;
+sh regrhive.ksh -f TEST005_junk;
+
+sh echo "insert into thive values (1,2);" > TEST005_junk;
+sh regrhive.ksh -f TEST005_junk;
+
+select * from hive.hive.thive;
+
 log;


[5/8] incubator-trafodion git commit: Merge remote branch 'origin/master' into ansharma_hivets_br

Posted by an...@apache.org.
Merge remote branch 'origin/master' into ansharma_hivets_br

Conflicts:
	core/sql/comexe/ComTdbHdfsScan.h
	core/sql/generator/GenRelScan.cpp
	core/sql/regress/hive/EXPECTED005


Project: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/commit/fa70e683
Tree: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/tree/fa70e683
Diff: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/diff/fa70e683

Branch: refs/heads/master
Commit: fa70e6831470cccfd86887f9607591f242512fa7
Parents: 1820da1 d199362
Author: Anoop Sharma <an...@esgyn.com>
Authored: Sat May 28 01:19:42 2016 +0000
Committer: Anoop Sharma <an...@esgyn.com>
Committed: Sat May 28 01:19:42 2016 +0000

----------------------------------------------------------------------
 Makefile                                        |   1 +
 core/Makefile                                   |  12 +-
 core/conn/jdbcT4/pom.xml                        |   7 +-
 .../jdbcT4/src/main/java/T4Messages.properties  |   4 +-
 core/conn/jdbc_type2/build.xml                  |  11 +-
 core/conn/odbc/src/odbc/Makefile                |   8 +-
 core/conn/trafci/install/Installer.java         |   7 +-
 .../odbc/odbcclient/unixcli/makefile.lnx        |   6 +-
 .../odbc/odbcclient/unixcli/package/TRAFDSN     |   2 +-
 .../odbc/odbcclient/unixcli/package/mklnxpkg.sh |   2 +
 .../unixcli/package/runconnect_test.sh          |  26 +
 core/dbsecurity/scripts/sqcertgen               |   5 -
 core/dbsecurity/scripts/update_auth             |   9 +-
 core/sqf/Makefile                               |   2 +
 core/sqf/samples/.gitignore                     |   4 +
 core/sqf/sql/scripts/dcscheck                   |  20 +-
 core/sqf/sql/scripts/install_traf_components    |   3 +-
 core/sqf/sql/scripts/sqcheck                    |  57 +-
 core/sqf/sqvers                                 |   2 +-
 core/sql/comexe/ComTdbFastTransport.h           |   4 +-
 core/sql/comexe/ComTdbHdfsScan.cpp              |  13 +-
 core/sql/comexe/ComTdbHdfsScan.h                |  56 +-
 core/sql/common/ComSmallDefs.h                  |   3 +
 core/sql/executor/ExFastTransport.cpp           |  18 +-
 core/sql/executor/ExHdfsScan.cpp                |  38 +-
 core/sql/executor/hiveHook.cpp                  |  27 +-
 core/sql/generator/GenFastTransport.cpp         |   5 +-
 core/sql/generator/GenRelScan.cpp               |  32 +-
 core/sql/lib_mgmt/pom.xml                       |   2 +-
 core/sql/optimizer/BindRelExpr.cpp              |  30 +-
 core/sql/optimizer/HDFSHook.cpp                 |   1 +
 core/sql/optimizer/HDFSHook.h                   |   4 +
 core/sql/optimizer/RelFastTransport.cpp         |   1 +
 core/sql/optimizer/RelFastTransport.h           |  18 +-
 core/sql/optimizer/hiveHook.h                   |  25 +-
 core/sql/regress/core/EXPECTED162               |  22 +-
 core/sql/regress/hive/EXPECTED001               |  22 +-
 core/sql/regress/hive/EXPECTED003               | 244 ++++----
 core/sql/regress/hive/EXPECTED004               | 240 ++++----
 core/sql/regress/hive/EXPECTED005               |  10 +-
 core/sql/regress/hive/EXPECTED006               |  16 +-
 core/sql/regress/hive/EXPECTED015               |  64 +--
 core/sql/regress/hive/EXPECTED018               | 570 +++++++++++--------
 core/sql/regress/hive/TEST018                   |  48 +-
 .../hive/TEST018_create_hive_tables.hive        |  16 +-
 core/sql/regress/tools/runregr_hive.ksh         |  39 +-
 core/sql/sqlcomp/DefaultConstants.h             |   1 -
 core/sql/sqlcomp/nadefaults.cpp                 |   1 -
 core/sql/ustat/hs_globals.cpp                   |  28 +-
 dcs/pom.xml                                     |   8 +
 install/Makefile                                |  18 +-
 install/installer/traf_cloudera_mods            |   1 -
 install/installer/traf_package_setup            |   2 +-
 53 files changed, 1077 insertions(+), 738 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/fa70e683/core/sql/comexe/ComTdbHdfsScan.cpp
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/fa70e683/core/sql/comexe/ComTdbHdfsScan.h
----------------------------------------------------------------------
diff --cc core/sql/comexe/ComTdbHdfsScan.h
index c60b192,70573e1..ac83311
--- a/core/sql/comexe/ComTdbHdfsScan.h
+++ b/core/sql/comexe/ComTdbHdfsScan.h
@@@ -131,18 -131,11 +131,18 @@@ class ComTdbHdfsScan : public ComTd
    NABasicPtr loggingLocation_;                                // 168 - 175
    NABasicPtr errCountRowId_;                                  // 176 - 183
    UInt32  hiveScanMode_;                                      // 184 - 187
- 
-   char fillersComTdbHdfsScan1_[4];                           // 188 - 191
+   UInt16 origTuppIndex_;                                      // 188 - 189
+   char fillersComTdbHdfsScan1_[2];                            // 190 - 191
+   NABasicPtr nullFormat_;                                     // 192 - 199
 -  char fillersComTdbHdfsScan2_[8];                           // 200 - 207
  
 +  // next 3 params are used to check if data under hdfsRootDir_
 +  // was modified after the query was compiled.
-   NABasicPtr hdfsRootDir_;                                     // 192 - 199
-   Int64  modTSforDir_;                                         // 200 - 207
-   Lng32  numOfPartCols_;                                       // 208 - 211
-   QueuePtr hdfsDirsToCheck_;                                   // 212 - 219
- 
++  NABasicPtr hdfsRootDir_;                                     // 200 - 207
++  Int64  modTSforDir_;                                         // 208 - 215
++  Lng32  numOfPartCols_;                                       // 216 - 219
 +  char fillersComTdbHdfsScan2_[4];                             // 220 - 223
++  QueuePtr hdfsDirsToCheck_;                                   // 224 - 231
 +    
  public:
    enum HDFSFileType
    {

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/fa70e683/core/sql/executor/ExHdfsScan.cpp
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/fa70e683/core/sql/generator/GenRelScan.cpp
----------------------------------------------------------------------
diff --cc core/sql/generator/GenRelScan.cpp
index 5474d94,9c17fad..d1cb243
--- a/core/sql/generator/GenRelScan.cpp
+++ b/core/sql/generator/GenRelScan.cpp
@@@ -1146,32 -1178,15 +1165,41 @@@ if (hTabStats->isOrcFile()
    char * tablename = 
      space->AllocateAndCopyToAlignedSpace(GenGetQualifiedName(getIndexDesc()->getNAFileSet()->getFileSetName()), 0);
  
+   char * nullFormat = NULL;
+   if (hTabStats->getNullFormat())
+     {
+       nullFormat = 
+         space->allocateAndCopyToAlignedSpace(hTabStats->getNullFormat(),
+                                              strlen(hTabStats->getNullFormat()),
+                                              0);
+     }
+ 
 +  // info needed to validate hdfs file structs
 +  char * hdfsRootDir = NULL;
 +  Int64 modTS = -1;
 +  Lng32 numOfPartLevels = -1;
 +  Queue * hdfsDirsToCheck = NULL;
 +  if (CmpCommon::getDefault(HIVE_DATA_MOD_CHECK) == DF_ON)
 +    {
 +      hdfsRootDir =
 +        space->allocateAndCopyToAlignedSpace(hTabStats->tableDir().data(),
 +                                             hTabStats->tableDir().length(),
 +                                             0);
 +      modTS = hTabStats->getModificationTS();
 +      numOfPartLevels = hTabStats->numOfPartCols();
 +
 +      // if specific directories are to be checked based on the query struct
 +      // (for example, when certain partitions are explicitly specified), 
 +      // add them to hdfsDirsToCheck.
 +      // At runtime, only these dirs will be checked for data modification.
 +      // ** TBD **
 +
 +      // Right now, timestamp info is not being generated correctly for
 +      // partitioned files. Skip data mod check for them.
 +      if (numOfPartLevels > 0)
 +        hdfsRootDir = NULL;
 +    }
 +
    // create hdfsscan_tdb
    ComTdbHdfsScan *hdfsscan_tdb = new(space) 
      ComTdbHdfsScan(

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/fa70e683/core/sql/optimizer/HDFSHook.cpp
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/fa70e683/core/sql/optimizer/HDFSHook.h
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/fa70e683/core/sql/regress/hive/EXPECTED005
----------------------------------------------------------------------
diff --cc core/sql/regress/hive/EXPECTED005
index 6118229,28d4f37..3c286d6
--- a/core/sql/regress/hive/EXPECTED005
+++ b/core/sql/regress/hive/EXPECTED005
@@@ -226,9 -221,9 +226,9 @@@ xy
  C_PREFERRED_CUST_FLAG      (EXPR)              
  -------------------------  --------------------
  
- N                                         19631
 -                                            685
 -N                                          9789
 -Y                                          9525
++                                          19631
 +Y                                         18984
 +?                                          1384
  
  --- 3 row(s) selected.
  >>execute s3;
@@@ -316,9 -307,9 +316,9 @@@ A            
  C_PREFERRED_CUST_FLAG      (EXPR)              
  -------------------------  --------------------
  
- N                                         19631
 -                                            685
 -N                                          9789
 -Y                                          9525
++                                          19631
 +Y                                         18984
 +?                                          1384
  
  --- 3 row(s) selected.
  >>execute s4;

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/fa70e683/core/sql/sqlcomp/DefaultConstants.h
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/fa70e683/core/sql/sqlcomp/nadefaults.cpp
----------------------------------------------------------------------


[2/8] incubator-trafodion git commit: Merge remote branch 'origin/master' into ansharma_hivets_br

Posted by an...@apache.org.
Merge remote branch 'origin/master' into ansharma_hivets_br

Conflicts:
	core/sql/sqlcomp/DefaultConstants.h


Project: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/commit/cb6a75c6
Tree: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/tree/cb6a75c6
Diff: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/diff/cb6a75c6

Branch: refs/heads/master
Commit: cb6a75c63f7cfa0ed211dfe22f733ab0fedd4383
Parents: f472822 d02fe47
Author: Anoop Sharma <an...@esgyn.com>
Authored: Mon May 23 15:33:29 2016 +0000
Committer: Anoop Sharma <an...@esgyn.com>
Committed: Mon May 23 15:33:29 2016 +0000

----------------------------------------------------------------------
 core/sql/cli/Context.cpp                | 26 ++++++++++++----
 core/sql/cli/Context.h                  |  1 +
 core/sql/cli/ExSqlComp.cpp              |  5 +++-
 core/sql/cli/ExSqlComp.h                |  2 ++
 core/sql/cli/SessionDefaults.cpp        |  9 ++++++
 core/sql/cli/SessionDefaults.h          | 12 +++++++-
 core/sql/cli/Statement.cpp              |  2 +-
 core/sql/executor/ex_control.cpp        | 12 ++++++++
 core/sql/optimizer/BindRelExpr.cpp      | 44 +++++++++++++++++++++++++++-
 core/sql/optimizer/RelFastTransport.cpp |  4 +--
 core/sql/optimizer/RelFastTransport.h   | 14 ++++-----
 core/sql/regress/executor/EXPECTED020   |  1 +
 core/sql/regress/hive/EXPECTED003       | 26 ++++++++++++++++
 core/sql/regress/hive/TEST003           | 11 +++++++
 core/sql/sqlcomp/DefaultConstants.h     |  2 ++
 core/sql/sqlcomp/nadefaults.cpp         |  2 ++
 16 files changed, 155 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/cb6a75c6/core/sql/cli/SessionDefaults.cpp
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/cb6a75c6/core/sql/regress/executor/EXPECTED020
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/cb6a75c6/core/sql/sqlcomp/DefaultConstants.h
----------------------------------------------------------------------
diff --cc core/sql/sqlcomp/DefaultConstants.h
index a48f1c8,8faee51..2cd6ef6
--- a/core/sql/sqlcomp/DefaultConstants.h
+++ b/core/sql/sqlcomp/DefaultConstants.h
@@@ -3817,10 -3817,7 +3817,12 @@@ enum DefaultConstant
    //     // 2 : todo
    HIVE_SCAN_SPECIAL_MODE,
  
 +  // if set, data modification check is done at runtime before running
 +  // a query.
 +  HIVE_DATA_MOD_CHECK,
 +
+   COMPILER_IDLE_TIMEOUT,
++
    // This enum constant must be the LAST one in the list; it's a count,
    // not an Attribute (it's not IN DefaultDefaults; it's the SIZE of it)!
    __NUM_DEFAULT_ATTRIBUTES

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/cb6a75c6/core/sql/sqlcomp/nadefaults.cpp
----------------------------------------------------------------------