Posted to commits@hive.apache.org by se...@apache.org on 2016/10/17 20:42:56 UTC

[64/67] [abbrv] hive git commit: HIVE-14671 : merge master into hive-14535 (Sergey Shelukhin)

HIVE-14671 : merge master into hive-14535 (Sergey Shelukhin)

Conflicts:
	ql/src/java/org/apache/hadoop/hive/ql/Driver.java


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2474f063
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2474f063
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2474f063

Branch: refs/heads/hive-14535
Commit: 2474f063aaaf3a91fdab9d9c5358723072183ddf
Parents: eacf9f9 36e810f
Author: Sergey Shelukhin <se...@apache.org>
Authored: Mon Oct 17 12:31:12 2016 -0700
Committer: Sergey Shelukhin <se...@apache.org>
Committed: Mon Oct 17 12:31:12 2016 -0700

----------------------------------------------------------------------
 .../java/org/apache/hive/beeline/BeeLine.java   |  176 ++-
 .../java/org/apache/hive/beeline/Commands.java  |   52 +-
 .../BeelineHS2ConnectionFileParseException.java |   30 +
 .../hs2connection/HS2ConnectionFileParser.java  |   88 ++
 .../hs2connection/HS2ConnectionFileUtils.java   |  119 ++
 .../HiveSiteHS2ConnectionFileParser.java        |  172 +++
 .../UserHS2ConnectionFileParser.java            |  117 ++
 .../apache/hive/beeline/TestBeeLineHistory.java |    4 +-
 .../TestUserHS2ConnectionFileParser.java        |  211 ++++
 beeline/src/test/resources/hive-site.xml        |    5 +
 .../test-hs2-conn-conf-kerberos-http.xml        |   48 +
 .../test-hs2-conn-conf-kerberos-nossl.xml       |   32 +
 .../test-hs2-conn-conf-kerberos-ssl.xml         |   40 +
 .../resources/test-hs2-connection-conf-list.xml |   36 +
 .../test-hs2-connection-config-noauth.xml       |   28 +
 .../test-hs2-connection-multi-conf-list.xml     |   37 +
 .../test-hs2-connection-zookeeper-config.xml    |   32 +
 .../apache/hadoop/hive/common/FileUtils.java    |    2 +
 .../org/apache/hadoop/hive/conf/Constants.java  |    4 +
 .../org/apache/hadoop/hive/conf/HiveConf.java   |    8 +-
 .../apache/hadoop/hive/conf/HiveConfUtil.java   |   91 ++
 .../hive/common/util/HiveStringUtils.java       |   32 +
 data/conf/spark/yarn-client/hive-site.xml       |    4 +-
 data/files/identity_udf.jar                     |  Bin 0 -> 710 bytes
 .../hcatalog/pig/AbstractHCatStorerTest.java    | 1096 ++++++++++++++++++
 .../hive/hcatalog/pig/TestAvroHCatStorer.java   |   77 ++
 .../hive/hcatalog/pig/TestHCatStorer.java       | 1036 ++---------------
 .../hive/hcatalog/pig/TestOrcHCatStorer.java    |   33 +
 .../hcatalog/pig/TestParquetHCatStorer.java     |  200 ++++
 .../hive/hcatalog/pig/TestRCFileHCatStorer.java |   32 +
 .../pig/TestSequenceFileHCatStorer.java         |   33 +
 .../hcatalog/pig/TestTextFileHCatStorer.java    |   33 +
 .../hive/minikdc/TestJdbcWithMiniKdcCookie.java |    1 -
 .../hive/beeline/TestBeeLineWithArgs.java       |   15 +
 .../TestBeelineConnectionUsingHiveSite.java     |  109 ++
 .../TestBeelineWithHS2ConnectionFile.java       |  214 ++++
 .../TestBeelineWithUserHs2ConnectionFile.java   |  129 +++
 metastore/if/hive_metastore.thrift              |    2 +
 .../gen/thrift/gen-cpp/hive_metastore_types.cpp |   44 +
 .../gen/thrift/gen-cpp/hive_metastore_types.h   |   20 +-
 .../hadoop/hive/metastore/api/TxnInfo.java      |  206 +++-
 .../src/gen/thrift/gen-php/metastore/Types.php  |   46 +
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  |   28 +-
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |    6 +-
 .../hadoop/hive/metastore/txn/TxnHandler.java   |   12 +-
 .../java/org/apache/hadoop/hive/ql/Driver.java  |  675 +++++++----
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |    8 +
 .../hadoop/hive/ql/exec/mr/ExecDriver.java      |    2 +
 .../ql/exec/spark/HiveSparkClientFactory.java   |   14 +
 .../ql/exec/spark/LocalHiveSparkClient.java     |    6 +
 .../ql/exec/spark/RemoteHiveSparkClient.java    |    4 +
 .../rules/HiveSortLimitPullUpConstantsRule.java |   11 +-
 .../hadoop/hive/ql/parse/CalcitePlanner.java    |    6 +
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |   37 +-
 .../hadoop/hive/ql/plan/ShowTxnsDesc.java       |    2 +-
 .../hadoop/hive/ql/util/DependencyResolver.java |    2 +-
 .../ql/exec/TestHiveCredentialProviders.java    |  314 +++++
 .../clientpositive/distinct_windowing_no_cbo.q  |   63 +
 ql/src/test/queries/clientpositive/windowing.q  |    6 +
 .../clientpositive/dbtxnmgr_showlocks.q.out     |    2 +-
 .../distinct_windowing_no_cbo.q.out             |  796 +++++++++++++
 .../llap/cbo_rp_windowing_2.q.out               |    5 +-
 .../results/clientpositive/llap/windowing.q.out |  110 +-
 .../spark/constprog_semijoin.q.out              |   20 +-
 .../clientpositive/spark/index_bitmap3.q.out    |    4 +-
 .../spark/index_bitmap_auto.q.out               |    4 +-
 .../spark/infer_bucket_sort_map_operators.q.out |    8 +-
 .../infer_bucket_sort_reducers_power_two.q.out  |    2 +-
 .../clientpositive/spark/windowing.q.out        |  105 +-
 .../service/cli/operation/OperationManager.java |    3 +
 .../service/cli/thrift/ThriftHttpServlet.java   |    4 +-
 .../cli/session/TestSessionManagerMetrics.java  |   83 +-
 .../thrift/ThriftCliServiceTestWithCookie.java  |    1 -
 .../apache/hadoop/hive/shims/Hadoop23Shims.java |    8 +-
 .../hive/spark/client/SparkClientImpl.java      |   18 +-
 .../ptest2/conf/deployed/master-mr2.properties  |  160 +++
 .../hive/ptest/execution/ExecutionPhase.java    |   21 +-
 .../ptest/execution/conf/QFileTestBatch.java    |   11 +-
 .../ptest/execution/TestExecutionPhase.java     |    8 +-
 .../TestScripts.testPrepGit.approved.txt        |    4 +-
 .../TestScripts.testPrepHadoop1.approved.txt    |    4 +-
 .../TestScripts.testPrepNone.approved.txt       |    4 +-
 .../TestScripts.testPrepSvn.approved.txt        |    4 +-
 83 files changed, 5976 insertions(+), 1303 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/2474f063/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/2474f063/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/2474f063/metastore/if/hive_metastore.thrift
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/2474f063/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/2474f063/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/2474f063/metastore/src/gen/thrift/gen-php/metastore/Types.php
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/2474f063/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/2474f063/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/2474f063/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index a2997b0,9e5fd37..690cdff
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@@ -1323,171 -1391,180 +1396,216 @@@ public class Driver implements CommandP
      errorMessage = null;
      SQLState = null;
      downstreamError = null;
- 
-     HiveDriverRunHookContext hookContext = new HiveDriverRunHookContextImpl(conf,
-         alreadyCompiled ? ctx.getCmd() : command);
-     // Get all the driver run hooks and pre-execute them.
-     List<HiveDriverRunHook> driverRunHooks;
+     stateLock.lock();
      try {
-       driverRunHooks = getHooks(HiveConf.ConfVars.HIVE_DRIVER_RUN_HOOKS,
-           HiveDriverRunHook.class);
-       for (HiveDriverRunHook driverRunHook : driverRunHooks) {
-           driverRunHook.preDriverRun(hookContext);
+       if (alreadyCompiled) {
+         if (driverState == DriverState.COMPILED) {
+           driverState = DriverState.EXECUTING;
+         } else {
+           errorMessage = "FAILED: Precompiled query has been cancelled or closed.";
+           console.printError(errorMessage);
+           return createProcessorResponse(12);
+         }
+       } else {
+         driverState = DriverState.COMPILING;
        }
-     } catch (Exception e) {
-       errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e);
-       SQLState = ErrorMsg.findSQLState(e.getMessage());
-       downstreamError = e;
-       console.printError(errorMessage + "\n"
-           + org.apache.hadoop.util.StringUtils.stringifyException(e));
-       return createProcessorResponse(12);
+     } finally {
+       stateLock.unlock();
      }
  
-     PerfLogger perfLogger = null;
- 
-     int ret;
-     if (!alreadyCompiled) {
-       // compileInternal will automatically reset the perf logger
-       ret = compileInternal(command);
-       // then we continue to use this perf logger
-       perfLogger = SessionState.getPerfLogger();
-       if (ret != 0) {
-         return createProcessorResponse(ret);
+     // a flag that helps set the correct driver state in the finally block by tracking
+     // whether the method returned due to an error or not.
+     boolean isFinishedWithError = true;
+     try {
+       HiveDriverRunHookContext hookContext = new HiveDriverRunHookContextImpl(conf,
+           alreadyCompiled ? ctx.getCmd() : command);
+       // Get all the driver run hooks and pre-execute them.
+       List<HiveDriverRunHook> driverRunHooks;
+       try {
+         driverRunHooks = getHooks(HiveConf.ConfVars.HIVE_DRIVER_RUN_HOOKS,
+             HiveDriverRunHook.class);
+         for (HiveDriverRunHook driverRunHook : driverRunHooks) {
+             driverRunHook.preDriverRun(hookContext);
+         }
+       } catch (Exception e) {
+         errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e);
+         SQLState = ErrorMsg.findSQLState(e.getMessage());
+         downstreamError = e;
+         console.printError(errorMessage + "\n"
+             + org.apache.hadoop.util.StringUtils.stringifyException(e));
+         return createProcessorResponse(12);
        }
-     } else {
-       // reuse existing perf logger.
-       perfLogger = SessionState.getPerfLogger();
-       // Since we're reusing the compiled plan, we need to update its start time for current run
-       plan.setQueryStartTime(perfLogger.getStartTime(PerfLogger.DRIVER_RUN));
-     }
-     // we set the txn manager for the ctx here because each
-     // query has its own ctx object. The txn mgr is shared across the
-     // same instance of Driver, which can run multiple queries.
-     HiveTxnManager txnManager = SessionState.get().getTxnMgr();
-     ctx.setHiveTxnManager(txnManager);
- 
-     boolean startTxnImplicitly = false;
-     {
-       //this block ensures op makes sense in given context, e.g. COMMIT is valid only if txn is open
-       //DDL is not allowed in a txn, etc.
-       //an error in an open txn does a rollback of the txn
-       if (txnManager.isTxnOpen() && !plan.getOperation().isAllowedInTransaction()) {
-         assert !txnManager.getAutoCommit() : "didn't expect AC=true";
-         return rollback(new CommandProcessorResponse(12, ErrorMsg.OP_NOT_ALLOWED_IN_TXN, null,
-           plan.getOperationName(), Long.toString(txnManager.getCurrentTxnId())));
-       }
-       if(!txnManager.isTxnOpen() && plan.getOperation().isRequiresOpenTransaction()) {
-         return rollback(new CommandProcessorResponse(12, ErrorMsg.OP_NOT_ALLOWED_WITHOUT_TXN, null, plan.getOperationName()));
-       }
-       if(!txnManager.isTxnOpen() && plan.getOperation() == HiveOperation.QUERY && !txnManager.getAutoCommit()) {
-         //this effectively makes START TRANSACTION optional and supports JDBC setAutoCommit(false) semantics
-         //also, indirectly allows DDL to be executed outside a txn context
-         startTxnImplicitly = true;
-       }
-       if(txnManager.getAutoCommit() && plan.getOperation() == HiveOperation.START_TRANSACTION) {
+ 
+       PerfLogger perfLogger = null;
+ 
+       int ret;
+       if (!alreadyCompiled) {
+         // compileInternal will automatically reset the perf logger
+         ret = compileInternal(command, true);
+         // then we continue to use this perf logger
+         perfLogger = SessionState.getPerfLogger();
+         if (ret != 0) {
+           return createProcessorResponse(ret);
+         }
+       } else {
+         // reuse existing perf logger.
+         perfLogger = SessionState.getPerfLogger();
+         // Since we're reusing the compiled plan, we need to update its start time for current run
+         plan.setQueryStartTime(perfLogger.getStartTime(PerfLogger.DRIVER_RUN));
+       }
+       // we set the txn manager for the ctx here because each
+       // query has its own ctx object. The txn mgr is shared across the
+       // same instance of Driver, which can run multiple queries.
+       HiveTxnManager txnManager = SessionState.get().getTxnMgr();
+       ctx.setHiveTxnManager(txnManager);
+ 
+       boolean startTxnImplicitly = false;
+       {
+         //this block ensures op makes sense in given context, e.g. COMMIT is valid only if txn is open
+         //DDL is not allowed in a txn, etc.
+         //an error in an open txn does a rollback of the txn
+         if (txnManager.isTxnOpen() && !plan.getOperation().isAllowedInTransaction()) {
+           assert !txnManager.getAutoCommit() : "didn't expect AC=true";
+           return rollback(new CommandProcessorResponse(12, ErrorMsg.OP_NOT_ALLOWED_IN_TXN, null,
+             plan.getOperationName(), Long.toString(txnManager.getCurrentTxnId())));
+         }
+         if(!txnManager.isTxnOpen() && plan.getOperation().isRequiresOpenTransaction()) {
+           return rollback(new CommandProcessorResponse(12, ErrorMsg.OP_NOT_ALLOWED_WITHOUT_TXN, null, plan.getOperationName()));
+         }
+         if(!txnManager.isTxnOpen() && plan.getOperation() == HiveOperation.QUERY && !txnManager.getAutoCommit()) {
+           //this effectively makes START TRANSACTION optional and supports JDBC setAutoCommit(false) semantics
+           //also, indirectly allows DDL to be executed outside a txn context
+           startTxnImplicitly = true;
+         }
+         if(txnManager.getAutoCommit() && plan.getOperation() == HiveOperation.START_TRANSACTION) {
            return rollback(new CommandProcessorResponse(12, ErrorMsg.OP_NOT_ALLOWED_IN_AUTOCOMMIT, null, plan.getOperationName()));
+         }
        }
-     }
-     if(plan.getOperation() == HiveOperation.SET_AUTOCOMMIT) {
-       try {
-         if(plan.getAutoCommitValue() && !txnManager.getAutoCommit()) {
-           /*here, if there is an open txn, we want to commit it; this behavior matches
-           * https://docs.oracle.com/javase/6/docs/api/java/sql/Connection.html#setAutoCommit(boolean)*/
-           releaseLocksAndCommitOrRollback(true, null);
-           txnManager.setAutoCommit(true);
+       if(plan.getOperation() == HiveOperation.SET_AUTOCOMMIT) {
+         try {
+           if(plan.getAutoCommitValue() && !txnManager.getAutoCommit()) {
+             /*here, if there is an open txn, we want to commit it; this behavior matches
+             * https://docs.oracle.com/javase/6/docs/api/java/sql/Connection.html#setAutoCommit(boolean)*/
+             releaseLocksAndCommitOrRollback(true, null);
+             txnManager.setAutoCommit(true);
+           }
+           else if(!plan.getAutoCommitValue() && txnManager.getAutoCommit()) {
+             txnManager.setAutoCommit(false);
+           }
+           else {/*didn't change autoCommit value - no-op*/}
          }
-         else if(!plan.getAutoCommitValue() && txnManager.getAutoCommit()) {
-           txnManager.setAutoCommit(false);
+         catch(LockException e) {
+           return handleHiveException(e, 12);
          }
-         else {/*didn't change autoCommit value - no-op*/}
        }
-       catch(LockException e) {
-         return handleHiveException(e, 12);
+ 
+       if (requiresLock()) {
+         // a checkpoint to see whether the thread has been interrupted before an expensive operation
+         if (isInterrupted()) {
+           ret = handleInterruption("at acquiring the lock.");
+         } else {
+           ret = acquireLocksAndOpenTxn(startTxnImplicitly);
+         }
+         if (ret != 0) {
+           return rollback(createProcessorResponse(ret));
+         }
+       }
++
++      try {
++        acquireWriteIds(plan, conf);
++      } catch (HiveException e) {
++        return handleHiveException(e, 1);
 +      }
-     }
 +
-     if (requiresLock()) {
-       ret = acquireLocksAndOpenTxn(startTxnImplicitly);
+       ret = execute(true);
        if (ret != 0) {
+         //if needRequireLock is false, the release here will do nothing because there is no lock
          return rollback(createProcessorResponse(ret));
        }
-     }
-     try {
-       acquireWriteIds(plan, conf);
-     } catch (HiveException e) {
-       return handleHiveException(e, 1);
-     }
-     ret = execute();
-     if (ret != 0) {
-       //if needRequireLock is false, the release here will do nothing because there is no lock
-       return rollback(createProcessorResponse(ret));
-     }
  
-     //if needRequireLock is false, the release here will do nothing because there is no lock
-     try {
-       if(txnManager.getAutoCommit() || plan.getOperation() == HiveOperation.COMMIT) {
-         releaseLocksAndCommitOrRollback(true, null);
-       }
-       else if(plan.getOperation() == HiveOperation.ROLLBACK) {
-         releaseLocksAndCommitOrRollback(false, null);
-       }
-       else {
-         //txn (if there is one started) is not finished
+       //if needRequireLock is false, the release here will do nothing because there is no lock
+       try {
+         if(txnManager.getAutoCommit() || plan.getOperation() == HiveOperation.COMMIT) {
+           releaseLocksAndCommitOrRollback(true, null);
+         }
+         else if(plan.getOperation() == HiveOperation.ROLLBACK) {
+           releaseLocksAndCommitOrRollback(false, null);
+         }
+         else {
+           //txn (if there is one started) is not finished
+         }
+       } catch (LockException e) {
+         return handleHiveException(e, 12);
        }
-     } catch (LockException e) {
-       return handleHiveException(e, 12);
-     }
  
-     perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.DRIVER_RUN);
-     queryDisplay.setPerfLogStarts(QueryDisplay.Phase.EXECUTION, perfLogger.getStartTimes());
-     queryDisplay.setPerfLogEnds(QueryDisplay.Phase.EXECUTION, perfLogger.getEndTimes());
+       perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.DRIVER_RUN);
+       queryDisplay.setPerfLogStarts(QueryDisplay.Phase.EXECUTION, perfLogger.getStartTimes());
+       queryDisplay.setPerfLogEnds(QueryDisplay.Phase.EXECUTION, perfLogger.getEndTimes());
  
-     // Take all the driver run hooks and post-execute them.
-     try {
-       for (HiveDriverRunHook driverRunHook : driverRunHooks) {
-           driverRunHook.postDriverRun(hookContext);
+       // Take all the driver run hooks and post-execute them.
+       try {
+         for (HiveDriverRunHook driverRunHook : driverRunHooks) {
+             driverRunHook.postDriverRun(hookContext);
+         }
+       } catch (Exception e) {
+         errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e);
+         SQLState = ErrorMsg.findSQLState(e.getMessage());
+         downstreamError = e;
+         console.printError(errorMessage + "\n"
+             + org.apache.hadoop.util.StringUtils.stringifyException(e));
+         return createProcessorResponse(12);
+       }
+       isFinishedWithError = false;
+       return createProcessorResponse(ret);
+     } finally {
+       if (isInterrupted()) {
+         closeInProcess(true);
+       } else {
+         // otherwise, release only the related resources (ctx, driverContext) as normal
+         releaseResources();
+       }
+       stateLock.lock();
+       try {
+         if (driverState == DriverState.INTERRUPT) {
+           driverState = DriverState.ERROR;
+         } else {
+           driverState = isFinishedWithError ? DriverState.ERROR : DriverState.EXECUTED;
+         }
+       } finally {
+         stateLock.unlock();
        }
-     } catch (Exception e) {
-       errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e);
-       SQLState = ErrorMsg.findSQLState(e.getMessage());
-       downstreamError = e;
-       console.printError(errorMessage + "\n"
-           + org.apache.hadoop.util.StringUtils.stringifyException(e));
-       return createProcessorResponse(12);
      }
- 
-     return createProcessorResponse(ret);
    }
  
 +  private static void acquireWriteIds(QueryPlan plan, HiveConf conf) throws HiveException {
 +    // Output IDs are put directly into FileSinkDesc; here, we only need to take care of inputs.
 +    for (ReadEntity input : plan.getInputs()) {
 +      Table t = extractMmTable(input);
 +      if (t == null) continue;
 +      ValidWriteIds ids = Hive.get().getValidWriteIdsForTable(t.getDbName(), t.getTableName());
 +      ids.addToConf(conf, t.getDbName(), t.getTableName());
 +      if (plan.getFetchTask() != null) {
 +        ids.addToConf(plan.getFetchTask().getFetchConf(), t.getDbName(), t.getTableName());
 +      }
 +    }
 +  }
 +
 +  private static Table extractMmTable(ReadEntity input) {
 +    Table t = null;
 +    switch (input.getType()) {
 +      case TABLE:
 +        t = input.getTable();
 +        break;
 +      case DUMMYPARTITION:
 +      case PARTITION:
 +        t = input.getPartition().getTable();
 +        break;
 +      default: return null;
 +    }
 +    return (t != null && !t.isTemporary()
 +        && MetaStoreUtils.isMmTable(t.getParameters())) ? t : null;
 +  }
 +
    private CommandProcessorResponse rollback(CommandProcessorResponse cpr) {
      //console.printError(cpr.toString());
      try {

http://git-wip-us.apache.org/repos/asf/hive/blob/2474f063/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------