Posted to commits@sqoop.apache.org by af...@apache.org on 2016/03/08 00:15:37 UTC

sqoop git commit: SQOOP-2855: Sqoop2: Enrich Generic JDBC Connector resource file

Repository: sqoop
Updated Branches:
  refs/heads/sqoop2 6ed1a190e -> 2452b262e


SQOOP-2855: Sqoop2: Enrich Generic JDBC Connector resource file

(Jarek Jarcec Cecho via Abraham Fine)


Project: http://git-wip-us.apache.org/repos/asf/sqoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/sqoop/commit/2452b262
Tree: http://git-wip-us.apache.org/repos/asf/sqoop/tree/2452b262
Diff: http://git-wip-us.apache.org/repos/asf/sqoop/diff/2452b262

Branch: refs/heads/sqoop2
Commit: 2452b262e2cb3432a6309721b25fba8cbb538de3
Parents: 6ed1a19
Author: Abraham Fine <af...@apache.org>
Authored: Mon Mar 7 15:14:40 2016 -0800
Committer: Abraham Fine <af...@apache.org>
Committed: Mon Mar 7 15:14:40 2016 -0800

----------------------------------------------------------------------
 .../generic-jdbc-connector-config.properties    | 153 +++++++++----------
 1 file changed, 73 insertions(+), 80 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/sqoop/blob/2452b262/connector/connector-generic-jdbc/src/main/resources/generic-jdbc-connector-config.properties
----------------------------------------------------------------------
diff --git a/connector/connector-generic-jdbc/src/main/resources/generic-jdbc-connector-config.properties b/connector/connector-generic-jdbc/src/main/resources/generic-jdbc-connector-config.properties
index 8256beb..6ecc41d 100644
--- a/connector/connector-generic-jdbc/src/main/resources/generic-jdbc-connector-config.properties
+++ b/connector/connector-generic-jdbc/src/main/resources/generic-jdbc-connector-config.properties
@@ -13,124 +13,117 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# Generic JDBC Connector Resources
+connector.name = Generic JDBC Connector
 
-############################
 # Link Config
-#
-linkConfig.label = Link configuration
-linkConfig.help = You must supply the information requested in order to \
-                   create a link object.
-
-# jdbc driver
-linkConfig.jdbcDriver.label = JDBC Driver Class
-linkConfig.jdbcDriver.help = Enter the fully qualified class name of the JDBC \
-                   driver that will be used for establishing this connection.\
-                   Note: The driver jar must be in the sqoop lib directory.
-
-# connect string
-linkConfig.connectionString.label = JDBC Connection String
-linkConfig.connectionString.help = Enter the value of JDBC connection string to be \
-                   used by this connector for creating database connections.
-
-# username string
+linkConfig.label = Database connection
+linkConfig.help = Contains configuration required to establish a connection with your database server.
+
+linkConfig.jdbcDriver.label = Driver class
+linkConfig.jdbcDriver.example = com.mysql.jdbc.Driver
+linkConfig.jdbcDriver.help = Fully qualified class name of the JDBC driver that will be used for establishing \
+  this connection. Check the documentation for instructions on how to make the driver's jar files available to the Sqoop 2 server.
+
+linkConfig.connectionString.label = Connection String
+linkConfig.connectionString.example = jdbc:mysql://mysql.server/sqoop
+linkConfig.connectionString.help = JDBC connection string associated with your database server.
+
 linkConfig.username.label = Username
-linkConfig.username.help = Enter the username to be used for connecting to the \
-                   database.
+linkConfig.username.example = sqoop-user
+linkConfig.username.help = Username to be used for connecting to the database server.
 
-# password string
 linkConfig.password.label = Password
-linkConfig.password.help = Enter the password to be used for connecting to the \
-                   database.
+linkConfig.password.example = Sup3rS3cr3t!
+linkConfig.password.help = Password to be used for connecting to the database server.
 
-# fetch size int
 linkConfig.fetchSize.label = Fetch Size
-linkConfig.fetchSize.help = Optional hint for JDBC fetch size. See  \
-                   http://docs.oracle.com/javase/7/docs/api/java/sql/Statement.html#setFetchSize(int)
+linkConfig.fetchSize.example = 1000
+linkConfig.fetchSize.help = Optional hint specifying the requested JDBC fetch size.
+
+linkConfig.jdbcProperties.label = Connection Properties
+linkConfig.jdbcProperties.example = useCompression=true
+linkConfig.jdbcProperties.help = Key-value pairs that should be passed down to the JDBC driver when establishing the connection.
 
-# jdbc properties
-linkConfig.jdbcProperties.label = JDBC Connection Properties
-linkConfig.jdbcProperties.help = Enter any JDBC properties that should be \
-                   supplied during the creation of connection.
 
 # From Job Config
-#
-fromJobConfig.label = From database configuration
-fromJobConfig.help = You must supply the information requested below in order to create \
-                 the FROM part of the job object
+fromJobConfig.label = Database source
+fromJobConfig.help = Specifies the source and how data should be fetched from the source database.
 
-# From schema name
 fromJobConfig.schemaName.label = Schema name
-fromJobConfig.schemaName.help = Schema name to read data from 
+fromJobConfig.schemaName.example = my_schema
+fromJobConfig.schemaName.help = Schema name if the table is not stored in the default schema. Note: Not all database systems \
+  understand the concept of a schema.
 
-# From table name
 fromJobConfig.tableName.label = Table name
-fromJobConfig.tableName.help = Table name to read data from
+fromJobConfig.tableName.example = input_table
+fromJobConfig.tableName.help = Input table name from which data will be retrieved.
 
-# From table SQL
-fromJobConfig.sql.label = Table SQL statement
-fromJobConfig.sql.help = SQL statement to read data from (Optional if table name is already given)
+fromJobConfig.sql.label = SQL statement
+fromJobConfig.sql.example = select * from input_table where ${CONDITIONS}
+fromJobConfig.sql.help = Import data from the given query's result set rather than from a static table.
 
-# From table columnList
-fromJobConfig.columnList.label = Table column names
-fromJobConfig.columnList.help = Specific columns in the given table name or the SQL query (Optional)
+fromJobConfig.columnList.label = Column names
+fromJobConfig.columnList.example = id,text,city
+fromJobConfig.columnList.help = Subset of columns that should be retrieved from the source table.
 
-# From table partition column
-fromJobConfig.partitionColumn.label = Partition column name
-fromJobConfig.partitionColumn.help = A specific column for data partition (Optional)
+fromJobConfig.partitionColumn.label = Partition column
+fromJobConfig.partitionColumn.example = id
+fromJobConfig.partitionColumn.help = Input column that should be used to split the import into independent parallel \
+  processes. This column will be used in the conditions of the generated queries.
 
-# From table allow nulls in partition column
-fromJobConfig.allowNullValueInPartitionColumn.label = Null value allowed for the partition column
-fromJobConfig.allowNullValueInPartitionColumn.help = Whether there are null values in partition column (Defaults to false)
+fromJobConfig.allowNullValueInPartitionColumn.label = Partition column nullable
+fromJobConfig.allowNullValueInPartitionColumn.example = true
+fromJobConfig.allowNullValueInPartitionColumn.help = Set to true if the partition column can contain NULL values.
 
-# From table boundary
 fromJobConfig.boundaryQuery.label = Boundary query
-fromJobConfig.boundaryQuery.help = The boundary query for data partition  (Optional)
+fromJobConfig.boundaryQuery.example = select min(id), max(id) from input_table
+fromJobConfig.boundaryQuery.help = Custom query to retrieve the minimum and maximum values of the partition column.
+
 
 # ToJob Config
-#
-toJobConfig.label = To database configuration
-toJobConfig.help = You must supply the information requested in order to create \
-                 the TO part of the job object.
+toJobConfig.label = Database target
+toJobConfig.help = Describes the target destination and how data should be persisted on the RDBMS system.
 
-# To schema name
 toJobConfig.schemaName.label = Schema name
-toJobConfig.schemaName.help = Schema name to write data into
+toJobConfig.schemaName.example = my_schema
+toJobConfig.schemaName.help = Schema name if the table is not stored in the default schema. Note: Not all database systems \
+  understand the concept of a schema.
 
-# To table name
 toJobConfig.tableName.label = Table name
-toJobConfig.tableName.help = Table name to write data into
-
-# To table SQL
-toJobConfig.sql.label = Table SQL statement
-toJobConfig.sql.help = SQL statement to use to write data into (Optional if table name is already given)
+toJobConfig.tableName.example = target_table
+toJobConfig.tableName.help = Destination table name in which transfer results will be stored.
 
-# To table columnList
-toJobConfig.columnList.label = Table column names
-toJobConfig.columnList.help = Specific columns to use in the given table name or the table SQL  (Optional)
+toJobConfig.columnList.label = Column names
+toJobConfig.columnList.example = id,text,city
+toJobConfig.columnList.help = Subset of columns that will be written to. Omitted columns must either allow \
+  NULL values or have a defined default value.
 
-# To stage table name
-toJobConfig.stageTableName.label = Stage table name
-toJobConfig.stageTableName.help = Name of the staging table to use (Optional)
+toJobConfig.stageTableName.label = Staging table
+toJobConfig.stageTableName.example = staging_target_table
+toJobConfig.stageTableName.help = Name of a table with the same structure as the final table that should be used as a staging \
+  destination. Data will be written directly to the final table if no staging table is specified.
 
-# To clear stage table
-toJobConfig.shouldClearStageTable.label = Should clear stage table
-toJobConfig.shouldClearStageTable.help = Indicate if the stage table should be cleared (Defaults to false)
+toJobConfig.shouldClearStageTable.label = Clear stage table
+toJobConfig.shouldClearStageTable.example = true
+toJobConfig.shouldClearStageTable.help = If set to true, the staging table will be wiped out when the job starts.
 
-# Incremental related configuration
 incrementalRead.label = Incremental read
-incrementalRead.help = Configuration related to incremental read
+incrementalRead.help = Configures optional incremental reads from the database, for cases where source data change over time \
+  and only new changes need to be imported.
 
 incrementalRead.checkColumn.label = Check column
-incrementalRead.checkColumn.help = Column that is checked during incremental read for new values
+incrementalRead.checkColumn.example = last_update_date
+incrementalRead.checkColumn.help = Column that is checked during incremental read for new values.
 
 incrementalRead.lastValue.label = Last value
-incrementalRead.lastValue.help = Last read value, fetch will resume with higher values
+incrementalRead.lastValue.example = 19870202
+incrementalRead.lastValue.help = Last imported value; the job will read only newer values.
 
 
 # Dialect
 dialect.label = SQL Dialect
-dialect.help = Dialect that should be used for generated queries
+dialect.help = Database dialect that should be used for generated queries.
 
 dialect.identifierEnclose.label = Identifier enclose
-dialect.identifierEnclose.help = Character(s) that should be used to enclose table name, schema, column names, ...
+dialect.identifierEnclose.example = `
+dialect.identifierEnclose.help = Character(s) that should be used to enclose table, schema, or column names.
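
The new per-input ".example" keys follow the same naming pattern as the existing ".label" and ".help" keys, so a client can look up all three strings for a given input. The following minimal sketch (assuming only java.util.Properties and the resource file name from this commit; the class name ShowConfigStrings is made up for illustration) shows one way to read them:

    import java.io.InputStream;
    import java.util.Properties;

    public class ShowConfigStrings {
        public static void main(String[] args) throws Exception {
            Properties bundle = new Properties();
            // Load the connector resource file introduced in this commit from the classpath.
            try (InputStream in = ShowConfigStrings.class.getClassLoader()
                    .getResourceAsStream("generic-jdbc-connector-config.properties")) {
                bundle.load(in);
            }
            // Each input now carries three strings: label, example and help.
            String key = "linkConfig.jdbcDriver";
            System.out.println("Label:   " + bundle.getProperty(key + ".label"));
            System.out.println("Example: " + bundle.getProperty(key + ".example"));
            System.out.println("Help:    " + bundle.getProperty(key + ".help"));
        }
    }

Against the updated file this would print "Driver class", "com.mysql.jdbc.Driver" and the driver help text; Properties.load() joins the backslash-continued help lines into a single value.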