You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@sqoop.apache.org by ja...@apache.org on 2015/11/05 18:40:55 UTC
[1/6] sqoop git commit: SQOOP-2595: Add Oracle connector to Sqoop 2
Repository: sqoop
Updated Branches:
refs/heads/sqoop2 2a9ae314e -> fa3c77b6a
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/test/java/org/apache/sqoop/connector/jdbc/oracle/TestOracleUtilities.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/test/java/org/apache/sqoop/connector/jdbc/oracle/TestOracleUtilities.java b/connector/connector-oracle-jdbc/src/test/java/org/apache/sqoop/connector/jdbc/oracle/TestOracleUtilities.java
new file mode 100644
index 0000000..0ad78d2
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/test/java/org/apache/sqoop/connector/jdbc/oracle/TestOracleUtilities.java
@@ -0,0 +1,613 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sqoop.connector.jdbc.oracle;
+
+import java.io.IOException;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.sqoop.connector.jdbc.oracle.configuration.FromJobConfig;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleTable;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleUtilities;
+import org.testng.Assert;
+import org.testng.annotations.Test;
+
+/**
+ * Unit tests for OracleUtilities.
+ */
+public class TestOracleUtilities {
+
+  /**
+   * Verifies decodeOracleTableName() against every combination of
+   * double-quoted / unquoted schema and table identifiers. Unquoted
+   * identifiers are expected to be upper-cased (Oracle semantics); quoted
+   * identifiers keep their exact case, and may legally contain embedded
+   * "." characters.
+   */
+  @Test
+  public void testdecodeOracleTableName() {
+
+    OracleTable context = null;
+
+    // These are the possibilities for double-quote location...
+    // table
+    // "table"
+    // schema.table
+    // schema."table"
+    // "schema".table
+    // "schema"."table"
+
+    // table
+    context = OracleUtilities.decodeOracleTableName("oraoop", "junk");
+    Assert.assertEquals(context.getSchema(), "ORAOOP");
+    Assert.assertEquals(context.getName(), "JUNK");
+
+    // "table"
+    context = OracleUtilities.decodeOracleTableName("oraoop", "\"Junk\"");
+    Assert.assertEquals(context.getSchema(), "ORAOOP");
+    Assert.assertEquals(context.getName(), "Junk");
+
+    // schema.table
+    context =
+        OracleUtilities.decodeOracleTableName("oraoop", "targusr.junk");
+    Assert.assertEquals(context.getSchema(), "TARGUSR");
+    Assert.assertEquals(context.getName(), "JUNK");
+
+    // schema."table"
+    context =
+        OracleUtilities.decodeOracleTableName("oraoop", "targusr.\"Junk\"");
+    Assert.assertEquals(context.getSchema(), "TARGUSR");
+    Assert.assertEquals(context.getName(), "Junk");
+
+    // "schema".table
+    context =
+        OracleUtilities.decodeOracleTableName("oraoop", "\"Targusr\".junk");
+    Assert.assertEquals(context.getSchema(), "Targusr");
+    Assert.assertEquals(context.getName(), "JUNK");
+
+    // "schema"."table"
+    String inputStr = "\"Targusr\".\"Junk\"";
+    context = OracleUtilities.decodeOracleTableName("oraoop", inputStr);
+    Assert.assertEquals(context.getSchema(), "Targusr");
+    Assert.assertEquals(context.getName(), "Junk");
+
+    // Test for "." within schema...
+    context =
+        OracleUtilities.decodeOracleTableName("oraoop", "\"targ.usr\".junk");
+    Assert.assertEquals(context.getSchema(), "targ.usr");
+    Assert.assertEquals(context.getName(), "JUNK");
+
+    // Test for "." within table...
+    context =
+        OracleUtilities.decodeOracleTableName("oraoop",
+            "targusr.\"junk.tab.with.dots\"");
+    Assert.assertEquals(context.getSchema(), "TARGUSR");
+    Assert.assertEquals(context.getName(), "junk.tab.with.dots");
+
+    // Test for "." within schema and within table...
+    context =
+        OracleUtilities.decodeOracleTableName("oraoop",
+            "\"targ.usr\".\"junk.tab.with.dots\"");
+    Assert.assertEquals(context.getSchema(), "targ.usr");
+    Assert.assertEquals(context.getName(), "junk.tab.with.dots");
+  }
+
+  /**
+   * getCurrentMethodName() should report the name of the method that
+   * invoked it, formatted as "methodName()".
+   */
+  @Test
+  public void testgetCurrentMethodName() {
+
+    String actual = OracleUtilities.getCurrentMethodName();
+    String expected = "testgetCurrentMethodName()";
+
+    // TestNG's Assert.assertEquals takes (actual, expected) - the previous
+    // (expected, actual) ordering produced misleading failure messages.
+    Assert.assertEquals(actual, expected);
+
+  }
+
+  /**
+   * Data chunk ids are expected to be formatted as
+   * "&lt;splitId&gt;_&lt;chunkNumber&gt;".
+   */
+  @Test
+  public void testgenerateDataChunkId() {
+
+    String expected;
+    String actual;
+
+    // TestNG's Assert.assertEquals takes (actual, expected) - arguments
+    // were previously reversed, which produced misleading failure text.
+    expected = "1_1";
+    actual = OracleUtilities.generateDataChunkId(1, 1);
+    Assert.assertEquals(actual, expected);
+
+    expected = "1234_99";
+    actual = OracleUtilities.generateDataChunkId(1234, 99);
+    Assert.assertEquals(actual, expected);
+  }
+
+/* @Test
+ public void testgetDuplicatedStringArrayValues() {
+
+ try {
+ OracleUtilities.getDuplicatedStringArrayValues(null, false);
+ Assert.fail("An IllegalArgumentException should be been thrown.");
+ } catch (IllegalArgumentException ex) {
+ // This is what we want to happen.
+ }
+
+ String[] duplicates = null;
+
+ duplicates =
+ OraOopUtilities.getDuplicatedStringArrayValues(new String[] {}, false);
+ Assert.assertEquals(0, duplicates.length);
+
+ duplicates =
+ OraOopUtilities.getDuplicatedStringArrayValues(new String[] { "a", "b",
+ "c", }, false);
+ Assert.assertEquals(0, duplicates.length);
+
+ duplicates =
+ OraOopUtilities.getDuplicatedStringArrayValues(new String[] { "a", "A",
+ "b", }, false);
+ Assert.assertEquals(0, duplicates.length);
+
+ duplicates =
+ OraOopUtilities.getDuplicatedStringArrayValues(new String[] { "a", "A",
+ "b", }, true);
+ Assert.assertEquals(1, duplicates.length);
+ Assert.assertEquals("A", duplicates[0]);
+
+ duplicates =
+ OraOopUtilities.getDuplicatedStringArrayValues(new String[] { "A", "a",
+ "b", }, true);
+ Assert.assertEquals(1, duplicates.length);
+ Assert.assertEquals("a", duplicates[0]);
+
+ duplicates =
+ OraOopUtilities.getDuplicatedStringArrayValues(new String[] { "A", "a",
+ "b", "A", }, false);
+ Assert.assertEquals(1, duplicates.length);
+ Assert.assertEquals("A", duplicates[0]);
+
+ duplicates =
+ OraOopUtilities.getDuplicatedStringArrayValues(new String[] { "A", "a",
+ "b", "A", }, true);
+ Assert.assertEquals(2, duplicates.length);
+ Assert.assertEquals("a", duplicates[0]);
+ Assert.assertEquals("A", duplicates[1]);
+
+ duplicates =
+ OraOopUtilities.getDuplicatedStringArrayValues(new String[] { "A", "a",
+ "b", "A", "A", }, true);
+ Assert.assertEquals(2, duplicates.length);
+ Assert.assertEquals("a", duplicates[0]);
+ Assert.assertEquals("A", duplicates[1]);
+ }*/
+
+  /**
+   * Builds a three-level exception chain (IOException wrapped in
+   * SQLException wrapped in RuntimeException) and checks that
+   * getFullExceptionMessage() includes the class name and message text of
+   * every exception in the chain - not just the outermost one.
+   */
+  @Test
+  public void testgetFullExceptionMessage() {
+
+    try {
+
+      // Deliberately construct the nested chain by throwing and catching
+      // at each level, so each exception carries the previous as its cause.
+      try {
+        try {
+          throw new IOException("lorem ipsum!");
+        } catch (IOException ex) {
+          throw new SQLException("dolor sit amet", ex);
+        }
+      } catch (SQLException ex) {
+        throw new RuntimeException("consectetur adipisicing elit", ex);
+      }
+
+    } catch (Exception ex) {
+      String msg = OracleUtilities.getFullExceptionMessage(ex);
+      if (!msg.contains("IOException") || !msg.contains("lorem ipsum!")) {
+        Assert
+            .fail("Inner exception text has not been included in the message");
+      }
+      if (!msg.contains("SQLException") || !msg.contains("dolor sit amet")) {
+        Assert
+            .fail("Inner exception text has not been included in the message");
+      }
+      if (!msg.contains("RuntimeException")
+          || !msg.contains("consectetur adipisicing elit")) {
+        Assert
+            .fail("Outer exception text has not been included in the message");
+      }
+    }
+  }
+
+/* @Test
+ public void testGetOraOopOracleDataChunkMethod() {
+ try {
+ OracleUtilities.getOraOopOracleDataChunkMethod(null);
+ Assert.fail("An IllegalArgumentException should be been thrown.");
+ } catch (IllegalArgumentException ex) {
+ // This is what we want to happen.
+ }
+
+ OraOopConstants.OraOopOracleDataChunkMethod dataChunkMethod;
+ Configuration conf = new Configuration();
+
+ // Check the default is ROWID
+ dataChunkMethod = OraOopUtilities.getOraOopOracleDataChunkMethod(conf);
+ Assert.assertEquals(OraOopConstants.OraOopOracleDataChunkMethod.ROWID,
+ dataChunkMethod);
+
+ // Invalid value specified
+ OraOopUtilities.LOG.setCacheLogEntries(true);
+ OraOopUtilities.LOG.clearCache();
+ conf.set(OraOopConstants.ORAOOP_ORACLE_DATA_CHUNK_METHOD, "loremipsum");
+ dataChunkMethod = OraOopUtilities.getOraOopOracleDataChunkMethod(conf);
+ String logText = OraOopUtilities.LOG.getLogEntries();
+ OraOopUtilities.LOG.setCacheLogEntries(false);
+ if (!logText.toLowerCase().contains("loremipsum")) {
+ Assert
+ .fail("The LOG should inform the user they've selected an invalid "
+ + "data chunk method - and what that was.");
+ }
+ Assert.assertEquals("Should have used the default value",
+ OraOopConstants.ORAOOP_ORACLE_DATA_CHUNK_METHOD_DEFAULT,
+ dataChunkMethod);
+
+ // Valid value specified
+ conf.set(OraOopConstants.ORAOOP_ORACLE_DATA_CHUNK_METHOD, "partition");
+ dataChunkMethod = OraOopUtilities.getOraOopOracleDataChunkMethod(conf);
+ Assert.assertEquals(OraOopConstants.OraOopOracleDataChunkMethod.PARTITION,
+ dataChunkMethod);
+ }*/
+
+ /*@Test
+ public void testgetOraOopOracleBlockToSplitAllocationMethod() {
+
+ // Invalid arguments test...
+ try {
+ OraOopUtilities.getOraOopOracleBlockToSplitAllocationMethod(null,
+ OraOopConstants.OraOopOracleBlockToSplitAllocationMethod.RANDOM);
+ Assert.fail("An IllegalArgumentException should be been thrown.");
+ } catch (IllegalArgumentException ex) {
+ // This is what we want to happen.
+ }
+
+ OraOopConstants.OraOopOracleBlockToSplitAllocationMethod allocationMethod;
+ org.apache.hadoop.conf.Configuration conf = new Configuration();
+
+ // No configuration property - and RANDOM used by default...
+ allocationMethod =
+ OraOopUtilities.getOraOopOracleBlockToSplitAllocationMethod(conf,
+ OraOopConstants.OraOopOracleBlockToSplitAllocationMethod.RANDOM);
+ Assert.assertEquals(
+ OraOopConstants.OraOopOracleBlockToSplitAllocationMethod.RANDOM,
+ allocationMethod);
+
+ // No configuration property - and SEQUENTIAL used by default...
+ allocationMethod =
+ OraOopUtilities.getOraOopOracleBlockToSplitAllocationMethod(
+ conf,
+ OraOopConstants.OraOopOracleBlockToSplitAllocationMethod.SEQUENTIAL);
+ Assert.assertEquals(
+ OraOopConstants.OraOopOracleBlockToSplitAllocationMethod.SEQUENTIAL,
+ allocationMethod);
+
+ // An invalid property value specified...
+ OraOopUtilities.LOG.setCacheLogEntries(true);
+ OraOopUtilities.LOG.clearCache();
+ conf.set(OraOopConstants.ORAOOP_ORACLE_BLOCK_TO_SPLIT_ALLOCATION_METHOD,
+ "loremipsum");
+ allocationMethod =
+ OraOopUtilities.getOraOopOracleBlockToSplitAllocationMethod(
+ conf,
+ OraOopConstants.OraOopOracleBlockToSplitAllocationMethod.SEQUENTIAL);
+ String logText = OraOopUtilities.LOG.getLogEntries();
+ OraOopUtilities.LOG.setCacheLogEntries(false);
+ if (!logText.toLowerCase().contains("loremipsum")) {
+ Assert
+ .fail("The LOG should inform the user they've selected an invalid "
+ + "allocation method - and what that was.");
+ }
+
+ if (!logText.contains("ROUNDROBIN or SEQUENTIAL or RANDOM")) {
+ Assert.fail("The LOG should inform the user what the valid choices are.");
+ }
+
+ // An valid property value specified...
+ conf.set(OraOopConstants.ORAOOP_ORACLE_BLOCK_TO_SPLIT_ALLOCATION_METHOD,
+ "sequential");
+ allocationMethod =
+ OraOopUtilities.getOraOopOracleBlockToSplitAllocationMethod(
+ conf,
+ OraOopConstants.OraOopOracleBlockToSplitAllocationMethod.SEQUENTIAL);
+ Assert.assertEquals(
+ OraOopConstants.OraOopOracleBlockToSplitAllocationMethod.SEQUENTIAL,
+ allocationMethod);
+ }*/
+
+ /*@Test
+ public void testgetOraOopTableImportWhereClauseLocation() {
+
+ // Invalid arguments test...
+ try {
+ OraOopUtilities.getOraOopTableImportWhereClauseLocation(null,
+ OraOopConstants.OraOopTableImportWhereClauseLocation.SPLIT);
+ Assert.fail("An IllegalArgumentException should be been thrown.");
+ } catch (IllegalArgumentException ex) {
+ // This is what we want to happen.
+ }
+
+ OraOopConstants.OraOopTableImportWhereClauseLocation location;
+ org.apache.hadoop.conf.Configuration conf = new Configuration();
+
+ // No configuration property - and SPLIT used by default...
+ location =
+ OraOopUtilities.getOraOopTableImportWhereClauseLocation(conf,
+ OraOopConstants.OraOopTableImportWhereClauseLocation.SPLIT);
+ Assert.assertEquals(
+ OraOopConstants.OraOopTableImportWhereClauseLocation.SPLIT, location);
+
+ // An invalid property value specified...
+ OraOopUtilities.LOG.setCacheLogEntries(true);
+ OraOopUtilities.LOG.clearCache();
+ conf.set(OraOopConstants.ORAOOP_TABLE_IMPORT_WHERE_CLAUSE_LOCATION,
+ "loremipsum");
+ location =
+ OraOopUtilities.getOraOopTableImportWhereClauseLocation(conf,
+ OraOopConstants.OraOopTableImportWhereClauseLocation.SPLIT);
+ String logText = OraOopUtilities.LOG.getLogEntries();
+ OraOopUtilities.LOG.setCacheLogEntries(false);
+ if (!logText.toLowerCase().contains("loremipsum")) {
+ Assert
+ .fail("The LOG should inform the user they've selected an invalid "
+ + "where-clause-location - and what that was.");
+ }
+
+ if (!logText.contains("SUBSPLIT or SPLIT")) {
+ Assert.fail("The LOG should inform the user what the valid choices are.");
+ }
+
+ // An valid property value specified...
+ conf.set(OraOopConstants.ORAOOP_TABLE_IMPORT_WHERE_CLAUSE_LOCATION,
+ "split");
+ location =
+ OraOopUtilities.getOraOopTableImportWhereClauseLocation(conf,
+ OraOopConstants.OraOopTableImportWhereClauseLocation.SUBSPLIT);
+ Assert.assertEquals(
+ OraOopConstants.OraOopTableImportWhereClauseLocation.SPLIT, location);
+
+ }*/
+
+ /*@Test
+ public void testpadLeft() {
+
+ String expected = " a";
+ String actual = OracleUtilities.padLeft("a", 4);
+ Assert.assertEquals(expected, actual);
+
+ expected = "abcd";
+ actual = OraOopUtilities.padLeft("abcd", 3);
+ Assert.assertEquals(expected, actual);
+ }
+
+ @Test
+ public void testpadRight() {
+
+ String expected = "a ";
+ String actual = OraOopUtilities.padRight("a", 4);
+ Assert.assertEquals(expected, actual);
+
+ expected = "abcd";
+ actual = OraOopUtilities.padRight("abcd", 3);
+ Assert.assertEquals(expected, actual);
+ }*/
+
+ /*@Test
+ public void testReplaceConfigurationExpression() {
+
+ org.apache.hadoop.conf.Configuration conf = new Configuration();
+
+ // Default value used...
+ String actual =
+ OraOopUtilities.replaceConfigurationExpression(
+ "alter session set timezone = '{oracle.sessionTimeZone|GMT}';",
+ conf);
+ String expected = "alter session set timezone = 'GMT';";
+ Assert.assertEquals("OraOop configuration expression failure.", expected,
+ actual);
+
+ // Configuration property value exists...
+ conf.set("oracle.sessionTimeZone", "Africa/Algiers");
+ actual =
+ OraOopUtilities.replaceConfigurationExpression(
+ "alter session set timezone = '{oracle.sessionTimeZone|GMT}';",
+ conf);
+ expected = "alter session set timezone = 'Africa/Algiers';";
+ Assert.assertEquals("OraOop configuration expression failure.", expected,
+ actual);
+
+ // Multiple properties in one expression...
+ conf.set("expr1", "1");
+ conf.set("expr2", "2");
+ conf.set("expr3", "3");
+ conf.set("expr4", "4");
+ actual =
+ OraOopUtilities.replaceConfigurationExpression("set {expr1}={expr2};",
+ conf);
+ expected = "set 1=2;";
+ Assert.assertEquals("OraOop configuration expression failure.", expected,
+ actual);
+
+ actual =
+ OraOopUtilities.replaceConfigurationExpression(
+ "set {expr4|0}={expr5|5};", conf);
+ expected = "set 4=5;";
+ Assert.assertEquals("OraOop configuration expression failure.", expected,
+ actual);
+ }*/
+
+ /*@Test
+ public void testStackContainsClass() {
+
+ if (OracleUtilities.stackContainsClass("lorem.ipsum.dolor")) {
+ Assert.fail("There's no way the stack actually contains this!");
+ }
+
+ String expected = "org.apache.sqoop.manager.oracle.TestOraOopUtilities";
+ if (!OracleUtilities.stackContainsClass(expected)) {
+ Assert.fail("The stack should contain the class:" + expected);
+ }
+ }*/
+
+  /**
+   * Verifies getImportHint(): a fresh FromJobConfig yields the default
+   * "NO_INDEX(t)" hint, a custom queryHint is wrapped in Oracle hint
+   * syntax with a trailing space, and a whitespace-only hint suppresses
+   * the hint entirely (empty string).
+   */
+  @Test
+  public void testGetImportHint() {
+    FromJobConfig jobConfig = new FromJobConfig();
+
+    String hint = OracleUtilities.getImportHint(jobConfig);
+    Assert.assertEquals(hint, "/*+ NO_INDEX(t) */ ", "Default import hint");
+
+    jobConfig.queryHint = "NO_INDEX(t) SCN_ASCENDING";
+    hint = OracleUtilities.getImportHint(jobConfig);
+    Assert.assertEquals(hint, "/*+ NO_INDEX(t) SCN_ASCENDING */ ",
+        "Changed import hint");
+
+    jobConfig.queryHint = " ";
+    hint = OracleUtilities.getImportHint(jobConfig);
+    Assert.assertEquals(hint, "", "Whitespace import hint");
+
+  }
+
+  /**
+   * Verifies splitStringList(): a comma-separated list is split on commas,
+   * except that commas inside double-quoted elements are preserved. The
+   * quotes themselves are retained and no case-conversion is applied.
+   * NOTE: TestNG's Assert.assertEquals takes (actual, expected) - the
+   * arguments were previously reversed throughout this method, which
+   * produced misleading failure messages.
+   */
+  @Test
+  public void testSplitStringList() {
+    List<String> result = null;
+    List<String> expected = null;
+
+    expected = new ArrayList<String>();
+    expected.add("abcde");
+    expected.add("ghijklm");
+    result = OracleUtilities.splitStringList("abcde,ghijklm");
+    Assert.assertEquals(result, expected);
+
+    expected = new ArrayList<String>();
+    expected.add("\"abcde\"");
+    expected.add("\"ghijklm\"");
+    result = OracleUtilities.splitStringList("\"abcde\",\"ghijklm\"");
+    Assert.assertEquals(result, expected);
+
+    expected = new ArrayList<String>();
+    expected.add("abcde");
+    expected.add("\"ghijklm\"");
+    result = OracleUtilities.splitStringList("abcde,\"ghijklm\"");
+    Assert.assertEquals(result, expected);
+
+    expected = new ArrayList<String>();
+    expected.add("\"abcde\"");
+    expected.add("ghijklm");
+    result = OracleUtilities.splitStringList("\"abcde\",ghijklm");
+    Assert.assertEquals(result, expected);
+
+    expected = new ArrayList<String>();
+    expected.add("\"ab,cde\"");
+    expected.add("ghijklm");
+    result = OracleUtilities.splitStringList("\"ab,cde\",ghijklm");
+    Assert.assertEquals(result, expected);
+
+    expected = new ArrayList<String>();
+    expected.add("abcde");
+    expected.add("\"ghi,jklm\"");
+    result = OracleUtilities.splitStringList("abcde,\"ghi,jklm\"");
+    Assert.assertEquals(result, expected);
+
+    expected = new ArrayList<String>();
+    expected.add("\"ab,cde\"");
+    expected.add("\"ghi,jklm\"");
+    result = OracleUtilities.splitStringList("\"ab,cde\",\"ghi,jklm\"");
+    Assert.assertEquals(result, expected);
+
+    expected = new ArrayList<String>();
+    expected.add("\"ab,cde\"");
+    expected.add("\"ghi,jklm\"");
+    expected.add("\",Lorem\"");
+    expected.add("\"ip!~sum\"");
+    expected.add("\"do,lo,,r\"");
+    expected.add("\"s#it\"");
+    expected.add("\"am$e$t\"");
+    result =
+        OracleUtilities
+            .splitStringList("\"ab,cde\",\"ghi,jklm\",\",Lorem\",\"ip!~sum\","
+                + "\"do,lo,,r\",\"s#it\",\"am$e$t\"");
+    Assert.assertEquals(result, expected);
+
+    expected = new ArrayList<String>();
+    expected.add("LOREM");
+    expected.add("IPSUM");
+    expected.add("DOLOR");
+    expected.add("SIT");
+    expected.add("AMET");
+    result = OracleUtilities.splitStringList("LOREM,IPSUM,DOLOR,SIT,AMET");
+    Assert.assertEquals(result, expected);
+  }
+
+  /**
+   * Verifies splitOracleStringList(): elements are split on commas with
+   * Oracle identifier semantics - unquoted elements are upper-cased,
+   * double-quoted elements keep their exact case (quotes are stripped)
+   * and may contain embedded commas and special characters.
+   * NOTE: TestNG's Assert.assertEquals takes (actual, expected) - the
+   * arguments were previously reversed, which produced misleading failure
+   * messages.
+   */
+  @Test
+  public void testSplitOracleStringList() {
+    List<String> result = null;
+    List<String> expected = null;
+
+    expected = new ArrayList<String>();
+    expected.add("LOREM");
+    expected.add("IPSUM");
+    expected.add("DOLOR");
+    expected.add("SIT");
+    expected.add("AMET");
+    result =
+        OracleUtilities.splitOracleStringList("lorem,ipsum,dolor,sit,amet");
+    Assert.assertEquals(result, expected);
+
+    expected = new ArrayList<String>();
+    expected.add("LOREM");
+    expected.add("ipsum");
+    expected.add("dolor");
+    expected.add("SIT");
+    expected.add("amet");
+    result =
+        OracleUtilities
+            .splitOracleStringList("lorem,\"ipsum\",\"dolor\",sit,\"amet\"");
+    Assert.assertEquals(result, expected);
+
+    expected = new ArrayList<String>();
+    expected.add("LOREM");
+    expected.add("ip,sum");
+    expected.add("dol$or");
+    expected.add("SIT");
+    expected.add("am!~#et");
+    result =
+        OracleUtilities
+            .splitOracleStringList("lorem,\"ip,sum\",\"dol$or\",sit,\"am!~#et\"");
+    Assert.assertEquals(result, expected);
+  }
+
+ /*@Test
+ public void testAppendJavaSecurityEgd() {
+ String confProperty = "mapred.child.java.opts";
+ String confValue = "-Djava.security.egd=file:///dev/urandom";
+ Configuration conf = new Configuration();
+
+ String expected = confValue;
+ String actual = null;
+ conf.set(confProperty, "");
+ OraOopUtilities.appendJavaSecurityEgd(conf);
+ actual = conf.get(confProperty);
+ Assert.assertEquals("Append to empty string", expected, actual);
+
+ expected = "-Djava.security.egd=file:/dev/random";
+ conf.set(confProperty, expected);
+ OraOopUtilities.appendJavaSecurityEgd(conf);
+ actual = conf.get(confProperty);
+ Assert.assertEquals("Append to empty string", expected, actual);
+
+ expected = confValue + " -Xmx201m";
+ conf.set(confProperty, "-Xmx201m");
+ OraOopUtilities.appendJavaSecurityEgd(conf);
+ actual = conf.get(confProperty);
+ Assert.assertEquals("Append to empty string", expected, actual);
+ }*/
+}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/test/java/org/apache/sqoop/connector/jdbc/oracle/integration/OracleConnectionFactoryTest.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/test/java/org/apache/sqoop/connector/jdbc/oracle/integration/OracleConnectionFactoryTest.java b/connector/connector-oracle-jdbc/src/test/java/org/apache/sqoop/connector/jdbc/oracle/integration/OracleConnectionFactoryTest.java
new file mode 100644
index 0000000..a4caa85
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/test/java/org/apache/sqoop/connector/jdbc/oracle/integration/OracleConnectionFactoryTest.java
@@ -0,0 +1,497 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sqoop.connector.jdbc.oracle.integration;
+
+import java.io.StringWriter;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.List;
+
+import org.apache.log4j.Layout;
+import org.apache.log4j.Logger;
+import org.apache.log4j.PatternLayout;
+import org.apache.log4j.WriterAppender;
+import org.apache.sqoop.connector.jdbc.oracle.OracleJdbcConnectorConstants;
+import org.apache.sqoop.connector.jdbc.oracle.configuration.ConnectionConfig;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleConnectionFactory;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleQueries;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleTable;
+import org.testng.Assert;
+import org.testng.annotations.Test;
+
+/**
+ * Test OracleConnectionFactory class including initialization statements.
+ */
+public class OracleConnectionFactoryTest extends OracleTestCase {
+
+  /** Name of the scratch table created/dropped by the session-init tests. */
+  private static final String TEST_TABLE_NAME = "sqoop_conn_test";
+
+  // Query over v$sql computing per-statement execution statistics
+  // (rows_per_fetch in particular) for statements matching
+  // 'select%dba_objects'. Used by setAndCheckJdbcFetchSize() to verify the
+  // JDBC fetch size the driver actually applied.
+  private static final String SQL_TABLE =
+      "WITH sqltable AS "
+          + "   ( "
+          + "   SELECT executions, rows_processed, fetches, "
+          + "   ROUND (rows_processed / executions, 2) AS rows_per_exec, "
+          + "   ROUND (rows_processed / fetches, 2) AS rows_per_fetch, "
+          + "   ROUND (LEAST (  ROUND (rows_processed / fetches, 2) "
+          + "   / LEAST (rows_processed / executions, 10), "
+          + "   1 "
+          + "   ), "
+          + "   2 "
+          + "   ) batch_efficiency, "
+          + "   sql_text, u.username parsing_schema_name, buffer_gets, "
+          + "   disk_reads, cpu_time/1000 cpu_time, elapsed_time/1000"
+          + "   elapsed_time, hash_value sql_id, child_number "
+          + "   FROM v$sql s join all_users u on (u.user_id=s.parsing_user_id) "
+          + "   WHERE fetches > 0 AND executions > 0 AND rows_processed > 0 "
+          + "   AND parsing_schema_id <> 0 AND sql_text like "
+          + "   'select%dba_objects'    )"
+          + "SELECT   sql_id, child_number, array_wastage, "
+          + "   rows_processed, fetches, rows_per_exec, "
+          + "   rows_per_fetch, parsing_schema_name, buffer_gets, disk_reads, "
+          + "   cpu_time, elapsed_time, sql_text,executions "
+          + "   FROM (SELECT sql_id, "
+          + "   child_number, "
+          + "   rows_processed * (1 - batch_efficiency) array_wastage, "
+          + "   rows_processed, " + "   fetches, "
+          + "   rows_per_exec, "
+          + "   rows_per_fetch, " + "   sql_text, "
+          + "   parsing_schema_name, "
+          + "   buffer_gets, " + "   disk_reads, "
+          + "   cpu_time, " + "   elapsed_time, "
+          + "   executions " + "   FROM sqltable) ";
+
+  /**
+   * Checks that OracleQueries.setJdbcFetchSize() takes effect for both a
+   * small (45) and a large (2000) fetch size.
+   */
+  @Test
+  public void testSetJdbcFetchSize() {
+    setAndCheckJdbcFetchSize(45);
+    setAndCheckJdbcFetchSize(2000);
+  }
+
+  /**
+   * Applies the given JDBC fetch size to a connection, runs a uniquely
+   * marked query over dba_objects, then reads the statement's
+   * rows-per-fetch statistic back out of v$sql (via SQL_TABLE) and fails
+   * unless it is within 5% of the requested fetch size.
+   *
+   * @param jdbcFetchSize the fetch size to apply and verify
+   */
+  private void setAndCheckJdbcFetchSize(int jdbcFetchSize) {
+
+    try {
+      Connection conn = getConnection();
+
+      // Embed a unique marker in the SQL text so this exact execution can
+      // be located in v$sql afterwards.
+      String uniqueJunk =
+          (new SimpleDateFormat("yyyyMMddHHmmsszzz")).format(new Date())
+              + jdbcFetchSize;
+
+      OracleQueries.setJdbcFetchSize(conn, Integer.valueOf(jdbcFetchSize));
+
+      String uniqueSql =
+          String.format("select /*%s*/ * from dba_objects", uniqueJunk);
+      // Usually dba_objects will have a lot of rows. Drain the whole result
+      // set so Oracle records a realistic rows-per-fetch figure; close it
+      // afterwards to avoid leaking a cursor (previously left open).
+      try (ResultSet resultSet1 =
+          conn.createStatement().executeQuery(uniqueSql)) {
+        while (resultSet1.next()) {
+          // Nothing to do - we only care about the fetch statistics.
+        }
+      }
+
+      boolean sqlFound = false;
+      double rowsPerFetch = 0;
+      try (ResultSet resultSet2 =
+          conn.createStatement().executeQuery(SQL_TABLE)) {
+        while (resultSet2.next()) {
+          String sqlText = resultSet2.getString("SQL_TEXT");
+          if (sqlText.contains(uniqueJunk)) {
+            sqlFound = true;
+            rowsPerFetch = resultSet2.getDouble("ROWS_PER_FETCH");
+            break;
+          }
+        }
+      }
+
+      if (!sqlFound) {
+        Assert
+            .fail("Unable to find the performance metrics for the SQL "
+                + "statement being used to check the JDBC fetch size.");
+      }
+
+      // Allow 5% tolerance either side of the requested fetch size.
+      if (rowsPerFetch < jdbcFetchSize * 0.95
+          || rowsPerFetch > jdbcFetchSize * 1.05) {
+        Assert
+            .fail(String
+                .format(
+                    "The measured JDBC fetch size is not within 5%% of what we "
+                        + "expected. Expected=%s rows/fetch, actual=%s rows/fetch",
+                    jdbcFetchSize, rowsPerFetch));
+      }
+
+    } catch (SQLException ex) {
+      Assert.fail(ex.getMessage());
+    }
+  }
+
+  /**
+   * Connecting with a mangled user name must fail with ORA-01017
+   * (invalid username/password; logon denied).
+   */
+  @Test
+  public void testCreateOracleJdbcConnectionBadUserName() {
+
+    try {
+      OracleConnectionFactory.createOracleJdbcConnection(
+          OracleJdbcConnectorConstants.ORACLE_JDBC_DRIVER_CLASS,
+          provider.getConnectionUrl(),
+          provider.getConnectionUsername() + "_INVALID",
+          provider.getConnectionPassword());
+
+      Assert
+          .fail("OracleConnectionFactory should have thrown an exception in "
+              + "response to a rubbish user name.");
+
+    } catch (SQLException ex) {
+      Assert.assertEquals(ex.getErrorCode(), 1017); // <- ORA-01017 invalid
+                                                    // username/password; logon denied.
+    }
+  }
+
+  /**
+   * Connecting with a corrupted password must fail with ORA-01017
+   * (invalid username/password; logon denied).
+   */
+  @Test
+  public void testCreateOracleJdbcConnectionBadPassword() {
+
+    try {
+      OracleConnectionFactory.createOracleJdbcConnection(
+          OracleJdbcConnectorConstants.ORACLE_JDBC_DRIVER_CLASS,
+          provider.getConnectionUrl(),
+          provider.getConnectionUsername(),
+          "a" + provider.getConnectionPassword());
+
+      Assert
+          .fail("OracleConnectionFactory should have thrown an exception in "
+              + "response to a rubbish password.");
+
+    } catch (SQLException ex) {
+      Assert.assertEquals(ex.getErrorCode(), 1017); // <- ORA-01017 invalid
+                                                    // username/password; logon denied.
+    }
+  }
+
+  /**
+   * A connection built from the test provider's URL and credentials should
+   * be valid and able to return a non-blank instance name from v$instance.
+   */
+  @Test
+  public void testCreateOracleJdbcConnectionOk() {
+
+    try {
+      Connection conn = getConnection();
+
+      Assert.assertEquals(conn.isValid(15), true,
+          "The connection to the Oracle database does not appear to be valid.");
+
+      ResultSet resultSet =
+          conn.createStatement().executeQuery(
+              "select instance_name from v$instance");
+      if (!resultSet.next() || resultSet.getString(1).isEmpty()) {
+        Assert.fail("Got blank instance name from v$instance");
+      }
+    } catch (SQLException ex) {
+      Assert.fail(ex.getMessage());
+    }
+  }
+
+  /**
+   * Exercises the session-initialization statement handling of
+   * OracleConnectionFactory. Captures log4j output through a
+   * WriterAppender so the test can assert on what was logged for: the
+   * default statements, an empty statement list, an invalid statement
+   * (expected to log ORA-00900), a single DDL statement, and a
+   * multi-statement sequence containing comments and empty statements.
+   */
+  @Test
+  public void testExecuteOraOopSessionInitializationStatements() {
+
+    // Attach a string-backed appender so logged text can be inspected.
+    Logger log = Logger.getLogger(OracleConnectionFactory.class);
+    StringWriter stringWriter = new StringWriter();
+    Layout layout = new PatternLayout("%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n");
+    WriterAppender writerAppender = new WriterAppender(layout, stringWriter);
+    log.addAppender(writerAppender);
+
+    // Check that the default session-initialization statements are reflected in
+    // the log...
+    stringWriter.getBuffer().setLength(0);
+    checkExecuteOraOopSessionInitializationStatements(null);
+    checkLogContainsText(stringWriter.toString(),
+        "Initializing Oracle session with SQL : alter session disable "
+        + "parallel query");
+    checkLogContainsText(
+        stringWriter.toString(),
+        "Initializing Oracle session with SQL : alter session set "
+            + "\"_serial_direct_read\"=true");
+
+    // Check that the absence of session-initialization statements is reflected
+    // in the log...
+    stringWriter.getBuffer().setLength(0);
+    checkExecuteOraOopSessionInitializationStatements(";");
+    checkLogContainsText(stringWriter.toString(),
+        "No Oracle 'session initialization' statements were found to execute");
+
+    // This should throw an exception, as Oracle won't know what to do with
+    // this...
+    stringWriter.getBuffer().setLength(0);
+    checkExecuteOraOopSessionInitializationStatements("loremipsum");
+    checkLogContainsText(stringWriter.toString(), "loremipsum");
+    checkLogContainsText(stringWriter.toString(),
+        "ORA-00900: invalid SQL statement");
+
+    Connection conn = getConnection();
+    try {
+
+      // Try a session-initialization statement that creates a table...
+      dropTable(conn, TEST_TABLE_NAME);
+      checkExecuteOraOopSessionInitializationStatements("create table "
+          + TEST_TABLE_NAME + " (col1 varchar2(1))");
+      if (!doesTableExist(conn, TEST_TABLE_NAME)) {
+        Assert.fail("The session-initialization statement to create the table "
+            + TEST_TABLE_NAME + " did not work.");
+      }
+
+      // Try a sequence of a few statements...
+      // (The commented-out update and the empty ";;" statements should be
+      // skipped, leaving col1 = 2 after the two real updates... er, inserts
+      // plus one surviving update - see the assertion below.)
+      dropTable(conn, TEST_TABLE_NAME);
+      checkExecuteOraOopSessionInitializationStatements("create table "
+          + TEST_TABLE_NAME + " (col1 number);insert into "
+          + TEST_TABLE_NAME + " values (1) ; --update "
+          + TEST_TABLE_NAME + " set col1 = col1 + 1; update "
+          + TEST_TABLE_NAME
+          + " set col1 = col1 + 1; commit ;;");
+
+      ResultSet resultSet =
+          conn.createStatement().executeQuery(
+              "select col1 from " + TEST_TABLE_NAME);
+      resultSet.next();
+      int actualValue = resultSet.getInt("col1");
+      if (actualValue != 2) {
+        Assert.fail("The table " + TEST_TABLE_NAME
+            + " does not contain the data we expected.");
+      }
+
+      dropTable(conn, TEST_TABLE_NAME);
+
+    } catch (Exception ex) {
+      Assert.fail(ex.getMessage());
+    }
+    // Detach the appender so later tests don't keep writing into it.
+    log.removeAppender(writerAppender);
+  }
+
  /**
   * Unit-tests parseOraOopSessionInitializationStatements(): statements are
   * split on ';', trimmed, and empty entries and "--" comment entries are
   * discarded. Requires no database connection.
   */
  @Test
  public void testParseOraOopSessionInitializationStatements() {

    List<String> statements = null;

    // Null input yields an empty list rather than an NPE...
    statements =
        OracleConnectionFactory
            .parseOraOopSessionInitializationStatements(null);
    Assert.assertEquals(0, statements.size());

    // ...and so does an empty string...
    statements =
        OracleConnectionFactory
            .parseOraOopSessionInitializationStatements("");
    Assert.assertEquals(0, statements.size());

    // ...and a lone separator...
    statements =
        OracleConnectionFactory
            .parseOraOopSessionInitializationStatements(";");
    Assert.assertEquals(0, statements.size());

    // ...and a mix of separators, comments and whitespace.
    statements =
        OracleConnectionFactory
            .parseOraOopSessionInitializationStatements(";--;\t--");
    Assert.assertEquals(0, statements.size());

    // Leading whitespace is trimmed from a statement.
    statements =
        OracleConnectionFactory
            .parseOraOopSessionInitializationStatements("\ta");
    Assert.assertEquals(1, statements.size());
    if (!statements.get(0).equalsIgnoreCase("a")) {
      Assert.fail("Expected a session initialization statement of \"a\"");
    }

    // "--c" is a comment and is dropped; the remaining statements are kept
    // in their original order.
    statements =
        OracleConnectionFactory
            .parseOraOopSessionInitializationStatements("a;b;--c;d;");
    Assert.assertEquals(3, statements.size());
    if (!statements.get(0).equalsIgnoreCase("a")) {
      Assert.fail("Expected a session initialization statement of \"a\"");
    }
    if (!statements.get(1).equalsIgnoreCase("b")) {
      Assert.fail("Expected a session initialization statement of \"b\"");
    }
    if (!statements.get(2).equalsIgnoreCase("d")) {
      Assert.fail("Expected a session initialization statement of \"d\"");
    }

    // Expressions without default values...
    // NOTE(review): the commented-out cases below exercised {expr}
    // substitution via a Hadoop Configuration, which is not supported in
    // Sqoop 2 (see the TODO in OracleConnectionFactory). Kept for reference
    // until that feature is ported.
    /*conf.set(OraOopConstants.ORAOOP_SESSION_INITIALIZATION_STATEMENTS,
        "set a={expr1};b={expr2}/{expr3};");
    conf.set("expr1", "1");
    conf.set("expr2", "2");
    conf.set("expr3", "3");
    statements =
        OracleConnectionFactory
            .parseOraOopSessionInitializationStatements(conf);
    Assert.assertEquals(2, statements.size());
    String actual = statements.get(0);
    String expected = "set a=1";
    if (!actual.equalsIgnoreCase(expected)) {
      Assert.fail(String.format(
          "Expected a session initialization statement of \"%s\", but got \"%s\"."
          , expected, actual));
    }
    actual = statements.get(1);
    expected = "b=2/3";
    if (!actual.equalsIgnoreCase(expected)) {
      Assert.fail(String.format(
          "Expected a session initialization statement of \"%s\", but got \"%s\"."
          , expected, actual));
    }

    // Expressions with default values...
    conf.set(OraOopConstants.ORAOOP_SESSION_INITIALIZATION_STATEMENTS,
        "set c={expr3|66};d={expr4|15}/{expr5|90};");
    conf.set("expr3", "20");
    // conf.set("expr4", "21");
    // conf.set("expr5", "23");
    statements =
        OracleConnectionFactory
            .parseOraOopSessionInitializationStatements(conf);
    Assert.assertEquals(2, statements.size());
    actual = statements.get(0);
    expected = "set c=20";
    if (!actual.equalsIgnoreCase(expected)) {
      Assert.fail(String.format(
          "Expected a session initialization statement of \"%s\", but got \"%s\"."
          , expected, actual));
    }
    actual = statements.get(1);
    expected = "d=15/90";
    if (!actual.equalsIgnoreCase(expected)) {
      Assert.fail(String.format(
          "Expected a session initialization statement of \"%s\", but got \"%s\"."
          , expected, actual));
    }*/

  }
+
+ private void dropTable(Connection conn, String tableName) {
+
+ try {
+ conn.createStatement().executeQuery("drop table " + tableName);
+
+ if (doesTableExist(conn, tableName)) {
+ Assert.fail("Unable to drop the table " + tableName);
+ }
+ } catch (SQLException ex) {
+ if (ex.getErrorCode() != 942) { // <- Table or view does not exist
+ Assert.fail(ex.getMessage());
+ }
+ }
+ }
+
+ private boolean doesTableExist(Connection conn, String tableName) {
+
+ boolean result = false;
+ try {
+ List<OracleTable> tables = OracleQueries.getTables(conn);
+
+ for (int idx = 0; idx < tables.size(); idx++) {
+ if (tables.get(idx).getName().equalsIgnoreCase(tableName)) {
+ result = true;
+ break;
+ }
+ }
+ } catch (SQLException ex) {
+ Assert.fail(ex.getMessage());
+ }
+ return result;
+ }
+
+ private void checkLogContainsText(String log, String text) {
+
+ if (!log.toLowerCase().contains(text.toLowerCase())) {
+ Assert.fail(
+ "The LOG does not contain the following text (when it should):\n\t"
+ + text);
+ }
+ }
+
+ private void checkExecuteOraOopSessionInitializationStatements(
+ String statements) {
+
+ Connection conn = getConnection();
+
+ OracleConnectionFactory.executeOraOopSessionInitializationStatements(
+ conn, statements);
+ }
+
  /**
   * Integration test: setSessionClientInfo() should tag the current session
   * in v$session with the connector's module name and the configured action
   * name.
   * NOTE(review): assumes the test user can SELECT from v$session - confirm
   * the test schema has that privilege.
   */
  @Test
  public void testSetSessionClientInfo() {

    Connection conn = getConnection();

    ConnectionConfig connectionConfig = new ConnectionConfig();

    String moduleName = OracleJdbcConnectorConstants.ORACLE_SESSION_MODULE_NAME;
    // A timestamp-based action name distinguishes this run from any earlier
    // (or concurrent) runs against the same database instance.
    String actionName =
        (new SimpleDateFormat("yyyyMMddHHmmsszzz")).format(new Date());

    connectionConfig.actionName = actionName;

    try {
      // The same prepared statement is reused for the before/after queries.
      PreparedStatement statement =
          conn.prepareStatement("select process, module, action "
              + "from v$session " + "where module = ? and action = ?");
      statement.setString(1, moduleName);
      statement.setString(2, actionName);

      // Check no session have this action name - because we haven't applied to
      // our session yet...
      ResultSet resultSet = statement.executeQuery();
      if (resultSet.next()) {
        Assert
            .fail("There should be no Oracle sessions with an action name of "
                + actionName);
      }

      // Apply this action name to our session...
      OracleConnectionFactory.setSessionClientInfo(conn, connectionConfig);

      // Now check there is a session with our action name...
      int sessionFoundCount = 0;
      resultSet = statement.executeQuery();
      while (resultSet.next()) {
        sessionFoundCount++;
      }

      if (sessionFoundCount < 1) {
        Assert
            .fail("Unable to locate an Oracle session with the expected module "
                + "and action.");
      }

      if (sessionFoundCount > 1) {
        Assert
            .fail("Multiple sessions were found with the expected module and "
                + "action - we only expected to find one.");
      }
    } catch (SQLException ex) {
      Assert.fail(ex.getMessage());
    }

  }
+
+ private Connection getConnection() {
+
+ try {
+ return OracleConnectionFactory.createOracleJdbcConnection(
+ OracleJdbcConnectorConstants.ORACLE_JDBC_DRIVER_CLASS,
+ provider.getConnectionUrl(),
+ provider.getConnectionUsername(), provider.getConnectionPassword());
+ } catch (SQLException ex) {
+ Assert.fail(ex.getMessage());
+ }
+ return null;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/test/java/org/apache/sqoop/connector/jdbc/oracle/integration/OracleQueriesTest.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/test/java/org/apache/sqoop/connector/jdbc/oracle/integration/OracleQueriesTest.java b/connector/connector-oracle-jdbc/src/test/java/org/apache/sqoop/connector/jdbc/oracle/integration/OracleQueriesTest.java
new file mode 100644
index 0000000..48ab922
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/test/java/org/apache/sqoop/connector/jdbc/oracle/integration/OracleQueriesTest.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sqoop.connector.jdbc.oracle.integration;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleQueries;
+import org.testng.Assert;
+import org.testng.annotations.Test;
+
+/**
+ * Test Oracle queries against Oracle database.
+ */
+public class OracleQueriesTest extends OracleTestCase {
+
+ @Test
+ public void testGetCurrentSchema() throws Exception {
+ Connection conn = provider.getConnection();
+
+ String schema = OracleQueries.getCurrentSchema(conn);
+ Assert.assertEquals(schema.toUpperCase(),
+ provider.getConnectionUsername().toUpperCase());
+
+ PreparedStatement stmt =
+ conn.prepareStatement("ALTER SESSION SET CURRENT_SCHEMA=SYS");
+ stmt.execute();
+
+ schema = OracleQueries.getCurrentSchema(conn);
+ Assert.assertEquals(schema, "SYS");
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/test/java/org/apache/sqoop/connector/jdbc/oracle/integration/OracleTestCase.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/test/java/org/apache/sqoop/connector/jdbc/oracle/integration/OracleTestCase.java b/connector/connector-oracle-jdbc/src/test/java/org/apache/sqoop/connector/jdbc/oracle/integration/OracleTestCase.java
new file mode 100644
index 0000000..7be9a15
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/test/java/org/apache/sqoop/connector/jdbc/oracle/integration/OracleTestCase.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sqoop.connector.jdbc.oracle.integration;
+
+import org.apache.sqoop.common.test.db.OracleProvider;
+import org.testng.annotations.AfterGroups;
+import org.testng.annotations.BeforeGroups;
+import org.testng.annotations.Test;
+
@Test(groups = "oracle")
public abstract class OracleTestCase {

  // Shared by all tests in the "oracle" group: started once before the first
  // test of the group runs and stopped once after the last.
  protected static OracleProvider provider;

  /** Starts the Oracle test-database provider for the whole "oracle" group. */
  @BeforeGroups(value = "oracle")
  public static void startProvider() throws Exception {
    provider = new OracleProvider();
    provider.start();
  }

  /** Stops the shared provider after every test in the group has finished. */
  @AfterGroups(value = "oracle")
  public static void stopProvider() {
    provider.stop();
  }

}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/pom.xml
----------------------------------------------------------------------
diff --git a/connector/pom.xml b/connector/pom.xml
index 1b69180..be8fcb1 100644
--- a/connector/pom.xml
+++ b/connector/pom.xml
@@ -40,6 +40,7 @@ limitations under the License.
<module>connector-kafka</module>
<module>connector-ftp</module>
<module>connector-sftp</module>
+ <module>connector-oracle-jdbc</module>
<!-- Uncomment and finish connectors after sqoop framework will become stable
<module>connector-mysql-jdbc</module>
<module>connector-mysql-fastpath</module>
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index ab505f4..f33958c 100644
--- a/pom.xml
+++ b/pom.xml
@@ -381,6 +381,17 @@ limitations under the License.
</dependency>
<dependency>
<groupId>org.apache.sqoop.connector</groupId>
+ <artifactId>sqoop-connector-oracle-jdbc</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.sqoop.connector</groupId>
+ <artifactId>sqoop-connector-oracle-jdbc</artifactId>
+ <version>${project.version}</version>
+ <type>test-jar</type>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.sqoop.connector</groupId>
<artifactId>sqoop-connector-mysql-jdbc</artifactId>
<version>${project.version}</version>
</dependency>
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/server/pom.xml
----------------------------------------------------------------------
diff --git a/server/pom.xml b/server/pom.xml
index ca068e0..370a6a2 100644
--- a/server/pom.xml
+++ b/server/pom.xml
@@ -108,6 +108,11 @@ limitations under the License.
</dependency>
<dependency>
+ <groupId>org.apache.sqoop.connector</groupId>
+ <artifactId>sqoop-connector-oracle-jdbc</artifactId>
+ </dependency>
+
+ <dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<scope>provided</scope>
[4/6] sqoop git commit: SQOOP-2595: Add Oracle connector to Sqoop 2
Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleConnectionFactory.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleConnectionFactory.java b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleConnectionFactory.java
new file mode 100644
index 0000000..3ebb0d4
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleConnectionFactory.java
@@ -0,0 +1,246 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sqoop.connector.jdbc.oracle.util;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Properties;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.log4j.Logger;
+import org.apache.sqoop.connector.jdbc.oracle.OracleJdbcConnectorConstants;
+import org.apache.sqoop.connector.jdbc.oracle.configuration.ConnectionConfig;
+
+/**
+ * Create and initialize connections to Oracle RDBMS.
+ */
+public class OracleConnectionFactory {
+
+ protected OracleConnectionFactory() {
+ }
+
+ private static final Logger LOG =
+ Logger.getLogger(OracleConnectionFactory.class);
+
+ public static Connection makeConnection(ConnectionConfig config) throws SQLException {
+
+ String connectStr = config.connectionString;
+ String username = config.username;
+ String password = config.password;
+ Properties additionalProps = new Properties();
+ if(config.jdbcProperties != null) {
+ additionalProps.putAll(config.jdbcProperties);
+ }
+
+ Connection connection =
+ OracleConnectionFactory.createOracleJdbcConnection(
+ OracleJdbcConnectorConstants.ORACLE_JDBC_DRIVER_CLASS,
+ connectStr, username, password, additionalProps);
+ //TODO: This is from the other Oracle Manager
+// if (username == null) {
+// username = OracleManager.getSessionUser(connection);
+// }
+ OracleUtilities.setCurrentSessionUser(username);
+ return connection;
+ }
+
+ public static Connection createOracleJdbcConnection(
+ String jdbcDriverClassName, String jdbcUrl, String username,
+ String password) throws SQLException {
+ Properties props = null;
+ return createOracleJdbcConnection(jdbcDriverClassName, jdbcUrl, username,
+ password, props);
+ }
+
+ public static Connection createOracleJdbcConnection(
+ String jdbcDriverClassName, String jdbcUrl, String username,
+ String password, Properties additionalProps) throws SQLException {
+
+ loadJdbcDriver(jdbcDriverClassName);
+ Connection connection =
+ createConnection(jdbcUrl, username, password, additionalProps);
+
+ // Only OraOopDBRecordReader will call initializeOracleConnection(), as
+ // we only need to initialize the session(s) prior to the mapper starting
+ // it's job.
+ // i.e. We don't need to initialize the sessions in order to get the
+ // table's data-files etc.
+
+ // initializeOracleConnection(connection, conf);
+
+ return connection;
+ }
+
+ private static void loadJdbcDriver(String jdbcDriverClassName) {
+
+ try {
+ Class.forName(jdbcDriverClassName);
+ } catch (ClassNotFoundException ex) {
+ String errorMsg =
+ "Unable to load the jdbc driver class : " + jdbcDriverClassName;
+ LOG.error(errorMsg);
+ throw new RuntimeException(errorMsg);
+ }
+ }
+
+ private static Connection createConnection(String jdbcUrl, String username,
+ String password, Properties additionalProps) throws SQLException {
+
+ Properties props = new Properties();
+ if (username != null) {
+ props.put("user", username);
+ }
+
+ if (password != null) {
+ props.put("password", password);
+ }
+
+ if (additionalProps != null && additionalProps.size() > 0) {
+ props.putAll(additionalProps);
+ }
+
+ OracleUtilities.checkJavaSecurityEgd();
+
+ try {
+ Connection result = DriverManager.getConnection(jdbcUrl, props);
+ result.setAutoCommit(false);
+ return result;
+ } catch (SQLException ex) {
+ String errorMsg = String.format(
+ "Unable to obtain a JDBC connection to the URL \"%s\" as user \"%s\": ",
+ jdbcUrl, (username != null) ? username : "[null]");
+ LOG.error(errorMsg, ex);
+ throw ex;
+ }
+ }
+
+ public static void initializeOracleConnection(Connection connection,
+ ConnectionConfig config) throws SQLException {
+
+ connection.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
+
+ connection.setAutoCommit(false);
+
+ OracleQueries.setConnectionTimeZone(connection, config.timeZone);
+
+ setSessionClientInfo(connection, config);
+
+ OracleQueries.setJdbcFetchSize(connection, config.fetchSize);
+
+ executeOraOopSessionInitializationStatements(connection,
+ config.initializationStatements);
+ }
+
+ public static void setSessionClientInfo(Connection connection,
+ ConnectionConfig config) {
+
+ String sql = "";
+ try {
+ sql =
+ "begin \n"
+ + " dbms_application_info.set_module(module_name => "
+ + "'%s', action_name => '%s'); \n"
+ + "end;";
+
+ String oracleSessionActionName = config.actionName;
+
+ sql =
+ String.format(sql,
+ OracleJdbcConnectorConstants.ORACLE_SESSION_MODULE_NAME,
+ oracleSessionActionName);
+
+ Statement statement = connection.createStatement();
+ statement.execute(sql);
+ LOG.info("Initializing Oracle session with SQL :\n" + sql);
+ } catch (Exception ex) {
+ LOG.error(String.format("An error occurred while attempting to execute "
+ + "the following Oracle session-initialization statement:" + "\n%s"
+ + "\nError:" + "\n%s", sql, ex.getMessage()));
+ }
+ }
+
+ public static void executeOraOopSessionInitializationStatements(
+ Connection connection, String sessionInitializationStatements) {
+ String statementsStr = sessionInitializationStatements;
+ if(StringUtils.isEmpty(statementsStr)) {
+ statementsStr =OracleJdbcConnectorConstants.
+ ORACLE_SESSION_INITIALIZATION_STATEMENTS_DEFAULT;
+ }
+
+ List<String> statements =
+ parseOraOopSessionInitializationStatements(statementsStr);
+
+ if (statements.size() == 0) {
+ LOG.warn("No Oracle 'session initialization' statements were found to "
+ + "execute.");
+ } else {
+ for (String statement : statements) {
+ try {
+ connection.createStatement().execute(statement);
+ LOG.info("Initializing Oracle session with SQL : " + statement);
+ } catch (Exception ex) {
+ LOG.error(String.format(
+ "An error occurred while attempting to execute "
+ + "the following Oracle session-initialization statement:"
+ + "\n%s" + "\nError:" + "\n%s", statement, ex.getMessage()));
+ }
+ }
+ }
+ }
+
+ public static List<String> parseOraOopSessionInitializationStatements(
+ String sessionInitializationStatements) {
+
+ ArrayList<String> result = new ArrayList<String>();
+
+ if (sessionInitializationStatements != null
+ && !sessionInitializationStatements.isEmpty()) {
+ String[] initializationStatements =
+ sessionInitializationStatements.split(";");
+ for (String initializationStatement : initializationStatements) {
+ initializationStatement = initializationStatement.trim();
+ if (initializationStatement != null
+ && !initializationStatement.isEmpty()
+ && !initializationStatement
+ .startsWith(OracleJdbcConnectorConstants.Oracle.
+ ORACLE_SQL_STATEMENT_COMMENT_TOKEN)) {
+
+ LOG.debug(String
+ .format(
+ "initializationStatement (quoted & pre-expression "
+ + "evaluation) = \"%s\"",
+ initializationStatement));
+
+ //TODO: Not supported in Sqoop 2?
+ /*initializationStatement =
+ OracleUtilities.replaceConfigurationExpression(
+ initializationStatement, conf);*/
+
+ result.add(initializationStatement);
+ }
+ }
+ }
+ return result;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleDataChunk.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleDataChunk.java b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleDataChunk.java
new file mode 100644
index 0000000..5b24fe3
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleDataChunk.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sqoop.connector.jdbc.oracle.util;
+
+import org.apache.sqoop.job.etl.Partition;
+
+/**
+ * How data should be split between mappers.
+ */
public abstract class OracleDataChunk extends Partition {

  // Identifier for this chunk, e.g. an extent id or a partition name.
  private String id;

  /** Number of Oracle blocks covered by this chunk; used to balance splits. */
  public abstract long getNumberOfBlocks();

  /**
   * WHERE-clause predicate restricting a query to this chunk's rows;
   * the default "1=1" places no restriction.
   */
  public String getWhereClause() {
    return "1=1";
  }

  /** Optional PARTITION/SUBPARTITION clause; empty by default. */
  public String getPartitionClause() {
    return "";
  }

  public String getId() {
    return id;
  }

  public void setId(String newId) {
    this.id = newId;
  }

}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleDataChunkExtent.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleDataChunkExtent.java b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleDataChunkExtent.java
new file mode 100644
index 0000000..2f794c2
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleDataChunkExtent.java
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sqoop.connector.jdbc.oracle.util;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.sqoop.connector.jdbc.oracle.OracleJdbcConnectorConstants;
+
+/**
+ * Data should be split by extent for ROWID scans.
+ */
+public class OracleDataChunkExtent extends OracleDataChunk {
+
+ private int oracleDataObjectId;
+ private int relativeDatafileNumber;
+ private long startBlockNumber;
+ private long finishBlockNumber;
+
+ public OracleDataChunkExtent() {
+
+ }
+
+ public OracleDataChunkExtent(String id, int oracleDataObjectId,
+ int relativeDatafileNumber, long startBlockNumber,
+ long finishBlockNumber) {
+
+ this.setId(id);
+ this.oracleDataObjectId = oracleDataObjectId;
+ this.relativeDatafileNumber = relativeDatafileNumber;
+ this.startBlockNumber = startBlockNumber;
+ this.finishBlockNumber = finishBlockNumber;
+ }
+
+ @Override
+ public String getWhereClause() {
+ return String.format(
+ "(rowid >= dbms_rowid.rowid_create(%d, %d, %d, %d, %d)",
+ OracleJdbcConnectorConstants.Oracle.ROWID_EXTENDED_ROWID_TYPE,
+ this.oracleDataObjectId, this.relativeDatafileNumber,
+ this.startBlockNumber, 0)
+ + String.format(
+ " AND rowid <= dbms_rowid.rowid_create(%d, %d, %d, %d, %d))",
+ OracleJdbcConnectorConstants.Oracle.ROWID_EXTENDED_ROWID_TYPE,
+ this.oracleDataObjectId, this.relativeDatafileNumber,
+ this.finishBlockNumber,
+ OracleJdbcConnectorConstants.Oracle.ROWID_MAX_ROW_NUMBER_PER_BLOCK);
+ }
+
+ @Override
+ public void write(DataOutput output) throws IOException {
+ output.writeUTF(this.getId());
+ output.writeInt(this.oracleDataObjectId);
+ output.writeInt(this.relativeDatafileNumber);
+ output.writeLong(this.startBlockNumber);
+ output.writeLong(this.finishBlockNumber);
+ }
+
+ @Override
+ public void readFields(DataInput input) throws IOException {
+ this.setId(input.readUTF());
+ this.oracleDataObjectId = input.readInt();
+ this.relativeDatafileNumber = input.readInt();
+ this.startBlockNumber = input.readLong();
+ this.finishBlockNumber = input.readLong();
+ }
+
+ @Override
+ public long getNumberOfBlocks() {
+ if (this.finishBlockNumber == 0L && this.startBlockNumber == 0L) {
+ return 0;
+ } else {
+ return (this.finishBlockNumber - this.startBlockNumber) + 1L;
+ }
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append(String.format("\n\t%s = %s", "id", getId()));
+ sb.append(String.format("\n\t%s = %s",
+ "oracleDataObjectId", oracleDataObjectId));
+ sb.append(String.format("\n\t%s = %s",
+ "relativeDatafileNumber", relativeDatafileNumber));
+ sb.append(String.format("\n\t%s = %s",
+ "startBlockNumber", startBlockNumber));
+ sb.append(String.format("\n\t%s = %s",
+ "finishBlockNumber", finishBlockNumber));
+ return sb.toString();
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleDataChunkPartition.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleDataChunkPartition.java b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleDataChunkPartition.java
new file mode 100644
index 0000000..0a47e1f
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleDataChunkPartition.java
@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sqoop.connector.jdbc.oracle.util;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+/**
+ * Data should be split by partition.
+ */
+public class OracleDataChunkPartition extends OracleDataChunk {
+
+ private boolean isSubPartition;
+ private long blocks;
+
+ public OracleDataChunkPartition() {
+
+ }
+
+ OracleDataChunkPartition(String partitionName, boolean isSubPartition,
+ long blocks) {
+ this.setId(partitionName);
+ this.isSubPartition = isSubPartition;
+ this.blocks = blocks;
+ }
+
+ @Override
+ public long getNumberOfBlocks() {
+ return this.blocks;
+ }
+
+ @Override
+ public void write(DataOutput output) throws IOException {
+ output.writeUTF(this.getId());
+ output.writeBoolean(this.isSubPartition);
+ output.writeLong(this.blocks);
+ }
+
+ @Override
+ public void readFields(DataInput input) throws IOException {
+ this.setId(input.readUTF());
+ this.isSubPartition = input.readBoolean();
+ this.blocks = input.readLong();
+ }
+
+ @Override
+ public String getPartitionClause() {
+ StringBuilder sb = new StringBuilder();
+ sb.append(" ");
+ if (this.isSubPartition) {
+ sb.append("SUBPARTITION");
+ } else {
+ sb.append("PARTITION");
+ }
+ sb.append("(\"").append(this.getId()).append("\")");
+ return sb.toString();
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append(String.format("\n\t%s = %s", "id", getId()));
+ sb.append(String.format("\n\t%s = %s", "isSubPartition", isSubPartition));
+ sb.append(String.format("\n\t%s = %s", "blocks", blocks));
+ return sb.toString();
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleGenerics.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleGenerics.java b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleGenerics.java
new file mode 100644
index 0000000..0ecf587
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleGenerics.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sqoop.connector.jdbc.oracle.util;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * Generic class to hold list of objects.
+ */
public class OracleGenerics {

  /**
   * Minimal generic container exposing a restricted list API:
   * append, size, random access and iteration.
   */
  public static class ObjectList<T> {

    private final List<T> items = new ArrayList<T>();

    public ObjectList() {
    }

    public void add(T item) {
      items.add(item);
    }

    public int size() {
      return items.size();
    }

    public T get(int index) {
      return items.get(index);
    }

    public Iterator<T> iterator() {
      return items.iterator();
    }

  }

}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleJdbcUrl.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleJdbcUrl.java b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleJdbcUrl.java
new file mode 100644
index 0000000..65d0092
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleJdbcUrl.java
@@ -0,0 +1,244 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sqoop.connector.jdbc.oracle.util;
+
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleUtilities.JdbcOracleThinConnection;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleUtilities
+ .JdbcOracleThinConnectionParsingError;
+
+/**
+ * Parses the Oracle connection string.
+ */
+public class OracleJdbcUrl {
+
+ private String jdbcConnectString;
+
+ public OracleJdbcUrl(String jdbcConnectString) {
+
+ if (jdbcConnectString == null) {
+ throw new IllegalArgumentException(
+ "The jdbcConnectionString argument must not be null.");
+ }
+
+ if (jdbcConnectString.isEmpty()) {
+ throw new IllegalArgumentException(
+ "The jdbcConnectionString argument must not be empty.");
+ }
+
+ this.jdbcConnectString = jdbcConnectString;
+ }
+
+ public JdbcOracleThinConnection parseJdbcOracleThinConnectionString()
+ throws JdbcOracleThinConnectionParsingError {
+
+ /*
+ * http://wiki.oracle.com/page/JDBC
+ *
+ * There are different flavours of JDBC connections for Oracle, including:
+ * Thin E.g. jdbc:oracle:thin:@localhost.locadomain:1521:orcl
+ *
+ * A pure Java driver used on the client side that does not need an Oracle
+ * client installation. It is recommended that you use this driver unless
+ * you need support for non-TCP/IP networks because it provides for maximum
+ * portability and performance.
+ *
+ * Oracle Call Interface driver (OCI). E.g. jdbc:oracle:oci8:@orcl.world
+ * //<- "orcl.world" is a TNS entry
+ *
+ * This uses the Oracle client installation libraries and interfaces. If you
+ * want to support connection pooling or client side caching of requests,
+ * use this driver. You will also need this driver if you are using
+ * transparent application failover (TAF) from your application as well as
+ * strong authentication like Kerberos and PKI certificates.
+ *
+ * JDBC-ODBC bridge. E.g. jdbc:odbc:mydatabase //<- "mydatabase" is an ODBC
+ * data source.
+ *
+ * This uses the ODBC driver in Windows to connect to the database.
+ */
+
+ String hostName = null;
+ int port = 0;
+ String sid = null;
+ String service = null;
+
+ String jdbcUrl = this.jdbcConnectString.trim();
+
+ // If there are any parameters included at the end of the connection URL,
+ // let's remove them now...
+ int paramsIdx = jdbcUrl.indexOf("?");
+ if (paramsIdx > -1) {
+ jdbcUrl = jdbcUrl.substring(0, paramsIdx);
+ }
+
+ /*
+ * The format of an Oracle jdbc URL is one of:
+ * jdbc:oracle:<driver-type>:@tnsname - for tnsname based login
+ * jdbc:oracle:<driver-type>:@<host>:<port>:<sid>
+ * jdbc:oracle:<driver-type>:@<host>:<port>/<service>
+ * jdbc:oracle:<driver-type>:@<host>:<port>/<service>?<parameters>
+ * jdbc:oracle:<driver-type>:@//<host>:<port>/<service>
+ * jdbc:oracle:<driver-type>:@//<host>:<port>/<service>?<parameters>
+ */
+
+ // Split the URL on its ":" characters...
+ String[] jdbcFragments = jdbcUrl.trim().split(":");
+
+ // Clean up each fragment of the URL...
+ for (int idx = 0; idx < jdbcFragments.length; idx++) {
+ jdbcFragments[idx] = jdbcFragments[idx].trim();
+ }
+
+ // Check we can proceed...
+ if (jdbcFragments.length < 4 || jdbcFragments.length > 6) {
+ throw new JdbcOracleThinConnectionParsingError(
+ String.format(
+ "There should be 4, 5 or 6 colon-separated pieces of data in the "
+ + "JDBC URL, such as:\n\tjdbc:oracle:<driver-type>:@tnsname\n"
+ + "\tjdbc:oracle:<driver-type>:@<host>:<port>:<sid>\n"
+ + "\tjdbc:oracle:<driver-type>:@<host>:<port>/<service>\n"
+ + "\tjdbc:oracle:<driver-type>:@<host>:<port>/<service>?<parameters>\n"
+ + "The JDBC URL specified was:\n"
+ + "%s\n"
+ + "which contains %d pieces of colon-separated data.",
+ this.jdbcConnectString, jdbcFragments.length));
+ }
+
+ // jdbc
+ if (!jdbcFragments[0].equalsIgnoreCase("jdbc")) {
+ throw new JdbcOracleThinConnectionParsingError(
+ "The first item in the colon-separated JDBC URL must be \"jdbc\".");
+ }
+
+ // jdbc:oracle
+ if (!jdbcFragments[1].equalsIgnoreCase("oracle")) {
+ throw new JdbcOracleThinConnectionParsingError(
+ "The second item in the colon-separated JDBC URL must be \"oracle\".");
+ }
+
+ // jdbc:oracle:thin
+ if (!jdbcFragments[2].equalsIgnoreCase("thin")) {
+ throw new JdbcOracleThinConnectionParsingError(
+ String
+ .format(
+ "The Oracle \"thin\" JDBC driver is not being used.\n"
+ + "The third item in the colon-separated JDBC URL must "
+ + "be \"thin\", not \"%s\".",
+ jdbcFragments[2]));
+ }
+
+ // jdbc:oracle:thin:@<host>
+ hostName = jdbcFragments[3];
+ if (hostName.isEmpty() || hostName.equalsIgnoreCase("@")) {
+ throw new JdbcOracleThinConnectionParsingError(
+ "The fourth item in the colon-separated JDBC URL (the host name) "
+ + "must not be empty.");
+ }
+
+ if (!hostName.startsWith("@")) {
+ throw new JdbcOracleThinConnectionParsingError(
+ "The fourth item in the colon-separated JDBC URL (the host name) "
+ + "must a prefixed with the \"@\" character.");
+ }
+
+ String portStr = "";
+ String tnsName = "";
+
+ switch (jdbcFragments.length) {
+ case 6:
+ // jdbc:oracle:<driver-type>:@<host>:<port>:<sid>
+ portStr = jdbcFragments[4];
+ sid = jdbcFragments[5];
+ break;
+
+ case 5:
+ // jdbc:oracle:<driver-type>:@<host>:<port>/<service>
+ String[] portAndService = jdbcFragments[4].split("/");
+ if (portAndService.length != 2) {
+ throw new JdbcOracleThinConnectionParsingError(
+ "The fifth colon-separated item in the JDBC URL "
+ + "(<port>/<service>) must contain two items "
+ + "separated by a \"/\".");
+ }
+ portStr = portAndService[0].trim();
+ service = portAndService[1].trim();
+ break;
+
+ case 4:
+ // jdbc:oracle:<driver-type>:@tnsname
+ tnsName = jdbcFragments[3].trim();
+ break;
+
+ default:
+ throw new JdbcOracleThinConnectionParsingError("Internal error parsing "
+ + "JDBC connection string.");
+ }
+
+ if (jdbcFragments.length > 4) {
+ if (portStr.isEmpty()) {
+ throw new JdbcOracleThinConnectionParsingError(
+ "The fifth item in the colon-separated JDBC URL (the port) must not"
+ + " be empty.");
+ }
+
+ try {
+ port = Integer.parseInt(portStr);
+ } catch (NumberFormatException ex) {
+ throw new JdbcOracleThinConnectionParsingError(
+ String
+ .format(
+ "The fifth item in the colon-separated JDBC URL (the port) "
+ + "must be a valid number.\n"
+ + "\"%s\" could not be parsed as an integer.", portStr));
+ }
+
+ if (port <= 0) {
+ throw new JdbcOracleThinConnectionParsingError(
+ String
+ .format(
+ "The fifth item in the colon-separated JDBC URL (the port) "
+ + "must be greater than zero.\n"
+ + "\"%s\" was specified.", portStr));
+ }
+ }
+
+ if (sid == null && service == null && tnsName == null) {
+ throw new JdbcOracleThinConnectionParsingError(
+ "The JDBC URL does not contain a SID or SERVICE. The URL should look "
+ + "like one of these:\n\tjdbc:oracle:<driver-type>:@tnsname\n"
+ + "\tjdbc:oracle:<driver-type>:@<host>:<port>:<sid>\n"
+ + "\tjdbc:oracle:<driver-type>:@<host>:<port>/<service>\n"
+ + "\tjdbc:oracle:<driver-type>:@<host>:<port>/<service>?<parameters>\n"
+ + "\tjdbc:oracle:<driver-type>:@//<host>:<port>/<service>\n"
+ + "\tjdbc:oracle:<driver-type>:@<host>:<port>/<service>?<parameters>");
+ }
+
+ // Remove the "@" prefix of the hostname
+ JdbcOracleThinConnection result =
+ new JdbcOracleThinConnection(hostName.replaceFirst("^[@][/]{0,2}", "")
+ , port, sid, service, tnsName.replaceFirst("^[@][/]{0,2}", ""));
+
+ return result;
+ }
+
+ public String getConnectionUrl() {
+ return this.jdbcConnectString;
+ }
+
+}
[3/6] sqoop git commit: SQOOP-2595: Add Oracle connector to Sqoop 2
Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleQueries.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleQueries.java b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleQueries.java
new file mode 100644
index 0000000..bb199c3
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleQueries.java
@@ -0,0 +1,1721 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sqoop.connector.jdbc.oracle.util;
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+import java.math.BigDecimal;
+import java.security.InvalidParameterException;
+import java.sql.Connection;
+import java.sql.Date;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.sql.Timestamp;
+import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.TimeZone;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.log4j.Logger;
+import org.apache.sqoop.connector.jdbc.oracle.OracleJdbcConnectorConstants;
+import org.apache.sqoop.schema.type.Column;
+import org.joda.time.DateTimeZone;
+
+/**
+ * Contains the queries to get data dictionary information from Oracle database.
+ */
+public final class OracleQueries {
+
  // Class-level logger for this utility class.
  private static final Logger LOG =
      Logger.getLogger(OracleQueries.class);

  // The Oracle JDBC driver classes are resolved reflectively (see the static
  // initializer below), so this class compiles without the Oracle driver jar
  // on the compile-time classpath; the driver is only needed at runtime.
  private static Class<?> oracleConnectionClass;
  private static Class<?> oracleStatementClass;
  private static Class<?> oracleResultSetClass;
  private static Class<?> oracleTypesClass;
  private static Class<?> oracleDateClass;
  // Handles to OraclePreparedStatement.set<Type>AtName(String, ...) methods,
  // which bind values to named (":name") rather than positional parameters.
  private static Method methSetLongAtName;
  private static Method methSetBigDecimalAtName;
  private static Method methSetStringAtName;
  private static Method methSetTimestampAtName;
  private static Method methSetBinaryDoubleAtName;
  private static Method methSetObjectAtName;
  private static Method methSetBinaryFloatAtName;
  private static Method methSetIntAtName;

  // Cache mapping Oracle type names to integer type codes.
  // NOTE(review): no reads or writes of this map are visible in this chunk;
  // confirm usage elsewhere in the class.
  private static final Map<String, Integer> ORACLE_TYPES =
      new HashMap<String, Integer>();

  static {
    // Resolve the Oracle driver classes and the named-parameter setter
    // methods once, up front. A failure here means the Oracle JDBC driver is
    // missing or incompatible, so fail fast with a RuntimeException.
    try {
      oracleStatementClass =
          Class.forName("oracle.jdbc.OraclePreparedStatement");
      methSetLongAtName =
          oracleStatementClass.getMethod("setLongAtName", String.class,
              long.class);
      methSetBigDecimalAtName =
          oracleStatementClass.getMethod("setBigDecimalAtName", String.class,
              BigDecimal.class);
      methSetStringAtName =
          oracleStatementClass.getMethod("setStringAtName", String.class,
              String.class);
      methSetTimestampAtName =
          oracleStatementClass.getMethod("setTimestampAtName", String.class,
              Timestamp.class);
      methSetBinaryDoubleAtName =
          oracleStatementClass.getMethod("setBinaryDoubleAtName", String.class,
              double.class);
      methSetObjectAtName =
          oracleStatementClass.getMethod("setObjectAtName", String.class,
              Object.class);
      methSetBinaryFloatAtName =
          oracleStatementClass.getMethod("setBinaryFloatAtName", String.class,
              float.class);
      methSetIntAtName =
          oracleStatementClass.getMethod("setIntAtName", String.class,
              int.class);

      oracleResultSetClass = Class.forName("oracle.jdbc.OracleResultSet");
      oracleDateClass = Class.forName("oracle.sql.DATE");
      oracleConnectionClass = Class.forName("oracle.jdbc.OracleConnection");
      oracleTypesClass = Class.forName("oracle.jdbc.OracleTypes");
    } catch (Exception e) {
      throw new RuntimeException(
          "Problem getting Oracle JDBC methods via reflection.", e);
    }
  }

  // Utility class: all members are static, so no instances are needed.
  private OracleQueries() {
  }
+
+ public static void setJdbcFetchSize(Connection connection,
+ Integer fetchSize) {
+ int fetchSizeInt =
+ OracleJdbcConnectorConstants.ORACLE_ROW_FETCH_SIZE_DEFAULT;
+ if(fetchSize != null && fetchSize.intValue() > 0) {
+ fetchSizeInt = fetchSize.intValue();
+ }
+ try {
+ Method methSetPrefetch =
+ oracleConnectionClass.getMethod("setDefaultRowPrefetch", int.class);
+ methSetPrefetch.invoke(connection, fetchSizeInt);
+
+ String msg =
+ "The Oracle connection has had its default row fetch size set to : "
+ + fetchSizeInt;
+ if (fetchSizeInt ==
+ OracleJdbcConnectorConstants.ORACLE_ROW_FETCH_SIZE_DEFAULT) {
+ LOG.debug(msg);
+ } else {
+ LOG.info(msg);
+ }
+ } catch (Exception ex) {
+ LOG.warn(
+ String
+ .format(
+ "Unable to configure the DefaultRowPrefetch of the "
+ + "Oracle connection in %s.",
+ OracleUtilities.getCurrentMethodName()), ex);
+ }
+
+ }
+
+ public static void setConnectionTimeZone(Connection connection,
+ String timeZone) {
+ String timeZoneStr = timeZone;
+ if(StringUtils.isEmpty(timeZoneStr)) {
+ timeZoneStr = "GMT";
+ }
+ TimeZone timeZoneObj = TimeZone.getTimeZone(timeZoneStr);
+ try {
+ Method methSession =
+ oracleConnectionClass.getMethod("setSessionTimeZone", String.class);
+ Method methDefault =
+ oracleConnectionClass.getMethod("setDefaultTimeZone", TimeZone.class);
+ methSession.invoke(connection, timeZoneObj.getID());
+ methDefault.invoke(connection, timeZoneObj);
+ TimeZone.setDefault(timeZoneObj);
+ DateTimeZone.setDefault(DateTimeZone.forTimeZone(timeZoneObj));
+ LOG.info("Session Time Zone set to " + timeZoneObj.getID());
+ } catch (Exception e) {
+ LOG.error("Error setting time zone: " + e.getMessage());
+ }
+ }
+
+ public static OracleTablePartitions getPartitions(Connection connection,
+ OracleTable table) throws SQLException {
+
+ OracleTablePartitions result = new OracleTablePartitions();
+
+ PreparedStatement statement =
+ connection
+ .prepareStatement("with"
+ + " partitions as"
+ + " (select table_owner, table_name, partition_name"
+ + " from dba_tab_partitions"
+ + " where"
+ + " table_owner = ? and"
+ + " table_name = ?),"
+ + " subpartitions as"
+ + " (select table_owner, table_name, partition_name, subpartition_name"
+ + " from dba_tab_subpartitions"
+ + " where"
+ + " table_owner = ? and"
+ + " table_name = ?)"
+ + " select"
+ + " partitions.partition_name,"
+ + " subpartitions.subpartition_name"
+ + " from partitions left outer join subpartitions on"
+ + " (partitions.table_owner = subpartitions.table_owner"
+ + " and partitions.table_name = subpartitions.table_name"
+ + " and partitions.partition_name = subpartitions.partition_name)"
+ + " order by partition_name, subpartition_name");
+
+ statement.setString(1, table.getSchema());
+ statement.setString(2, table.getName());
+ statement.setString(3, table.getSchema());
+ statement.setString(4, table.getName());
+
+ ResultSet resultSet = statement.executeQuery();
+
+ OracleTablePartition partition = null;
+ while (resultSet.next()) {
+ String partitionName = resultSet.getString("partition_name");
+ String subPartitionName = resultSet.getString("subpartition_name");
+
+ if (subPartitionName != null && !("".equals(subPartitionName))) {
+ partition = new OracleTablePartition(subPartitionName, true);
+ result.add(partition);
+ } else {
+ if (partition == null || partition.isSubPartition()
+ || partition.getName() != partitionName) {
+ partition = new OracleTablePartition(partitionName, false);
+ result.add(partition);
+ }
+ }
+ }
+
+ resultSet.close();
+ statement.close();
+
+ return result;
+ }
+
+// public static int getOracleDataObjectNumber(Connection connection,
+// OracleTable table) throws SQLException {
+//
+// PreparedStatement statement =
+// connection.prepareStatement("SELECT data_object_id "
+// + " FROM dba_objects" + " WHERE owner = ?" + " and object_name = ?"
+// + " and object_type = ?");
+// statement.setString(1, table.getSchema());
+// statement.setString(2, table.getName());
+// statement.setString(3, "TABLE");
+//
+// ResultSet resultSet = statement.executeQuery();
+//
+// resultSet.next();
+// int result = resultSet.getInt("data_object_id");
+//
+// resultSet.close();
+// statement.close();
+//
+// return result;
+// }
+//
+ private static String getPartitionBindVars(List<String> partitionList) {
+ String result = "";
+ for (int i = 1; i <= partitionList.size(); i++) {
+ result += (i > 1) ? "," : "";
+ result += ":part" + i;
+ }
+ return result;
+ }
+
+ private static void bindPartitionBindVars(PreparedStatement statement,
+ List<String> partitionList) throws SQLException {
+ int i = 0;
+ for (String partition : partitionList) {
+ i++;
+ OracleQueries.setStringAtName(statement, "part" + i, partition);
+ }
+ }
+
  /**
   * Lists the partition-based data chunks of the given table: one chunk per
   * partition, or per sub-partition where sub-partitions exist, together
   * with the block count of that (sub-)partition's segment.
   *
   * For index-organized tables (iot_type='IOT') the segment size is taken
   * from the primary-key index segment rather than the table segment.
   *
   * @param connection an open connection with SELECT access to the dba_*
   *                   dictionary views used below
   * @param table the table to chunk
   * @param partitionList optional list of partition names to restrict the
   *                      chunks to; null or empty means all partitions
   * @return one OracleDataChunkPartition per (sub-)partition
   * @throws SQLException if the dictionary query fails
   */
  public static List<OracleDataChunkPartition>
      getOracleDataChunksPartition(Connection connection, OracleTable table,
          List<String> partitionList) throws SQLException {
    List<OracleDataChunkPartition> result =
        new ArrayList<OracleDataChunkPartition>();
    // Outer-join tab_partitions to tab_subpartitions so that a partition
    // with sub-partitions yields one row per sub-partition, and a partition
    // without them yields a single row (is_subpartition = 0).
    String sql =
        "SELECT "
            + " pl.partition_name, "
            + " pl.is_subpartition, "
            + " s.blocks "
            + "FROM "
            + " (SELECT tp.table_owner, "
            + " tp.table_name, "
            + " NVL(tsp.subpartition_name,tp.partition_name) partition_name, "
            + " nvl2(tsp.subpartition_name,1,0) is_subpartition "
            + " FROM dba_tab_partitions tp, "
            + " dba_tab_subpartitions tsp "
            + " WHERE tp.table_owner = :table_owner"
            + " AND tp.table_name = :table_name"
            + " AND tsp.table_owner(+) =tp.table_owner "
            + " AND tsp.table_name(+) =tp.table_name "
            + " AND tsp.partition_name(+)=tp.partition_name ";

    if (partitionList != null && partitionList.size() > 0) {
      sql +=
          " AND tp.partition_name IN (" + getPartitionBindVars(partitionList)
              + ") ";
    }

    // Join to dba_segments to obtain each chunk's block count; IOT tables
    // are sized via their primary-key index segment (see dba_constraints).
    sql += " ) pl, dba_tables t, dba_segments s "
        + "WHERE t.owner=pl.table_owner "
        + "AND t.table_name=pl.table_name "
        + "AND ( "
        + " (t.iot_type='IOT' AND (s.owner,s.segment_name)= "
        + " (SELECT c.index_owner,c.index_name "
        + " FROM dba_constraints c "
        + " WHERE c.owner=pl.table_owner "
        + " AND c.table_name=pl.table_name "
        + " AND c.constraint_type='P')) "
        + " OR (t.iot_type IS NULL "
        + " AND s.owner=t.owner "
        + " AND s.segment_name=t.table_name) "
        + " ) "
        + "AND s.partition_name=pl.partition_name";

    PreparedStatement statement = connection.prepareStatement(sql);
    OracleQueries.setStringAtName(statement, "table_owner", table
        .getSchema());
    OracleQueries.setStringAtName(statement, "table_name", table
        .getName());

    if (partitionList != null && partitionList.size() > 0) {
      bindPartitionBindVars(statement, partitionList);
    }

    // The named bind variables are substituted in the debug output only;
    // the statement itself still uses the placeholders.
    LOG.debug(String.format("%s SQL Query =\n%s", OracleUtilities
        .getCurrentMethodName(), sql.replace(":table_owner", table.getSchema())
        .replace(":table_name", table.getName())));

    ResultSet resultSet = statement.executeQuery();

    while (resultSet.next()) {
      OracleDataChunkPartition dataChunk =
          new OracleDataChunkPartition(resultSet
              .getString("partition_name"), resultSet
              .getBoolean("is_subpartition"), resultSet.getLong("blocks"));
      result.add(dataChunk);
    }
    resultSet.close();
    statement.close();
    return result;
  }
+
  /**
   * Lists extent-based data chunks for the given table: the table's extents
   * (from dba_extents) are grouped per data file into approximately
   * numberOfChunksPerOracleDataFile batches of contiguous blocks, and each
   * batch becomes one OracleDataChunkExtent.
   *
   * @param connection an open connection with SELECT access to dba_extents,
   *                   dba_objects and dba_tab_subpartitions
   * @param table the table to chunk
   * @param partitionList optional list of partition names to restrict the
   *                      chunks to; null or empty means all
   * @param numberOfChunksPerOracleDataFile how many chunks to form per data
   *                                        file (bound to :numchunks)
   * @return the data chunks, ordered by object, file and batch
   * @throws SQLException if the dictionary query fails
   */
  public static List<OracleDataChunkExtent> getOracleDataChunksExtent(
      Connection connection, OracleTable table,
      List<String> partitionList, long numberOfChunksPerOracleDataFile)
      throws SQLException {

    List<OracleDataChunkExtent> result =
        new ArrayList<OracleDataChunkExtent>();

    // The analytic CEIL(...) expression assigns each extent to a "file_batch"
    // so that every (data_object_id, file_id) pair is split into roughly
    // :numchunks batches of equal block counts; the outer query then
    // aggregates each batch into a single start/end block range.
    String sql =
        "SELECT data_object_id, "
            + "file_id, "
            + "relative_fno, "
            + "file_batch, "
            + "MIN (start_block_id) start_block_id, "
            + "MAX (end_block_id) end_block_id, "
            + "SUM (blocks) blocks "
            + "FROM (SELECT o.data_object_id, "
            + "e.file_id, "
            + "e.relative_fno, "
            + "e.block_id start_block_id, "
            + "e.block_id + e.blocks - 1 end_block_id, "
            + "e.blocks, "
            + "CEIL ( "
            + " SUM ( "
            + " e.blocks) "
            + " OVER (PARTITION BY o.data_object_id, e.file_id "
            + " ORDER BY e.block_id ASC) "
            + " / (SUM (e.blocks) "
            + " OVER (PARTITION BY o.data_object_id, e.file_id) "
            + " / :numchunks)) "
            + " file_batch "
            + "FROM dba_extents e, dba_objects o, dba_tab_subpartitions tsp "
            + "WHERE o.owner = :owner "
            + "AND o.object_name = :object_name "
            + "AND e.owner = :owner "
            + "AND e.segment_name = :object_name "
            + "AND o.owner = e.owner "
            + "AND o.object_name = e.segment_name "
            + "AND (o.subobject_name = e.partition_name "
            + " OR (o.subobject_name IS NULL AND e.partition_name IS NULL)) "
            + "AND o.owner = tsp.table_owner(+) "
            + "AND o.object_name = tsp.table_name(+) "
            + "AND o.subobject_name = tsp.subpartition_name(+) ";

    if (partitionList != null && partitionList.size() > 0) {
      // For sub-partition objects, filter on the parent partition's name.
      sql +=
          " AND case when o.object_type='TABLE SUBPARTITION' then "
              + "tsp.partition_name else o.subobject_name end IN ("
              + getPartitionBindVars(partitionList) + ") ";
    }

    sql +=
        ") " + "GROUP BY data_object_id, " + " file_id, "
            + " relative_fno, " + " file_batch "
            + "ORDER BY data_object_id, " + " file_id, "
            + " relative_fno, " + " file_batch";

    PreparedStatement statement = connection.prepareStatement(sql);
    OracleQueries.setLongAtName(statement, "numchunks",
        numberOfChunksPerOracleDataFile);
    OracleQueries.setStringAtName(statement, "owner", table.getSchema());
    OracleQueries.setStringAtName(statement, "object_name", table
        .getName());

    if (partitionList != null && partitionList.size() > 0) {
      bindPartitionBindVars(statement, partitionList);
    }

    // Substitute the bind variables in the debug output only.
    LOG.debug(String.format("%s SQL Query =\n%s", OracleUtilities
        .getCurrentMethodName(), sql.replace(":numchunks",
        Long.toString(numberOfChunksPerOracleDataFile)).replace(":owner",
        table.getSchema()).replace(":object_name", table.getName())));

    ResultSet resultSet = statement.executeQuery();

    while (resultSet.next()) {
      int fileId = resultSet.getInt("relative_fno");
      int fileBatch = resultSet.getInt("file_batch");
      String dataChunkId =
          OracleUtilities.generateDataChunkId(fileId, fileBatch);
      OracleDataChunkExtent dataChunk =
          new OracleDataChunkExtent(dataChunkId, resultSet
              .getInt("data_object_id"), resultSet.getInt("relative_fno"),
              resultSet.getLong("start_block_id"), resultSet
                  .getLong("end_block_id"));
      result.add(dataChunk);
    }

    resultSet.close();
    statement.close();

    return result;
  }
+
+// private static void trace(String message) {
+//
+// LOG.debug(message);
+// }
+//
+ public static String getOracleObjectType(Connection connection,
+ OracleTable table) throws SQLException {
+
+ PreparedStatement statement =
+ connection.prepareStatement("SELECT object_type " + " FROM dba_objects"
+ + " WHERE owner = ?" + " and object_name = ?");
+ statement.setString(1, table.getSchema());
+ statement.setString(2, table.getName());
+
+ ResultSet resultSet = statement.executeQuery();
+
+ String result = null;
+ if (resultSet.next()) {
+ result = resultSet.getString("object_type");
+ }
+
+ resultSet.close();
+ statement.close();
+
+ return result;
+ }
+
  /**
   * Determines the Oracle database version by parsing the "Release x.y.z.p"
   * banner from v$version. The major/minor/version/patch numbers are
   * extracted in SQL via SUBSTR/INSTR on the banner text.
   *
   * @param connection an open connection with SELECT access to v$version
   * @return the parsed version, including the full banner text
   * @throws SQLException if the query fails or returns no row
   */
  public static OracleVersion getOracleVersion(Connection connection)
      throws SQLException {

    // The inner query isolates the "Oracle..." banner row and slices the
    // dotted release string into its four numeric components; the isDb64bit
    // and isHPUX columns are computed but not consumed below.
    String sql =
        "SELECT \n"
            + " v.banner, \n"
            + " rtrim(v.version) full_version, \n"
            + " rtrim(v.version_bit) version_bit, \n"
            + " SUBSTR(v.version, 1, INSTR(v.version, '.', 1, 1)-1) major, \n"
            + " SUBSTR(v.version, INSTR(v.version, '.', 1, 1) + 1, "
            + " INSTR(v.version, '.', 1, 2) - INSTR(v.version, '.', 1, 1) - 1) "
            + " minor, \n"
            + " SUBSTR(v.version, INSTR(v.version, '.', 1, 2) + 1, "
            + " INSTR(v.version, '.', 1, 3) - INSTR(v.version, '.', 1, 2) - 1) "
            + " version, \n"
            + " SUBSTR(v.version, INSTR(v.version, '.', 1, 3) + 1, "
            + " INSTR(v.version, '.', 1, 4) - INSTR(v.version, '.', 1, 3) - 1) "
            + " patch, \n"
            + " DECODE(instr(v.banner, '64bit'), 0, 'False', 'True') isDb64bit, \n"
            + " DECODE(instr(b.banner, 'HPUX'), 0, 'False', 'True') isHPUX \n"
            + "FROM (SELECT rownum row_num, \n"
            + " banner,\n"
            + " SUBSTR(SUBSTR(banner,INSTR(banner,'Release ')+8), 1) version_bit,\n"
            + " SUBSTR(SUBSTR(banner,INSTR(banner,'Release ')+8), 1,\n"
            + " INSTR(SUBSTR(banner,INSTR(banner,'Release ')+8),' ')) version\n"
            + "FROM v$version\n" + " WHERE banner LIKE 'Oracle%'\n"
            + " OR banner LIKE 'Personal Oracle%') v,\n" + "v$version b\n"
            + " WHERE v.row_num = 1\n" + " and b.banner like 'TNS%'\n";

    Statement statement = connection.createStatement();
    ResultSet resultSet = statement.executeQuery(sql);
    // Exactly one row is expected (v.row_num = 1 above).
    resultSet.next();
    OracleVersion result =
        new OracleVersion(resultSet.getInt("major"), resultSet.getInt("minor"),
            resultSet.getInt("version"), resultSet.getInt("patch"), resultSet
                .getString("banner"));

    resultSet.close();
    statement.close();

    return result;
  }
+
  /**
   * Returns every table visible in dba_tables, across all owners,
   * ordered by owner then table name (no filters applied).
   *
   * @param connection an open connection with SELECT access to dba_tables
   * @return all visible tables
   * @throws SQLException if the dictionary query fails
   */
  public static List<OracleTable> getTables(Connection connection)
      throws SQLException {

    return getTables(connection, null, null, TableNameQueryType.Equals);
  }
+
  // Which optional filters were supplied to a getTables() query.
  private enum GetTablesOptions {
    Owner, Table
  }

  // How the table-name filter is compared: exact match ("=") or SQL LIKE.
  private enum TableNameQueryType {
    Equals, Like
  }
+
+// public static List<OracleTable>
+// getTables(Connection connection, String owner) throws SQLException {
+//
+// return getTables(connection, owner, null, TableNameQueryType.Equals);
+// }
+
+ public static OracleTable getTable(Connection connection, String owner,
+ String tableName) throws SQLException {
+
+ List<OracleTable> tables =
+ getTables(connection, owner, tableName, TableNameQueryType.Equals);
+ if (tables.size() > 0) {
+ return tables.get(0);
+ }
+
+ return null;
+ }
+
  /**
   * Returns the tables owned by the given schema whose names match the
   * given SQL LIKE pattern.
   *
   * @param connection an open connection with SELECT access to dba_tables
   * @param owner the schema to search
   * @param tableNameLike a SQL LIKE pattern for the table name
   * @return the matching tables, ordered by owner then table name
   * @throws SQLException if the dictionary query fails
   */
  public static List<OracleTable> getTablesWithTableNameLike(
      Connection connection, String owner, String tableNameLike)
      throws SQLException {

    return getTables(connection, owner, tableNameLike, TableNameQueryType.Like);
  }
+
  /**
   * Queries dba_tables for tables matching the optional owner and
   * table-name filters. The WHERE clause is assembled dynamically from
   * whichever filters were supplied, and the bind positions are assigned
   * accordingly.
   *
   * @param connection an open connection with SELECT access to dba_tables
   * @param owner optional owning-schema filter; ignored when null/empty
   * @param tableName optional table-name filter; ignored when null/empty
   * @param tableNameQueryType whether tableName is compared with "=" or LIKE
   * @return the matching tables, ordered by owner then table name
   * @throws SQLException if the dictionary query fails
   */
  private static List<OracleTable> getTables(Connection connection,
      String owner, String tableName, TableNameQueryType tableNameQueryType)
      throws SQLException {

    // Record which optional filters were actually supplied.
    EnumSet<GetTablesOptions> options = EnumSet.noneOf(GetTablesOptions.class);

    if (owner != null && !owner.isEmpty()) {
      options.add(GetTablesOptions.Owner);
    }

    if (tableName != null && !tableName.isEmpty()) {
      options.add(GetTablesOptions.Table);
    }

    // The four %s slots are: WHERE keyword, owner predicate, AND keyword,
    // table-name predicate — each emitted only when applicable.
    String sql =
        "SELECT owner, table_name " + " FROM dba_tables" + " %s %s %s %s "
            + " ORDER BY owner, table_name";

    String tableComparitor = null;
    switch (tableNameQueryType) {
      case Equals:
        tableComparitor = "=";
        break;
      case Like:
        tableComparitor = "LIKE";
        break;
      default:
        throw new RuntimeException("Operator not implemented.");
    }

    sql =
        String.format(sql, options.isEmpty() ? "" : "WHERE", options
            .contains(GetTablesOptions.Owner) ? "owner = ?" : "", options
            .containsAll(EnumSet.of(GetTablesOptions.Owner,
                GetTablesOptions.Table)) ? "AND" : "", options
            .contains(GetTablesOptions.Table) ? String.format(
            "table_name %s ?", tableComparitor) : "");

    PreparedStatement statement = connection.prepareStatement(sql);

    // Bind positions depend on which predicates made it into the SQL above.
    if (options.containsAll(EnumSet.of(GetTablesOptions.Owner,
        GetTablesOptions.Table))) {
      statement.setString(1, owner);
      statement.setString(2, tableName);
    } else {
      if (options.contains(GetTablesOptions.Owner)) {
        statement.setString(1, owner);
      } else if (options.contains(GetTablesOptions.Table)) {
        statement.setString(1, tableName);
      }
    }

    ResultSet resultSet = statement.executeQuery();

    ArrayList<OracleTable> result = new ArrayList<OracleTable>();
    while (resultSet.next()) {
      result.add(new OracleTable(resultSet.getString("owner"), resultSet
          .getString("table_name")));
    }

    resultSet.close();
    statement.close();

    return result;
  }
+
+ public static List<String> getTableColumnNames(Connection connection,
+ OracleTable table) throws SQLException {
+
+ OracleTableColumns oracleTableColumns = getTableColumns(connection, table);
+ List<String> result = new ArrayList<String>(oracleTableColumns.size());
+
+ for (int idx = 0; idx < oracleTableColumns.size(); idx++) {
+ result.add(oracleTableColumns.get(idx).getName());
+ }
+
+ return result;
+ }
+
+ public static List<String> getToTableColumnNames(Connection connection,
+ OracleTable table, boolean onlyOraOopSupportedTypes,
+ boolean omitOraOopPseudoColumns) throws SQLException {
+
+ OracleTableColumns oracleTableColumns =
+ getToTableColumns(connection, table, onlyOraOopSupportedTypes,
+ omitOraOopPseudoColumns);
+
+ List<String> result = new ArrayList<String>(oracleTableColumns.size());
+
+ for (int idx = 0; idx < oracleTableColumns.size(); idx++) {
+ result.add(oracleTableColumns.get(idx).getName());
+ }
+
+ return result;
+
+ }
+
+ public static List<String> getFromTableColumnNames(Connection connection,
+ OracleTable table, boolean omitLobAndLongColumnsDuringImport,
+ boolean onlyOraOopSupportedTypes) throws SQLException {
+
+ OracleTableColumns oracleTableColumns =
+ getFromTableColumns(connection, table,
+ omitLobAndLongColumnsDuringImport, onlyOraOopSupportedTypes);
+
+ List<String> result = new ArrayList<String>(oracleTableColumns.size());
+
+ for (int idx = 0; idx < oracleTableColumns.size(); idx++) {
+ result.add(oracleTableColumns.get(idx).getName());
+ }
+
+ return result;
+
+ }
+
  /**
   * Retrieves the columns of the given table in two phases: first the column
   * names and Oracle data-type names from dba_tab_columns (optionally
   * filtered by a data-type clause and an omit-list), then the JDBC type
   * codes via the metadata of a zero-row "SELECT cols ... WHERE 0=1" against
   * the table itself.
   *
   * NOTE(review): the statements/result sets here are not closed if an
   * exception is thrown mid-method (no try/finally) — consider hardening.
   *
   * @param connection an open connection with access to dba_tab_columns and
   *                   SELECT access on the table itself
   * @param table the table whose columns to describe
   * @param omitLobColumns present in the signature but not referenced in
   *                       this method's body; filtering is driven entirely
   *                       by dataTypesClause
   * @param dataTypesClause optional SQL predicate appended to the
   *                        dba_tab_columns query (null for none)
   * @param columnNamesToOmit optional set of column names to skip
   * @return the table's columns, in column-id order
   * @throws SQLException if either query fails
   */
  private static OracleTableColumns getTableColumns(Connection connection,
      OracleTable table, boolean omitLobColumns, String dataTypesClause,
      HashSet<String> columnNamesToOmit) throws SQLException {

    String sql =
        "SELECT column_name, data_type " + " FROM dba_tab_columns"
            + " WHERE owner = ?" + " and table_name = ?" + " %s"
            + " ORDER BY column_id";

    sql =
        String.format(sql, dataTypesClause == null ? "" : " and "
            + dataTypesClause);

    LOG.debug(String.format("%s : sql = \n%s", OracleUtilities
        .getCurrentMethodName(), sql));

    OracleTableColumns result = new OracleTableColumns();
    PreparedStatement statement = connection.prepareStatement(sql);
    statement.setString(1, getTableSchema(connection, table));
    statement.setString(2, table.getName());

    ResultSet resultSet = statement.executeQuery();

    while (resultSet.next()) {

      String columnName = resultSet.getString("column_name");

      // Skip columns the caller explicitly asked to omit.
      if (columnNamesToOmit != null) {
        if (columnNamesToOmit.contains(columnName)) {
          continue;
        }
      }

      result.add(new OracleTableColumn(columnName, resultSet
          .getString("data_type")));
    }

    resultSet.close();
    statement.close();

    // Now get the actual JDBC data-types for these columns...
    StringBuilder columnList = new StringBuilder();
    for (int idx = 0; idx < result.size(); idx++) {
      if (idx > 0) {
        columnList.append(",");
      }
      columnList.append(result.get(idx).getName());
    }
    // "WHERE 0=1" returns no rows; only the result-set metadata is needed.
    sql =
        String.format("SELECT %s FROM %s WHERE 0=1", columnList.toString(),
            table.toString());
    Statement statementDesc = connection.createStatement();
    ResultSet resultSetDesc = statementDesc.executeQuery(sql);
    ResultSetMetaData metaData = resultSetDesc.getMetaData();
    for (int idx = 0; idx < metaData.getColumnCount(); idx++) {
      result.get(idx).setOracleType(metaData.getColumnType(idx + 1)); // <- JDBC
                                                                     // is
                                                                     // 1-based
    }
    resultSetDesc.close();
    statementDesc.close();

    return result;
  }
+
+ /**
+  * Convenience overload: returns all columns of the table, with no
+  * data-type restriction and no omitted column names.
+  */
+ public static OracleTableColumns getTableColumns(Connection connection,
+ OracleTable table) throws SQLException {
+ return getTableColumns(connection, table, false /* omitLobColumns */,
+ null /* dataTypesClause */, null /* columnNamesToOmit */);
+ }
+
+ /**
+  * Returns the columns of an export ("to") table.
+  *
+  * @param onlyOraOopSupportedTypes restrict to data types OraOop can export
+  * @param omitOraOopPseudoColumns exclude the OraOop partition,
+  *        subpartition and mapper-row pseudo columns from the result
+  */
+ public static OracleTableColumns getToTableColumns(Connection connection,
+ OracleTable table, boolean onlyOraOopSupportedTypes,
+ boolean omitOraOopPseudoColumns) throws SQLException {
+ // Initialize to null, not "": getTableColumns() injects " and " in front
+ // of any non-null clause, and an empty clause produced invalid SQL.
+ String dataTypesClause = null;
+ HashSet<String> columnNamesToOmit = null;
+ if (onlyOraOopSupportedTypes) {
+ dataTypesClause = OracleJdbcConnectorConstants.
+ SUPPORTED_EXPORT_ORACLE_DATA_TYPES_CLAUSE;
+ }
+ if (omitOraOopPseudoColumns) {
+ columnNamesToOmit = new HashSet<String>();
+ columnNamesToOmit.add(OracleJdbcConnectorConstants.
+ COLUMN_NAME_EXPORT_PARTITION);
+ columnNamesToOmit.add(OracleJdbcConnectorConstants.
+ COLUMN_NAME_EXPORT_SUBPARTITION);
+ columnNamesToOmit.add(OracleJdbcConnectorConstants.
+ COLUMN_NAME_EXPORT_MAPPER_ROW);
+ }
+ return getTableColumns(connection, table,
+ false, dataTypesClause, columnNamesToOmit);
+ }
+
+ /**
+  * Returns the columns of an import ("from") table.
+  *
+  * @param omitLobAndLongColumnsDuringImport exclude BLOB/CLOB/NCLOB/LONG
+  *        columns; only takes effect when onlyOraOopSupportedTypes is true
+  * @param onlyOraOopSupportedTypes restrict to data types OraOop can import
+  */
+ public static OracleTableColumns getFromTableColumns(Connection connection,
+ OracleTable table, boolean omitLobAndLongColumnsDuringImport,
+ boolean onlyOraOopSupportedTypes) throws SQLException {
+ // Initialize to null, not "": getTableColumns() injects " and " in front
+ // of any non-null clause, and an empty clause produced invalid SQL.
+ String dataTypesClause = null;
+ HashSet<String> columnNamesToOmit = null;
+ if (onlyOraOopSupportedTypes) {
+ dataTypesClause = OracleJdbcConnectorConstants.
+ SUPPORTED_IMPORT_ORACLE_DATA_TYPES_CLAUSE;
+
+ if (omitLobAndLongColumnsDuringImport) {
+ LOG.info("LOB and LONG columns are being omitted from the Import.");
+ dataTypesClause =
+ " DATA_TYPE not in ('BLOB', 'CLOB', 'NCLOB', 'LONG') and "
+ + dataTypesClause;
+ }
+ }
+ return getTableColumns(connection, table,
+ omitLobAndLongColumnsDuringImport, dataTypesClause, columnNamesToOmit);
+ }
+
+ /**
+  * Queries v$active_instances and parses each inst_name ("host:instance")
+  * into an OracleActiveInstance.
+  *
+  * @return the active instances, or null if v$active_instances has no rows
+  *         (i.e. this database is not a RAC)
+  * @throws SQLException on query failure, or if an inst_name value cannot
+  *         be parsed into a host name and an instance name
+  */
+ public static List<OracleActiveInstance> getOracleActiveInstances(
+ Connection connection) throws SQLException {
+
+ // Returns null if there are no rows in v$active_instances - which indicates
+ // this Oracle database is not a RAC.
+ ArrayList<OracleActiveInstance> result = null;
+
+ Statement statement = connection.createStatement();
+ ResultSet resultSet =
+ statement.executeQuery("select inst_name from v$active_instances ");
+
+ while (resultSet.next()) {
+ String instName = resultSet.getString("inst_name");
+ String[] nameFragments = instName.split(":");
+
+ if (nameFragments.length != 2) {
+ throw new SQLException(
+ "Parsing Error: The inst_name column of v$active_instances does "
+ + "not contain two values separated by a colon.");
+ }
+
+ String hostName = nameFragments[0].trim();
+ String instanceName = nameFragments[1].trim();
+
+ if (hostName.isEmpty()) {
+ throw new SQLException(
+ "Parsing Error: The inst_name column of v$active_instances does "
+ + "not include a host name.");
+ }
+
+ if (instanceName.isEmpty()) {
+ throw new SQLException(
+ "Parsing Error: The inst_name column of v$active_instances does "
+ + "not include an instance name.");
+ }
+
+ OracleActiveInstance instance = new OracleActiveInstance();
+ instance.setHostName(hostName);
+ instance.setInstanceName(instanceName);
+
+ // Lazily allocate so a non-RAC database yields null, not an empty list.
+ if (result == null) {
+ result = new ArrayList<OracleActiveInstance>();
+ }
+
+ result.add(instance);
+ }
+
+ // NOTE(review): resultSet/statement are not closed if a parsing
+ // SQLException is thrown above.
+ resultSet.close();
+ statement.close();
+ return result;
+ }
+
+ /**
+  * Returns the instance_name from v$instance, or an empty string if the
+  * view unexpectedly has no rows.
+  */
+ public static String getCurrentOracleInstanceName(Connection connection)
+ throws SQLException {
+
+ Statement stmt = connection.createStatement();
+ ResultSet rows =
+ stmt.executeQuery("select instance_name from v$instance");
+
+ String instanceName = "";
+ if (rows.next()) {
+ instanceName = rows.getString("instance_name");
+ }
+
+ rows.close();
+ stmt.close();
+ return instanceName;
+ }
+
+ /**
+  * Returns the database's SYSDATE as an oracle.sql.DATE. The value is typed
+  * Object because the Oracle driver classes are loaded reflectively (via
+  * oracleResultSetClass) rather than referenced at compile time.
+  *
+  * @throws SQLException if the underlying getDATE call failed with one
+  */
+ public static Object getSysDate(Connection connection) throws SQLException {
+ Statement statement = connection.createStatement();
+ ResultSet resultSet = statement.executeQuery("select sysdate from dual");
+
+ resultSet.next();
+ try {
+ // Reflective equivalent of ((OracleResultSet) resultSet).getDATE(1).
+ Method method = oracleResultSetClass.getMethod("getDATE", int.class);
+ return method.invoke(resultSet, 1);
+ } catch (Exception e) {
+ // invoke() wraps the real failure; re-surface a SQLException if that is
+ // what the driver actually threw.
+ if (e.getCause() instanceof SQLException) {
+ throw (SQLException) e.getCause();
+ } else {
+ throw new RuntimeException("Could not get sysdate", e);
+ }
+ } finally {
+ resultSet.close();
+ statement.close();
+ }
+ }
+
+ /**
+  * Renders an oracle.sql.DATE (passed as Object, since the driver is loaded
+  * reflectively) to text using the given Oracle date-format mask, via the
+  * driver's DATE.toText(format, language). The language argument is passed
+  * as null.
+  */
+ public static String oraDATEToString(Object date, String format) {
+ try {
+ Method dateMethod =
+ oracleDateClass.getMethod("toText", String.class, String.class);
+ return (String) dateMethod.invoke(date, format, null);
+ } catch (Exception e) {
+ throw new RuntimeException(String.format(
+ "Unable to convert the oracle.sql.DATE value \"%s\" to text.", date
+ .toString()), e);
+ }
+ }
+
+ /**
+  * Parses text into an oracle.sql.DATE (returned as Object, since the
+  * driver is loaded reflectively) via the driver's static
+  * DATE.fromText(date, format, language); invoked with a null receiver
+  * because fromText is static. The language argument is passed as null.
+  */
+ public static Object oraDATEFromString(String date, String format) {
+ try {
+ Method dateMethod =
+ oracleDateClass.getMethod("fromText", String.class, String.class,
+ String.class);
+ return dateMethod.invoke(null, date, format, null);
+ } catch (Exception e) {
+ throw new RuntimeException(String
+ .format(
+ "Unable to convert the String value \"%s\" to oracle.sql.DATE.",
+ date), e);
+ }
+ }
+
+ /**
+  * Converts an oracle.sql.DATE (accessed reflectively) to java.sql.Date via
+  * the driver's DATE.dateValue().
+  */
+ public static Date oraDATEToDate(Object date) {
+ try {
+ Method dateMethod = oracleDateClass.getMethod("dateValue");
+ return (Date) dateMethod.invoke(date);
+ } catch (Exception e) {
+ // Message fixed: previously said "Could not get sysdate" (copy/paste
+ // from getSysDate), which misdescribed the failure.
+ throw new RuntimeException(String.format(
+ "Unable to convert the oracle.sql.DATE value \"%s\" to "
+ + "java.sql.Date.", date), e);
+ }
+ }
+
+ /**
+  * Returns the database's SYSTIMESTAMP rendered as a String, using the
+  * Oracle driver's TIMESTAMP type accessed reflectively.
+  *
+  * @throws SQLException if the underlying getTIMESTAMP call failed with one
+  */
+ public static String getSysTimeStamp(Connection connection)
+ throws SQLException {
+
+ Statement statement = connection.createStatement();
+ ResultSet resultSet =
+ statement.executeQuery("select systimestamp from dual");
+
+ resultSet.next();
+
+ try {
+ // Reflective equivalent of ((OracleResultSet) resultSet).getTIMESTAMP(1).
+ Method method = oracleResultSetClass.getMethod("getTIMESTAMP", int.class);
+ Object timestamp = method.invoke(resultSet, 1);
+ return timestamp.toString();
+ } catch (Exception e) {
+ if (e.getCause() instanceof SQLException) {
+ throw (SQLException) e.getCause();
+ } else {
+ // Message fixed: previously said "Could not get sysdate" (copy/paste).
+ throw new RuntimeException("Could not get systimestamp", e);
+ }
+ } finally {
+ resultSet.close();
+ statement.close();
+ }
+ }
+
+ /**
+  * Reports whether the table is an index-organized table (IOT).
+  *
+  * @return true if dba_tables.iot_type is non-null and non-empty for the
+  *         table; false otherwise (including when no row is found)
+  */
+ public static boolean isTableAnIndexOrganizedTable(Connection connection,
+ OracleTable table) throws SQLException {
+
+ /*
+ * http://ss64.com/orad/DBA_TABLES.html IOT_TYPE: If index-only table,then
+ * IOT_TYPE is IOT or IOT_OVERFLOW or IOT_MAPPING else NULL
+ */
+
+ boolean result = false;
+
+ PreparedStatement statement =
+ connection.prepareStatement("select iot_type " + "from dba_tables "
+ + "where owner = ? " + "and table_name = ?");
+ // NOTE(review): uses table.getSchema() directly rather than
+ // getTableSchema(connection, table); a table with no schema set would not
+ // match any owner - confirm callers always populate the schema here.
+ statement.setString(1, table.getSchema());
+ statement.setString(2, table.getName());
+ ResultSet resultSet = statement.executeQuery();
+
+ if (resultSet.next()) {
+ String iotType = resultSet.getString("iot_type");
+ result = iotType != null && !iotType.isEmpty();
+ }
+
+ resultSet.close();
+ statement.close();
+
+ return result;
+ }
+
+ /**
+  * Drops the given table, silently ignoring ORA-00942 ("table or view does
+  * not exist") so the call is idempotent.
+  */
+ public static void dropTable(Connection connection, OracleTable table)
+ throws SQLException {
+
+ String sql = String.format("DROP TABLE %s", table.toString());
+
+ Statement statement = connection.createStatement();
+ try {
+ statement.execute(sql);
+ } catch (SQLException ex) {
+ if (ex.getErrorCode() != 942) { // ORA-00942: table or view does not exist
+ throw ex;
+ }
+ } finally {
+ // Previously the statement leaked when an unexpected SQLException was
+ // rethrown; always close it.
+ statement.close();
+ }
+ }
+
+ /**
+  * Swaps the contents of the named subpartition of {@code table} with the
+  * standalone {@code subPartitionTable} via ALTER TABLE ... EXCHANGE
+  * SUBPARTITION.
+  */
+ public static void
+ exchangeSubpartition(Connection connection, OracleTable table,
+ String subPartitionName, OracleTable subPartitionTable)
+ throws SQLException {
+
+ String exchangeDdl =
+ String.format("ALTER TABLE %s EXCHANGE SUBPARTITION %s WITH TABLE %s",
+ table.toString(), subPartitionName, subPartitionTable.toString());
+
+ Statement ddlStatement = connection.createStatement();
+ ddlStatement.execute(exchangeDdl);
+ ddlStatement.close();
+ }
+
+ /**
+  * Creates newTable as an empty structural copy of templateTable
+  * (CREATE TABLE ... AS SELECT * ... WHERE 0=1), optionally NOLOGGING,
+  * with the supplied storage clause placed after the logging clause.
+  */
+ public static void createExportTableFromTemplate(Connection connection,
+ OracleTable newTable, String tableStorageClause,
+ OracleTable templateTable, boolean noLogging) throws SQLException {
+
+ String sql =
+ String.format("CREATE TABLE %s \n" + "%s %s \n" + "AS \n"
+ + "(SELECT * FROM %s WHERE 0=1)", newTable.toString(),
+ noLogging ? "NOLOGGING" : "", tableStorageClause, templateTable
+ .toString());
+
+ Statement statement = connection.createStatement();
+ statement.execute(sql);
+ statement.close();
+ }
+
+ /**
+  * Returns a copy of the given oracle.sql.DATE advanced by the given number
+  * of Julian days and seconds. The input is first cloned via its
+  * toBytes()/byte[]-constructor pair so the caller's value is not mutated;
+  * all driver access is reflective.
+  */
+ private static Object oraDATEAddJulianDays(Object date, int julianDay,
+ int julianSec) {
+ try {
+ Constructor<?> dateCon = oracleDateClass.getConstructor(byte[].class);
+ Method dateBytes = oracleDateClass.getMethod("toBytes");
+ // Clone the DATE from its byte representation before adding.
+ Object result = dateCon.newInstance(dateBytes.invoke(date));
+ Method dateAdd =
+ oracleDateClass.getMethod("addJulianDays", int.class, int.class);
+ result = dateAdd.invoke(result, julianDay, julianSec);
+ return result;
+ } catch (Exception e) {
+ throw new RuntimeException("Could not add days to date.", e);
+ }
+ }
+
+ /**
+  * Creates the partitioned export table: an empty structural copy of
+  * templateTable plus the three OraOop pseudo columns, RANGE-partitioned on
+  * the partition column (bound one Julian day past jobDateTime) and
+  * LIST-subpartitioned with one subpartition per mapper.
+  */
+ public static void createExportTableFromTemplateWithPartitioning(
+ Connection connection, OracleTable newTable, String tableStorageClause,
+ OracleTable templateTable, boolean noLogging, String partitionName,
+ Object jobDateTime, int numberOfMappers, String[] subPartitionNames)
+ throws SQLException {
+
+ String dateFormat = "yyyy-mm-dd hh24:mi:ss";
+
+ Object partitionBound =
+ OracleQueries.oraDATEAddJulianDays(jobDateTime, 0, 1);
+
+ String partitionBoundStr =
+ OracleQueries.oraDATEToString(partitionBound, dateFormat);
+
+ StringBuilder subPartitions = new StringBuilder();
+ for (int idx = 0; idx < numberOfMappers; idx++) {
+ if (idx > 0) {
+ subPartitions.append(",");
+ }
+
+ subPartitions.append(String.format(" SUBPARTITION %s VALUES (%d)",
+ subPartitionNames[idx], idx));
+ }
+
+ String sql =
+ String.format(
+ "CREATE TABLE %s \n" + "%s %s \n" + "PARTITION BY RANGE (%s) \n"
+ + "SUBPARTITION BY LIST (%s) \n" + "(PARTITION %s \n"
+ + "VALUES LESS THAN (to_date('%s', '%s')) \n" + "( %s ) \n"
+ + ") \n" + "AS \n"
+ + "(SELECT t.*, sysdate %s, 0 %s, 0 %s FROM %s t \n"
+ + "WHERE 0=1)", newTable.toString(), noLogging ? "NOLOGGING"
+ : "", tableStorageClause,
+ OracleJdbcConnectorConstants.COLUMN_NAME_EXPORT_PARTITION,
+ OracleJdbcConnectorConstants.COLUMN_NAME_EXPORT_SUBPARTITION,
+ partitionName, partitionBoundStr, dateFormat,
+ subPartitions.toString(),
+ OracleJdbcConnectorConstants.COLUMN_NAME_EXPORT_PARTITION,
+ OracleJdbcConnectorConstants.COLUMN_NAME_EXPORT_SUBPARTITION,
+ OracleJdbcConnectorConstants.COLUMN_NAME_EXPORT_MAPPER_ROW,
+ templateTable.toString());
+
+ LOG.debug(String.format("SQL generated by %s:\n%s", OracleUtilities
+ .getCurrentMethodName(), sql));
+
+ try {
+ // Create the main export table. A plain Statement is used here: the
+ // previous code prepared a PreparedStatement and then called
+ // execute(sql) on it, which the JDBC spec requires PreparedStatement
+ // to reject with a SQLException.
+ Statement statement = connection.createStatement();
+ statement.execute(sql);
+ statement.close();
+ } catch (SQLException ex) {
+ LOG.error(String
+ .format(
+ "The error \"%s\" was encountered when executing the following "
+ + "SQL statement:\n%s",
+ ex.getMessage(), sql));
+ throw ex;
+ }
+ }
+
+ /**
+  * Creates one mapper's export table as an empty NOLOGGING structural copy
+  * of templateTable, optionally appending the three OraOop pseudo columns
+  * (partition, subpartition, mapper row) initialized to SYSDATE/0/0.
+  */
+ public static void createExportTableForMapper(Connection connection,
+ OracleTable table, String tableStorageClause, OracleTable templateTable,
+ boolean addOraOopPartitionColumns) throws SQLException {
+
+ String sql = "";
+ try {
+
+ // Create the N tables to be used by the mappers...
+ Statement statement = connection.createStatement();
+ if (addOraOopPartitionColumns) {
+ sql =
+ String.format("CREATE TABLE %s \n" + "NOLOGGING %s \n" + "AS \n"
+ + "(SELECT t.*, SYSDATE %s, 0 %s, 0 %s FROM %s t WHERE 0=1)",
+ table.toString(), tableStorageClause,
+ OracleJdbcConnectorConstants.COLUMN_NAME_EXPORT_PARTITION,
+ OracleJdbcConnectorConstants.COLUMN_NAME_EXPORT_SUBPARTITION,
+ OracleJdbcConnectorConstants.COLUMN_NAME_EXPORT_MAPPER_ROW,
+ templateTable.toString());
+ } else {
+ sql =
+ String.format("CREATE TABLE %s \n" + "NOLOGGING %s \n" + "AS \n"
+ + "(SELECT * FROM %s WHERE 0=1)", table.toString(),
+ tableStorageClause, templateTable.toString());
+ }
+
+ LOG.info(String.format("SQL generated by %s:\n%s", OracleUtilities
+ .getCurrentMethodName(), sql));
+
+ statement.execute(sql);
+ statement.close();
+ } catch (SQLException ex) {
+ // Log the failing statement verbatim before rethrowing for the caller.
+ LOG.error(String
+ .format(
+ "The error \"%s\" was encountered when executing the following "
+ + "SQL statement:\n%s",
+ ex.getMessage(), sql));
+ throw ex;
+ }
+ }
+
+ /**
+  * Adds another RANGE partition (with its per-mapper LIST subpartitions) to
+  * the export table, bound one Julian day past jobDateTime.
+  */
+ public static void createMoreExportTablePartitions(Connection connection,
+ OracleTable table, String partitionName, Object jobDateTime,
+ String[] subPartitionNames) throws SQLException {
+
+ String dateFormat = "yyyy-mm-dd hh24:mi:ss";
+
+ Object partitionBound =
+ OracleQueries.oraDATEAddJulianDays(jobDateTime, 0, 1);
+ String partitionBoundStr =
+ OracleQueries.oraDATEToString(partitionBound, dateFormat);
+
+ StringBuilder subPartitions = new StringBuilder();
+ for (int idx = 0; idx < subPartitionNames.length; idx++) {
+ if (idx > 0) {
+ subPartitions.append(",");
+ }
+
+ subPartitions.append(String.format(" SUBPARTITION %s VALUES (%d)",
+ subPartitionNames[idx], idx));
+ }
+
+ String sql =
+ String.format("ALTER TABLE %s " + "ADD PARTITION %s "
+ + "VALUES LESS THAN (to_date('%s', '%s'))" + "( %s ) ", table
+ .toString(), partitionName, partitionBoundStr, dateFormat,
+ subPartitions.toString());
+
+ LOG.debug(String.format("SQL generated by %s:\n%s", OracleUtilities
+ .getCurrentMethodName(), sql));
+
+ try {
+ // A plain Statement is used here: the previous code prepared a
+ // PreparedStatement and then called execute(sql) on it, which the JDBC
+ // spec requires PreparedStatement to reject with a SQLException.
+ Statement statement = connection.createStatement();
+ statement.execute(sql);
+ statement.close();
+ } catch (SQLException ex) {
+ LOG.error(String
+ .format(
+ "The error \"%s\" was encountered when executing the following "
+ + "SQL statement:\n%s",
+ ex.getMessage(), sql));
+ throw ex;
+ }
+ }
+
+ /**
+  * MERGEs the rows of sourceTable into targetTable, matching on
+  * mergeColumnNames. Matched rows have their non-key columns updated
+  * (excluding the OraOop pseudo columns); unmatched rows are inserted.
+  * Optionally adds Oracle append/parallel hints.
+  *
+  * oraOopSysDate and oraOopMapperId are accepted for signature consistency
+  * with updateTable() but are not used here.
+  */
+ public static void mergeTable(Connection connection, OracleTable targetTable,
+ OracleTable sourceTable, String[] mergeColumnNames,
+ OracleTableColumns oracleTableColumns, Object oraOopSysDate,
+ int oraOopMapperId, boolean parallelizationEnabled) throws SQLException {
+
+ StringBuilder updateClause = new StringBuilder();
+ StringBuilder insertClause = new StringBuilder();
+ StringBuilder valuesClause = new StringBuilder();
+ for (int idx = 0; idx < oracleTableColumns.size(); idx++) {
+ OracleTableColumn oracleTableColumn = oracleTableColumns.get(idx);
+ String columnName = oracleTableColumn.getName();
+
+ if (insertClause.length() > 0) {
+ insertClause.append(",");
+ }
+ insertClause.append(String.format("target.%s", columnName));
+
+ if (valuesClause.length() > 0) {
+ valuesClause.append(",");
+ }
+ valuesClause.append(String.format("source.%s", columnName));
+
+ if (!OracleUtilities.stringArrayContains(mergeColumnNames, columnName,
+ true)) {
+
+ // If we're performing a merge, then the table is not partitioned. (If
+ // the table was partitioned, we'd be deleting and then inserting
+ // rows.) So exclude the OraOop pseudo columns from the SET list.
+ if (!columnName.equalsIgnoreCase(OracleJdbcConnectorConstants.
+ COLUMN_NAME_EXPORT_PARTITION)
+ && !columnName.equalsIgnoreCase(OracleJdbcConnectorConstants.
+ COLUMN_NAME_EXPORT_SUBPARTITION)
+ && !columnName.equalsIgnoreCase(OracleJdbcConnectorConstants.
+ COLUMN_NAME_EXPORT_MAPPER_ROW)) {
+
+ if (updateClause.length() > 0) {
+ updateClause.append(",");
+ }
+ updateClause.append(String.format("target.%1$s = source.%1$s",
+ columnName));
+
+ }
+ }
+ }
+
+ String sourceClause = valuesClause.toString();
+
+ String sql =
+ String.format("MERGE %7$s INTO %1$s target \n"
+ + "USING (SELECT %8$s * FROM %2$s) source \n" + " ON (%3$s) \n"
+ + "WHEN MATCHED THEN \n" + " UPDATE SET %4$s \n"
+ + "WHEN NOT MATCHED THEN \n" + " INSERT (%5$s) \n"
+ + " VALUES (%6$s)", targetTable.toString(),
+ sourceTable.toString(),
+ generateUpdateKeyColumnsWhereClauseFragment(mergeColumnNames,
+ "target", "source"), updateClause.toString(), insertClause
+ .toString(), sourceClause,
+ parallelizationEnabled ? "/*+ append parallel(target) */" : "",
+ parallelizationEnabled ? "/*+parallel*/" : "");
+
+ // Log the SQL directly: the previous code passed it through
+ // String.format() as the format string, which throws if the generated
+ // SQL ever contains a literal '%'.
+ LOG.info("Merge SQL statement:\n" + sql);
+
+ // MERGE is DML and returns an update count, not a ResultSet; the JDBC
+ // spec requires executeQuery() to throw in that case, so use
+ // executeUpdate() instead.
+ Statement statement = connection.createStatement();
+ int rowsAffected = statement.executeUpdate(sql);
+ LOG.info(String.format(
+ "The number of rows affected by the merge SQL was: %d", rowsAffected));
+ statement.close();
+ }
+
+ /**
+  * Updates every row of targetTable that has a matching row (on
+  * mergeColumnNames) in sourceTable, setting all listed columns from the
+  * source. In the source column list, the OraOop partition pseudo column is
+  * replaced by the job's sysdate literal and the subpartition pseudo column
+  * by the mapper id.
+  */
+ public static void updateTable(Connection connection,
+ OracleTable targetTable, OracleTable sourceTable,
+ String[] mergeColumnNames, OracleTableColumns oracleTableColumns,
+ Object oraOopSysDate, int oraOopMapperId, boolean parallelizationEnabled)
+ throws SQLException {
+
+ // Build "a.col1,a.col2,..." (target) and "b.col1,b.col2,..." (source).
+ StringBuilder targetColumnsClause = new StringBuilder();
+ StringBuilder sourceColumnsClause = new StringBuilder();
+ for (int idx = 0; idx < oracleTableColumns.size(); idx++) {
+ OracleTableColumn oracleTableColumn = oracleTableColumns.get(idx);
+ String columnName = oracleTableColumn.getName();
+
+ if (targetColumnsClause.length() > 0) {
+ targetColumnsClause.append(",");
+ }
+ targetColumnsClause.append(String.format("a.%s", columnName));
+
+ if (sourceColumnsClause.length() > 0) {
+ sourceColumnsClause.append(",");
+ }
+ sourceColumnsClause.append(String.format("b.%s", columnName));
+ }
+
+ String sourceClause = sourceColumnsClause.toString();
+
+ // NOTE(review): replaceAll() treats the pseudo-column name as a regex and
+ // replaces it anywhere in the clause, including as a substring of another
+ // column name - confirm the pseudo-column names cannot collide this way.
+ sourceClause =
+ sourceClause.replaceAll(OracleJdbcConnectorConstants.
+ COLUMN_NAME_EXPORT_PARTITION,
+ String.format("to_date('%s', 'yyyy/mm/dd hh24:mi:ss')",
+ OracleQueries.oraDATEToString(oraOopSysDate,
+ "yyyy/mm/dd hh24:mi:ss")));
+
+ sourceClause =
+ sourceClause.replaceAll(
+ OracleJdbcConnectorConstants.COLUMN_NAME_EXPORT_SUBPARTITION,
+ Integer.toString(oraOopMapperId));
+
+ String sql =
+ String.format("UPDATE %5$s %1$s a \n" + "SET \n" + "(%2$s) \n"
+ + "= (SELECT \n" + "%3$s \n" + "FROM %4$s b \n" + "WHERE %6$s) \n"
+ + "WHERE EXISTS (SELECT null FROM %4$s c " + "WHERE %7$s)",
+ targetTable.toString(), targetColumnsClause.toString(),
+ sourceClause, sourceTable.toString(),
+ parallelizationEnabled ? "/*+ parallel */" : "",
+ generateUpdateKeyColumnsWhereClauseFragment(mergeColumnNames, "b",
+ "a"), generateUpdateKeyColumnsWhereClauseFragment(
+ mergeColumnNames, "c", "a"));
+
+ // NOTE(review): sql is concatenated into the format string here; a
+ // literal '%' in the generated SQL would throw IllegalFormatException.
+ LOG.info(String.format("Update SQL statement:\n" + sql));
+
+ Statement statement = connection.createStatement();
+ int rowsAffected = statement.executeUpdate(sql);
+
+ LOG.info(String.format(
+ "The number of rows affected by the update SQL was: %d", rowsAffected));
+
+ statement.close();
+ }
+
+ /**
+ * Whether new rows should be included in changes table or not.
+ *
+ * OnlyRowsThatDiffer - inner join: only rows present in both tables whose
+ * values differ.
+ * RowsThatDifferPlusNewRows - outer join (Oracle "(+)" operator): also
+ * includes rows that exist only in the table containing updates.
+ */
+ public enum CreateExportChangesTableOptions {
+ OnlyRowsThatDiffer, RowsThatDifferPlusNewRows
+ }
+
+ /**
+  * Creates (and indexes on the join columns) a "changes" table holding the
+  * rows of tableContainingUpdates that differ from their counterparts
+  * (joined on joinColumnNames) in tableToBeUpdated. The OraOop pseudo
+  * columns are excluded from the row comparison since they differ on every
+  * row.
+  *
+  * @return the number of rows in the newly created changes table
+  */
+ public static int createExportChangesTable(Connection connection,
+ OracleTable tableToCreate, String tableToCreateStorageClause,
+ OracleTable tableContainingUpdates, OracleTable tableToBeUpdated,
+ String[] joinColumnNames, CreateExportChangesTableOptions options,
+ boolean parallelizationEnabled) throws SQLException {
+
+ List<String> columnNames =
+ getToTableColumnNames(connection, tableToBeUpdated
+ , true // <- onlyOraOopSupportedTypes
+ , false // <- omitOraOopPseudoColumns
+ );
+
+ StringBuilder columnClause = new StringBuilder(2 * columnNames.size());
+ for (int idx = 0; idx < columnNames.size(); idx++) {
+ if (idx > 0) {
+ columnClause.append(",");
+ }
+ columnClause.append("a." + columnNames.get(idx));
+ }
+
+ StringBuilder rowEqualityClause = new StringBuilder();
+ for (int idx = 0; idx < columnNames.size(); idx++) {
+ String columnName = columnNames.get(idx);
+
+ // We need to omit the OraOop pseudo columns from the SQL statement that
+ // compares the data in the two tables we're interested in. Otherwise,
+ // EVERY row will be considered to be changed, since the values in the
+ // pseudo columns will differ. (i.e. ORAOOP_EXPORT_SYSDATE will differ.)
+ if (columnName.equalsIgnoreCase(OracleJdbcConnectorConstants.
+ COLUMN_NAME_EXPORT_PARTITION)
+ || columnName.equalsIgnoreCase(OracleJdbcConnectorConstants.
+ COLUMN_NAME_EXPORT_SUBPARTITION)
+ || columnName.equalsIgnoreCase(OracleJdbcConnectorConstants.
+ COLUMN_NAME_EXPORT_MAPPER_ROW)) {
+ continue;
+ }
+
+ // Guard on the accumulated clause, not on idx: the previous "idx > 0"
+ // test emitted a leading "OR" (breaking the SQL) whenever the first
+ // column(s) were skipped pseudo columns. Spaces are added around OR.
+ if (rowEqualityClause.length() > 0) {
+ rowEqualityClause.append(" OR ");
+ }
+
+ rowEqualityClause.append(String.format("(a.%1$s <> b.%1$s "
+ + "OR (a.%1$s IS NULL AND b.%1$s IS NOT NULL) "
+ + "OR (a.%1$s IS NOT NULL AND b.%1$s IS NULL))", columnName));
+ }
+
+ String sqlJoin = null;
+ switch (options) {
+
+ case OnlyRowsThatDiffer:
+ sqlJoin = "";
+ break;
+
+ case RowsThatDifferPlusNewRows:
+ sqlJoin = "(+)"; // <- An outer-join will cause the "new" rows to be
+ // included
+ break;
+
+ default:
+ throw new RuntimeException(String.format(
+ "Update %s to cater for the option \"%s\".", OracleUtilities
+ .getCurrentMethodName(), options.toString()));
+ }
+
+ String sql =
+ String.format("CREATE TABLE %1$s \n" + "NOLOGGING %8$s \n" + "%7$s \n"
+ + "AS \n " + "SELECT \n" + "%5$s \n" + "FROM %2$s a, %3$s b \n"
+ + "WHERE (%4$s) \n" + "AND ( \n" + "%6$s \n" + ")", tableToCreate
+ .toString(), tableContainingUpdates.toString(), tableToBeUpdated
+ .toString(), generateUpdateKeyColumnsWhereClauseFragment(
+ joinColumnNames, "a", "b", sqlJoin), columnClause.toString(),
+ rowEqualityClause.toString(), parallelizationEnabled ? "PARALLEL"
+ : "", tableToCreateStorageClause);
+
+ LOG.info(String.format("The SQL to create the changes-table is:\n%s", sql));
+
+ Statement statement = connection.createStatement();
+
+ long start = System.nanoTime();
+ statement.executeUpdate(sql);
+ double timeInSec = (System.nanoTime() - start) / Math.pow(10, 9);
+ LOG.info(String.format("Time spent creating change-table: %f sec.",
+ timeInSec));
+
+ // Index the join columns so the subsequent keyed UPDATE/DELETE is fast.
+ String indexName = tableToCreate.toString().replaceAll("CHG", "IDX");
+ start = System.nanoTime();
+ statement.execute(String.format("CREATE INDEX %s ON %s (%s)", indexName,
+ tableToCreate.toString(), OracleUtilities
+ .stringArrayToCSV(joinColumnNames)));
+ timeInSec = (System.nanoTime() - start) / Math.pow(10, 9);
+ LOG.info(String.format("Time spent creating change-table index: %f sec.",
+ timeInSec));
+
+ int changeTableRowCount = 0;
+
+ ResultSet resultSet =
+ statement.executeQuery(String.format("select count(*) from %s",
+ tableToCreate.toString()));
+ resultSet.next();
+ changeTableRowCount = resultSet.getInt(1);
+ LOG.info(String.format("The number of rows in the change-table is: %d",
+ changeTableRowCount));
+
+ resultSet.close();
+ statement.close();
+ return changeTableRowCount;
+ }
+
+ /**
+  * Deletes every row of tableToDeleteRowsFrom that has a matching row (on
+  * joinColumnNames) in tableContainingRowsToDelete, optionally with an
+  * Oracle parallel hint.
+  */
+ public static void deleteRowsFromTable(Connection connection,
+ OracleTable tableToDeleteRowsFrom,
+ OracleTable tableContainingRowsToDelete, String[] joinColumnNames,
+ boolean parallelizationEnabled) throws SQLException {
+
+ String parallelHint = parallelizationEnabled ? "/*+ parallel */" : "";
+ String keyMatchClause =
+ generateUpdateKeyColumnsWhereClauseFragment(joinColumnNames, "a", "b");
+
+ String sql =
+ String.format("DELETE %4$s FROM %1$s a \n" + "WHERE EXISTS ( \n"
+ + "SELECT null FROM %3$s b WHERE \n" + "%2$s)",
+ tableToDeleteRowsFrom.toString(), keyMatchClause,
+ tableContainingRowsToDelete.toString(), parallelHint);
+
+ LOG.info(String.format("The SQL to delete rows from a table:\n%s", sql));
+
+ Statement deleteStatement = connection.createStatement();
+ int rowsAffected = deleteStatement.executeUpdate(sql);
+ LOG.info(String.format(
+ "The number of rows affected by the delete SQL was: %d", rowsAffected));
+ deleteStatement.close();
+ }
+
+ /**
+  * Inserts all rows of tableContainingRowsToInsert into
+  * tableToInsertRowsInto, selecting the column list of the target table,
+  * optionally with Oracle append/parallel hints.
+  *
+  * oraOopSysDate and oraOopMapperId are currently unused here.
+  */
+ public static void insertRowsIntoExportTable(Connection connection,
+ OracleTable tableToInsertRowsInto,
+ OracleTable tableContainingRowsToInsert, Object oraOopSysDate,
+ int oraOopMapperId, boolean parallelizationEnabled) throws SQLException {
+
+ List<String> columnNames =
+ getTableColumnNames(connection, tableToInsertRowsInto);
+
+ StringBuilder columnClause =
+ new StringBuilder(2 + (2 * columnNames.size()));
+ for (int idx = 0; idx < columnNames.size(); idx++) {
+ if (idx > 0) {
+ columnClause.append(",");
+ }
+ columnClause.append(columnNames.get(idx));
+ }
+
+ String columnsClause = columnClause.toString();
+
+ String sql =
+ String.format("insert %4$s \n" + "into %1$s \n" + "select \n"
+ + "%2$s \n" + "from %3$s", tableToInsertRowsInto.toString(),
+ columnsClause, tableContainingRowsToInsert.toString(),
+ parallelizationEnabled ? "/*+ append parallel */" : "");
+
+ LOG.info(String.format(
+ "The SQL to insert rows from one table into another:\n%s", sql));
+
+ // INSERT is DML and returns an update count, not a ResultSet; the JDBC
+ // spec requires executeQuery() to throw in that case, so use
+ // executeUpdate() instead.
+ Statement statement = connection.createStatement();
+ int rowsAffected = statement.executeUpdate(sql);
+ LOG.info(String.format(
+ "The number of rows affected by the insert SQL was: %d", rowsAffected));
+ statement.close();
+ }
+
+ /**
+  * Reports whether the table has a VALID index whose leading
+  * columnNames.length columns are exactly the given columns, in any order.
+  */
+ public static boolean doesIndexOnColumnsExist(Connection connection,
+ OracleTable oracleTable, String[] columnNames) throws SQLException {
+
+ // Attempts to find an index on the table that *starts* with the N column
+ // names passed.
+ // These columns can be in any order.
+
+ String columnNamesInClause =
+ OracleUtilities.stringArrayToCSV(columnNames, "'");
+
+ // Counts, per index, how many of its first N columns are among the given
+ // names; an index qualifies when that count equals N.
+ String sql =
+ String.format("SELECT b.index_name, \n"
+ + " sum(case when b.column_name in (%1$s) then 1 end) num_cols \n"
+ + "FROM dba_indexes a, dba_ind_columns b \n" + "WHERE \n"
+ + "a.owner = b.index_owner \n"
+ + "AND a.index_name = b.index_name \n" + "AND b.table_owner = ? \n"
+ + "AND b.table_name = ? \n" + "AND a.status = 'VALID' \n"
+ + "AND b.column_position <= ? \n" + "GROUP BY b.index_name \n"
+ + "HAVING sum(case when b.column_name in (%1$s) then 1 end) = ?",
+ columnNamesInClause);
+
+ PreparedStatement statement = connection.prepareStatement(sql);
+ statement.setString(1, oracleTable.getSchema());
+ statement.setString(2, oracleTable.getName());
+ statement.setInt(3, columnNames.length);
+ statement.setInt(4, columnNames.length);
+
+ LOG.debug(String.format("SQL to find an index on the columns %s:\n%s",
+ columnNamesInClause, sql));
+
+ ResultSet resultSet = statement.executeQuery();
+
+ boolean result = false;
+ if (resultSet.next()) {
+ LOG.debug(String
+ .format(
+ "The table %s has an index named %s starting with the column(s) "
+ + "%s (in any order).",
+ oracleTable.toString(), resultSet.getString("index_name"),
+ columnNamesInClause));
+ result = true;
+ }
+
+ resultSet.close();
+ statement.close();
+
+ return result;
+ }
+
+ private static String generateUpdateKeyColumnsWhereClauseFragment(
+ String[] joinColumnNames, String prefix1, String prefix2) {
+
+ return generateUpdateKeyColumnsWhereClauseFragment(joinColumnNames,
+ prefix1, prefix2, "");
+ }
+
+ /**
+  * Builds "prefix1.COL = prefix2.COL <op>" terms, one per join column,
+  * joined by " AND ". sqlJoinOperator (e.g. "(+)" for an outer join, or "")
+  * is appended to every term.
+  */
+ private static String generateUpdateKeyColumnsWhereClauseFragment(
+ String[] joinColumnNames, String prefix1, String prefix2,
+ String sqlJoinOperator) {
+
+ StringBuilder clause = new StringBuilder();
+ boolean firstTerm = true;
+ for (String joinColumnName : joinColumnNames) {
+ if (!firstTerm) {
+ clause.append(" AND ");
+ }
+ firstTerm = false;
+ clause.append(String.format("%1$s.%3$s = %2$s.%3$s %4$s", prefix1,
+ prefix2, joinColumnName, sqlJoinOperator));
+ }
+ return clause.toString();
+ }
+
+ /**
+  * Returns the session's current schema, via
+  * SYS_CONTEXT('USERENV','CURRENT_SCHEMA').
+  */
+ public static String getCurrentSchema(Connection connection)
+ throws SQLException {
+ String sql = "SELECT SYS_CONTEXT('USERENV','CURRENT_SCHEMA') FROM DUAL";
+
+ PreparedStatement statement = connection.prepareStatement(sql);
+
+ ResultSet resultSet = statement.executeQuery();
+
+ // DUAL always yields exactly one row.
+ resultSet.next();
+ String result = resultSet.getString(1);
+
+ resultSet.close();
+ statement.close();
+
+ LOG.info("Current schema is: " + result);
+
+ return result;
+ }
+
+ /**
+  * Returns the schema the table belongs to, falling back to the session's
+  * current schema when the table has none set.
+  */
+ public static String getTableSchema(Connection connection, OracleTable table)
+ throws SQLException {
+ String schema = table.getSchema();
+ if (schema == null || schema.isEmpty()) {
+ return getCurrentSchema(connection);
+ }
+ return schema;
+ }
+
+ /** Returns the database's current system change number from v$database. */
+ public static long getCurrentScn(Connection connection) throws SQLException {
+ PreparedStatement scnQuery =
+ connection.prepareStatement("SELECT current_scn FROM v$database");
+ ResultSet rows = scnQuery.executeQuery();
+
+ rows.next();
+ long scn = rows.getLong(1);
+ rows.close();
+ scnQuery.close();
+
+ return scn;
+ }
+
+ /**
+  * Sets the named bind variable to a long value by reflectively invoking
+  * the Oracle driver's setLongAtName (via methSetLongAtName); unwraps and
+  * rethrows an underlying SQLException, otherwise wraps the reflection
+  * failure in a RuntimeException.
+  */
+ public static void setLongAtName(PreparedStatement statement,
+ String bindName, long bindValue) throws SQLException {
+ try {
+ methSetLongAtName.invoke(statement, bindName, bindValue);
+ } catch (Exception e) {
+ if (e.getCause() instanceof SQLException) {
+ throw (SQLException) e.getCause();
+ } else {
+ throw new RuntimeException("Could not set bind variable", e);
+ }
+ }
+ }
+
+ /**
+  * Sets the named bind variable to a BigDecimal value by reflectively
+  * invoking the Oracle driver's setBigDecimalAtName; unwraps and rethrows
+  * an underlying SQLException, otherwise wraps the reflection failure.
+  */
+ public static void setBigDecimalAtName(PreparedStatement statement,
+ String bindName, BigDecimal bindValue) throws SQLException {
+ try {
+ methSetBigDecimalAtName.invoke(statement, bindName, bindValue);
+ } catch (Exception e) {
+ if (e.getCause() instanceof SQLException) {
+ throw (SQLException) e.getCause();
+ } else {
+ throw new RuntimeException("Could not set bind variable", e);
+ }
+ }
+ }
+
+ /**
+  * Sets the named bind variable to a String value by reflectively invoking
+  * the Oracle driver's setStringAtName; unwraps and rethrows an underlying
+  * SQLException, otherwise wraps the reflection failure.
+  */
+ public static void setStringAtName(PreparedStatement statement,
+ String bindName, String bindValue) throws SQLException {
+ try {
+ methSetStringAtName.invoke(statement, bindName, bindValue);
+ } catch (Exception e) {
+ if (e.getCause() instanceof SQLException) {
+ throw (SQLException) e.getCause();
+ } else {
+ throw new RuntimeException("Could not set bind variable", e);
+ }
+ }
+ }
+
+ /**
+  * Sets the named bind variable to a Timestamp value by reflectively
+  * invoking the Oracle driver's setTimestampAtName; unwraps and rethrows an
+  * underlying SQLException, otherwise wraps the reflection failure.
+  */
+ public static void setTimestampAtName(PreparedStatement statement,
+ String bindName, Timestamp bindValue) throws SQLException {
+ try {
+ methSetTimestampAtName.invoke(statement, bindName, bindValue);
+ } catch (Exception e) {
+ if (e.getCause() instanceof SQLException) {
+ throw (SQLException) e.getCause();
+ } else {
+ throw new RuntimeException("Could not set bind variable", e);
+ }
+ }
+ }
+
+ /**
+  * Sets the named bind variable to a BINARY_DOUBLE value by reflectively
+  * invoking the Oracle driver's setBinaryDoubleAtName; unwraps and rethrows
+  * an underlying SQLException, otherwise wraps the reflection failure.
+  */
+ public static void setBinaryDoubleAtName(PreparedStatement statement,
+ String bindName, double bindValue) throws SQLException {
+ try {
+ methSetBinaryDoubleAtName.invoke(statement, bindName, bindValue);
+ } catch (Exception e) {
+ if (e.getCause() instanceof SQLException) {
+ throw (SQLException) e.getCause();
+ } else {
+ throw new RuntimeException("Could not set bind variable", e);
+ }
+ }
+ }
+
+ /**
+  * Sets the named bind variable to an arbitrary Object value by
+  * reflectively invoking the Oracle driver's setObjectAtName; unwraps and
+  * rethrows an underlying SQLException, otherwise wraps the reflection
+  * failure.
+  */
+ public static void setObjectAtName(PreparedStatement statement,
+ String bindName, Object bindValue) throws SQLException {
+ try {
+ methSetObjectAtName.invoke(statement, bindName, bindValue);
+ } catch (Exception e) {
+ if (e.getCause() instanceof SQLException) {
+ throw (SQLException) e.getCause();
+ } else {
+ throw new RuntimeException("Could not set bind variable", e);
+ }
+ }
+ }
+
+ /**
+  * Sets the named bind variable to a BINARY_FLOAT value by reflectively
+  * invoking the Oracle driver's setBinaryFloatAtName; unwraps and rethrows
+  * an underlying SQLException, otherwise wraps the reflection failure.
+  */
+ public static void setBinaryFloatAtName(PreparedStatement statement,
+ String bindName, float bindValue) throws SQLException {
+ try {
+ methSetBinaryFloatAtName.invoke(statement, bindName, bindValue);
+ } catch (Exception e) {
+ if (e.getCause() instanceof SQLException) {
+ throw (SQLException) e.getCause();
+ } else {
+ throw new RuntimeException("Could not set bind variable", e);
+ }
+ }
+ }
+
+// public static void setIntAtName(PreparedStatement statement, String bindName,
+// int bindValue) throws SQLException {
+// try {
+// methSetIntAtName.invoke(statement, bindName, bindValue);
+// } catch (Exception e) {
+// if (e.getCause() instanceof SQLException) {
+// throw (SQLException) e.getCause();
+// } else {
+// throw new RuntimeException("Could not set bind variable", e);
+// }
+// }
+// }
+
+ /**
+  * Resolves an oracle.jdbc.OracleTypes constant by name (reflectively) and
+  * caches the result in ORACLE_TYPES.
+  *
+  * All cache access happens under the ORACLE_TYPES lock: the previous
+  * double-checked pattern read the HashMap without synchronization while
+  * other threads could be mutating it, which is not thread-safe.
+  *
+  * @throws RuntimeException if no such field exists on the OracleTypes class
+  */
+ public static int getOracleType(String name) {
+ synchronized (ORACLE_TYPES) {
+ Integer result = ORACLE_TYPES.get(name);
+ if (result == null) {
+ try {
+ result = oracleTypesClass.getField(name).getInt(null);
+ ORACLE_TYPES.put(name, result);
+ } catch (Exception e) {
+ throw new RuntimeException("Invalid oracle type specified", e);
+ }
+ }
+ return result;
+ }
+ }
+
+ /**
+  * Maps the given columns of an Oracle table to Sqoop schema Column types.
+  * Prepares (but never executes) a zero-row SELECT over the table and feeds
+  * the JDBC metadata the driver derives for it through
+  * OracleSqlTypesUtils.sqlTypeToSchemaType().
+  */
+ public static List<Column> getColDataTypes(Connection connection,
+ OracleTable table, List<String> colNames) throws SQLException {
+ List<Column> result = new ArrayList<Column>();
+ StringBuilder sb = new StringBuilder();
+ sb.append("SELECT ");
+ for (int idx = 0; idx < colNames.size(); idx++) {
+ if (idx > 0) {
+ sb.append(",");
+ }
+ sb.append(colNames.get(idx));
+ }
+ sb.append(String.format(" FROM %s WHERE 0=1", table.toString()));
+
+ String sql = sb.toString();
+ PreparedStatement statement = connection.prepareStatement(sql);
+ try {
+ // NOTE: relies on the driver describing an unexecuted statement; JDBC
+ // permits getMetaData() to return null before execution - confirm the
+ // Oracle driver in use always supplies it here.
+ ResultSetMetaData metadata = statement.getMetaData();
+ int numCols = metadata.getColumnCount();
+ for(int i = 1; i < numCols + 1; i++) {
+ String colName = metadata.getColumnName(i);
+ Column oracleColumn = OracleSqlTypesUtils.sqlTypeToSchemaType(
+ metadata.getColumnType(i), colName,
+ metadata.getPrecision(i), metadata.getScale(i));
+
+ result.add(oracleColumn);
+ }
+ } finally {
+ statement.close();
+ }
+ return result;
+ }
+}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleSqlTypesUtils.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleSqlTypesUtils.java b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleSqlTypesUtils.java
new file mode 100644
index 0000000..e691557
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleSqlTypesUtils.java
@@ -0,0 +1,176 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sqoop.connector.jdbc.oracle.util;
+
+import java.sql.Types;
+
+import org.apache.log4j.Logger;
+import org.apache.sqoop.schema.type.Binary;
+import org.apache.sqoop.schema.type.Bit;
+import org.apache.sqoop.schema.type.Column;
+import org.apache.sqoop.schema.type.Date;
+import org.apache.sqoop.schema.type.DateTime;
+import org.apache.sqoop.schema.type.Decimal;
+import org.apache.sqoop.schema.type.FixedPoint;
+import org.apache.sqoop.schema.type.FloatingPoint;
+import org.apache.sqoop.schema.type.Text;
+import org.apache.sqoop.schema.type.Time;
+import org.apache.sqoop.schema.type.Unknown;
+
+public class OracleSqlTypesUtils {
+
+ private static final Logger LOG =
+ Logger.getLogger(OracleSqlTypesUtils.class);
+
+ public static Column sqlTypeToSchemaType(int sqlType, String columnName,
+ int precision, int scale) {
+ Column result = null;
+ switch (sqlType) {
+ case Types.SMALLINT:
+ case Types.TINYINT:
+ // only supports signed values
+ result = new FixedPoint(columnName, 2L, true);
+ break;
+ case Types.INTEGER:
+ // only supports signed values
+ result = new FixedPoint(columnName, 4L, true);
+ break;
+ case Types.BIGINT:
+ result = new FixedPoint(columnName, 8L, true);
+ break;
+
+ case Types.CLOB:
+ case Types.VARCHAR:
+ case Types.CHAR:
+ case Types.LONGVARCHAR:
+ case Types.NVARCHAR:
+ case Types.NCHAR:
+ case Types.LONGNVARCHAR:
+ result = new Text(columnName);
+ break;
+
+ case Types.DATE:
+ result = new Date(columnName);
+ break;
+
+ case Types.TIME:
+ result = new Time(columnName, true);
+ break;
+
+ case Types.TIMESTAMP:
+ result = new DateTime(columnName, true, false);
+ break;
+
+ case Types.FLOAT:
+ case Types.REAL:
+ result = new FloatingPoint(columnName, 4L);
+ break;
+ case Types.DOUBLE:
+ result = new FloatingPoint(columnName, 8L);
+ break;
+
+ case Types.NUMERIC:
+ case Types.DECIMAL:
+ result = new Decimal(columnName, precision, scale);
+ break;
+
+ case Types.BIT:
+ case Types.BOOLEAN:
+ result = new Bit(columnName);
+ break;
+
+ case Types.BINARY:
+ case Types.VARBINARY:
+ case Types.BLOB:
+ case Types.LONGVARBINARY:
+ result = new Binary(columnName);
+ break;
+
+ default:
+ result = new Unknown(columnName,(long)sqlType);
+ }
+
+ if (sqlType == OracleQueries.getOracleType("TIMESTAMP")) {
+ result = new DateTime(columnName, true, false);
+ }
+
+ if (sqlType == OracleQueries.getOracleType("TIMESTAMPTZ")) {
+ result = new DateTime(columnName, true, true);
+ }
+
+ if (sqlType == OracleQueries.getOracleType("TIMESTAMPLTZ")) {
+ result = new DateTime(columnName, true, true);
+ }
+
+ /*
+ * http://www.oracle.com/technology/sample_code/tech/java/sqlj_jdbc/files
+ * /oracle10g/ieee/Readme.html
+ *
+ * BINARY_DOUBLE is a 64-bit, double-precision floating-point number
+ * datatype. (IEEE 754) Each BINARY_DOUBLE value requires 9 bytes, including
+ * a length byte. A 64-bit double format number X is divided as sign s 1-bit
+ * exponent e 11-bits fraction f 52-bits
+ *
+ * BINARY_FLOAT is a 32-bit, single-precision floating-point number
+ * datatype. (IEEE 754) Each BINARY_FLOAT value requires 5 bytes, including
+ * a length byte. A 32-bit single format number X is divided as sign s 1-bit
+ * exponent e 8-bits fraction f 23-bits
+ */
+ if (sqlType == OracleQueries.getOracleType("BINARY_FLOAT")) {
+ // http://people.uncw.edu/tompkinsj/133/numbers/Reals.htm
+ result = new FloatingPoint(columnName, 4L);
+ }
+
+ if (sqlType == OracleQueries.getOracleType("BINARY_DOUBLE")) {
+ // http://people.uncw.edu/tompkinsj/133/numbers/Reals.htm
+ result = new FloatingPoint(columnName, 8L);
+ }
+
+ if (sqlType == OracleQueries.getOracleType("STRUCT")) {
+ // E.g. URITYPE
+ result = new Text(columnName);
+ }
+
+ if (result == null || result instanceof Unknown) {
+
+ // For constant values, refer to:
+ // http://oracleadvisor.com/documentation/oracle/database/11.2/
+ // appdev.112/e13995/constant-values.html#oracle_jdbc
+
+ if (sqlType == OracleQueries.getOracleType("BFILE")
+ || sqlType == OracleQueries.getOracleType("NCLOB")
+ || sqlType == OracleQueries.getOracleType("NCHAR")
+ || sqlType == OracleQueries.getOracleType("NVARCHAR")
+ || sqlType == OracleQueries.getOracleType("ROWID")
+ || sqlType == OracleQueries.getOracleType("INTERVALYM")
+ || sqlType == OracleQueries.getOracleType("INTERVALDS")
+ || sqlType == OracleQueries.getOracleType("OTHER")) {
+ result = new Text(columnName);
+ }
+
+ }
+
+ if (result == null || result instanceof Unknown) {
+ LOG.warn(String.format("%s should be updated to cater for data-type: %d",
+ OracleUtilities.getCurrentMethodName(), sqlType));
+ }
+
+ return result;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleTable.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleTable.java b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleTable.java
new file mode 100644
index 0000000..972e393
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleTable.java
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sqoop.connector.jdbc.oracle.util;
+
+/**
+ * Contains details about an Oracle table.
+ */
/**
 * Identifies an Oracle table by an optional schema (owner) and a table name.
 */
public class OracleTable {

  private String schema;
  private String name;

  public OracleTable() {

  }

  public OracleTable(String schema, String name) {
    setSchema(schema);
    setName(name);
  }

  public OracleTable(String name) {
    setName(name);
  }

  public String getSchema() {
    return schema;
  }

  private void setSchema(String newSchema) {
    this.schema = newSchema;
  }

  public String getName() {
    return name;
  }

  private void setName(String newName) {
    this.name = newName;
  }

  /**
   * Renders the table as a quoted Oracle identifier, e.g. {@code "SCOTT"."EMP"},
   * omitting the schema qualifier when no schema was supplied.
   */
  @Override
  public String toString() {
    StringBuilder text = new StringBuilder();
    boolean hasSchema = getSchema() != null && !getSchema().isEmpty();
    if (hasSchema) {
      text.append("\"").append(getSchema()).append("\".");
    }
    text.append("\"").append(getName()).append("\"");
    return text.toString();
  }

}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleTableColumn.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleTableColumn.java b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleTableColumn.java
new file mode 100644
index 0000000..ac6fb8b
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleTableColumn.java
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sqoop.connector.jdbc.oracle.util;
+
+/**
+ * Contains details about a column in an Oracle table.
+ */
/**
 * Describes a single column of an Oracle table: its name, the DATA_TYPE
 * string as reported by dba_tab_columns, and the numeric Oracle type code.
 */
public class OracleTableColumn {

  private String name;
  private String dataType; // the data_type value from dba_tab_columns
  private int oracleType;

  public OracleTableColumn(String name, String dataType) {
    setName(name);
    setDataType(dataType);
  }

  public String getName() {
    return name;
  }

  public void setName(String newName) {
    this.name = newName;
  }

  public String getDataType() {
    return dataType;
  }

  public void setDataType(String newDataType) {
    this.dataType = newDataType;
  }

  /** The numeric Oracle type code; defaults to 0 until explicitly set. */
  public int getOracleType() {
    return oracleType;
  }

  public void setOracleType(int newOracleType) {
    this.oracleType = newOracleType;
  }
}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleTableColumns.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleTableColumns.java b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleTableColumns.java
new file mode 100644
index 0000000..61fad5d
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleTableColumns.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sqoop.connector.jdbc.oracle.util;
+
+import java.util.Iterator;
+
+/**
+ * Contains a list of Oracle columns.
+ */
+public class OracleTableColumns extends
+ OracleGenerics.ObjectList<OracleTableColumn> {
+
+ public OracleTableColumn findColumnByName(String columnName) {
+
+ OracleTableColumn result;
+
+ Iterator<OracleTableColumn> iterator = this.iterator();
+ while (iterator.hasNext()) {
+ result = iterator.next();
+ if (result.getName().equals(columnName)) {
+ return result;
+ }
+ }
+ return null;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleTablePartition.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleTablePartition.java b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleTablePartition.java
new file mode 100644
index 0000000..43e8396
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleTablePartition.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sqoop.connector.jdbc.oracle.util;
+
+/**
+ * Contains details about a partition for an Oracle table.
+ */
/**
 * Describes one partition (or sub-partition) of an Oracle table.
 */
public class OracleTablePartition {

  private String name;
  private boolean isSubPartition;

  public OracleTablePartition(String name, boolean isSubPartition) {
    setName(name);
    setSubPartition(isSubPartition);
  }

  public String getName() {
    return name;
  }

  public void setName(String newName) {
    this.name = newName;
  }

  /** Whether this entry represents a sub-partition rather than a partition. */
  public boolean isSubPartition() {
    return isSubPartition;
  }

  public void setSubPartition(boolean newIsSubPartition) {
    this.isSubPartition = newIsSubPartition;
  }

}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleTablePartitions.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleTablePartitions.java b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleTablePartitions.java
new file mode 100644
index 0000000..6140d7b
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleTablePartitions.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sqoop.connector.jdbc.oracle.util;
+
+import java.util.Iterator;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+/**
+ * Contains a list of Oracle table partitions.
+ */
+public class OracleTablePartitions extends
+ OracleGenerics.ObjectList<OracleTablePartition> {
+
+ public OracleTablePartition findPartitionByName(String partitionName) {
+
+ OracleTablePartition result;
+
+ Iterator<OracleTablePartition> iterator = this.iterator();
+ while (iterator.hasNext()) {
+ result = iterator.next();
+ if (result.getName().equals(partitionName)) {
+ return result;
+ }
+ }
+ return null;
+ }
+
+ public OracleTablePartition findPartitionByRegEx(String regEx) {
+
+ OracleTablePartition result;
+
+ Pattern pattern = Pattern.compile(regEx);
+
+ Iterator<OracleTablePartition> iterator = this.iterator();
+ while (iterator.hasNext()) {
+ result = iterator.next();
+ Matcher matcher = pattern.matcher(result.getName());
+ if (matcher.find()) {
+ return result;
+ }
+ }
+ return null;
+ }
+
+}
[6/6] sqoop git commit: SQOOP-2595: Add Oracle connector to Sqoop 2
Posted by ja...@apache.org.
SQOOP-2595: Add Oracle connector to Sqoop 2
(David Robson via Jarek Jarcec Cecho)
Project: http://git-wip-us.apache.org/repos/asf/sqoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/sqoop/commit/fa3c77b6
Tree: http://git-wip-us.apache.org/repos/asf/sqoop/tree/fa3c77b6
Diff: http://git-wip-us.apache.org/repos/asf/sqoop/diff/fa3c77b6
Branch: refs/heads/sqoop2
Commit: fa3c77b6a8352f68ec429164f48aee00ae2480d8
Parents: 2a9ae31
Author: Jarek Jarcec Cecho <ja...@apache.org>
Authored: Thu Nov 5 09:40:31 2015 -0800
Committer: Jarek Jarcec Cecho <ja...@apache.org>
Committed: Thu Nov 5 09:40:31 2015 -0800
----------------------------------------------------------------------
connector/connector-oracle-jdbc/pom.xml | 134 ++
.../oracle/OracleJdbcCommonInitializer.java | 477 +++++
.../jdbc/oracle/OracleJdbcConnector.java | 92 +
.../oracle/OracleJdbcConnectorConstants.java | 493 +++++
.../oracle/OracleJdbcConnectorUpgrader.java | 43 +
.../jdbc/oracle/OracleJdbcExtractor.java | 361 ++++
.../jdbc/oracle/OracleJdbcFromDestroyer.java | 36 +
.../jdbc/oracle/OracleJdbcFromInitializer.java | 90 +
.../connector/jdbc/oracle/OracleJdbcLoader.java | 615 +++++++
.../jdbc/oracle/OracleJdbcPartition.java | 183 ++
.../jdbc/oracle/OracleJdbcPartitioner.java | 252 +++
.../jdbc/oracle/OracleJdbcToDestroyer.java | 273 +++
.../jdbc/oracle/OracleJdbcToInitializer.java | 498 +++++
.../oracle/configuration/ConnectionConfig.java | 78 +
.../oracle/configuration/FromJobConfig.java | 61 +
.../configuration/FromJobConfiguration.java | 33 +
.../oracle/configuration/LinkConfiguration.java | 34 +
.../jdbc/oracle/configuration/ToJobConfig.java | 64 +
.../configuration/ToJobConfiguration.java | 33 +
.../jdbc/oracle/util/OracleActiveInstance.java | 44 +
.../oracle/util/OracleConnectionFactory.java | 246 +++
.../jdbc/oracle/util/OracleDataChunk.java | 48 +
.../jdbc/oracle/util/OracleDataChunkExtent.java | 109 ++
.../oracle/util/OracleDataChunkPartition.java | 85 +
.../jdbc/oracle/util/OracleGenerics.java | 64 +
.../jdbc/oracle/util/OracleJdbcUrl.java | 244 +++
.../jdbc/oracle/util/OracleQueries.java | 1721 ++++++++++++++++++
.../jdbc/oracle/util/OracleSqlTypesUtils.java | 176 ++
.../connector/jdbc/oracle/util/OracleTable.java | 68 +
.../jdbc/oracle/util/OracleTableColumn.java | 59 +
.../jdbc/oracle/util/OracleTableColumns.java | 43 +
.../jdbc/oracle/util/OracleTablePartition.java | 50 +
.../jdbc/oracle/util/OracleTablePartitions.java | 62 +
.../jdbc/oracle/util/OracleUtilities.java | 1446 +++++++++++++++
.../jdbc/oracle/util/OracleVersion.java | 84 +
.../oracle-jdbc-connector-config.properties | 136 ++
.../main/resources/sqoopconnector.properties | 18 +
.../jdbc/oracle/TestOracleJdbcPartitioner.java | 102 ++
.../jdbc/oracle/TestOracleJdbcUrl.java | 249 +++
.../connector/jdbc/oracle/TestOracleTable.java | 42 +
.../jdbc/oracle/TestOracleUtilities.java | 613 +++++++
.../OracleConnectionFactoryTest.java | 497 +++++
.../oracle/integration/OracleQueriesTest.java | 49 +
.../jdbc/oracle/integration/OracleTestCase.java | 41 +
connector/pom.xml | 1 +
pom.xml | 11 +
server/pom.xml | 5 +
47 files changed, 10163 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/pom.xml
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/pom.xml b/connector/connector-oracle-jdbc/pom.xml
new file mode 100644
index 0000000..325790d
--- /dev/null
+++ b/connector/connector-oracle-jdbc/pom.xml
@@ -0,0 +1,134 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.apache.sqoop</groupId>
+ <artifactId>connector</artifactId>
+ <version>2.0.0-SNAPSHOT</version>
+ </parent>
+
+ <groupId>org.apache.sqoop.connector</groupId>
+ <artifactId>sqoop-connector-oracle-jdbc</artifactId>
+ <name>Sqoop Oracle JDBC Connector</name>
+
+ <dependencies>
+
+ <dependency>
+ <groupId>org.apache.sqoop</groupId>
+ <artifactId>connector-sdk</artifactId>
+ </dependency>
+
+ <!-- Test dependencies -->
+ <dependency>
+ <groupId>org.testng</groupId>
+ <artifactId>testng</artifactId>
+ <scope>test</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>org.apache.sqoop</groupId>
+ <artifactId>sqoop-common-test</artifactId>
+ <scope>test</scope>
+ </dependency>
+
+ </dependencies>
+
+ <build>
+ <finalName>sqoop</finalName>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-jar-plugin</artifactId>
+ <executions>
+ <execution>
+ <goals>
+ <goal>test-jar</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <configuration>
+ <excludedGroups>oracle</excludedGroups>
+
+ <excludes>
+ <exclude>**/integration/**</exclude>
+ </excludes>
+ </configuration>
+ <executions>
+ <execution>
+ <id>integration-test</id>
+ <goals>
+ <goal>test</goal>
+ </goals>
+ <phase>integration-test</phase>
+ <configuration>
+ <excludes>
+ <exclude>none</exclude>
+ </excludes>
+ <includes>
+ <include>**/integration/**</include>
+ </includes>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+
+ <profiles>
+ <profile>
+ <id>jdbc-oracle</id>
+
+ <activation>
+ <property>
+ <name>jdbc.oracle</name>
+ </property>
+ </activation>
+
+ <dependencies>
+ <dependency>
+ <groupId>com.oracle</groupId>
+ <artifactId>ojdbc14</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+
+ <configuration>
+ <excludedGroups>none</excludedGroups>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+ </profile>
+ </profiles>
+
+</project>
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcCommonInitializer.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcCommonInitializer.java b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcCommonInitializer.java
new file mode 100644
index 0000000..1fd95c0
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcCommonInitializer.java
@@ -0,0 +1,477 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sqoop.connector.jdbc.oracle;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Date;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.log4j.Logger;
+import org.apache.sqoop.common.MutableContext;
+import org.apache.sqoop.connector.jdbc.oracle.configuration.ConnectionConfig;
+import org.apache.sqoop.connector.jdbc.oracle.configuration.LinkConfiguration;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleActiveInstance;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleConnectionFactory;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleJdbcUrl;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleQueries;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleTable;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleUtilities;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleUtilities.JdbcOracleThinConnectionParsingError;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleVersion;
+import org.apache.sqoop.job.etl.Initializer;
+import org.apache.sqoop.job.etl.InitializerContext;
+import org.apache.sqoop.schema.Schema;
+import org.apache.sqoop.schema.type.Column;
+
+public class OracleJdbcCommonInitializer<JobConfiguration> extends Initializer<LinkConfiguration, JobConfiguration> {
+
+ private static final Logger LOG =
+ Logger.getLogger(OracleJdbcCommonInitializer.class);
+
+ protected Connection connection;
+ protected OracleTable table;
+ protected int numMappers = 8;
+
/**
 * Opens the JDBC connection used by this initializer, built from the
 * connection settings held in the link configuration. Subclasses may
 * override this to establish the connection differently.
 *
 * @throws SQLException if the Oracle connection cannot be established
 */
public void connect(InitializerContext context,
    LinkConfiguration linkConfiguration,
    JobConfiguration jobConfiguration) throws SQLException {
  connection = OracleConnectionFactory.makeConnection(
      linkConfiguration.connectionConfig);
}
+
/**
 * Performs the initialization shared by FROM and TO Oracle jobs: logs a
 * welcome banner, connects to the database, records the Oracle session
 * "action" name in the job context, logs the database version, generates
 * the per-mapper JDBC connection details, and shows the user how to kill
 * the job from the Oracle side.
 *
 * @throws RuntimeException if the database connection cannot be established
 */
@Override
public void initialize(InitializerContext context,
    LinkConfiguration linkConfiguration,
    JobConfiguration jobConfiguration) {
  showUserTheOraOopWelcomeMessage();

  try {
    connect(context, linkConfiguration, jobConfiguration);
  } catch (SQLException ex) {
    throw new RuntimeException(String.format(
        "Unable to connect to the Oracle database at %s\nError:%s",
        linkConfiguration.connectionConfig.connectionString, ex
        .getMessage()), ex);
  }

  // Generate the "action" name that we'll assign to our Oracle sessions
  // so that the user knows which Oracle sessions belong to OraOop...
  // (the username stands in for the job name until it is available here)
  //TODO: Get the job name
  context.getContext().setString(
      OracleJdbcConnectorConstants.ORACLE_SESSION_ACTION_NAME,
      getOracleSessionActionName(
          linkConfiguration.connectionConfig.username));

  //TODO: Don't think this can be done anymore
  //OraOopUtilities.appendJavaSecurityEgd(sqoopOptions.getConf());

  // Get the Oracle database version... Failure here is deliberately
  // non-fatal: the version is only logged for diagnostics.
  try {
    OracleVersion oracleVersion =
        OracleQueries.getOracleVersion(connection);
    LOG.info(String.format("Oracle Database version: %s",
        oracleVersion.getBanner()));
  } catch (SQLException ex) {
    LOG.error("Unable to obtain the Oracle database version.", ex);
  }

  // Generate the JDBC URLs to be used by each mapper...
  setMapperConnectionDetails(linkConfiguration.connectionConfig,
      context.getContext());

  // Show the user the Oracle command that can be used to kill this
  // OraOop
  // job via Oracle...
  showUserTheOracleCommandToKillOraOop(context.getContext());
}
+
/**
 * Builds the Sqoop schema for the job's table by connecting to Oracle,
 * listing the table's column names, and mapping each column's SQL type to
 * a Sqoop column type.
 *
 * NOTE(review): relies on the {@code table} field having been populated by
 * a subclass before this is called — confirm against the FROM/TO
 * initializers.
 *
 * @return a Schema named after the table, with one Column per table column
 * @throws RuntimeException if the connection or column lookup fails
 */
@Override
public Schema getSchema(InitializerContext context,
    LinkConfiguration linkConfiguration,
    JobConfiguration jobConfiguration) {
  try {
    connect(context, linkConfiguration, jobConfiguration);
  } catch (SQLException ex) {
    throw new RuntimeException(String.format(
        "Unable to connect to the Oracle database at %s\n"
            + "Error:%s", linkConfiguration.connectionConfig.connectionString,
        ex.getMessage()), ex);
  }

  Schema schema = new Schema(table.toString());

  try {
    List<String> colNames = OracleQueries.getToTableColumnNames(
        connection, table, true, true);

    List<Column> columnTypes =
        OracleQueries.getColDataTypes(connection, table, colNames);

    for(Column column : columnTypes) {
      schema.addColumn(column);
    }

    return schema;
  } catch(Exception e) {
    throw new RuntimeException(
        "Could not determine columns in Oracle Table.", e);
  }
}
+
+ private void showUserTheOraOopWelcomeMessage() {
+
+ String msg1 =
+ String.format("Using %s",
+ OracleJdbcConnectorConstants.ORACLE_SESSION_MODULE_NAME);
+
+ int longestMessage = msg1.length();
+
+ msg1 = StringUtils.rightPad(msg1, longestMessage);
+
+ char[] asterisks = new char[longestMessage + 8];
+ Arrays.fill(asterisks, '*');
+
+ String msg =
+ String.format("\n" + "%1$s\n" + "*** %2$s ***\n" + "%1$s", new String(
+ asterisks), msg1);
+ LOG.info(msg);
+ }
+
+ private String getOracleSessionActionName(String jobName) {
+
+ String timeStr =
+ (new SimpleDateFormat("yyyyMMddHHmmsszzz")).format(new Date());
+
+ String result = String.format("%s %s", jobName, timeStr);
+
+ // NOTE: The "action" column of v$session is only a 32 character column.
+ // Therefore we need to ensure that the string returned by this
+ // method does not exceed 32 characters...
+ if (result.length() > 32) {
+ result = result.substring(0, 32).trim();
+ }
+
+ return result;
+ }
+
+ private void setMapperConnectionDetails(ConnectionConfig connectionConfig,
+ MutableContext context) {
+
+ // Query v$active_instances to get a list of all instances in the Oracle RAC
+ // (assuming this *could* be a RAC)...
+ List<OracleActiveInstance> activeInstances = null;
+ try {
+ activeInstances =
+ OracleQueries.getOracleActiveInstances(connection);
+ } catch (SQLException ex) {
+ throw new RuntimeException(
+ "An error was encountered when attempting to determine the "
+ + "configuration of the Oracle RAC.",
+ ex);
+ }
+
+ if (activeInstances == null) {
+ LOG.info("This Oracle database is not a RAC.");
+ } else {
+ LOG.info("This Oracle database is a RAC.");
+ }
+
+ // Is dynamic JDBC URL generation disabled?...
+ if (OracleUtilities.oracleJdbcUrlGenerationDisabled(connectionConfig)) {
+ LOG.info(String
+ .format(
+ "%s will not use dynamically generated JDBC URLs - this feature "
+ + "has been disabled.",
+ OracleJdbcConnectorConstants.CONNECTOR_NAME));
+ return;
+ }
+
+ boolean generateRacBasedJdbcUrls = false;
+
+ // Decide whether this is a multi-instance RAC, and whether we need to do
+ // anything more...
+ if (activeInstances != null) {
+ generateRacBasedJdbcUrls = true;
+
+ if (activeInstances.size() < OracleJdbcConnectorConstants.
+ MIN_NUM_RAC_ACTIVE_INSTANCES_FOR_DYNAMIC_JDBC_URLS) {
+ LOG.info(String.format(
+ "There are only %d active instances in the Oracle RAC. "
+ + "%s will not bother utilizing dynamically generated JDBC URLs.",
+ activeInstances.size(),
+ OracleJdbcConnectorConstants.CONNECTOR_NAME));
+ generateRacBasedJdbcUrls = false;
+ }
+ }
+
+ // E.g. jdbc:oracle:thin:@localhost.localdomain:1521:orcl
+ String jdbcConnectStr = connectionConfig.connectionString;
+
+ // Parse the JDBC URL to obtain the port number for the TNS listener...
+ String jdbcHost = "";
+ int jdbcPort = 0;
+ String jdbcSid = "";
+ String jdbcService = "";
+ String jdbcTnsName = "";
+ try {
+
+ OracleJdbcUrl oraOopJdbcUrl = new OracleJdbcUrl(jdbcConnectStr);
+ OracleUtilities.JdbcOracleThinConnection jdbcConnection =
+ oraOopJdbcUrl.parseJdbcOracleThinConnectionString();
+ jdbcHost = jdbcConnection.getHost();
+ jdbcPort = jdbcConnection.getPort();
+ jdbcSid = jdbcConnection.getSid();
+ jdbcService = jdbcConnection.getService();
+ jdbcTnsName = jdbcConnection.getTnsName();
+ } catch (JdbcOracleThinConnectionParsingError ex) {
+ LOG.info(String.format(
+ "Unable to parse the JDBC connection URL \"%s\" as a connection "
+ + "that uses the Oracle 'thin' JDBC driver.\n"
+ + "This problem prevents %s from being able to dynamically generate "
+ + "JDBC URLs that specify 'dedicated server connections' or spread "
+ + "mapper sessions across multiple Oracle instances.\n"
+ + "If the JDBC driver-type is 'OCI' (instead of 'thin'), then "
+ + "load-balancing should be appropriately managed automatically.",
+ jdbcConnectStr, OracleJdbcConnectorConstants.CONNECTOR_NAME, ex));
+ return;
+ }
+
+ if (generateRacBasedJdbcUrls) {
+
+ // Retrieve the Oracle service name to use when connecting to the RAC...
+ String oracleServiceName = connectionConfig.racServiceName;
+
+ // Generate JDBC URLs for each of the mappers...
+ if (!oracleServiceName.isEmpty()) {
+ if (!generateRacJdbcConnectionUrlsByServiceName(jdbcHost, jdbcPort,
+ oracleServiceName, connectionConfig, context)) {
+ throw new RuntimeException(String.format(
+ "Unable to connect to the Oracle database at %s "
+ + "via the service name \"%s\".", jdbcConnectStr,
+ oracleServiceName));
+ }
+ } else {
+ generateJdbcConnectionUrlsByActiveInstance(activeInstances, jdbcPort,
+ connectionConfig, context);
+ }
+ } else {
+ generateJdbcConnectionUrlsByTnsnameSidOrService(jdbcHost, jdbcPort,
+ jdbcSid, jdbcService, jdbcTnsName, connectionConfig, context);
+ }
+
+ }
+
+ private boolean generateRacJdbcConnectionUrlsByServiceName(String hostName,
+ int port, String serviceName, ConnectionConfig connectionConfig,
+ MutableContext context) {
+
+ boolean result = false;
+ String jdbcUrl =
+ OracleUtilities.generateOracleServiceNameJdbcUrl(hostName, port,
+ serviceName);
+
+ if (testDynamicallyGeneratedOracleRacInstanceConnection(jdbcUrl,
+ connectionConfig.username, connectionConfig.password,
+ connectionConfig.jdbcProperties
+ , false // <- ShowInstanceSysTimestamp
+ , "" // <- instanceDescription
+ )) {
+
+ LOG.info(String.format(
+ "%s will load-balance sessions across the Oracle RAC instances "
+ + "by connecting each mapper to the Oracle Service \"%s\".",
+ OracleJdbcConnectorConstants.CONNECTOR_NAME, serviceName));
+
+ // Now store these connection strings in such a way that each mapper knows
+ // which one to use...
+ for (int idxMapper = 0; idxMapper < numMappers; idxMapper++) {
+ storeJdbcUrlForMapper(idxMapper, jdbcUrl, context);
+ }
+ result = true;
+ }
+ return result;
+ }
+
+ private void generateJdbcConnectionUrlsByTnsnameSidOrService(String hostName,
+ int port, String sid, String serviceName, String tnsName,
+ ConnectionConfig connectionConfig, MutableContext context) {
+
+ String jdbcUrl = null;
+ if (tnsName != null && !tnsName.isEmpty()) {
+ jdbcUrl = OracleUtilities.generateOracleTnsNameJdbcUrl(tnsName);
+ } else if (sid != null && !sid.isEmpty()) {
+ jdbcUrl = OracleUtilities.generateOracleSidJdbcUrl(hostName, port, sid);
+ } else {
+ jdbcUrl =
+ OracleUtilities.generateOracleServiceNameJdbcUrl(hostName, port,
+ serviceName);
+ }
+
+ // Now store these connection strings in such a way that each mapper knows
+ // which one to use...
+ for (int idxMapper = 0; idxMapper < numMappers; idxMapper++) {
+ storeJdbcUrlForMapper(idxMapper, jdbcUrl, context);
+ }
+ }
+
+ private void
+ generateJdbcConnectionUrlsByActiveInstance(
+ List<OracleActiveInstance> activeInstances, int jdbcPort,
+ ConnectionConfig connectionConfig, MutableContext context) {
+
+ // Generate JDBC URLs for each of the instances in the RAC...
+ ArrayList<OracleUtilities.JdbcOracleThinConnection>
+ jdbcOracleActiveThinConnections =
+ new ArrayList<OracleUtilities.JdbcOracleThinConnection>(
+ activeInstances.size());
+
+ for (OracleActiveInstance activeInstance : activeInstances) {
+
+ OracleUtilities.JdbcOracleThinConnection
+ jdbcActiveInstanceThinConnection =
+ new OracleUtilities.JdbcOracleThinConnection(
+ activeInstance.getHostName(),
+ jdbcPort, activeInstance.getInstanceName(), "", "");
+
+ if (testDynamicallyGeneratedOracleRacInstanceConnection(
+ jdbcActiveInstanceThinConnection.toString(),
+ connectionConfig.username,
+ connectionConfig.password, connectionConfig.jdbcProperties,
+ true, activeInstance.getInstanceName())) {
+ jdbcOracleActiveThinConnections.add(jdbcActiveInstanceThinConnection);
+ }
+ }
+
+ // If there are multiple JDBC URLs that work okay for the RAC, then we'll
+ // make use of them...
+ if (jdbcOracleActiveThinConnections.size() < OracleJdbcConnectorConstants.
+ MIN_NUM_RAC_ACTIVE_INSTANCES_FOR_DYNAMIC_JDBC_URLS) {
+ LOG.info(String
+ .format(
+ "%s will not attempt to load-balance sessions across instances "
+ + "of an Oracle RAC - as multiple JDBC URLs to the "
+ + "Oracle RAC could not be dynamically generated.",
+ OracleJdbcConnectorConstants.CONNECTOR_NAME));
+ return;
+ } else {
+ StringBuilder msg = new StringBuilder();
+ msg.append(String
+ .format(
+ "%s will load-balance sessions across the following instances of"
+ + "the Oracle RAC:\n",
+ OracleJdbcConnectorConstants.CONNECTOR_NAME));
+
+ for (OracleUtilities.JdbcOracleThinConnection thinConnection
+ : jdbcOracleActiveThinConnections) {
+ msg.append(String.format("\tInstance: %s \t URL: %s\n",
+ thinConnection.getSid(), thinConnection.toString()));
+ }
+ LOG.info(msg.toString());
+ }
+
+ // Now store these connection strings in such a way that each mapper knows
+ // which one to use...
+ int racInstanceIdx = 0;
+ OracleUtilities.JdbcOracleThinConnection thinUrl;
+ for (int idxMapper = 0; idxMapper < numMappers; idxMapper++) {
+ if (racInstanceIdx > jdbcOracleActiveThinConnections.size() - 1) {
+ racInstanceIdx = 0;
+ }
+ thinUrl = jdbcOracleActiveThinConnections.get(racInstanceIdx);
+ racInstanceIdx++;
+ storeJdbcUrlForMapper(idxMapper, thinUrl.toString(), context);
+ }
+ }
+
+ private void storeJdbcUrlForMapper(int mapperIdx, String jdbcUrl,
+ MutableContext context) {
+
+ // Now store these connection strings in such a way that each mapper knows
+ // which one to use...
+ String mapperJdbcUrlPropertyName =
+ OracleUtilities.getMapperJdbcUrlPropertyName(mapperIdx);
+ LOG.debug("Setting mapper url " + mapperJdbcUrlPropertyName + " = "
+ + jdbcUrl);
+ context.setString(mapperJdbcUrlPropertyName, jdbcUrl);
+ }
+
+ private boolean testDynamicallyGeneratedOracleRacInstanceConnection(
+ String url, String userName, String password,
+ Map<String, String> jdbcProperties,
+ boolean showInstanceSysTimestamp, String instanceDescription) {
+
+ boolean result = false;
+
+ // Test the connection...
+ try {
+ Properties additionalProps = new Properties();
+ if(jdbcProperties != null) {
+ additionalProps.putAll(jdbcProperties);
+ }
+ Connection testConnection =
+ OracleConnectionFactory.createOracleJdbcConnection(
+ OracleJdbcConnectorConstants.ORACLE_JDBC_DRIVER_CLASS,
+ url, userName, password, additionalProps);
+
+ // Show the system time on each instance...
+ if (showInstanceSysTimestamp) {
+ LOG.info(String.format("\tDatabase time on %s is %s",
+ instanceDescription, OracleQueries
+ .getSysTimeStamp(testConnection)));
+ }
+
+ testConnection.close();
+ result = true;
+ } catch (SQLException ex) {
+ LOG.warn(
+ String
+ .format(
+ "The dynamically generated JDBC URL \"%s\" was unable to "
+ + "connect to an instance in the Oracle RAC.",
+ url), ex);
+ }
+
+ return result;
+ }
+
+ private void showUserTheOracleCommandToKillOraOop(MutableContext context) {
+
+ String moduleName =
+ OracleJdbcConnectorConstants.ORACLE_SESSION_MODULE_NAME;
+ String actionName = context.getString(
+ OracleJdbcConnectorConstants.ORACLE_SESSION_ACTION_NAME);
+
+ String msg = String.format(
+ "\nNote: This %s job can be killed via Oracle by executing the "
+ + "following statement:\n\tbegin\n"
+ + "\t\tfor row in (select sid,serial# from v$session where module='%s' "
+ + "and action='%s') loop\n"
+ + "\t\t\texecute immediate 'alter system kill session ''' || row.sid || "
+ + "',' || row.serial# || '''';\n"
+ + "\t\tend loop;\n" + "\tend;",
+ OracleJdbcConnectorConstants.CONNECTOR_NAME, moduleName, actionName);
+ LOG.info(msg);
+ }
+}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcConnector.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcConnector.java b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcConnector.java
new file mode 100644
index 0000000..ae0b9dc
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcConnector.java
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sqoop.connector.jdbc.oracle;
+
+import java.util.Locale;
+import java.util.ResourceBundle;
+
+import org.apache.sqoop.common.Direction;
+import org.apache.sqoop.common.VersionInfo;
+import org.apache.sqoop.connector.jdbc.oracle.configuration.FromJobConfiguration;
+import org.apache.sqoop.connector.jdbc.oracle.configuration.ToJobConfiguration;
+import org.apache.sqoop.connector.jdbc.oracle.configuration.LinkConfiguration;
+import org.apache.sqoop.connector.spi.ConnectorConfigurableUpgrader;
+import org.apache.sqoop.connector.spi.SqoopConnector;
+import org.apache.sqoop.job.etl.From;
+import org.apache.sqoop.job.etl.To;
+
+public class OracleJdbcConnector extends SqoopConnector {
+
+ private static final To TO = new To(
+ OracleJdbcToInitializer.class,
+ OracleJdbcLoader.class,
+ OracleJdbcToDestroyer.class);
+
+ private static final From FROM = new From(
+ OracleJdbcFromInitializer.class,
+ OracleJdbcPartitioner.class,
+ OracleJdbcPartition.class,
+ OracleJdbcExtractor.class,
+ OracleJdbcFromDestroyer.class);
+
+ @Override
+ public String getVersion() {
+ return VersionInfo.getBuildVersion();
+ }
+
+ @Override
+ public ResourceBundle getBundle(Locale locale) {
+ return ResourceBundle.getBundle(
+ OracleJdbcConnectorConstants.RESOURCE_BUNDLE_NAME, locale);
+ }
+
+ @SuppressWarnings("rawtypes")
+ @Override
+ public Class getLinkConfigurationClass() {
+ return LinkConfiguration.class;
+ }
+
+ @SuppressWarnings("rawtypes")
+ @Override
+ public Class getJobConfigurationClass(Direction jobType) {
+ switch (jobType) {
+ case FROM:
+ return FromJobConfiguration.class;
+ case TO:
+ return ToJobConfiguration.class;
+ default:
+ return null;
+ }
+ }
+
+ @Override
+ public From getFrom() {
+ return FROM;
+ }
+
+ @Override
+ public To getTo() {
+ return TO;
+ }
+
+ @Override
+ public ConnectorConfigurableUpgrader getConfigurableUpgrader(String oldConnectorVersion) {
+ return new OracleJdbcConnectorUpgrader();
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcConnectorConstants.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcConnectorConstants.java b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcConnectorConstants.java
new file mode 100644
index 0000000..2215cf3
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcConnectorConstants.java
@@ -0,0 +1,493 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sqoop.connector.jdbc.oracle;
+
/**
 * Constants shared across the Oracle JDBC connector.
 *
 * NOTE(review): the large blocks of commented-out legacy OraOop option-name
 * constants carried over from the Sqoop 1 connector have been removed as
 * dead code; consult the original OraOop source if any of them need to be
 * migrated later.
 */
public final class OracleJdbcConnectorConstants {

  // Constants holder - never instantiated.
  private OracleJdbcConnectorConstants() {
  }

  // Resource bundle name
  public static final String RESOURCE_BUNDLE_NAME =
      "oracle-jdbc-connector-config";

  public static final String CONNECTOR_NAME = "Sqoop Oracle Connector";

  // The string we want to pass to dbms_application_info.set_module() via the
  // "module_name" parameter...
  public static final String ORACLE_SESSION_MODULE_NAME = CONNECTOR_NAME;

  // Context key under which the v$session "action" value for this job is
  // stored.
  public static final String ORACLE_SESSION_ACTION_NAME =
      "oracle.session.module.action";

  // How many rows to pre-fetch when executing Oracle queries...
  public static final int ORACLE_ROW_FETCH_SIZE_DEFAULT = 5000;

  // The name of the Oracle JDBC driver class...
  public static final String ORACLE_JDBC_DRIVER_CLASS =
      "oracle.jdbc.OracleDriver";

  // Statements executed on each new Oracle session unless overridden by the
  // user. ";"-separated; entries starting with "--" are commented out.
  public static final String ORACLE_SESSION_INITIALIZATION_STATEMENTS_DEFAULT =
      "alter session disable parallel query;" +
      "alter session set \"_serial_direct_read\"=true;" +
      "alter session set tracefile_identifier=oraoop;" +
      "--alter session set events '10046 trace name context forever, level 8';";

  // The SYSDATE from the Oracle database when this job was started.
  // This is used to generate unique names for partitions and temporary tables
  // that we create during the job...
  public static final String SQOOP_ORACLE_JOB_SYSDATE =
      "sqoop.oracle.job.sysdate";

  // The minimum number of active instances in an Oracle RAC required for the
  // connector to use dynamically generated JDBC URLs...
  public static final int MIN_NUM_RAC_ACTIVE_INSTANCES_FOR_DYNAMIC_JDBC_URLS =
      2;

  // The name of the data_chunk_id column the connector appends to each
  // (import) query...
  public static final String COLUMN_NAME_DATA_CHUNK_ID = "data_chunk_id";

  // Pseudo-columns added to a partitioned export table (created by the
  // connector from a template table) to store the partition value and
  // subpartition value. The partition value is the sysdate when the job was
  // performed. The subpartition value is the mapper index...
  public static final String COLUMN_NAME_EXPORT_PARTITION =
      "SQOOP_EXPORT_SYSDATE";
  public static final String COLUMN_NAME_EXPORT_SUBPARTITION =
      "SQOOP_MAPPER_ID";
  public static final String COLUMN_NAME_EXPORT_MAPPER_ROW =
      "SQOOP_MAPPER_ROW";

  // Context key and Oracle date-format used for the export partition value.
  public static final String ORAOOP_EXPORT_PARTITION_DATE_VALUE =
      "oraoop.export.partition.date.value";
  public static final String ORAOOP_EXPORT_PARTITION_DATE_FORMAT =
      "yyyy-mm-dd hh24:mi:ss";

  // The SCN number to use for the consistent read
  public static final String ORACLE_IMPORT_CONSISTENT_READ_SCN =
      "oracle.import.consistent.read.scn";

  // Indicates (internally) that the export table we're dealing with has been
  // partitioned by Sqoop...
  public static final String EXPORT_TABLE_HAS_SQOOP_PARTITIONS =
      "sqoop.export.table.has.sqoop.partitions";

  // When using the Oracle hint... /* +APPEND_VALUES */ ...a commit must be
  // performed after each batch insert.
  // Therefore, the batches need to be quite large to avoid a performance
  // penalty (for the 'extra' commits).
  // This is the minimum batch size to use under these conditions...
  public static final int MIN_APPEND_VALUES_BATCH_SIZE_DEFAULT = 5000;

  // When the connector creates a table for a Sqoop export (from a template
  // table) and the table contains partitions, this is the prefix of those
  // partition names. (This also allows us to later identify partitions that
  // the connector created.)
  public static final String EXPORT_TABLE_PARTITION_NAME_PREFIX = "SQOOP_";

  // When the connector creates temporary tables for each mapper during a
  // Sqoop export this is the prefix of the table names...
  public static final String EXPORT_MAPPER_TABLE_NAME_PREFIX = "SQOOP_";

  // The Oracle date-format string used to turn a DATE into a string for use
  // within the names of Oracle objects that we create. For example,
  // temporary tables, table partitions, table subpartitions...
  public static final String ORACLE_OBJECT_NAME_DATE_TO_STRING_FORMAT_STRING =
      "yyyymmdd_hh24miss";

  /**
   * Whether to use the append values hint for exports.
   */
  public enum AppendValuesHintUsage {
    AUTO, ON, OFF
  }

  // Oracle data types supported for import. See:
  // http://download.oracle.com/docs/cd/E11882_01/server.112/e17118/
  // sql_elements001.htm#i45441
  // Excluded types: BFILE, LONG RAW, MLSLABEL, UNDEFINED, XMLTYPE, and
  // UROWID (SqlType 1111 = "OTHER": "AAAAACAADAAAAAEAAF" is returned as
  // "AAAAAAgADAAAA").
  // Columns declared as VARCHAR are listed as VARCHAR2 in dba_tab_columns.
  public static final String SUPPORTED_IMPORT_ORACLE_DATA_TYPES_CLAUSE =
      "(DATA_TYPE IN ("
          + "'BINARY_DOUBLE',"
          + "'BINARY_FLOAT',"
          + "'BLOB',"
          + "'CHAR',"
          + "'CLOB',"
          + "'DATE',"
          + "'FLOAT',"
          + "'LONG',"
          + "'NCHAR',"
          + "'NCLOB',"
          + "'NUMBER',"
          + "'NVARCHAR2',"
          + "'RAW',"
          + "'ROWID',"
          + "'URITYPE',"
          + "'VARCHAR2'"
          + ")"
          + " OR DATA_TYPE LIKE 'INTERVAL YEAR(%) TO MONTH'"
          + " OR DATA_TYPE LIKE 'INTERVAL DAY(%) TO SECOND(%)'"
          + " OR DATA_TYPE LIKE 'TIMESTAMP(%)'"
          + " OR DATA_TYPE LIKE 'TIMESTAMP(%) WITH TIME ZONE'"
          + " OR DATA_TYPE LIKE 'TIMESTAMP(%) WITH LOCAL TIME ZONE'" + ")";

  // Oracle data types supported for export. In addition to the types
  // excluded for import, the following are excluded here:
  // - BLOB, CLOB, NCLOB: Jira SQOOP-117, Sqoop cannot export LOB data.
  // - LONG: "create table as select..." and "insert into table as
  //   select..." do not work when a long column exists.
  // - RAW.
  public static final String SUPPORTED_EXPORT_ORACLE_DATA_TYPES_CLAUSE =
      "(DATA_TYPE IN ("
          + "'BINARY_DOUBLE',"
          + "'BINARY_FLOAT',"
          + "'CHAR',"
          + "'DATE',"
          + "'FLOAT',"
          + "'NCHAR',"
          + "'NUMBER',"
          + "'NVARCHAR2',"
          + "'ROWID',"
          + "'URITYPE',"
          + "'VARCHAR2'"
          + ")"
          + " OR DATA_TYPE LIKE 'INTERVAL YEAR(%) TO MONTH'"
          + " OR DATA_TYPE LIKE 'INTERVAL DAY(%) TO SECOND(%)'"
          + " OR DATA_TYPE LIKE 'TIMESTAMP(%)'"
          + " OR DATA_TYPE LIKE 'TIMESTAMP(%) WITH TIME ZONE'"
          + " OR DATA_TYPE LIKE 'TIMESTAMP(%) WITH LOCAL TIME ZONE'" + ")";

  /**
   * Constants for things belonging to Oracle...
   */
  public static final class Oracle {
    private Oracle() {
    }

    public static final int ROWID_EXTENDED_ROWID_TYPE = 1;
    public static final int ROWID_MAX_ROW_NUMBER_PER_BLOCK = 32767;

    // This is how you comment-out a line of SQL text in Oracle.
    public static final String ORACLE_SQL_STATEMENT_COMMENT_TOKEN = "--";

    public static final String OBJECT_TYPE_TABLE = "TABLE";

    public static final String URITYPE = "URITYPE";

    // Max length of an Oracle name (table-name, partition-name etc.).
    public static final int MAX_IDENTIFIER_LENGTH = 30;

    // Syntax for a hint in Oracle.
    public static final String HINT_SYNTAX = "/*+ %s */ ";
  }

}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcConnectorUpgrader.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcConnectorUpgrader.java b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcConnectorUpgrader.java
new file mode 100644
index 0000000..30693af
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcConnectorUpgrader.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sqoop.connector.jdbc.oracle;
+
+import org.apache.sqoop.configurable.ConfigurableUpgradeUtil;
+import org.apache.sqoop.connector.spi.ConnectorConfigurableUpgrader;
+import org.apache.sqoop.model.MFromConfig;
+import org.apache.sqoop.model.MLinkConfig;
+import org.apache.sqoop.model.MToConfig;
+
+// NOTE: All config types have the similar upgrade path at this point
/**
 * Upgrades the Oracle connector's link and job configs between connector
 * versions. All three config types share the same upgrade path at this
 * point: each method simply delegates to the generic
 * {@link ConfigurableUpgradeUtil#doUpgrade}.
 */
public class OracleJdbcConnectorUpgrader extends ConnectorConfigurableUpgrader {

  /** Upgrades link configs via the shared generic upgrade path. */
  @Override
  public void upgradeLinkConfig(MLinkConfig original, MLinkConfig upgradeTarget) {
    ConfigurableUpgradeUtil.doUpgrade(original.getConfigs(), upgradeTarget.getConfigs());
  }

  /** Upgrades FROM-job configs via the shared generic upgrade path. */
  @Override
  public void upgradeFromJobConfig(MFromConfig original, MFromConfig upgradeTarget) {
    ConfigurableUpgradeUtil.doUpgrade(original.getConfigs(), upgradeTarget.getConfigs());
  }

  /** Upgrades TO-job configs via the shared generic upgrade path. */
  @Override
  public void upgradeToJobConfig(MToConfig original, MToConfig upgradeTarget) {
    ConfigurableUpgradeUtil.doUpgrade(original.getConfigs(), upgradeTarget.getConfigs());
  }
}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcExtractor.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcExtractor.java b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcExtractor.java
new file mode 100644
index 0000000..df15fc2
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcExtractor.java
@@ -0,0 +1,361 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sqoop.connector.jdbc.oracle;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Timestamp;
+
+import org.apache.commons.lang.BooleanUtils;
+import org.apache.log4j.Logger;
+import org.apache.sqoop.common.ImmutableContext;
+import org.apache.sqoop.connector.jdbc.oracle.configuration.FromJobConfig;
+import org.apache.sqoop.connector.jdbc.oracle.configuration.FromJobConfiguration;
+import org.apache.sqoop.connector.jdbc.oracle.configuration.LinkConfiguration;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleConnectionFactory;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleDataChunk;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleQueries;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleTable;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleTableColumn;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleTableColumns;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleUtilities;
+import org.apache.sqoop.job.etl.Extractor;
+import org.apache.sqoop.job.etl.ExtractorContext;
+import org.apache.sqoop.schema.type.Column;
+import org.apache.sqoop.schema.type.ColumnType;
+import org.joda.time.DateTime;
+import org.joda.time.LocalDateTime;
+import org.joda.time.format.DateTimeFormat;
+import org.joda.time.format.DateTimeFormatter;
+
+public class OracleJdbcExtractor extends
+ Extractor<LinkConfiguration, FromJobConfiguration, OracleJdbcPartition> {
+
+ private static final Logger LOG = Logger.getLogger(OracleJdbcExtractor.class);
+
+ private Connection connection;
+ private OracleTable table;
+ private int mapperId; // <- The index of this Hadoop mapper
+ private long rowsRead = 0;
+
+ private OracleTableColumns tableColumns;
+
+ private OracleJdbcPartition dbInputSplit; // <- The split this record-reader
+ // is working on.
+ private int numberOfBlocksInThisSplit; // <- The number of Oracle blocks in
+ // this Oracle data-chunk.
+ private int numberOfBlocksProcessedInThisSplit; // <- How many Oracle blocks
+ // we've processed with this
+ // record-reader.
+ private String currentDataChunkId; // <- The id of the current data-chunk
+ // being processed
+ private ResultSet results; // <- The ResultSet containing the data from the
+ // query returned by getSelectQuery()
+ private int columnIndexDataChunkIdZeroBased = -1; // <- The zero-based column
+ // index of the
+ // data_chunk_id column.
+ private boolean progressCalculationErrorLogged; // <- Whether we've logged a
+ // problem with the progress
+ // calculation during
+ // nextKeyValue().
+ private Object oraOopOraStats; // <- A reference to the Oracle statistics
+ // object that is being tracked for this Oracle
+ // session.
+ private boolean profilingEnabled; // <- Whether to collect profiling metrics
+ private long timeSpentInNextKeyValueInNanoSeconds; // <- Total time spent in
+ // super.nextKeyValue()
+
+ private static final DateTimeFormatter TIMESTAMP_TIMEZONE =
+ DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss.SSSSSSSSS z");
+
+ @Override
+ public void extract(ExtractorContext context,
+ LinkConfiguration linkConfiguration,
+ FromJobConfiguration jobConfiguration, OracleJdbcPartition partition) {
+ //TODO: Mapper ID
+ mapperId = 1;
+ dbInputSplit = partition;
+
+ // Retrieve the JDBC URL that should be used by this mapper.
+ String mapperJdbcUrlPropertyName =
+ OracleUtilities.getMapperJdbcUrlPropertyName(mapperId);
+ String mapperJdbcUrl = context.getString(mapperJdbcUrlPropertyName, null);
+
+ LOG.debug(String.format("Mapper %d has a JDBC URL of: %s", mapperId,
+ mapperJdbcUrl == null ? "<null>" : mapperJdbcUrl));
+
+ try {
+ connection = OracleConnectionFactory.createOracleJdbcConnection(
+ OracleJdbcConnectorConstants.ORACLE_JDBC_DRIVER_CLASS,
+ mapperJdbcUrl,
+ linkConfiguration.connectionConfig.username,
+ linkConfiguration.connectionConfig.password);
+ } catch (SQLException ex) {
+ throw new RuntimeException(String.format(
+ "Unable to connect to the Oracle database at %s\nError:%s",
+ linkConfiguration.connectionConfig.connectionString, ex
+ .getMessage()), ex);
+ }
+
+ table = OracleUtilities.decodeOracleTableName(
+ linkConfiguration.connectionConfig.username,
+ jobConfiguration.fromJobConfig.tableName);
+
+ try {
+ String thisOracleInstanceName =
+ OracleQueries.getCurrentOracleInstanceName(connection);
+
+ LOG.info(String.format(
+ "This record reader is connected to Oracle via the JDBC URL: \n"
+ + "\t\"%s\"\n" + "\tto the Oracle instance: \"%s\"", mapperJdbcUrl,
+ thisOracleInstanceName));
+
+ OracleConnectionFactory.initializeOracleConnection(
+ connection, linkConfiguration.connectionConfig);
+ } catch(SQLException ex) {
+ throw new RuntimeException(String.format(
+ "Unable to initialize connection to the Oracle database at %s\n"
+ + "Error:%s",
+ linkConfiguration.connectionConfig.connectionString, ex
+ .getMessage()), ex);
+ }
+
+ try {
+ tableColumns =
+ OracleQueries.getFromTableColumns(connection, table, OracleUtilities.
+ omitLobAndLongColumnsDuringImport(jobConfiguration.fromJobConfig),
+ true // <- onlyOraOopSupportedTypes
+ );
+ } catch (SQLException ex) {
+ LOG.error(String.format(
+ "Unable to obtain the data-types of the columns in table %s.\n"
+ + "Error:\n%s", table.toString(), ex.getMessage()));
+ throw new RuntimeException(ex);
+ }
+
+ this.numberOfBlocksInThisSplit =
+ this.dbInputSplit.getTotalNumberOfBlocksInThisSplit();
+ this.numberOfBlocksProcessedInThisSplit = 0;
+
+ extractData(context, jobConfiguration.fromJobConfig);
+
+ try {
+ connection.close();
+ } catch(SQLException ex) {
+ throw new RuntimeException(String.format(
+ "Unable to close connection to the Oracle database at %s\nError:%s",
+ linkConfiguration.connectionConfig.connectionString, ex
+ .getMessage()), ex);
+ }
+ }
+
+ private Object getObjectAtName(ResultSet resultSet,
+ OracleTableColumn column, Column sqoopColumn) throws SQLException {
+ Object result = null;
+ if(sqoopColumn.getType() == ColumnType.TEXT) {
+ result = resultSet.getString(column.getName());
+ } else if (column.getOracleType() == OracleQueries
+ .getOracleType("TIMESTAMP")) {
+ Timestamp timestamp = resultSet.getTimestamp(column.getName());
+ if(timestamp!=null) {
+ result = LocalDateTime.fromDateFields(timestamp);
+ }
+ } else if (column.getOracleType() == OracleQueries
+ .getOracleType("TIMESTAMPTZ")
+ || column.getOracleType() == OracleQueries
+ .getOracleType("TIMESTAMPLTZ")) {
+ Timestamp timestamp = resultSet.getTimestamp(column.getName());
+ if(timestamp!=null) {
+ //TODO: BC dates
+ String dateTimeStr = resultSet.getString(column.getName());
+ result = DateTime.parse(dateTimeStr, TIMESTAMP_TIMEZONE);
+ }
+ } else {
+ result = resultSet.getObject(column.getName());
+ }
+ return result;
+ }
+
+ private void extractData(ExtractorContext context, FromJobConfig jobConfig) {
+ String sql = getSelectQuery(jobConfig, context.getContext());
+ Column[] columns = context.getSchema().getColumnsArray();
+ int columnCount = columns.length;
+ try {
+ PreparedStatement statement = connection.prepareStatement(sql);
+ ResultSet resultSet = statement.executeQuery();
+
+ while(resultSet.next()) {
+ Object[] array = new Object[columnCount];
+ for(int i = 0; i < columnCount; i++) {
+ OracleTableColumn tableColumn =
+ tableColumns.findColumnByName(columns[i].getName());
+ array[i] = getObjectAtName(resultSet, tableColumn, columns[i]);
+ }
+ context.getDataWriter().writeArrayRecord(array);
+ rowsRead++;
+ }
+
+ resultSet.close();
+ statement.close();
+ } catch (SQLException ex) {
+ LOG.error(String.format("Error in %s while executing the SQL query:\n"
+ + "%s\n\n" + "%s", OracleUtilities.getCurrentMethodName(), sql, ex
+ .getMessage()));
+ throw new RuntimeException(ex);
+ }
+ }
+
+ @Override
+ public long getRowsRead() {
+ return rowsRead;
+ }
+
+ private String getSelectQuery(FromJobConfig jobConfig,
+ ImmutableContext context) {
+
+ boolean consistentRead = BooleanUtils.isTrue(jobConfig.consistentRead);
+ long consistentReadScn = context.getLong(
+ OracleJdbcConnectorConstants.ORACLE_IMPORT_CONSISTENT_READ_SCN, 0L);
+ if (consistentRead && consistentReadScn == 0L) {
+ throw new RuntimeException("Could not get SCN for consistent read.");
+ }
+
+ StringBuilder query = new StringBuilder();
+
+ if (this.dbInputSplit.getDataChunks() == null) {
+ String errMsg =
+ String.format("The %s does not contain any data-chunks, within %s.",
+ this.dbInputSplit.getClass().getName(), OracleUtilities
+ .getCurrentMethodName());
+ throw new RuntimeException(errMsg);
+ }
+
+ OracleUtilities.OracleTableImportWhereClauseLocation whereClauseLocation =
+ OracleUtilities.getTableImportWhereClauseLocation(jobConfig,
+ OracleUtilities.OracleTableImportWhereClauseLocation.SUBSPLIT);
+
+ int numberOfDataChunks = this.dbInputSplit.getNumberOfDataChunks();
+ for (int idx = 0; idx < numberOfDataChunks; idx++) {
+
+ OracleDataChunk dataChunk =
+ this.dbInputSplit.getDataChunks().get(idx);
+
+ if (idx > 0) {
+ query.append("UNION ALL \n");
+ }
+
+ query.append(getColumnNamesClause(tableColumns,
+ dataChunk.getId(), jobConfig)) // <- SELECT clause
+ .append("\n");
+
+ query.append(" FROM ").append(table.toString()).append(" ");
+
+ if (consistentRead) {
+ query.append("AS OF SCN ").append(consistentReadScn).append(" ");
+ }
+
+ query.append(getPartitionClauseForDataChunk(this.dbInputSplit, idx))
+ .append(" t").append("\n");
+
+ query.append(" WHERE (").append(
+ getWhereClauseForDataChunk(this.dbInputSplit, idx)).append(")\n");
+
+ // If the user wants the WHERE clause applied to each data-chunk...
+ if (whereClauseLocation == OracleUtilities.
+ OracleTableImportWhereClauseLocation.SUBSPLIT) {
+ String conditions = jobConfig.conditions;
+ if (conditions != null && conditions.length() > 0) {
+ query.append(" AND (").append(conditions).append(")\n");
+ }
+ }
+
+ }
+
+ // If the user wants the WHERE clause applied to the whole split...
+ if (whereClauseLocation == OracleUtilities.
+ OracleTableImportWhereClauseLocation.SPLIT) {
+ String conditions = jobConfig.conditions;
+ if (conditions != null && conditions.length() > 0) {
+
+ // Insert a "select everything" line at the start of the SQL query...
+ query.insert(0, getColumnNamesClause(tableColumns, null, jobConfig) +
+ " FROM (\n");
+
+ // ...and then apply the WHERE clause to all the UNIONed sub-queries...
+ query.append(")\n").append("WHERE\n").append(conditions).append("\n");
+ }
+ }
+
+ LOG.info("SELECT QUERY = \n" + query.toString());
+
+ return query.toString();
+ }
+
+ private String getColumnNamesClause(OracleTableColumns tableColumns,
+ String dataChunkId, FromJobConfig jobConfig) {
+
+ StringBuilder result = new StringBuilder();
+
+ result.append("SELECT ");
+ result.append(OracleUtilities.getImportHint(jobConfig));
+
+ int firstFieldIndex = 0;
+ int lastFieldIndex = tableColumns.size();
+ for (int i = firstFieldIndex; i < lastFieldIndex; i++) {
+ if (i > firstFieldIndex) {
+ result.append(",");
+ }
+
+ OracleTableColumn oracleTableColumn = tableColumns.get(i);
+ String fieldName = oracleTableColumn.getName();
+ if (oracleTableColumn != null) {
+ if (oracleTableColumn.getDataType().equals(
+ OracleJdbcConnectorConstants.Oracle.URITYPE)) {
+ fieldName = String.format("uritype.geturl(%s) %s", fieldName,
+ fieldName);
+ }
+ }
+
+ result.append(fieldName);
+ }
+ // We need to insert the value of that data_chunk_id now...
+ if (dataChunkId != null && !dataChunkId.isEmpty()) {
+ String fieldName =
+ String.format(",'%s' %s", dataChunkId,
+ OracleJdbcConnectorConstants.COLUMN_NAME_DATA_CHUNK_ID);
+ result.append(fieldName);
+ }
+ return result.toString();
+ }
+
+ private String getPartitionClauseForDataChunk(OracleJdbcPartition split,
+ int dataChunkIndex) {
+ OracleDataChunk dataChunk = split.getDataChunks().get(dataChunkIndex);
+ return dataChunk.getPartitionClause();
+ }
+
+ private String getWhereClauseForDataChunk(OracleJdbcPartition split,
+ int dataChunkIndex) {
+
+ OracleDataChunk dataChunk = split.getDataChunks().get(dataChunkIndex);
+ return dataChunk.getWhereClause();
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcFromDestroyer.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcFromDestroyer.java b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcFromDestroyer.java
new file mode 100644
index 0000000..bd6fd0a
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcFromDestroyer.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sqoop.connector.jdbc.oracle;
+
+import org.apache.sqoop.connector.jdbc.oracle.configuration.FromJobConfiguration;
+import org.apache.sqoop.connector.jdbc.oracle.configuration.LinkConfiguration;
+import org.apache.sqoop.job.etl.Destroyer;
+import org.apache.sqoop.job.etl.DestroyerContext;
+
/**
 * Destroyer for the FROM (import) side of the Oracle connector.
 * Currently a no-op stub.
 */
public class OracleJdbcFromDestroyer extends
    Destroyer<LinkConfiguration, FromJobConfiguration> {

  /**
   * Intentionally empty in the initial port - no FROM-side clean-up is
   * performed. NOTE(review): confirm whether any Oracle session clean-up
   * belongs here before removing the TODO.
   */
  @Override
  public void destroy(DestroyerContext context,
      LinkConfiguration linkConfiguration,
      FromJobConfiguration jobConfiguration) {
    // TODO Auto-generated method stub

  }

}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcFromInitializer.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcFromInitializer.java b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcFromInitializer.java
new file mode 100644
index 0000000..62a0e84
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcFromInitializer.java
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sqoop.connector.jdbc.oracle;
+
+import java.sql.SQLException;
+
+import org.apache.commons.lang.BooleanUtils;
+import org.apache.log4j.Logger;
+import org.apache.sqoop.connector.jdbc.oracle.configuration.FromJobConfiguration;
+import org.apache.sqoop.connector.jdbc.oracle.configuration.LinkConfiguration;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleQueries;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleUtilities;
+import org.apache.sqoop.job.etl.InitializerContext;
+
+public class OracleJdbcFromInitializer extends
+ OracleJdbcCommonInitializer<FromJobConfiguration> {
+
+ private static final Logger LOG =
+ Logger.getLogger(OracleJdbcFromInitializer.class);
+
+ @Override
+ public void connect(InitializerContext context,
+ LinkConfiguration linkConfiguration,
+ FromJobConfiguration jobConfiguration) throws SQLException {
+ super.connect(context, linkConfiguration, jobConfiguration);
+ table = OracleUtilities.decodeOracleTableName(
+ linkConfiguration.connectionConfig.username,
+ jobConfiguration.fromJobConfig.tableName);
+ }
+
+ @Override
+ public void initialize(InitializerContext context,
+ LinkConfiguration linkConfiguration,
+ FromJobConfiguration jobConfiguration) {
+ super.initialize(context, linkConfiguration, jobConfiguration);
+ LOG.debug("Running Oracle JDBC connector FROM initializer");
+
+ try {
+ if(OracleQueries.isTableAnIndexOrganizedTable(connection, table)) {
+ if(OracleUtilities.getOraOopOracleDataChunkMethod(
+ jobConfiguration.fromJobConfig) !=
+ OracleUtilities.OracleDataChunkMethod.PARTITION) {
+ throw new RuntimeException(String.format("Cannot process this Sqoop"
+ + " connection, as the Oracle table %s is an"
+ + " index-organized table. If the table is"
+ + " partitioned, set the data chunk method to "
+ + OracleUtilities.OracleDataChunkMethod.PARTITION
+ + ".",
+ table.toString()));
+ }
+ }
+ } catch (SQLException e) {
+ throw new RuntimeException(String.format(
+ "Unable to determine whether the Oracle table %s is an"
+ + "index-organized table.", table.toString()), e);
+ }
+
+ if(BooleanUtils.isTrue(jobConfiguration.fromJobConfig.consistentRead)) {
+ Long scn = jobConfiguration.fromJobConfig.consistentReadScn;
+ if(scn==null || scn.equals(Long.valueOf(0L))) {
+ try {
+ scn = OracleQueries.getCurrentScn(connection);
+ } catch(SQLException e) {
+ throw new RuntimeException("Unable to determine SCN of database.",
+ e);
+ }
+ }
+ context.getContext().setLong(
+ OracleJdbcConnectorConstants.ORACLE_IMPORT_CONSISTENT_READ_SCN,
+ scn);
+ LOG.info("Performing a consistent read using SCN: " + scn);
+ }
+ }
+
+}
[2/6] sqoop git commit: SQOOP-2595: Add Oracle connector to Sqoop 2
Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleUtilities.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleUtilities.java b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleUtilities.java
new file mode 100644
index 0000000..4219af2
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleUtilities.java
@@ -0,0 +1,1446 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sqoop.connector.jdbc.oracle.util;
+
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.commons.lang.BooleanUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Category;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.sqoop.common.ImmutableContext;
import org.apache.sqoop.connector.jdbc.oracle.OracleJdbcConnectorConstants;
import org.apache.sqoop.connector.jdbc.oracle.configuration.ConnectionConfig;
import org.apache.sqoop.connector.jdbc.oracle.configuration.FromJobConfig;
import org.apache.sqoop.connector.jdbc.oracle.configuration.FromJobConfiguration;
import org.apache.sqoop.connector.jdbc.oracle.configuration.ToJobConfig;
+
+/**
+ * Utilities used by OraOop.
+ */
+public final class OracleUtilities {
+
  private static final Logger LOG = Logger.getLogger(OracleUtilities.class);

  /** Utility class - not instantiable. */
  private OracleUtilities() {
  }

  // Fallback owner used by decodeOracleTableName() when no connection user
  // name is supplied. NOTE(review): never assigned in the code visible here -
  // confirm it is set elsewhere before relying on it being non-null.
  private static String currentSessionUser = null;

  /**
   * Type of export - straight update or merge (update-insert).
   */
  public enum UpdateMode {
    Update, Merge
  }

  /**
   * Type of insert to use - direct or partition exchange load.
   */
  public enum InsertMode {
    DirectInsert, ExchangePartition
  }

  /**
   * Technique used to apply rows to an export table: re-insert variants or
   * UPDATE/MERGE SQL statements.
   */
  public enum ExportTableUpdateTechnique {
    ReInsertUpdatedRows, ReInsertUpdatedRowsAndNewRows, UpdateSql, MergeSql
  }

  /**
   * How should data be split up - by ROWID range, or by partition.
   */
  public enum OracleDataChunkMethod {
    ROWID, PARTITION
  }

  /**
   * How splits should be allocated to the mappers.
   */
  public enum OracleBlockToSplitAllocationMethod {
    ROUNDROBIN, SEQUENTIAL, RANDOM
  }

  /**
   * Location to place the user's WHERE clause: on each data-chunk sub-query
   * (SUBSPLIT) or once around the whole split (SPLIT).
   */
  public enum OracleTableImportWhereClauseLocation {
    SUBSPLIT, SPLIT
  }
+
+// /**
+// * Used for testing purposes - can get OraOop to call a class to run a report
+// * on various performance metrics.
+// */
+// public static class OraOopStatsReports {
+// private String csvReport;
+// private String performanceReport;
+//
+// public String getPerformanceReport() {
+// return performanceReport;
+// }
+//
+// public void setPerformanceReport(String newPerformanceReport) {
+// this.performanceReport = newPerformanceReport;
+// }
+//
+// public String getCsvReport() {
+// return csvReport;
+// }
+//
+// public void setCsvReport(String newCsvReport) {
+// this.csvReport = newCsvReport;
+// }
+// }
+//
+//
+//
+// public static List<String> copyStringList(List<String> list) {
+//
+// List<String> result = new ArrayList<String>(list.size());
+// result.addAll(list);
+// return result;
+// }
+
  /**
   * Decodes a user-supplied table reference into its owner (schema) and
   * table name, honouring Oracle's double-quote rules: unquoted identifiers
   * are upper-cased, quoted identifiers keep their exact case.
   *
   * @param oracleConnectionUserName user name of the Oracle connection; used
   *          as the table owner when tableStr has no schema part. Falls back
   *          to currentSessionUser when null.
   * @param tableStr the table reference, in one of the quoted/unquoted forms
   *          listed below
   * @return the decoded owner/name pair
   */
  public static OracleTable decodeOracleTableName(
      String oracleConnectionUserName, String tableStr) {

    String tableOwner;
    String tableName;

    // These are the possibilities for double-quote location...
    // table
    // "table"
    // schema.table
    // schema."table"
    // "schema".table
    // "schema"."table"
    // Splitting on '"' yields 1-4 parts; the part count identifies the form.
    String[] tableStrings = tableStr.split("\"");

    if (oracleConnectionUserName == null) {
      // NOTE(review): if currentSessionUser is also null, the toUpperCase()
      // calls below will NPE - confirm callers always supply a user name.
      oracleConnectionUserName = currentSessionUser;
    }

    switch (tableStrings.length) {

      case 1: // <- table or schema.table (no quotes at all)

        tableStrings = tableStr.split("\\.");

        switch (tableStrings.length) {

          case 1: // <- No period
            tableOwner = oracleConnectionUserName.toUpperCase();
            tableName = tableStrings[0].toUpperCase();
            break;
          case 2: // <- 1 period
            tableOwner = tableStrings[0].toUpperCase();
            tableName = tableStrings[1].toUpperCase();
            break;
          default:
            LOG.debug(String.format(
                "Unable to decode the table name (displayed in "
                    + "double quotes): \"%s\"", tableStr));
            throw new RuntimeException(String.format(
                "Unable to decode the table name: %s", tableStr));
        }
        break;

      case 2: // <- "table" or schema."table"

        if (tableStrings[0] == null || tableStrings[0].isEmpty()) {
          tableOwner = oracleConnectionUserName.toUpperCase();
        } else {
          tableOwner = tableStrings[0].toUpperCase();
          // Remove the "." from the end of the schema name...
          if (tableOwner.endsWith(".")) {
            tableOwner = tableOwner.substring(0, tableOwner.length() - 1);
          }
        }

        // Quoted table name: preserve its case exactly.
        tableName = tableStrings[1];
        break;

      case 3: // <- "schema".table

        // Quoted schema name: preserve its case exactly.
        tableOwner = tableStrings[1];
        tableName = tableStrings[2].toUpperCase();
        // Remove the "." from the start of the table name...
        if (tableName.startsWith(".")) {
          tableName = tableName.substring(1, tableName.length());
        }

        break;

      case 4: // <- "schema"."table"
        tableOwner = tableStrings[1];
        tableName = tableStrings[3];
        break;

      default:
        LOG.debug(String.format(
            "Unable to decode the table name (displayed in double "
                + "quotes): \"%s\"", tableStr));
        throw new RuntimeException(String.format(
            "Unable to decode the table name: %s", tableStr));

    }
    OracleTable result = new OracleTable(tableOwner, tableName);
    return result;
  }
+
+
+ public static boolean oracleJdbcUrlGenerationDisabled(ConnectionConfig config) {
+ return BooleanUtils.isTrue(config.jdbcUrlVerbatim);
+ }
+
+// public static boolean userWantsOracleSessionStatisticsReports(
+// org.apache.hadoop.conf.Configuration conf) {
+//
+// return conf.getBoolean(OraOopConstants.ORAOOP_REPORT_SESSION_STATISTICS,
+// false);
+// }
+//
+// public static boolean enableDebugLoggingIfRequired(
+// org.apache.hadoop.conf.Configuration conf) {
+//
+// boolean result = false;
+//
+// try {
+//
+// Level desiredOraOopLoggingLevel =
+// Level.toLevel(conf.get(OraOopConstants.ORAOOP_LOGGING_LEVEL),
+// Level.INFO);
+//
+// Level sqoopLogLevel =
+// Logger.getLogger(Sqoop.class.getName()).getParent().getLevel();
+//
+// if (desiredOraOopLoggingLevel == Level.DEBUG
+// || desiredOraOopLoggingLevel == Level.ALL
+// || sqoopLogLevel == Level.DEBUG || sqoopLogLevel == Level.ALL) {
+//
+// Category oraOopLogger =
+// Logger.getLogger(OraOopManagerFactory.class.getName()).getParent();
+// oraOopLogger.setLevel(Level.DEBUG);
+// LOG.debug("Enabled OraOop debug logging.");
+// result = true;
+//
+// conf.set(OraOopConstants.ORAOOP_LOGGING_LEVEL, Level.DEBUG.toString());
+// }
+// } catch (Exception ex) {
+// LOG.error(String.format(
+// "Unable to determine whether debug logging should be enabled.\n%s",
+// getFullExceptionMessage(ex)));
+// }
+//
+// return result;
+// }
+//
+ public static String generateDataChunkId(int fileId, int fileBatch) {
+ StringBuilder sb = new StringBuilder();
+ return sb.append(fileId).append("_").append(fileBatch).toString();
+ }
+
+ public static String getCurrentMethodName() {
+
+ StackTraceElement[] stackTraceElements = (new Throwable()).getStackTrace();
+ return String.format("%s()", stackTraceElements[1].getMethodName());
+ }
+
+// public static String[] getDuplicatedStringArrayValues(String[] list,
+// boolean ignoreCase) {
+//
+// if (list == null) {
+// throw new IllegalArgumentException("The list argument cannot be null");
+// }
+//
+// ArrayList<String> duplicates = new ArrayList<String>();
+//
+// for (int idx1 = 0; idx1 < list.length - 1; idx1++) {
+// for (int idx2 = idx1 + 1; idx2 < list.length; idx2++) {
+// if (list[idx1].equals(list[idx2])) {
+// // If c is a duplicate of both a & b, don't add c to the list twice...
+// if (!duplicates.contains(list[idx2])) {
+// duplicates.add(list[idx2]);
+// }
+//
+// } else if (ignoreCase && list[idx1].equalsIgnoreCase((list[idx2]))) {
+// // If c is a duplicate of both a & b, don't add c to the list twice...
+// if (stringListIndexOf(duplicates, list[idx2], ignoreCase) == -1) {
+// duplicates.add(list[idx2]);
+// }
+// }
+// }
+// }
+//
+// return duplicates.toArray(new String[duplicates.size()]);
+// }
+
+ public static String getFullExceptionMessage(Exception ex) {
+
+ ByteArrayOutputStream arrayStream = new ByteArrayOutputStream();
+ PrintStream printStream = new PrintStream(arrayStream);
+ ex.printStackTrace(printStream);
+ return arrayStream.toString();
+ }
+
+// public static int getMinNumberOfImportMappersAcceptedByOraOop(
+// org.apache.hadoop.conf.Configuration conf) {
+//
+// return conf.getInt(OraOopConstants.ORAOOP_MIN_IMPORT_MAPPERS,
+// OraOopConstants.MIN_NUM_IMPORT_MAPPERS_ACCEPTED_BY_ORAOOP);
+// }
+//
+// public static int getMinAppendValuesBatchSize(
+// org.apache.hadoop.conf.Configuration conf) {
+//
+// return conf.getInt(OraOopConstants.ORAOOP_MIN_APPEND_VALUES_BATCH_SIZE,
+// OraOopConstants.ORAOOP_MIN_APPEND_VALUES_BATCH_SIZE_DEFAULT);
+// }
+//
+// public static int getMinNumberOfExportMappersAcceptedByOraOop(
+// org.apache.hadoop.conf.Configuration conf) {
+//
+// return conf.getInt(OraOopConstants.ORAOOP_MIN_EXPORT_MAPPERS,
+// OraOopConstants.MIN_NUM_EXPORT_MAPPERS_ACCEPTED_BY_ORAOOP);
+// }
+//
+// public static int getMinNumberOfOracleRacActiveInstancesForDynamicJdbcUrlUse(
+// org.apache.hadoop.conf.Configuration conf) {
+//
+// return conf.getInt(OraOopConstants.ORAOOP_MIN_RAC_ACTIVE_INSTANCES,
+// OraOopConstants.MIN_NUM_RAC_ACTIVE_INSTANCES_FOR_DYNAMIC_JDBC_URLS);
+// }
+//
+// public static int getNumberOfDataChunksPerOracleDataFile(
+// int desiredNumberOfMappers, org.apache.hadoop.conf.Configuration conf) {
+//
+// final String MAPPER_MULTIPLIER = "oraoop.datachunk.mapper.multiplier";
+// final String RESULT_INCREMENT = "oraoop.datachunk.result.increment";
+//
+// int numberToMultiplyMappersBy = conf.getInt(MAPPER_MULTIPLIER, 2);
+// int numberToIncrementResultBy = conf.getInt(RESULT_INCREMENT, 1);
+//
+// // The number of chunks generated will *not* be a multiple of the number of
+// // splits,
+// // to ensure that each split doesn't always get data from the start of each
+// // data-file...
+// int numberOfDataChunksPerOracleDataFile =
+// (desiredNumberOfMappers * numberToMultiplyMappersBy)
+// + numberToIncrementResultBy;
+//
+// LOG.debug(String.format("%s:\n" + "\t%s=%d\n" + "\t%s=%d\n"
+// + "\tdesiredNumberOfMappers=%d\n" + "\tresult=%d",
+// getCurrentMethodName(), MAPPER_MULTIPLIER, numberToMultiplyMappersBy,
+// RESULT_INCREMENT, numberToIncrementResultBy, desiredNumberOfMappers,
+// numberOfDataChunksPerOracleDataFile));
+//
+// return numberOfDataChunksPerOracleDataFile;
+// }
+
+ public static OracleDataChunkMethod
+ getOraOopOracleDataChunkMethod(FromJobConfig jobConfig) {
+
+ String strMethod = jobConfig.dataChunkMethod;
+ if (strMethod == null) {
+ return OracleDataChunkMethod.ROWID;
+ }
+
+ OracleDataChunkMethod result;
+
+ try {
+ strMethod = strMethod.toUpperCase().trim();
+ result = OracleDataChunkMethod.valueOf(strMethod);
+ } catch (IllegalArgumentException ex) {
+ result = OracleDataChunkMethod.ROWID;
+ LOG.error("An invalid value of \"" + strMethod
+ + "\" was specified for the data chunk method "
+ + "configuration property value.\n" + "\tThe default value of "
+ + OracleDataChunkMethod.ROWID
+ + " will be used.");
+ }
+ return result;
+ }
+
+ public static
+ OracleBlockToSplitAllocationMethod getOracleBlockToSplitAllocationMethod(
+ FromJobConfig jobConfig,
+ OracleBlockToSplitAllocationMethod defaultMethod) {
+
+ String strMethod = jobConfig.dataChunkAllocationMethod;
+ if (strMethod == null) {
+ return defaultMethod;
+ }
+
+ OracleBlockToSplitAllocationMethod result;
+
+ try {
+ strMethod = strMethod.toUpperCase().trim();
+ result =
+ OracleBlockToSplitAllocationMethod
+ .valueOf(strMethod);
+ } catch (IllegalArgumentException ex) {
+ result = defaultMethod;
+
+ String errorMsg =
+ String
+ .format(
+ "An invalid value of \"%s\" was specified for the data chunk "
+ + " allocation method configuration property value.\n"
+ + "\tValid values are: %s\n"
+ + "\tThe default value of %s will be used.",
+ strMethod,
+ getOracleBlockToSplitAllocationMethods(), defaultMethod.name());
+ LOG.error(errorMsg);
+ }
+
+ return result;
+ }
+
+ private static String getOracleBlockToSplitAllocationMethods() {
+
+ OracleBlockToSplitAllocationMethod[] values =
+ OracleBlockToSplitAllocationMethod.values();
+
+ StringBuilder result =
+ new StringBuilder((2 * values.length) - 1); // <- Include capacity
+ // for commas
+
+ for (int idx = 0; idx < values.length; idx++) {
+ OracleBlockToSplitAllocationMethod value =
+ values[idx];
+ if (idx > 0) {
+ result.append(" or ");
+ }
+ result.append(value.name());
+ }
+ return result.toString();
+ }
+
+ public static OracleTableImportWhereClauseLocation
+ getTableImportWhereClauseLocation(
+ FromJobConfig jobConfig,
+ OracleTableImportWhereClauseLocation defaultLocation) {
+
+ String strLocation = jobConfig.whereClauseLocation;
+
+ if (strLocation == null) {
+ return defaultLocation;
+ }
+
+ OracleTableImportWhereClauseLocation result;
+
+ try {
+ strLocation = strLocation.toUpperCase().trim();
+ result =
+ OracleTableImportWhereClauseLocation
+ .valueOf(strLocation);
+ } catch (IllegalArgumentException ex) {
+ result = defaultLocation;
+
+ String errorMsg =
+ String
+ .format(
+ "An invalid value of \"%s\"was specified for the "
+ + "where clause location configuration property value.\n"
+ + "\tValid values are: %s\n"
+ + "\tThe default value of %s will be used.", strLocation,
+ getOracleTableImportWhereClauseLocations(), defaultLocation
+ .name());
+ LOG.error(errorMsg);
+ }
+
+ return result;
+ }
+
+ private static String getOracleTableImportWhereClauseLocations() {
+
+ OracleTableImportWhereClauseLocation[] locationValues =
+ OracleTableImportWhereClauseLocation.values();
+
+ StringBuilder result =
+ new StringBuilder((2 * locationValues.length) - 1); // <- Include
+ // capacity for
+ // commas
+
+ for (int idx = 0; idx < locationValues.length; idx++) {
+ OracleTableImportWhereClauseLocation locationValue =
+ locationValues[idx];
+ if (idx > 0) {
+ result.append(" or ");
+ }
+ result.append(locationValue.name());
+ }
+ return result.toString();
+ }
+
+// public static String getOutputDirectory(
+// org.apache.hadoop.conf.Configuration conf) {
+//
+// String workingDir = conf.get("mapred.working.dir");
+// String outputDir = conf.get("mapred.output.dir");
+//
+// return workingDir + "/" + outputDir;
+// }
+//
+// public static String padLeft(String s, int n) {
+// return StringUtils.leftPad(s, n);
+// }
+//
+// public static String padRight(String s, int n) {
+// return StringUtils.rightPad(s, n);
+// }
+//
+// public static String replaceConfigurationExpression(String str,
+// org.apache.hadoop.conf.Configuration conf) {
+//
+// int startPos = str.indexOf('{');
+// int endPos = str.indexOf('}');
+//
+// // Example:
+// // alter session set timezone = '{oracle.sessionTimeZone|GMT}';
+//
+// if (startPos == -1 || endPos == -1) {
+// return str;
+// }
+//
+// String configName = null;
+// String defaultValue = null;
+//
+// String expression = str.substring(startPos + 1, endPos);
+// int defaultValuePos = expression.indexOf('|');
+// if (defaultValuePos == -1) {
+// // return expression;
+// configName = expression;
+// } else {
+// configName = expression.substring(0, defaultValuePos);
+// defaultValue = expression.substring(defaultValuePos + 1);
+// }
+//
+// if (defaultValue == null) {
+// defaultValue = "";
+// }
+//
+// String configValue = conf.get(configName);
+// if (configValue == null) {
+// configValue = defaultValue;
+// }
+//
+// String result = str.replace(String.format("{%s}", expression), configValue);
+//
+// LOG.debug(String.format("The expression:\n%s\nwas replaced with:\n%s", str,
+// result));
+//
+// // Recurse to evaluate any other expressions...
+// result = replaceConfigurationExpression(result, conf);
+//
+// return result;
+// }
+//
+// public static boolean stackContainsClass(String className) {
+//
+// StackTraceElement[] stackTraceElements = (new Throwable()).getStackTrace();
+// for (StackTraceElement stackTraceElement : stackTraceElements) {
+// if (stackTraceElement.getClassName().equalsIgnoreCase(className)) {
+// return true;
+// }
+// }
+//
+// return false;
+// }
+//
+// public static Object startSessionSnapshot(Connection connection) {
+//
+// Object result = null;
+// try {
+//
+// Class<?> oraOopOraStatsClass =
+// Class.forName("quest.com.oraOop.oracleStats.OraOopOraStats");
+// Method startSnapshotMethod =
+// oraOopOraStatsClass.getMethod("startSnapshot", Connection.class);
+// if (connection != null) {
+// result = startSnapshotMethod.invoke(null, connection);
+// }
+// } catch (ClassNotFoundException ex) {
+// throw new RuntimeException(ex);
+// } catch (NoSuchMethodException ex) {
+// throw new RuntimeException(ex);
+// } catch (InvocationTargetException ex) {
+// throw new RuntimeException(ex);
+// } catch (IllegalAccessException ex) {
+// throw new RuntimeException(ex);
+// }
+//
+// return result;
+// }
+//
+// public static OraOopStatsReports stopSessionSnapshot(Object oraOopOraStats) {
+//
+// OraOopStatsReports result = new OraOopStatsReports();
+//
+// if (oraOopOraStats == null) {
+// return result;
+// }
+//
+// try {
+//
+// Class<?> oraOopOraStatsClass =
+// Class.forName("quest.com.oraOop.oracleStats.OraOopOraStats");
+// Method finalizeSnapshotMethod =
+// oraOopOraStatsClass.getMethod("finalizeSnapshot", (Class<?>[]) null);
+// finalizeSnapshotMethod.invoke(oraOopOraStats, (Object[]) null);
+//
+// Method performanceReportCsvMethod =
+// oraOopOraStatsClass.getMethod("getStatisticsCSV", (Class<?>[]) null);
+// result.setCsvReport((String) performanceReportCsvMethod.invoke(
+// oraOopOraStats, (Object[]) null));
+//
+// Method performanceReportMethod =
+// oraOopOraStatsClass.getMethod("performanceReport", (Class<?>[]) null);
+// result.setPerformanceReport((String) performanceReportMethod.invoke(
+// oraOopOraStats, (Object[]) null));
+// } catch (ClassNotFoundException ex) {
+// throw new RuntimeException(ex);
+// } catch (NoSuchMethodException ex) {
+// throw new RuntimeException(ex);
+// } catch (InvocationTargetException ex) {
+// throw new RuntimeException(ex);
+// } catch (IllegalAccessException ex) {
+// throw new RuntimeException(ex);
+// }
+//
+// return result;
+// }
+//
+ public static boolean stringArrayContains(String[] list, String value,
+ boolean ignoreCase) {
+
+ return stringArrayIndexOf(list, value, ignoreCase) > -1;
+ }
+
+ public static int stringArrayIndexOf(String[] list, String value,
+ boolean ignoreCase) {
+
+ for (int idx = 0; idx < list.length; idx++) {
+ if (list[idx].equals(value)) {
+ return idx;
+ }
+ if (ignoreCase && list[idx].equalsIgnoreCase(value)) {
+ return idx;
+ }
+ }
+ return -1;
+ }
+
  /**
   * Joins list into a comma-separated string with no enclosing characters
   * around the values; delegates to the two-argument overload.
   */
  public static String stringArrayToCSV(String[] list) {

    return stringArrayToCSV(list, "");
  }
+
+ public static String
+ stringArrayToCSV(String[] list, String encloseValuesWith) {
+
+ StringBuilder result = new StringBuilder((list.length * 2) - 1);
+ for (int idx = 0; idx < list.length; idx++) {
+ if (idx > 0) {
+ result.append(",");
+ }
+ result
+ .append(String.format("%1$s%2$s%1$s", encloseValuesWith, list[idx]));
+ }
+ return result.toString();
+ }
+
+// public static int stringListIndexOf(List<String> list, String value,
+// boolean ignoreCase) {
+//
+// for (int idx = 0; idx < list.size(); idx++) {
+// if (list.get(idx).equals(value)) {
+// return idx;
+// }
+// if (ignoreCase && list.get(idx).equalsIgnoreCase(value)) {
+// return idx;
+// }
+// }
+// return -1;
+// }
+//
+// public static void writeOutputFile(org.apache.hadoop.conf.Configuration conf,
+// String fileName, String fileText) {
+//
+// Path uniqueFileName = null;
+// try {
+// FileSystem fileSystem = FileSystem.get(conf);
+//
+// // NOTE: This code is not thread-safe.
+// // i.e. A race-condition could still cause this code to 'fail'.
+//
+// int suffix = 0;
+// String fileNameTemplate = fileName + "%s";
+// while (true) {
+// uniqueFileName =
+// new Path(getOutputDirectory(conf), String.format(fileNameTemplate,
+// suffix == 0 ? "" : String.format(" (%d)", suffix)));
+// if (!fileSystem.exists(uniqueFileName)) {
+// break;
+// }
+// suffix++;
+// }
+//
+// FSDataOutputStream outputStream =
+// fileSystem.create(uniqueFileName, false);
+// if (fileText != null) {
+// outputStream.writeBytes(fileText);
+// }
+// outputStream.flush();
+// outputStream.close();
+// } catch (IOException ex) {
+// LOG.error(String.format("Error attempting to write the file %s\n" + "%s",
+// (uniqueFileName == null ? "null" : uniqueFileName.toUri()),
+// getFullExceptionMessage(ex)));
+// }
+// }
+//
+ /**
+ * Class to wrap details about Oracle connection string.
+ */
+ public static class JdbcOracleThinConnection {
+ private String host;
+ private int port;
+ private String sid;
+ private String service;
+ private String tnsName;
+
+ public JdbcOracleThinConnection(String host, int port, String sid,
+ String service, String tnsName) {
+ this.host = host;
+ this.port = port;
+ this.sid = sid;
+ this.service = service;
+ this.tnsName = tnsName;
+ }
+
+ @Override
+ public String toString() {
+ // Use tnsName if it is available
+ if (this.tnsName != null && !this.tnsName.isEmpty()) {
+ return String.format("jdbc:oracle:thin:@%s", tnsName);
+ }
+
+ // Use the SID if it's available...
+ if (this.sid != null && !this.sid.isEmpty()) {
+ return String.format("jdbc:oracle:thin:@%s:%d:%s", this.host,
+ this.port, this.sid);
+ }
+
+ // Otherwise, use the SERVICE. Note that the service is prefixed by "/",
+ // not by ":"...
+ if (this.service != null && !this.service.isEmpty()) {
+ return String.format("jdbc:oracle:thin:@%s:%d/%s", this.host,
+ this.port, this.service);
+ }
+
+ throw new RuntimeException(
+ "Unable to generate a JDBC URL, as no TNS name, SID or SERVICE "
+ + "has been provided.");
+
+ }
+
+ public String getHost() {
+ return host;
+ }
+
+ public int getPort() {
+ return port;
+ }
+
+ public String getSid() {
+ return sid;
+ }
+
+ public String getService() {
+ return service;
+ }
+
+ public String getTnsName() {
+ return tnsName;
+ }
+ }
+
  /**
   * Thrown if the Oracle connection string cannot be parsed.
   */
  public static class JdbcOracleThinConnectionParsingError extends Exception {

    private static final long serialVersionUID = 1559860600099354233L;

    /** @param message description of why the connection string was rejected */
    public JdbcOracleThinConnectionParsingError(String message) {

      super(message);
    }

    /**
     * @param message description of why the connection string was rejected
     * @param cause the underlying failure
     */
    public JdbcOracleThinConnectionParsingError(String message,
        Throwable cause) {

      super(message, cause);
    }

    /** @param cause the underlying failure */
    public JdbcOracleThinConnectionParsingError(Throwable cause) {

      super(cause);
    }
  }
+
+// public static String getOracleServiceName(
+// org.apache.hadoop.conf.Configuration conf) {
+//
+// return conf.get(OraOopConstants.ORAOOP_ORACLE_RAC_SERVICE_NAME, "");
+// }
+//
+ public static String generateOracleSidJdbcUrl(String hostName, int port,
+ String sid) {
+
+ return String.format("jdbc:oracle:thin:@(DESCRIPTION=" + "(ADDRESS_LIST="
+ + "(ADDRESS=(PROTOCOL=TCP)(HOST=%s)(PORT=%d))" + ")"
+ + "(CONNECT_DATA=(SERVER=DEDICATED)(SID=%s))" + ")", hostName, port,
+ sid);
+ }
+
+ public static String generateOracleServiceNameJdbcUrl(String hostName,
+ int port, String serviceName) {
+
+ return String.format("jdbc:oracle:thin:@(DESCRIPTION=" + "(ADDRESS_LIST="
+ + "(ADDRESS=(PROTOCOL=TCP)(HOST=%s)(PORT=%d))" + ")"
+ + "(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=%s))" + ")", hostName,
+ port, serviceName);
+ }
+
+ public static String generateOracleTnsNameJdbcUrl(String tnsName) {
+ return String.format("jdbc:oracle:thin:@%s", tnsName);
+ }
+
+ public static String getMapperJdbcUrlPropertyName(int mapperId) {
+ return String.format("oraoop.mapper.jdbc.url.%d", mapperId);
+ }
+//
+// public static final String SQOOP_JOB_TYPE = "oraoop.sqoop.job.type";
+//
+// public static void rememberSqoopJobType(OraOopConstants.Sqoop.Tool jobType,
+// org.apache.hadoop.conf.Configuration conf) {
+//
+// conf.set(SQOOP_JOB_TYPE, jobType.name());
+// }
+//
+// public static OraOopConstants.Sqoop.Tool recallSqoopJobType(
+// org.apache.hadoop.conf.Configuration conf) {
+//
+// String jobType = conf.get(SQOOP_JOB_TYPE);
+// if (jobType == null || jobType.isEmpty()) {
+// throw new RuntimeException(
+// "RecallSqoopJobType() cannot be called unless RememberSqoopJobType() "
+// + "has been used.");
+// }
+//
+// OraOopConstants.Sqoop.Tool result =
+// OraOopConstants.Sqoop.Tool.valueOf(jobType);
+// return result;
+// }
+
+ public static boolean omitLobAndLongColumnsDuringImport(
+ FromJobConfig jobConfig) {
+ return BooleanUtils.isTrue(jobConfig.omitLobColumns);
+ }
+
+// public static boolean oracleSessionHasBeenKilled(Exception exception) {
+//
+// Throwable ex = exception;
+//
+// while (ex != null) {
+// if (ex instanceof SQLException
+// && ((SQLException) ex).getErrorCode() == 28) { // ORA-00028: your
+// // session has been
+// // killed
+// return true;
+// }
+//
+// ex = ex.getCause();
+// }
+//
+// return false;
+// }
+//
+ private static String
+ formatTimestampForOracleObjectName(Object oracleDateTime) {
+
+ // NOTE: Update decodeTimestampFromOracleObjectName() if you modify this
+ // method.
+
+ String jobTimeStr =
+ OracleQueries.oraDATEToString(oracleDateTime,
+ OracleJdbcConnectorConstants.
+ ORACLE_OBJECT_NAME_DATE_TO_STRING_FORMAT_STRING);
+
+ return jobTimeStr;
+
+ // E.g. 20101028_151000 (15 characters)
+ }
+
+ private static Object decodeTimestampFromOracleObjectName(
+ String oracleObjectNameTimestampFragment) {
+
+ String dateString = oracleObjectNameTimestampFragment;
+ String dateFormatString = OracleJdbcConnectorConstants.
+ ORACLE_OBJECT_NAME_DATE_TO_STRING_FORMAT_STRING;
+
+ // return oracle.sql.DATE.fromText(oracleObjectNameTimestampFragment
+ // ,OraOopConstants.ORACLE_OBJECT_NAME_DATE_TO_STRING_FORMAT_STRING
+ // ,null);
+
+ /*
+ * Unfortunately, we don't seem to be able to reliably decode strings into
+ * DATE objects using Oracle. For example, the following string will cause
+ * Oracle to throw an "Invalid Oracle date" exception, due to the time
+ * portion starting with a zero...
+ * oracle.sql.DATE.fromText("20101123 091554", "yyyymmdd hh24miss", null);
+ *
+ * Therefore, we need to manually deconstruct the date string and insert
+ * some colons into the time so that Oracle can decode the string. (This is
+ * therefore an Oracle bug we're working around.)
+ */
+
+ try {
+ String year = oracleObjectNameTimestampFragment.substring(0, 4);
+ String month = oracleObjectNameTimestampFragment.substring(4, 6);
+ String day = oracleObjectNameTimestampFragment.substring(6, 8);
+ String hour = oracleObjectNameTimestampFragment.substring(9, 11);
+ String minute = oracleObjectNameTimestampFragment.substring(11, 13);
+ String second = oracleObjectNameTimestampFragment.substring(13, 15);
+ dateString =
+ String.format("%s/%s/%s %s:%s:%s", year, month, day, hour, minute,
+ second);
+ dateFormatString = "yyyy/mm/dd hh24:mi:ss";
+
+ return OracleQueries.oraDATEFromString(
+ dateString, dateFormatString);
+ } catch (Exception ex) {
+ LOG.debug(String.format(
+ "%s could not convert the string \"%s\" into a DATE via the format "
+ + "string \"%s\".\n" + "The error encountered was:\n%s",
+ getCurrentMethodName(), dateString, dateFormatString,
+ getFullExceptionMessage(ex)));
+
+ return null;
+ }
+ }
+
+ public static String createExportTablePartitionNameFromOracleTimestamp(
+ Object oracleDateTime) {
+
+ // Partition name can be up to 30 characters long and must start with a
+ // letter...
+ return OracleJdbcConnectorConstants.EXPORT_TABLE_PARTITION_NAME_PREFIX
+ + formatTimestampForOracleObjectName(oracleDateTime);
+
+ // E.g. ORAOOP_20101028_151000 (22 characters)
+ }
+
+ public static String createExportTableNamePrefixFromOracleTimestamp(
+ Object oracleDateTime) {
+
+ // NOTE: Alter decodeExportTableNamePrefix() if you modify this method.
+
+ // Table name can be 30 characters long and must start with a letter...
+ return OracleJdbcConnectorConstants.EXPORT_MAPPER_TABLE_NAME_PREFIX
+ + formatTimestampForOracleObjectName(oracleDateTime);
+ // G1.ORAOOP_20101028_152500 (22 characters) (This is just the prefix,
+ // append "_3" for mapper 4)
+ }
+
+ public static Object decodeExportTableNamePrefix(String tableNamePrefix) {
+
+ if (tableNamePrefix == null || tableNamePrefix.isEmpty()) {
+ return null;
+ }
+
+ if (!tableNamePrefix.startsWith(OracleJdbcConnectorConstants.
+ EXPORT_MAPPER_TABLE_NAME_PREFIX)) {
+ return null;
+ }
+
+ String formattedTimestamp =
+ tableNamePrefix.substring(OracleJdbcConnectorConstants.
+ EXPORT_MAPPER_TABLE_NAME_PREFIX.length(),
+ tableNamePrefix.length());
+
+ return decodeTimestampFromOracleObjectName(formattedTimestamp);
+ }
+
+ private static boolean userWantsToCreateExportTableFromTemplate(
+ ToJobConfig jobConfig) {
+ String exportTableTemplate = jobConfig.templateTable;
+ if (StringUtils.isEmpty(exportTableTemplate)) {
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ public static boolean enableOracleParallelProcessingDuringExport(
+ ToJobConfig jobConfig) {
+ return BooleanUtils.isTrue(jobConfig.parallel);
+ }
+
+ public static boolean userWantsToCreatePartitionedExportTableFromTemplate(
+ ToJobConfig jobConfig) {
+ return userWantsToCreateExportTableFromTemplate(jobConfig)
+ && BooleanUtils.isTrue(jobConfig.partitioned);
+ }
+
+ public static boolean userWantsToCreateNonPartitionedExportTableFromTemplate(
+ ToJobConfig jobConfig) {
+ return userWantsToCreateExportTableFromTemplate(jobConfig)
+ && !BooleanUtils.isTrue(jobConfig.partitioned);
+ }
+
+ public static String generateExportTableSubPartitionName(int mapperId,
+ Object sysDateTime) {
+
+ String partitionName =
+ createExportTablePartitionNameFromOracleTimestamp(sysDateTime);
+
+ String subPartitionName = String.format("%s_MAP_%d" // <- Should allow for
+ // 1,000 mappers before
+ // exceeding 30
+ // characters
+ , partitionName // <- Partition name is 22 characters
+ , mapperId);
+
+ // Check the length of the name...
+ if (subPartitionName.length()
+ > OracleJdbcConnectorConstants.Oracle.MAX_IDENTIFIER_LENGTH) {
+ throw new RuntimeException(
+ String
+ .format(
+ "The generated Oracle subpartition name \"%s\" is longer "
+ + "than %d characters.",
+ subPartitionName,
+ OracleJdbcConnectorConstants.Oracle.MAX_IDENTIFIER_LENGTH));
+ }
+
+ return subPartitionName;
+ }
+
+ public static String[] generateExportTableSubPartitionNames(int numMappers,
+ Object sysDateTime) {
+
+ String[] result = new String[numMappers];
+ for (int idx = 0; idx < numMappers; idx++) {
+ result[idx] = generateExportTableSubPartitionName(idx, sysDateTime);
+ }
+
+ return result;
+ }
+
+ public static OracleTable generateExportTableMapperTableName(int mapperId,
+ Object sysDateTime, String schema) {
+ //mapperId: should allow 10,000,000 mappers before it exceeds 30 characters.
+ return generateExportTableMapperTableName(Integer.toString(mapperId)
+ , sysDateTime, schema);
+ }
+
+ public static OracleTable generateExportTableMapperTableName(
+ String mapperSuffix, Object sysDateTime, String schema) {
+
+ // NOTE: Update decodeExportTableMapperTableName() if you alter this method.
+
+ // Generate a (22 character) prefix to use for the N tables that need to be
+ // created for the N mappers to insert into...
+ String mapperTableNamePrefix =
+ createExportTableNamePrefixFromOracleTimestamp(sysDateTime);
+
+ // Generate the name...
+ String tableName = String.format("%s_%s", mapperTableNamePrefix // <- 22
+ // chars
+ , mapperSuffix);
+
+ // Check the length of the name...
+ if (tableName.length() > OracleJdbcConnectorConstants.
+ Oracle.MAX_IDENTIFIER_LENGTH) {
+ throw new RuntimeException(
+ String
+ .format(
+ "The generated Oracle table name \"%s\" is longer than "
+ + "%d characters.",
+ tableName, OracleJdbcConnectorConstants.
+ Oracle.MAX_IDENTIFIER_LENGTH));
+ }
+
+ return new OracleTable(schema, tableName);
+ }
+
  /**
   * Holds the two pieces decoded from an export mapper table's name: the
   * mapper identifier and the timestamp embedded in the table-name prefix.
   */
  public static class DecodedExportMapperTableName {
    private String mapperId; // <- This is not an int, because it might be "CHG"
    // in the case of a "changes-table".
    private Object tableDateTime; // Oracle DATE decoded from the name prefix.

    /** @return the mapper identifier fragment (numeric, or e.g. "CHG"). */
    public String getMapperId() {
      return mapperId;
    }

    public void setMapperId(String newMapperId) {
      this.mapperId = newMapperId;
    }

    /** @return the Oracle DATE decoded from the table-name prefix. */
    public Object getTableDateTime() {
      return tableDateTime;
    }

    public void setTableDateTime(Object newTableDateTime) {
      this.tableDateTime = newTableDateTime;
    }
  }
+
+ public static DecodedExportMapperTableName decodeExportTableMapperTableName(
+ OracleTable oracleTable) {
+
+ DecodedExportMapperTableName result = null;
+ try {
+ int lastUnderScoreIndex = oracleTable.getName().lastIndexOf("_");
+ if (lastUnderScoreIndex == -1) {
+ return result;
+ }
+
+ String dateFragment =
+ oracleTable.getName().substring(0, lastUnderScoreIndex);
+ String mapperIdFragment =
+ oracleTable.getName().substring(lastUnderScoreIndex + 1,
+ oracleTable.getName().length());
+
+ Object sysDateTime = decodeExportTableNamePrefix(dateFragment);
+ if (sysDateTime != null) {
+ result = new DecodedExportMapperTableName();
+ result.setTableDateTime(sysDateTime);
+ result.setMapperId(mapperIdFragment);
+ }
+ } catch (Exception ex) {
+ LOG.debug(
+ String.format(
+ "Error when attempting to decode the export mapper-table name \"%s\".",
+ oracleTable.toString()), ex);
+ }
+ return result;
+ }
+
+// public static void rememberOracleDateTime(
+// org.apache.hadoop.conf.Configuration conf, String propertyName,
+// String dateTime) {
+// conf.set(propertyName, dateTime);
+// }
+//
  /**
   * Recalls the job's SYSDATE value previously stored in the context under
   * SQOOP_ORACLE_JOB_SYSDATE and converts it back into an Oracle DATE.
   *
   * @throws RuntimeException if the property was never stored
   */
  public static Object recallOracleDateTime(ImmutableContext context) {

    String dateTimeStr = context.getString(
        OracleJdbcConnectorConstants.SQOOP_ORACLE_JOB_SYSDATE);
    if (dateTimeStr == null || dateTimeStr.isEmpty()) {
      throw new RuntimeException(String.format(
          "Unable to recall the value of the property \"%s\".",
          OracleJdbcConnectorConstants.SQOOP_ORACLE_JOB_SYSDATE));
    }

    // The format string must match the one used when the value was stored.
    return OracleQueries.oraDATEFromString(dateTimeStr,
        "yyyy-mm-dd hh24:mi:ss");
  }
+
+ public static UpdateMode getExportUpdateMode(ToJobConfig jobConfig) {
+ UpdateMode updateMode = UpdateMode.Update;
+
+ if (BooleanUtils.isTrue(jobConfig.updateMerge)) {
+ updateMode = UpdateMode.Merge;
+ }
+
+ return updateMode;
+ }
+
+ public static InsertMode getExportInsertMode(ToJobConfig jobConfig,
+ ImmutableContext context) {
+
+ InsertMode result = InsertMode.DirectInsert;
+
+ if (OracleUtilities
+ .userWantsToCreatePartitionedExportTableFromTemplate(jobConfig)
+ || context.getBoolean(OracleJdbcConnectorConstants.
+ EXPORT_TABLE_HAS_SQOOP_PARTITIONS, false)) {
+ result = InsertMode.ExchangePartition;
+ }
+
+ return result;
+ }
+
+// public static String getJavaClassPath() {
+//
+// return System.getProperty("java.class.path");
+// }
+
+ public static String replaceAll(String inputString, String textToReplace,
+ String replaceWith) {
+
+ String result = inputString.replaceAll(textToReplace, replaceWith);
+ if (!result.equals(inputString)) {
+ result = replaceAll(result, textToReplace, replaceWith);
+ }
+
+ return result;
+ }
+
+ public static String getTemporaryTableStorageClause(ToJobConfig jobConfig) {
+
+ String result = jobConfig.temporaryStorageClause;
+ if (result == null) {
+ result = "";
+ }
+ return result;
+ }
+
+ public static String getExportTableStorageClause(ToJobConfig jobConfig) {
+
+ String result = jobConfig.storageClause;
+ if (result == null) {
+ result = "";
+ }
+ return result;
+ }
+
+ public static String[] getExportUpdateKeyColumnNames(ToJobConfig jobConfig) {
+
+ if (jobConfig.updateKey == null) {
+ // This must be an "insert-export" if no --update-key has been specified!
+ return new String[0];
+ }
+
+ String[] columnNames = jobConfig.updateKey.split(",");
+ for (int idx = 0; idx < columnNames.length; idx++) {
+ columnNames[idx] = columnNames[idx].trim();
+ if (!columnNames[idx].startsWith("\"")) {
+ columnNames[idx] = columnNames[idx].toUpperCase();
+ }
+
+ }
+ return columnNames;
+ }
+//
+ /**
+ * Splits a string separated by commas - the elements can be optionally
+ * enclosed in quotes - this allows the elements to have commas in them.
+ *
+ * @param value
+ * The String to be split
+ * @return A list of values
+ */
+ public static List<String> splitStringList(String value) {
+ List<String> result = new ArrayList<String>();
+ if (value != null && !value.isEmpty()) {
+ Pattern pattern = Pattern.compile("([^\",]*|\"[^\"]*\")(,|$)");
+ Matcher matcher = pattern.matcher(value);
+ while (matcher.find()) {
+ if (matcher.group(1) != null && !matcher.group(1).isEmpty()) {
+ result.add(matcher.group(1));
+ }
+ }
+ }
+ return result;
+ }
+
+ /**
+ * Splits a string list separated by commas. If the element is not surrounded
+ * by quotes it will be return in upper case. If the element is enclosed in
+ * quotes it will be returned in the same case and special characters will be
+ * retained.
+ *
+ * @param value
+ * The String to be split
+ * @return A list of values
+ */
+ public static List<String> splitOracleStringList(String value) {
+ List<String> result = new ArrayList<String>();
+ List<String> splitValue = splitStringList(value);
+ Pattern pattern = Pattern.compile("(\")([^\"]*)(\")");
+ for (String element : splitValue) {
+ Matcher matcher = pattern.matcher(element);
+ if (matcher.find()) {
+ result.add(matcher.group(2));
+ } else {
+ result.add(element.toUpperCase());
+ }
+ }
+ return result;
+ }
+
+ public static OracleJdbcConnectorConstants.AppendValuesHintUsage
+ getOracleAppendValuesHintUsage(ToJobConfig jobConfig) {
+
+ String strUsage = jobConfig.appendValuesHint;
+ if (strUsage == null) {
+ return OracleJdbcConnectorConstants.AppendValuesHintUsage.AUTO;
+ }
+
+ OracleJdbcConnectorConstants.AppendValuesHintUsage result;
+
+ try {
+ strUsage = strUsage.toUpperCase().trim();
+ result = OracleJdbcConnectorConstants.
+ AppendValuesHintUsage.valueOf(strUsage);
+ } catch (IllegalArgumentException ex) {
+ result = OracleJdbcConnectorConstants.AppendValuesHintUsage.AUTO;
+
+ String errorMsg =
+ String
+ .format(
+ "An invalid value of \"%s\" was specified for the "
+ + "append values hint configuration property value.\n"
+ + "\tValid values are: %s\n"
+ + "\tThe default value of %s will be used.", strUsage,
+ getOraOopOracleAppendValuesHintUsageValues(),
+ OracleJdbcConnectorConstants.
+ AppendValuesHintUsage.AUTO.name());
+ LOG.error(errorMsg);
+ }
+
+ return result;
+ }
+
+ private static String getOraOopOracleAppendValuesHintUsageValues() {
+
+ OracleJdbcConnectorConstants.AppendValuesHintUsage[] values =
+ OracleJdbcConnectorConstants.AppendValuesHintUsage.values();
+
+ StringBuilder result = new StringBuilder((2 * values.length) - 1); // <-
+ // Include
+ // capacity
+ // for
+ // commas
+
+ for (int idx = 0; idx < values.length; idx++) {
+ OracleJdbcConnectorConstants.AppendValuesHintUsage value = values[idx];
+ if (idx > 0) {
+ result.append(" or ");
+ }
+ result.append(value.name());
+ }
+ return result.toString();
+ }
+
+ public static String getImportHint(FromJobConfig jobConfig) {
+ String result = null;
+ result = jobConfig.queryHint;
+ if (result == null || result.isEmpty()) {
+ result = "/*+ NO_INDEX(t) */ ";
+ } else if(result.trim().isEmpty()) {
+ result = "";
+ } else {
+ result = String.format(
+ OracleJdbcConnectorConstants.Oracle.HINT_SYNTAX, result);
+ }
+ return result;
+ }
+
+// public static void appendJavaSecurityEgd(Configuration conf) {
+// String mapredJavaOpts = conf.get("mapred.child.java.opts");
+// if (mapredJavaOpts == null
+// || !mapredJavaOpts.contains("-Djava.security.egd")) {
+// StringBuilder newMapredJavaOpts =
+// new StringBuilder("-Djava.security.egd=file:///dev/urandom");
+// if (mapredJavaOpts != null && !mapredJavaOpts.isEmpty()) {
+// newMapredJavaOpts.append(" ").append(mapredJavaOpts);
+// }
+// String newMapredJavaOptsString = newMapredJavaOpts.toString();
+// conf.set("mapred.child.java.opts", newMapredJavaOptsString);
+// LOG.debug("Updated mapred.child.java.opts from \"" + mapredJavaOpts
+// + "\" to \"" + newMapredJavaOptsString + "\"");
+// }
+// }
+
+ public static void checkJavaSecurityEgd() {
+ String javaSecurityEgd = System.getProperty("java.security.egd");
+ if (!"file:///dev/urandom".equals(javaSecurityEgd)) {
+ LOG.warn("System property java.security.egd is not set to "
+ + "file:///dev/urandom - Oracle connections may time out.");
+ }
+ }
+
  // Records the Oracle session user in the static currentSessionUser field
  // for later use by other utilities in this class.
  public static void setCurrentSessionUser(String user) {
    currentSessionUser = user;
  }
+
+ public static ExportTableUpdateTechnique getExportTableUpdateTechnique(
+ ImmutableContext context, UpdateMode updateMode) {
+
+ ExportTableUpdateTechnique result;
+
+ if (context.getBoolean(OracleJdbcConnectorConstants.
+ EXPORT_TABLE_HAS_SQOOP_PARTITIONS, false)) {
+ switch (updateMode) {
+
+ case Update:
+ result = ExportTableUpdateTechnique.ReInsertUpdatedRows;
+ break;
+
+ case Merge:
+ result = ExportTableUpdateTechnique.ReInsertUpdatedRowsAndNewRows;
+ break;
+
+ default:
+ throw new RuntimeException(String.format(
+ "Update %s to cater for the updateMode \"%s\".",
+ OracleUtilities.getCurrentMethodName(), updateMode
+ .toString()));
+ }
+ } else {
+ switch (updateMode) {
+
+ case Update:
+ result = ExportTableUpdateTechnique.UpdateSql;
+ break;
+
+ case Merge:
+ result = ExportTableUpdateTechnique.MergeSql;
+ break;
+
+ default:
+ throw new RuntimeException(String.format(
+ "Update %s to cater for the updateMode \"%s\".",
+ OracleUtilities.getCurrentMethodName(), updateMode
+ .toString()));
+ }
+ }
+
+ return result;
+ }
+}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleVersion.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleVersion.java b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleVersion.java
new file mode 100644
index 0000000..16e0757
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleVersion.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sqoop.connector.jdbc.oracle.util;
+
/**
 * Represents an Oracle version and allows comparing of versions.
 * Instances are immutable value objects holding the four numeric version
 * components plus the full banner string reported by the database.
 */
public class OracleVersion {
  private final int major;
  private final int minor;
  private final int version;
  private final int patch;
  private final String banner;

  /**
   * @param major   first component of the version number
   * @param minor   second component of the version number
   * @param version third component of the version number
   * @param patch   fourth component of the version number
   * @param banner  full version banner string reported by the database
   */
  public OracleVersion(int major, int minor, int version, int patch,
      String banner) {
    this.major = major;
    this.minor = minor;
    this.version = version;
    this.patch = patch;
    this.banner = banner;
  }

  /**
   * Compares this version against the given four-part version number,
   * component by component (major, minor, version, patch).
   *
   * @return true if this version is greater than or equal to the other
   *         version
   */
  public boolean isGreaterThanOrEqualTo(int otherMajor, int otherMinor,
      int otherVersion, int otherPatch) {
    // Lexicographic comparison: the first differing component decides.
    if (this.major != otherMajor) {
      return this.major > otherMajor;
    }
    if (this.minor != otherMinor) {
      return this.minor > otherMinor;
    }
    if (this.version != otherVersion) {
      return this.version > otherVersion;
    }
    return this.patch >= otherPatch;
  }

  public int getMajor() {
    return major;
  }

  public int getMinor() {
    return minor;
  }

  public int getVersion() {
    return version;
  }

  public int getPatch() {
    return patch;
  }

  public String getBanner() {
    return banner;
  }
}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/resources/oracle-jdbc-connector-config.properties
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/resources/oracle-jdbc-connector-config.properties b/connector/connector-oracle-jdbc/src/main/resources/oracle-jdbc-connector-config.properties
new file mode 100644
index 0000000..e0b9951
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/resources/oracle-jdbc-connector-config.properties
@@ -0,0 +1,136 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Oracle JDBC Connector Resources
+############################
+# Connection Config
+#
+connectionConfig.label = Link configuration
+connectionConfig.help = You must supply the information requested in order to \
+ create a link object.
+
+# connect string
+connectionConfig.connectionString.label = JDBC Connection String
+connectionConfig.connectionString.help = Enter the value of JDBC connection string to be \
+ used by this connector for creating database connections.
+
+# username string
+connectionConfig.username.label = Username
+connectionConfig.username.help = Enter the username to be used for connecting to the \
+ database.
+
+# password string
+connectionConfig.password.label = Password
+connectionConfig.password.help = Enter the password to be used for connecting to the \
+ database.
+
+# jdbc properties
+connectionConfig.jdbcProperties.label = JDBC Connection Properties
+connectionConfig.jdbcProperties.help = Enter any JDBC properties that should be \
+ supplied during the creation of connection.
+
+connectionConfig.timeZone.label = timeZone
+connectionConfig.timeZone.help = timeZone
+
+connectionConfig.actionName.label = actionName
+connectionConfig.actionName.help = actionName
+
+connectionConfig.fetchSize.label = fetchSize
+connectionConfig.fetchSize.help = fetchSize
+
+connectionConfig.initializationStatements.label = initializationStatements
+connectionConfig.initializationStatements.help = initializationStatements
+
+connectionConfig.jdbcUrlVerbatim.label = jdbcUrlVerbatim
+connectionConfig.jdbcUrlVerbatim.help = jdbcUrlVerbatim
+
+connectionConfig.racServiceName.label = racServiceName
+connectionConfig.racServiceName.help = racServiceName
+
+# ToJob Config
+#
+toJobConfig.label = To database configuration
+toJobConfig.help = You must supply the information requested in order to create \
+ the TO part of the job object.
+
+# To table name
+toJobConfig.tableName.label = Table name
+toJobConfig.tableName.help = Table name to write data into
+
+toJobConfig.templateTable.label = templateTable
+toJobConfig.templateTable.help = templateTable
+
+toJobConfig.partitioned.label = partitioned
+toJobConfig.partitioned.help = partitioned
+
+toJobConfig.nologging.label = nologging
+toJobConfig.nologging.help = nologging
+
+toJobConfig.updateKey.label = updateKey
+toJobConfig.updateKey.help = updateKey
+
+toJobConfig.updateMerge.label = updateMerge
+toJobConfig.updateMerge.help = updateMerge
+
+toJobConfig.dropTableIfExists.label = dropTableIfExists
+toJobConfig.dropTableIfExists.help = dropTableIfExists
+
+toJobConfig.storageClause.label = storageClause
+toJobConfig.storageClause.help = storageClause
+
+toJobConfig.temporaryStorageClause.label = temporaryStorageClause
+toJobConfig.temporaryStorageClause.help = temporaryStorageClause
+
+toJobConfig.appendValuesHint.label = appendValuesHint
+toJobConfig.appendValuesHint.help = appendValuesHint
+
+toJobConfig.parallel.label = parallel
+toJobConfig.parallel.help = parallel
+
+# FromJob Config
+#
+fromJobConfig.label = From database configuration
+fromJobConfig.help = You must supply the information requested in order to create \
+ the FROM part of the job object.
+
+fromJobConfig.tableName.label = tableName
+fromJobConfig.tableName.help = tableName
+
+fromJobConfig.consistentRead.label = consistentRead
+fromJobConfig.consistentRead.help = consistentRead
+
+fromJobConfig.consistentReadScn.label = consistentReadScn
+fromJobConfig.consistentReadScn.help = consistentReadScn
+
+fromJobConfig.partitionList.label = partitionList
+fromJobConfig.partitionList.help = partitionList
+
+fromJobConfig.dataChunkMethod.label = dataChunkMethod
+fromJobConfig.dataChunkMethod.help = dataChunkMethod
+
+fromJobConfig.dataChunkAllocationMethod.label = dataChunkAllocationMethod
+fromJobConfig.dataChunkAllocationMethod.help = dataChunkAllocationMethod
+
+fromJobConfig.whereClauseLocation.label = whereClauseLocation
+fromJobConfig.whereClauseLocation.help = whereClauseLocation
+
+fromJobConfig.omitLobColumns.label = omitLobColumns
+fromJobConfig.omitLobColumns.help = omitLobColumns
+
+fromJobConfig.queryHint.label = queryHint
+fromJobConfig.queryHint.help = queryHint
+
+fromJobConfig.conditions.label = conditions
+fromJobConfig.conditions.help = conditions
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/resources/sqoopconnector.properties
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/resources/sqoopconnector.properties b/connector/connector-oracle-jdbc/src/main/resources/sqoopconnector.properties
new file mode 100644
index 0000000..5413480
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/resources/sqoopconnector.properties
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Oracle JDBC Connector Properties
+org.apache.sqoop.connector.class = org.apache.sqoop.connector.jdbc.oracle.OracleJdbcConnector
+org.apache.sqoop.connector.name = oracle-jdbc-connector
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/test/java/org/apache/sqoop/connector/jdbc/oracle/TestOracleJdbcPartitioner.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/test/java/org/apache/sqoop/connector/jdbc/oracle/TestOracleJdbcPartitioner.java b/connector/connector-oracle-jdbc/src/test/java/org/apache/sqoop/connector/jdbc/oracle/TestOracleJdbcPartitioner.java
new file mode 100644
index 0000000..34b1fb9
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/test/java/org/apache/sqoop/connector/jdbc/oracle/TestOracleJdbcPartitioner.java
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sqoop.connector.jdbc.oracle;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleDataChunk;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleDataChunkExtent;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleUtilities.OracleBlockToSplitAllocationMethod;
+import org.apache.sqoop.job.etl.Partition;
+import org.testng.Assert;
+import org.testng.annotations.Test;
+
+/**
+ * Unit tests for OracleJdbcPartitioner.
+ */
+public class TestOracleJdbcPartitioner {
+
+ @Test
+ public void testgroupTableDataChunksIntoSplits() {
+
+ List<OracleDataChunk> dataChunks =
+ new ArrayList<OracleDataChunk>();
+
+ int startBlockNumber = 1;
+ for (int idx = 0; idx < 241; idx++) {
+ OracleDataChunk dataChunk =
+ new OracleDataChunkExtent("23480", 666, 1, startBlockNumber,
+ startBlockNumber + 8);
+ startBlockNumber += 8;
+ dataChunks.add(dataChunk);
+ }
+
+ List<Partition> splits =
+ OracleJdbcPartitioner.groupTableDataChunksIntoSplits(dataChunks, 32,
+ OracleBlockToSplitAllocationMethod.SEQUENTIAL);
+
+ int highestNumberOfDataChunksAllocatedToASplit = 0;
+ int lowestNumberOfDataChunksAllocatedToASplit = Integer.MAX_VALUE;
+
+ // Check that all splits have data-chunks assigned to them...
+ for (Partition split : splits) {
+ int dataChunksAllocatedToThisSplit =
+ ((OracleJdbcPartition) split).getNumberOfDataChunks();
+ highestNumberOfDataChunksAllocatedToASplit =
+ Math.max(highestNumberOfDataChunksAllocatedToASplit,
+ dataChunksAllocatedToThisSplit);
+ lowestNumberOfDataChunksAllocatedToASplit =
+ Math.min(lowestNumberOfDataChunksAllocatedToASplit,
+ dataChunksAllocatedToThisSplit);
+ }
+
+ if (lowestNumberOfDataChunksAllocatedToASplit == 0) {
+ Assert
+ .fail("There is a split that has not had any "
+ + "data-chunks allocated to it.");
+ }
+
+ // Check that the split with the least data-chunks has at least
+ // 75% of the number of data-chunks of the split with the most
+ // data-chunks...
+ double minExpectedWorkloadRatio = 0.75;
+ double actualWorkloadRatio =
+ (double) lowestNumberOfDataChunksAllocatedToASplit
+ / highestNumberOfDataChunksAllocatedToASplit;
+ if (actualWorkloadRatio < minExpectedWorkloadRatio) {
+ Assert.fail(String.format(
+ "There is too much difference in the amount of work assigned "
+ + "to the 'smallest' split and the 'largest' split. "
+ + "The split with the least work should have at least %s "
+ + "of the workload of the 'largest' split, but it actually "
+ + "only has %s of the workload of the 'largest' split.",
+ minExpectedWorkloadRatio, actualWorkloadRatio));
+ }
+ }
+
+ @Test
+ public void testLongBlockId() {
+ OracleDataChunkExtent chunk =
+ new OracleDataChunkExtent("1", 100, 1, 2147483648L, 4294967295L);
+ String whereClause = chunk.getWhereClause();
+ Assert.assertNotNull(whereClause);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/test/java/org/apache/sqoop/connector/jdbc/oracle/TestOracleJdbcUrl.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/test/java/org/apache/sqoop/connector/jdbc/oracle/TestOracleJdbcUrl.java b/connector/connector-oracle-jdbc/src/test/java/org/apache/sqoop/connector/jdbc/oracle/TestOracleJdbcUrl.java
new file mode 100644
index 0000000..6764955
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/test/java/org/apache/sqoop/connector/jdbc/oracle/TestOracleJdbcUrl.java
@@ -0,0 +1,249 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sqoop.connector.jdbc.oracle;
+
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleJdbcUrl;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleUtilities;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleUtilities.JdbcOracleThinConnectionParsingError;
+import org.testng.Assert;
+import org.testng.annotations.Test;
+
+/**
+ * Unit tests for OraOopJdbcUrl.
+ */
+public class TestOracleJdbcUrl {
+
+ @Test
+ public void testParseJdbcOracleThinConnectionString() {
+
+ OracleUtilities.JdbcOracleThinConnection actual;
+
+ // Null JDBC URL...
+ try {
+ actual = new OracleJdbcUrl(null).parseJdbcOracleThinConnectionString();
+ Assert.fail("An IllegalArgumentException should be been thrown.");
+ } catch (IllegalArgumentException ex) {
+ /* This is what we want to happen. */
+ } catch (JdbcOracleThinConnectionParsingError ex) {
+ Assert.fail("An IllegalArgumentException should be been thrown.");
+ }
+
+ // Empty JDBC URL...
+ try {
+ actual = new OracleJdbcUrl("").parseJdbcOracleThinConnectionString();
+ Assert.fail("An IllegalArgumentException should be been thrown.");
+ } catch (IllegalArgumentException ex) {
+ /* This is what we want to happen. */
+ } catch (JdbcOracleThinConnectionParsingError ex) {
+ Assert.fail("An IllegalArgumentException should be been thrown.");
+ }
+
+ // Incorrect number of fragments in the URL...
+ try {
+ actual =
+ new OracleJdbcUrl("jdbc:oracle:oci8:@dbname.domain")
+ .parseJdbcOracleThinConnectionString();
+ Assert.fail(
+ "A JdbcOracleThinConnectionParsingError should be been thrown.");
+ } catch (JdbcOracleThinConnectionParsingError ex) {
+ // This is what we want to happen.
+ Assert.assertTrue(
+ ex.getMessage()
+ .toLowerCase()
+ .contains("the oracle \"thin\" jdbc driver is not being used."),
+ "An exception should be thown that tells us there's an incorrect "
+ + "number of fragments in the JDBC URL.");
+ }
+
+ // Incorrect driver-type (i.e. not using the "thin" driver)...
+ try {
+ actual =
+ new OracleJdbcUrl(
+ "jdbc:oracle:loremipsum:@hostname.domain.com.au:port1521:dbsid")
+ .parseJdbcOracleThinConnectionString();
+ Assert.fail(
+ "A JdbcOracleThinConnectionParsingError should be been thrown.");
+ } catch (JdbcOracleThinConnectionParsingError ex) {
+ // This is what we want to happen.
+ Assert.assertTrue(
+ ex.getMessage().toLowerCase().contains(
+ "oracle \"thin\" jdbc driver is not being used"),
+ "An exception should be thown that refers to the fact that the thin "
+ + "JDBC driver is not being used.");
+
+ Assert.assertTrue(
+ ex.getMessage().toLowerCase().contains("loremipsum"),
+ "An exception should be thown that tells us which JDBC driver "
+ + "was specified.");
+
+ }
+
+ // Invalid JDBC URL (unparsable port number)...
+ try {
+ actual =
+ new OracleJdbcUrl(
+ "jdbc:oracle:thin:@hostname.domain.com.au:port1521:dbsid")
+ .parseJdbcOracleThinConnectionString();
+ Assert.fail(
+ "An JdbcOracleThinConnectionParsingError should be been thrown.");
+ } catch (JdbcOracleThinConnectionParsingError ex) {
+ Assert.assertTrue(
+ ex.getMessage().toLowerCase().contains("port1521"),
+ "The invalid port number should be included in the exception message.");
+ }
+
+ // Invalid JDBC URL (negative port number)...
+ try {
+ actual =
+ new OracleJdbcUrl(
+ "jdbc:oracle:thin:@hostname.domain.com.au:-1521:dbsid")
+ .parseJdbcOracleThinConnectionString();
+ Assert.fail(
+ "An JdbcOracleThinConnectionParsingError should be been thrown.");
+ } catch (JdbcOracleThinConnectionParsingError ex) {
+ Assert.assertTrue(
+ ex.getMessage().toLowerCase().contains("-1521"),
+ "The invalid port number should be included in the exception message.");
+ }
+
+ // Valid JDBC URL...
+ try {
+ actual =
+ new OracleJdbcUrl(
+ "JDBC:Oracle:tHiN:@hostname.domain.com.au:1521:dbsid")
+ .parseJdbcOracleThinConnectionString();
+ Assert.assertEquals("hostname.domain.com.au", actual.getHost());
+ Assert.assertEquals(1521, actual.getPort());
+ Assert.assertEquals("dbsid", actual.getSid());
+ } catch (JdbcOracleThinConnectionParsingError ex) {
+ Assert.fail(ex.getMessage());
+ }
+
+ // Valid JDBC URL...
+ try {
+ actual =
+ new OracleJdbcUrl(
+ " JDBC : Oracle : tHiN : @hostname.domain.com.au : 1529 : dbsid")
+ .parseJdbcOracleThinConnectionString();
+ Assert.assertEquals("hostname.domain.com.au", actual.getHost());
+ Assert.assertEquals(1529, actual.getPort());
+ Assert.assertEquals("dbsid", actual.getSid());
+ } catch (JdbcOracleThinConnectionParsingError ex) {
+ Assert.fail(ex.getMessage());
+ }
+
+ // Valid (sid-based) JDBC URL with parameters...
+ try {
+ actual =
+ new OracleJdbcUrl(
+ "jdbc:oracle:thin:@hostname:1521:dbsid?param1=loremipsum")
+ .parseJdbcOracleThinConnectionString();
+ Assert.assertEquals("hostname", actual.getHost());
+ Assert.assertEquals(1521, actual.getPort());
+ Assert.assertEquals("dbsid", actual.getSid());
+ Assert.assertEquals(null, actual.getService());
+ } catch (JdbcOracleThinConnectionParsingError ex) {
+ Assert.fail(ex.getMessage());
+ }
+
+ // Valid (service-based) JDBC URL...
+ try {
+ actual =
+ new OracleJdbcUrl(
+ "jdbc:oracle:thin:@hostname:1521/dbservice.dbdomain")
+ .parseJdbcOracleThinConnectionString();
+ Assert.assertEquals("hostname", actual.getHost());
+ Assert.assertEquals(1521, actual.getPort());
+ Assert.assertEquals(null, actual.getSid());
+ Assert.assertEquals("dbservice.dbdomain", actual.getService());
+ } catch (JdbcOracleThinConnectionParsingError ex) {
+ Assert.fail(ex.getMessage());
+ }
+
+ // Valid (service-based) JDBC URL with slashes...
+ try {
+ actual =
+ new OracleJdbcUrl(
+ "jdbc:oracle:thin:@//hostname:1521/dbservice.dbdomain")
+ .parseJdbcOracleThinConnectionString();
+ Assert.assertEquals("hostname", actual.getHost());
+ Assert.assertEquals(1521, actual.getPort());
+ Assert.assertEquals(null, actual.getSid());
+ Assert.assertEquals("dbservice.dbdomain", actual.getService());
+ } catch (JdbcOracleThinConnectionParsingError ex) {
+ Assert.fail(ex.getMessage());
+ }
+
+ // Valid (service-based) JDBC URL with parameters...
+ try {
+ actual = new OracleJdbcUrl(
+ "jdbc:oracle:thin:@hostname:1521/dbservice.dbdomain?param1=loremipsum")
+ .parseJdbcOracleThinConnectionString();
+ Assert.assertEquals("hostname", actual.getHost());
+ Assert.assertEquals(1521, actual.getPort());
+ Assert.assertEquals(null, actual.getSid());
+ Assert.assertEquals("dbservice.dbdomain", actual.getService());
+ } catch (JdbcOracleThinConnectionParsingError ex) {
+ Assert.fail(ex.getMessage());
+ }
+
+ // Valid (service-based) JDBC URL with slashes and parameters...
+ try {
+ actual = new OracleJdbcUrl(
+ "jdbc:oracle:thin:@//hostname:1521/dbservice.dbdomain?param1=loremipsum")
+ .parseJdbcOracleThinConnectionString();
+ Assert.assertEquals("hostname", actual.getHost());
+ Assert.assertEquals(1521, actual.getPort());
+ Assert.assertEquals(null, actual.getSid());
+ Assert.assertEquals("dbservice.dbdomain", actual.getService());
+ } catch (JdbcOracleThinConnectionParsingError ex) {
+ Assert.fail(ex.getMessage());
+ }
+ }
+
+ @Test
+ public void testGetConnectionUrl() {
+
+ String actual;
+
+ // Null JDBC URL...
+ try {
+ actual = new OracleJdbcUrl(null).getConnectionUrl();
+ Assert.fail("An IllegalArgumentException should be been thrown.");
+ } catch (IllegalArgumentException ex) {
+ /* This is what we want to happen. */
+ }
+
+ // Empty JDBC URL...
+ try {
+ actual = new OracleJdbcUrl("").getConnectionUrl();
+ Assert.fail("An IllegalArgumentException should be been thrown.");
+ } catch (IllegalArgumentException ex) {
+ /* This is what we want to happen. */
+ }
+
+ // JDBC URL...
+ actual =
+ new OracleJdbcUrl("jdbc:oracle:thin:@hostname.domain:1521:dbsid")
+ .getConnectionUrl();
+ Assert.assertEquals("jdbc:oracle:thin:@hostname.domain:1521:dbsid", actual);
+
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/test/java/org/apache/sqoop/connector/jdbc/oracle/TestOracleTable.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/test/java/org/apache/sqoop/connector/jdbc/oracle/TestOracleTable.java b/connector/connector-oracle-jdbc/src/test/java/org/apache/sqoop/connector/jdbc/oracle/TestOracleTable.java
new file mode 100644
index 0000000..e6ceb86
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/test/java/org/apache/sqoop/connector/jdbc/oracle/TestOracleTable.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sqoop.connector.jdbc.oracle;
+
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleTable;
+import org.testng.Assert;
+import org.testng.annotations.Test;
+
+/**
+ * Unit tests for OracleTable.
+ */
+public class TestOracleTable {
+
+ @Test
+ public void testToString() {
+ OracleTable table = new OracleTable("SQOOP", "TEST_TABLE");
+ Assert.assertEquals("\"SQOOP\".\"TEST_TABLE\"", table.toString());
+
+ table = new OracleTable("", "TEST_TABLE2");
+ Assert.assertEquals("\"TEST_TABLE2\"", table.toString());
+
+ table = new OracleTable("TEST_TABLE3");
+ Assert.assertEquals("\"TEST_TABLE3\"", table.toString());
+ }
+
+}
[5/6] sqoop git commit: SQOOP-2595: Add Oracle connector to Sqoop 2
Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcLoader.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcLoader.java b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcLoader.java
new file mode 100644
index 0000000..b741dc8
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcLoader.java
@@ -0,0 +1,615 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sqoop.connector.jdbc.oracle;
+
+import java.math.BigDecimal;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.sql.Timestamp;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.log4j.Logger;
+import org.apache.sqoop.connector.jdbc.oracle.configuration.LinkConfiguration;
+import org.apache.sqoop.connector.jdbc.oracle.configuration.ToJobConfig;
+import org.apache.sqoop.connector.jdbc.oracle.configuration.ToJobConfiguration;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleConnectionFactory;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleQueries;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleTable;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleTableColumn;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleTableColumns;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleUtilities;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleUtilities.InsertMode;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleUtilities.UpdateMode;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleVersion;
+import org.apache.sqoop.job.etl.Loader;
+import org.apache.sqoop.job.etl.LoaderContext;
+import org.apache.sqoop.schema.type.Column;
+import org.joda.time.LocalDateTime;
+
+public class OracleJdbcLoader extends Loader<LinkConfiguration, ToJobConfiguration> {
+
+ private static final Logger LOG =
+ Logger.getLogger(OracleJdbcToDestroyer.class);
+
+ private long rowsWritten = 0;
+ private LoaderContext context;
+ private Connection connection;
+ private OracleVersion oracleVersion;
+ private OracleTable table; // <- If exporting into a partitioned
+ // table, this table will be unique for
+ // this mapper
+ private OracleTableColumns tableColumns; // <- The columns in the
+ // table we're inserting rows
+ // into
+ private int mapperId; // <- The index of this Hadoop mapper
+ private boolean tableHasMapperRowNumberColumn; // <- Whether the export
+ // table contain the column
+ // SQOOP_MAPPER_ROW
+ private long mapperRowNumber; // <- The 1-based row number being processed
+ // by this mapper. It's inserted into the
+ // "SQOOP_MAPPER_ROW" column
+ private boolean useAppendValuesOracleHint = false; // <- Whether to use the
+ // " /*+APPEND_VALUES*/ " hint
+ // within the Oracle SQL
+ // statement we generate
+ private long numberOfRowsSkipped; // <- The number of rows encountered
+ // during configurePreparedStatement()
+ // that had a NULL value for (one of) the
+ // update columns. This row was therefore
+ // skipped.
+ private String[] updateColumnNames;
+ private int rowsPerBatch;
+ private int rowsPerCommit;
+
+
+ private void setupInsert(LinkConfiguration linkConfiguration,
+ ToJobConfiguration jobConfiguration) throws SQLException {
+ // Is each mapper inserting rows into a unique table?...
+ InsertMode insertMode = OracleUtilities.getExportInsertMode(
+ jobConfiguration.toJobConfig, context.getContext());
+
+ if(insertMode==InsertMode.ExchangePartition) {
+ Object sysDateTime =
+ OracleUtilities.recallOracleDateTime(context.getContext());
+ table = OracleUtilities.generateExportTableMapperTableName(
+ mapperId, sysDateTime, null);
+
+ } else {
+ table = OracleUtilities.decodeOracleTableName(
+ linkConfiguration.connectionConfig.username,
+ jobConfiguration.toJobConfig.tableName);
+ }
+ tableColumns = OracleQueries.getToTableColumns(
+ connection, table, true, false);
+ tableHasMapperRowNumberColumn =
+ tableColumns.findColumnByName(
+ OracleJdbcConnectorConstants.COLUMN_NAME_EXPORT_MAPPER_ROW) != null;
+
+ // Should we use the APPEND_VALUES Oracle hint?...
+ useAppendValuesOracleHint = false;
+ if (insertMode == InsertMode.ExchangePartition) {
+ // NB: "Direct inserts" cannot utilize APPEND_VALUES, otherwise Oracle
+ // will serialize
+ // the N mappers, causing a lot of lock contention.
+ useAppendValuesOracleHint = canUseOracleAppendValuesHint();
+ }
+ }
+
+ private void setupUpdate(LinkConfiguration linkConfiguration,
+ ToJobConfiguration jobConfiguration) throws SQLException {
+ UpdateMode updateMode = OracleUtilities.getExportUpdateMode(
+ jobConfiguration.toJobConfig);
+
+ Object sysDateTime =
+ OracleUtilities.recallOracleDateTime(context.getContext());
+ table = OracleUtilities.generateExportTableMapperTableName(
+ mapperId, sysDateTime, null);
+
+ updateColumnNames = OracleUtilities.
+ getExportUpdateKeyColumnNames(jobConfiguration.toJobConfig);
+
+ tableColumns = OracleQueries.getToTableColumns(
+ connection, table, true, false);
+
+ if (updateMode == UpdateMode.Merge || updateMode == UpdateMode.Update) {
+ // Should we use the APPEND_VALUES Oracle hint?...
+ useAppendValuesOracleHint = canUseOracleAppendValuesHint();
+ }
+
+ }
+
+ @Override
+ public void load(LoaderContext context, LinkConfiguration linkConfiguration,
+ ToJobConfiguration jobConfiguration) throws Exception {
+ LOG.debug("Running Oracle JDBC connector loader");
+ this.context = context;
+
+ //TODO: Mapper ID
+ mapperId = 1;
+ //TODO: Hardcoded values
+ rowsPerBatch = 5000;
+ rowsPerCommit = 5000;
+
+ // Retrieve the JDBC URL that should be used by this mapper.
+ // We achieve this by modifying the JDBC URL property in the
+ // configuration, prior to the OraOopDBRecordWriter's (ancestral)
+ // constructor using the configuration to establish a connection
+ // to the database - via DBConfiguration.getConnection()...
+ String mapperJdbcUrlPropertyName =
+ OracleUtilities.getMapperJdbcUrlPropertyName(mapperId);
+
+ // Get this mapper's JDBC URL
+ String mapperJdbcUrl = context.getString(mapperJdbcUrlPropertyName, null);
+
+ LOG.debug(String.format("Mapper %d has a JDBC URL of: %s", mapperId,
+ mapperJdbcUrl == null ? "<null>" : mapperJdbcUrl));
+
+ connection = OracleConnectionFactory.createOracleJdbcConnection(
+ OracleJdbcConnectorConstants.ORACLE_JDBC_DRIVER_CLASS,
+ mapperJdbcUrl,
+ linkConfiguration.connectionConfig.username,
+ linkConfiguration.connectionConfig.password);
+ String thisOracleInstanceName =
+ OracleQueries.getCurrentOracleInstanceName(connection);
+ LOG.info(String.format(
+ "This record writer is connected to Oracle via the JDBC URL: \n"
+ + "\t\"%s\"\n" + "\tto the Oracle instance: \"%s\"", connection
+ .toString(), thisOracleInstanceName));
+ OracleConnectionFactory.initializeOracleConnection(
+ connection, linkConfiguration.connectionConfig);
+ connection.setAutoCommit(false);
+ oracleVersion = OracleQueries.getOracleVersion(connection);
+
+ if (jobConfiguration.toJobConfig.updateKey == null ||
+ jobConfiguration.toJobConfig.updateKey.isEmpty()) {
+ setupInsert(linkConfiguration, jobConfiguration);
+ } else {
+ setupUpdate(linkConfiguration, jobConfiguration);
+ }
+
+ // Has the user forced the use of APPEND_VALUES either on or off?...
+ useAppendValuesOracleHint =
+ allowUserToOverrideUseOfTheOracleAppendValuesHint(
+ jobConfiguration.toJobConfig,
+ useAppendValuesOracleHint);
+
+ insertData();
+ connection.close();
+ }
+
+ @Override
+ public long getRowsWritten() {
+ return rowsWritten;
+ }
+
+ private void insertData() throws Exception {
+ // If using APPEND_VALUES, check the batch size and commit frequency...
+ if (useAppendValuesOracleHint) {
+ if(rowsPerBatch < OracleJdbcConnectorConstants.
+ MIN_APPEND_VALUES_BATCH_SIZE_DEFAULT) {
+ LOG.info(String
+ .format(
+ "The number of rows per batch-insert has been changed from %d "
+ + "to %d. This is in response "
+ + "to the Oracle APPEND_VALUES hint being used.",
+ rowsPerBatch, OracleJdbcConnectorConstants.
+ MIN_APPEND_VALUES_BATCH_SIZE_DEFAULT));
+ rowsPerBatch = OracleJdbcConnectorConstants.
+ MIN_APPEND_VALUES_BATCH_SIZE_DEFAULT;
+ }
+ // Need to commit after each batch when using APPEND_VALUES
+ if(rowsPerCommit!=rowsPerBatch) {
+ LOG.info(String
+ .format(
+ "The number of rows to insert per commit has been "
+ + "changed from %d to %d. This is in response "
+ + "to the Oracle APPEND_VALUES hint being used.",
+ rowsPerCommit, rowsPerBatch));
+ rowsPerCommit = rowsPerBatch;
+ }
+ }
+
+ mapperRowNumber = 1;
+
+ String sql = getBatchInsertSqlStatement(useAppendValuesOracleHint
+ ? "/*+APPEND_VALUES*/" : "");
+ PreparedStatement statement = connection.prepareStatement(sql);
+
+ Column[] columns = context.getSchema().getColumnsArray();
+ Object[] array;
+ boolean checkUpdateColumns = false;
+ List<Integer> updateColumnIndexes = null;
+ if(updateColumnNames!=null) {
+ checkUpdateColumns = true;
+ updateColumnIndexes = new ArrayList<Integer>();
+ for (int idx = 0; idx < this.updateColumnNames.length; idx++) {
+ for (int i = 0; i < columns.length; i++) {
+ if(columns[i].getName().equals(updateColumnNames[idx])) {
+ updateColumnIndexes.add(i);
+ }
+ }
+ }
+ }
+
+ while ((array = context.getDataReader().readArrayRecord()) != null) {
+ if(checkUpdateColumns) {
+ boolean updateKeyValueIsNull = false;
+ for (Integer i : updateColumnIndexes) {
+ Object updateKeyValue = array[i];
+ if (updateKeyValue == null) {
+ this.numberOfRowsSkipped++;
+ updateKeyValueIsNull = true;
+ break;
+ }
+ }
+
+ if (updateKeyValueIsNull) {
+ continue;
+ }
+ }
+ rowsWritten++;
+ configurePreparedStatementColumns(statement, columns, array);
+ if(rowsWritten % rowsPerBatch == 0) {
+ statement.executeBatch();
+ }
+ if(rowsWritten % rowsPerCommit == 0) {
+ connection.commit();
+ }
+ }
+ if(rowsWritten % rowsPerBatch != 0) {
+ statement.executeBatch();
+ }
+ connection.commit();
+ statement.close();
+
+ if (numberOfRowsSkipped > 0) {
+ LOG.warn(String.format(
+ "%d records were skipped due to a NULL value within one of the "
+ + "update-key column(s).\nHaving a NULL value prevents a record "
+ + "from being able to be matched to a row in the Oracle table.",
+ numberOfRowsSkipped));
+ }
+ }
+
+ private String getBatchInsertSqlStatement(String oracleHint) {
+
+ // String[] columnNames = this.getColumnNames();
+ StringBuilder sqlNames = new StringBuilder();
+ StringBuilder sqlValues = new StringBuilder();
+
+ /*
+ * NOTE: "this.oracleTableColumns" may contain a different list of columns
+ * than "this.getColumnNames()". This is because: (1)
+ * "this.getColumnNames()" includes columns with data-types that are not
+ * supported by OraOop. (2) "this.oracleTableColumns" includes any
+ * pseudo-columns that we've added to the export table (and don't exist in
+ * the HDFS file being read). For example, if exporting to a partitioned
+ * table (that OraOop created), there are two pseudo-columns we added to
+ * the table to identify the export job and the mapper.
+ */
+
+ int colCount = 0;
+ for (int idx = 0; idx < this.tableColumns.size(); idx++) {
+ OracleTableColumn oracleTableColumn = this.tableColumns.get(idx);
+ String columnName = oracleTableColumn.getName();
+
+ // column names...
+ if (colCount > 0) {
+ sqlNames.append("\n,");
+ }
+ sqlNames.append(columnName);
+
+ // column values...
+ if (colCount > 0) {
+ sqlValues.append("\n,");
+ }
+
+ String pseudoColumnValue =
+ generateInsertValueForPseudoColumn(columnName);
+
+ String bindVarName = null;
+
+ if (pseudoColumnValue != null) {
+ bindVarName = pseudoColumnValue;
+ } else if (oracleTableColumn.getOracleType() == OracleQueries
+ .getOracleType("STRUCT")) {
+ if (oracleTableColumn.getDataType().equals(
+ OracleJdbcConnectorConstants.Oracle.URITYPE)) {
+ bindVarName =
+ String.format("urifactory.getUri(%s)",
+ columnNameToBindVariable(columnName));
+ }
+ //TODO: Date as string?
+ /*} else if (getConf().getBoolean(
+ OraOopConstants.ORAOOP_MAP_TIMESTAMP_AS_STRING,
+ OraOopConstants.ORAOOP_MAP_TIMESTAMP_AS_STRING_DEFAULT)) {
+ if (oracleTableColumn.getOracleType() == OraOopOracleQueries
+ .getOracleType("DATE")) {
+ bindVarName =
+ String.format("to_date(%s, 'yyyy-mm-dd hh24:mi:ss')",
+ columnNameToBindVariable(columnName));
+ } else if (oracleTableColumn.getOracleType() == OraOopOracleQueries
+ .getOracleType("TIMESTAMP")) {
+ bindVarName =
+ String.format("to_timestamp(%s, 'yyyy-mm-dd hh24:mi:ss.ff')",
+ columnNameToBindVariable(columnName));
+ } else if (oracleTableColumn.getOracleType() == OraOopOracleQueries
+ .getOracleType("TIMESTAMPTZ")) {
+ bindVarName =
+ String.format(
+ "to_timestamp_tz(%s, 'yyyy-mm-dd hh24:mi:ss.ff TZR')",
+ columnNameToBindVariable(columnName));
+ } else if (oracleTableColumn.getOracleType() == OraOopOracleQueries
+ .getOracleType("TIMESTAMPLTZ")) {
+ bindVarName =
+ String.format(
+ "to_timestamp_tz(%s, 'yyyy-mm-dd hh24:mi:ss.ff TZR')",
+ columnNameToBindVariable(columnName));
+ }*/
+ }
+
+ if (bindVarName == null) {
+ bindVarName = columnNameToBindVariable(columnName);
+ }
+
+ sqlValues.append(bindVarName);
+
+ colCount++;
+ }
+
+ String sql =
+ String.format("insert %s into %s\n" + "(%s)\n" + "values\n"
+ + "(%s)\n", oracleHint, this.table.toString(), sqlNames
+ .toString(), sqlValues.toString());
+
+ LOG.info("Batch-Mode insert statement:\n" + sql);
+ return sql;
+ }
+
+ private String generateInsertValueForPseudoColumn(String columnName) {
+
+ if (columnName.equalsIgnoreCase(
+ OracleJdbcConnectorConstants.COLUMN_NAME_EXPORT_PARTITION)) {
+
+ String partitionValueStr =
+ context.getString(
+ OracleJdbcConnectorConstants.ORAOOP_EXPORT_PARTITION_DATE_VALUE);
+ if (partitionValueStr == null) {
+ throw new RuntimeException(
+ "Unable to recall the value of the partition date-time.");
+ }
+
+ return String.format("to_date('%s', '%s')", partitionValueStr,
+ OracleJdbcConnectorConstants.ORAOOP_EXPORT_PARTITION_DATE_FORMAT);
+ }
+
+ if (columnName.equalsIgnoreCase(
+ OracleJdbcConnectorConstants.COLUMN_NAME_EXPORT_SUBPARTITION)) {
+ return Integer.toString(this.mapperId);
+ }
+
+ return null;
+ }
+
+ private String columnNameToBindVariable(String columnName) {
+ return ":" + columnName;
+ }
+
+ private void configurePreparedStatementColumns(
+ PreparedStatement statement, Column[] columns, Object[] array)
+ throws SQLException {
+
+ String bindValueName;
+
+ if (this.tableHasMapperRowNumberColumn) {
+ bindValueName = columnNameToBindVariable(OracleJdbcConnectorConstants.
+ COLUMN_NAME_EXPORT_MAPPER_ROW).replaceFirst(":", "");
+ try {
+ OracleQueries.setLongAtName(statement, bindValueName,
+ this.mapperRowNumber);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ this.mapperRowNumber++;
+ }
+
+ for (int i = 0; i < array.length; i++) {
+ String colName = columns[i].getName();
+ bindValueName = columnNameToBindVariable(colName).replaceFirst(":", "");
+ OracleTableColumn oracleTableColumn =
+ tableColumns.findColumnByName(colName);
+ setBindValueAtName(statement, bindValueName, array[i],
+ oracleTableColumn);
+ }
+ statement.addBatch();
+ }
+
+ private void setBindValueAtName(PreparedStatement statement,
+ String bindValueName, Object bindValue, OracleTableColumn column)
+ throws SQLException {
+ if (column.getOracleType()
+ == OracleQueries.getOracleType("NUMBER")) {
+ OracleQueries.setBigDecimalAtName(statement, bindValueName,
+ (BigDecimal) bindValue);
+ } else if (column.getOracleType() == OracleQueries
+ .getOracleType("VARCHAR")) {
+ OracleQueries.setStringAtName(statement, bindValueName,
+ (String) bindValue);
+ } else if (column.getOracleType() == OracleQueries
+ .getOracleType("TIMESTAMP")
+ || column.getOracleType() == OracleQueries
+ .getOracleType("TIMESTAMPTZ")
+ || column.getOracleType() == OracleQueries
+ .getOracleType("TIMESTAMPLTZ")) {
+ Object objValue = bindValue;
+ if (objValue instanceof LocalDateTime) {
+ //TODO: Improve date handling
+ LocalDateTime value = (LocalDateTime) objValue;
+ Timestamp timestampValue =
+ new Timestamp(value.toDateTime().getMillis());
+ OracleQueries.setTimestampAtName(statement, bindValueName,
+ timestampValue);
+ } else {
+ String value = (String) objValue;
+
+ if (value == null || value.equalsIgnoreCase("null")) {
+ value = "";
+ }
+
+ OracleQueries.setStringAtName(statement, bindValueName, value);
+ }
+ } else if (column.getOracleType() == OracleQueries
+ .getOracleType("BINARY_DOUBLE")) {
+ Double value = (Double) bindValue;
+ if (value != null) {
+ OracleQueries.setBinaryDoubleAtName(statement, bindValueName,
+ value);
+ } else {
+ OracleQueries.setObjectAtName(statement, bindValueName, null);
+ }
+ } else if (column.getOracleType() == OracleQueries
+ .getOracleType("BINARY_FLOAT")) {
+ Float value = (Float) bindValue;
+ if (value != null) {
+ OracleQueries.setBinaryFloatAtName(statement, bindValueName,
+ value);
+ } else {
+ OracleQueries.setObjectAtName(statement, bindValueName, null);
+ }
+ } else if (column.getOracleType() == OracleQueries
+ .getOracleType("STRUCT")) { // <- E.g. URITYPE
+ if (column.getDataType().equals(
+ OracleJdbcConnectorConstants.Oracle.URITYPE)) {
+ String value = (String) bindValue;
+ OracleQueries.setStringAtName(statement, bindValueName, value);
+ } else {
+ String msg =
+ String.format(
+ "%s needs to be updated to cope with the data-type: %s "
+ + "where the Oracle data_type is \"%s\".",
+ OracleUtilities.getCurrentMethodName(), column.getDataType(),
+ column.getOracleType());
+ LOG.error(msg);
+ throw new UnsupportedOperationException(msg);
+ }
+ } else {
+ // LOB data-types are currently not supported during
+ // a Sqoop Export.
+ // JIRA: SQOOP-117
+ // OraOopConstants.SUPPORTED_EXPORT_ORACLE_DATA_TYPES_CLAUSE
+ // will already have excluded all LOB columns.
+
+ // case oracle.jdbc.OracleTypes.CLOB:
+ // {
+ // oracle.sql.CLOB clob = new
+ // oracle.sql.CLOB(connection);
+ // Object value = fieldMap.get(colName);
+ // //clob.set
+ // statement.setCLOBAtName(bindValueName, clob);
+ // break;
+ // }
+ String msg =
+ String.format(
+ "%s may need to be updated to cope with the data-type: %s",
+ OracleUtilities.getCurrentMethodName(), column.getOracleType());
+ LOG.debug(msg);
+
+ OracleQueries
+ .setObjectAtName(statement, bindValueName, bindValue);
+ }
+ }
+
+ private boolean canUseOracleAppendValuesHint() {
+
+ // Should we use the APPEND_VALUES Oracle hint?...
+ // (Yes, if this is Oracle 11.2 or above)...
+ boolean result = oracleVersion.isGreaterThanOrEqualTo(11, 2, 0, 0);
+
+ // If there is a BINARY_DOUBLE or BINARY_FLOAT column, then we'll avoid
+ // using
+ // the APPEND_VALUES hint. If there is a NULL in the HDFS file, then we'll
+ // encounter
+ // "ORA-12838: cannot read/modify an object after modifying it in parallel"
+ // due to the JDBC driver issuing the INSERT statement twice to the database
+ // without a COMMIT in between (as was observed via WireShark).
+ // We're not sure why this happens - we just know how to avoid it.
+ if (result) {
+ boolean binaryDoubleColumnExists = false;
+ boolean binaryFloatColumnExists = false;
+ for (int idx = 0; idx < this.tableColumns.size(); idx++) {
+ OracleTableColumn oracleTableColumn = this.tableColumns.get(idx);
+ if(oracleTableColumn.getOracleType()==
+ OracleQueries.getOracleType("BINARY_DOUBLE")) {
+ binaryDoubleColumnExists = true;
+ }
+ if(oracleTableColumn.getOracleType()==
+ OracleQueries.getOracleType("BINARY_FLOAT")) {
+ binaryFloatColumnExists = true;
+ }
+ }
+
+ if (binaryDoubleColumnExists || binaryFloatColumnExists) {
+ result = false;
+ LOG.info("The APPEND_VALUES Oracle hint will not be used for the "
+ + "INSERT SQL statement, as the Oracle table "
+ + "contains either a BINARY_DOUBLE or BINARY_FLOAT column.");
+ }
+ }
+
+ return result;
+ }
+
+ protected boolean allowUserToOverrideUseOfTheOracleAppendValuesHint(
+ ToJobConfig jobConfig, boolean useAppendValuesOracleHint) {
+
+ boolean result = useAppendValuesOracleHint;
+
+ // Has the user forced the use of APPEND_VALUES either on or off?...
+ switch (OracleUtilities.getOracleAppendValuesHintUsage(jobConfig)) {
+
+ case OFF:
+ result = false;
+ LOG.debug(String
+ .format(
+ "Use of the APPEND_VALUES Oracle hint has been forced OFF. "
+ + "(It was %s to used).",
+ useAppendValuesOracleHint ? "going" : "not going"));
+ break;
+
+ case ON:
+ result = true;
+ LOG.debug(String
+ .format(
+ "Use of the APPEND_VALUES Oracle hint has been forced ON. "
+ + "(It was %s to used).",
+ useAppendValuesOracleHint ? "going" : "not going"));
+ break;
+
+ case AUTO:
+ LOG.debug(String.format("The APPEND_VALUES Oracle hint %s be used.",
+ result ? "will" : "will not"));
+ break;
+
+ default:
+ throw new RuntimeException("Invalid value for APPEND_VALUES.");
+ }
+ return result;
+ }
+}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcPartition.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcPartition.java b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcPartition.java
new file mode 100644
index 0000000..9aeacaf
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcPartition.java
@@ -0,0 +1,183 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sqoop.connector.jdbc.oracle;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleDataChunk;
+import org.apache.sqoop.job.etl.Partition;
+
+public class OracleJdbcPartition extends Partition {
+
+
+ private int splitId;
+ private double totalNumberOfBlocksInAllSplits;
+ private String splitLocation;
+ private List<OracleDataChunk> oracleDataChunks;
+
+ // NB: Update write(), readFields() and getDebugDetails() if you add fields
+ // here.
+
+ public OracleJdbcPartition() {
+
+ this.splitId = -1;
+ this.splitLocation = "";
+ this.oracleDataChunks = new ArrayList<OracleDataChunk>();
+ }
+
+ public OracleJdbcPartition(List<OracleDataChunk> dataChunks) {
+
+ setOracleDataChunks(dataChunks);
+ }
+
+ public void setOracleDataChunks(List<OracleDataChunk> dataChunks) {
+
+ this.oracleDataChunks = dataChunks;
+ }
+
+ public List<OracleDataChunk> getDataChunks() {
+
+ return this.oracleDataChunks;
+ }
+
+ public int getNumberOfDataChunks() {
+
+ if (this.getDataChunks() == null) {
+ return 0;
+ } else {
+ return this.getDataChunks().size();
+ }
+ }
+
+ /**
+ * @return The total number of blocks within the data-chunks of this split
+ */
+ public long getLength() {
+
+ return this.getTotalNumberOfBlocksInThisSplit();
+ }
+
+ public int getTotalNumberOfBlocksInThisSplit() {
+
+ if (this.getNumberOfDataChunks() == 0) {
+ return 0;
+ }
+
+ int result = 0;
+ for (OracleDataChunk dataChunk : this.getDataChunks()) {
+ result += dataChunk.getNumberOfBlocks();
+ }
+
+ return result;
+ }
+
+ public OracleDataChunk findDataChunkById(String id) {
+
+ for (OracleDataChunk dataChunk : this.getDataChunks()) {
+ if (dataChunk.getId().equals(id)) {
+ return dataChunk;
+ }
+ }
+ return null;
+ }
+
+ @Override
+ /** {@inheritDoc} */
+ public void write(DataOutput output) throws IOException {
+
+ output.writeInt(splitId);
+
+ if (this.oracleDataChunks == null) {
+ output.writeInt(0);
+ } else {
+ output.writeInt(this.oracleDataChunks.size());
+ for (OracleDataChunk dataChunk : this.oracleDataChunks) {
+ output.writeUTF(dataChunk.getClass().getName());
+ dataChunk.write(output);
+ }
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ /** {@inheritDoc} */
+ public void readFields(DataInput input) throws IOException {
+
+ this.splitId = input.readInt();
+
+ int dataChunkCount = input.readInt();
+ if (dataChunkCount == 0) {
+ this.oracleDataChunks = null;
+ } else {
+ Class<? extends OracleDataChunk> dataChunkClass;
+ OracleDataChunk dataChunk;
+ this.oracleDataChunks =
+ new ArrayList<OracleDataChunk>(dataChunkCount);
+ for (int idx = 0; idx < dataChunkCount; idx++) {
+ try {
+ dataChunkClass =
+ (Class<? extends OracleDataChunk>) Class.forName(input.readUTF());
+ dataChunk = dataChunkClass.newInstance();
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ dataChunk.readFields(input);
+ this.oracleDataChunks.add(dataChunk);
+ }
+ }
+ }
+
+ public String toString() {
+
+ StringBuilder result = new StringBuilder();
+
+ if (this.getNumberOfDataChunks() == 0) {
+ result.append(String.format(
+ "Split[%s] does not contain any Oracle data-chunks.", this.splitId));
+ } else {
+ result.append(String.format(
+ "Split[%s] includes the Oracle data-chunks:\n", this.splitId));
+ for (OracleDataChunk dataChunk : getDataChunks()) {
+ result.append(dataChunk.toString());
+ }
+ }
+ return result.toString();
+ }
+
+ protected int getSplitId() {
+ return this.splitId;
+ }
+
+ protected void setSplitId(int newSplitId) {
+ this.splitId = newSplitId;
+ }
+
+ protected void setSplitLocation(String newSplitLocation) {
+ this.splitLocation = newSplitLocation;
+ }
+
+ protected void setTotalNumberOfBlocksInAllSplits(
+ int newTotalNumberOfBlocksInAllSplits) {
+ this.totalNumberOfBlocksInAllSplits = newTotalNumberOfBlocksInAllSplits;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcPartitioner.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcPartitioner.java b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcPartitioner.java
new file mode 100644
index 0000000..00c7752
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcPartitioner.java
@@ -0,0 +1,252 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sqoop.connector.jdbc.oracle;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.log4j.Logger;
+import org.apache.sqoop.connector.jdbc.oracle.configuration.FromJobConfig;
+import org.apache.sqoop.connector.jdbc.oracle.configuration.FromJobConfiguration;
+import org.apache.sqoop.connector.jdbc.oracle.configuration.LinkConfiguration;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleConnectionFactory;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleDataChunk;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleQueries;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleTable;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleUtilities;
+import org.apache.sqoop.job.etl.Partition;
+import org.apache.sqoop.job.etl.Partitioner;
+import org.apache.sqoop.job.etl.PartitionerContext;
+
+/**
+ * Partitioner for the Oracle JDBC connector (FROM direction).
+ *
+ * Connects to Oracle, fetches the table's data-chunks (either by physical
+ * extent or by table partition) and groups them into at most
+ * {@code context.getMaxPartitions()} splits using the configured
+ * block-to-split allocation method.
+ */
+public class OracleJdbcPartitioner extends
+    Partitioner<LinkConfiguration, FromJobConfiguration> {
+
+  private static final Logger LOG =
+      Logger.getLogger(OracleJdbcPartitioner.class);
+
+  /**
+   * Computes the list of partitions (splits) for the import.
+   *
+   * @throws RuntimeException if the connection cannot be established, the
+   *           table yields no data-chunks, or any other SQL error occurs
+   */
+  @Override
+  public List<Partition> getPartitions(PartitionerContext context,
+      LinkConfiguration linkConfiguration,
+      FromJobConfiguration jobConfiguration) {
+
+    Connection connection;
+    try {
+      connection = OracleConnectionFactory.makeConnection(
+          linkConfiguration.connectionConfig);
+    } catch (SQLException ex) {
+      throw new RuntimeException(String.format(
+          "Unable to connect to the Oracle database at %s\nError:%s",
+          linkConfiguration.connectionConfig.connectionString,
+          ex.getMessage()), ex);
+    }
+
+    try {
+      OracleTable table = OracleUtilities.decodeOracleTableName(
+          linkConfiguration.connectionConfig.username,
+          jobConfiguration.fromJobConfig.tableName);
+
+      long desiredNumberOfMappers = context.getMaxPartitions();
+      List<String> partitionList = getPartitionList(
+          jobConfiguration.fromJobConfig);
+
+      OracleConnectionFactory.initializeOracleConnection(connection,
+          linkConfiguration.connectionConfig);
+
+      // The number of chunks generated will *not* be a multiple of the number
+      // of splits, to ensure that each split doesn't always get data from the
+      // start of each data-file...
+      long numberOfChunksPerOracleDataFile = (desiredNumberOfMappers * 2) + 1;
+
+      // The chunk method is needed both for chunk retrieval and for error
+      // reporting below, so look it up once.
+      boolean chunkByPartition =
+          OracleUtilities.getOraOopOracleDataChunkMethod(
+              jobConfiguration.fromJobConfig).equals(
+                  OracleUtilities.OracleDataChunkMethod.PARTITION);
+
+      // Get the Oracle data-chunks for the table...
+      List<? extends OracleDataChunk> dataChunks;
+      if (chunkByPartition) {
+        dataChunks =
+            OracleQueries.getOracleDataChunksPartition(connection, table,
+                partitionList);
+      } else {
+        dataChunks =
+            OracleQueries.getOracleDataChunksExtent(connection, table,
+                partitionList, numberOfChunksPerOracleDataFile);
+      }
+
+      if (dataChunks.isEmpty()) {
+        String errMsg;
+        if (chunkByPartition) {
+          errMsg =
+              String.format(
+                  "The table %s does not contain any partitions and you "
+                      + "have specified to chunk the table by partitions.",
+                  table.getName());
+        } else {
+          errMsg =
+              String.format("The table %s does not contain any data.",
+                  table.getName());
+        }
+        LOG.fatal(errMsg);
+        throw new RuntimeException(errMsg);
+      }
+
+      OracleUtilities.OracleBlockToSplitAllocationMethod
+          blockAllocationMethod = OracleUtilities
+              .getOracleBlockToSplitAllocationMethod(
+                  jobConfiguration.fromJobConfig,
+                  OracleUtilities.
+                      OracleBlockToSplitAllocationMethod.ROUNDROBIN);
+
+      // Group the Oracle data-chunks into splits...
+      return groupTableDataChunksIntoSplits(dataChunks,
+          desiredNumberOfMappers, blockAllocationMethod);
+    } catch (SQLException ex) {
+      throw new RuntimeException(ex);
+    } finally {
+      // Fix: the connection was previously never closed, leaking one Oracle
+      // session per partitioning call.
+      try {
+        connection.close();
+      } catch (SQLException ex) {
+        LOG.warn("Failed to close the Oracle connection.", ex);
+      }
+    }
+  }
+
+  /**
+   * Splits the job's partition-list property into individual partition names.
+   * May return null or an empty list when no partition filter was supplied.
+   */
+  private List<String> getPartitionList(FromJobConfig jobConfig) {
+    LOG.debug("Partition list = " + jobConfig.partitionList);
+    List<String> result =
+        OracleUtilities.splitOracleStringList(jobConfig.partitionList);
+    if (result != null && !result.isEmpty()) {
+      LOG.debug("Partition filter list: " + result.toString());
+    }
+    return result;
+  }
+
+  /**
+   * Allocates the data-chunks to min(#chunks, desiredNumberOfSplits) splits
+   * using the requested method (RANDOM, ROUNDROBIN or SEQUENTIAL).
+   *
+   * @param dataChunks the chunks to distribute (shuffled in place for RANDOM)
+   * @param desiredNumberOfSplits upper bound on the number of splits returned
+   * @param blockAllocationMethod how chunks are assigned to splits
+   * @return the populated splits
+   */
+  protected static
+      List<Partition> groupTableDataChunksIntoSplits(
+          List<? extends OracleDataChunk> dataChunks,
+          long desiredNumberOfSplits,
+          OracleUtilities.OracleBlockToSplitAllocationMethod
+              blockAllocationMethod) {
+
+    long numberOfDataChunks = dataChunks.size();
+    long actualNumberOfSplits =
+        Math.min(numberOfDataChunks, desiredNumberOfSplits);
+    long totalNumberOfBlocksInAllDataChunks = 0;
+    for (OracleDataChunk dataChunk : dataChunks) {
+      totalNumberOfBlocksInAllDataChunks += dataChunk.getNumberOfBlocks();
+    }
+
+    String debugMsg = String.format(
+        "The table being imported by sqoop has %d blocks "
+            + "that have been divided into %d chunks "
+            + "which will be processed in %d splits. "
+            + "The chunks will be allocated to the splits using the method : %s",
+        totalNumberOfBlocksInAllDataChunks, numberOfDataChunks,
+        actualNumberOfSplits, blockAllocationMethod.toString());
+    LOG.info(debugMsg);
+
+    List<Partition> splits =
+        new ArrayList<Partition>((int) actualNumberOfSplits);
+
+    for (int i = 0; i < actualNumberOfSplits; i++) {
+      splits.add(new OracleJdbcPartition());
+    }
+
+    switch (blockAllocationMethod) {
+
+      case RANDOM:
+        // Randomize the order of the data chunks and then "fall through" into
+        // the ROUNDROBIN block below...
+        Collections.shuffle(dataChunks);
+
+        //$FALL-THROUGH$
+      case ROUNDROBIN:
+        int idxSplitRoundRobin = 0;
+        for (OracleDataChunk dataChunk : dataChunks) {
+          if (idxSplitRoundRobin >= splits.size()) {
+            idxSplitRoundRobin = 0;
+          }
+          OracleJdbcPartition split =
+              (OracleJdbcPartition) splits.get(idxSplitRoundRobin++);
+          split.getDataChunks().add(dataChunk);
+        }
+        break;
+
+      case SEQUENTIAL:
+        // Give each split a contiguous run of chunks, advancing to the next
+        // split once this one has received its (fractional) share.
+        double dataChunksPerSplit = dataChunks.size() / (double) splits.size();
+        int dataChunksAllocatedToSplits = 0;
+
+        int idxSplitSeq = 0;
+        for (OracleDataChunk dataChunk : dataChunks) {
+          OracleJdbcPartition split =
+              (OracleJdbcPartition) splits.get(idxSplitSeq);
+          split.getDataChunks().add(dataChunk);
+
+          dataChunksAllocatedToSplits++;
+
+          // Fix: the guard must stop at the LAST valid index. The previous
+          // "idxSplitSeq < splits.size()" allowed idxSplitSeq to reach
+          // splits.size() when floating-point rounding made
+          // dataChunksPerSplit * splits.size() slightly smaller than the
+          // chunk count, causing an IndexOutOfBoundsException on the next
+          // iteration.
+          if (dataChunksAllocatedToSplits
+              >= (dataChunksPerSplit * (idxSplitSeq + 1))
+              && idxSplitSeq < splits.size() - 1) {
+            idxSplitSeq++;
+          }
+        }
+        break;
+
+      default:
+        throw new RuntimeException("Block allocation method not implemented.");
+
+    }
+
+    if (LOG.isDebugEnabled()) {
+      for (int idx = 0; idx < splits.size(); idx++) {
+        LOG.debug("\n\t" + splits.get(idx).toString());
+      }
+    }
+
+    return splits;
+  }
+
+}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcToDestroyer.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcToDestroyer.java b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcToDestroyer.java
new file mode 100644
index 0000000..8429a38
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcToDestroyer.java
@@ -0,0 +1,273 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sqoop.connector.jdbc.oracle;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+
+import org.apache.log4j.Logger;
+import org.apache.sqoop.common.ImmutableContext;
+import org.apache.sqoop.connector.jdbc.oracle.configuration.ConnectionConfig;
+import org.apache.sqoop.connector.jdbc.oracle.configuration.LinkConfiguration;
+import org.apache.sqoop.connector.jdbc.oracle.configuration.ToJobConfig;
+import org.apache.sqoop.connector.jdbc.oracle.configuration.ToJobConfiguration;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleConnectionFactory;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleQueries;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleQueries.CreateExportChangesTableOptions;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleTable;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleTableColumns;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleUtilities;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleUtilities.ExportTableUpdateTechnique;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleUtilities.InsertMode;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleUtilities.UpdateMode;
+import org.apache.sqoop.job.etl.Destroyer;
+import org.apache.sqoop.job.etl.DestroyerContext;
+
+/**
+ * Destroyer for the Oracle JDBC connector (TO direction).
+ *
+ * After the mappers finish, this moves the data each mapper staged into its
+ * own temporary table into the final export table: either by exchanging the
+ * staging table into a subpartition of the export table (insert path), or by
+ * applying delete/insert, UPDATE or MERGE statements (update path), and then
+ * drops the temporary tables.
+ */
+public class OracleJdbcToDestroyer extends
+    Destroyer<LinkConfiguration, ToJobConfiguration> {
+
+  private static final Logger LOG =
+      Logger.getLogger(OracleJdbcToDestroyer.class);
+
+  protected Connection connection;
+  protected OracleTable table;
+  // NOTE(review): hard-coded mapper count; must match the value used by the
+  // initializer when the mapper tables were created — TODO confirm the real
+  // mapper count is propagated (see the matching TODO in
+  // OracleJdbcToInitializer).
+  protected int numMappers = 8;
+
+  // Opens the JDBC connection used by the merge/exchange work below.
+  private void connect(ConnectionConfig connectionConfig) {
+    try {
+      connection = OracleConnectionFactory.makeConnection(connectionConfig);
+    } catch (SQLException ex) {
+      throw new RuntimeException(String.format(
+          "Unable to connect to the Oracle database at %s\n"
+              + "Error:%s", connectionConfig.connectionString, ex
+                  .getMessage()), ex);
+    }
+  }
+
+  /**
+   * Entry point. With no update key configured this is an "insert" export:
+   * work is only needed when the insert mode is ExchangePartition. With an
+   * update key this is an "update" export and each mapper's staging table is
+   * folded into the main export table.
+   */
+  @Override
+  public void destroy(DestroyerContext context,
+      LinkConfiguration linkConfiguration,
+      ToJobConfiguration jobConfiguration) {
+    LOG.debug("Running Oracle JDBC connector destroyer");
+
+    table = OracleUtilities.decodeOracleTableName(
+        linkConfiguration.connectionConfig.username,
+        jobConfiguration.toJobConfig.tableName);
+
+    if (jobConfiguration.toJobConfig.updateKey == null ||
+        jobConfiguration.toJobConfig.updateKey.isEmpty()) {
+
+      // Is each mapper inserting rows into a unique table?...
+      InsertMode insertMode = OracleUtilities.getExportInsertMode(
+          jobConfiguration.toJobConfig, context.getContext());
+
+      if(insertMode==InsertMode.ExchangePartition) {
+        connect(linkConfiguration.connectionConfig);
+        // The job's SYSDATE was recorded at initialization time; it names the
+        // partition/subpartition and mapper tables for this job run.
+        Object sysDateTime =
+            OracleUtilities.recallOracleDateTime(context.getContext());
+
+        exchangePartitionUniqueMapperTableDataIntoMainExportTable(sysDateTime);
+
+      }
+
+    } else {
+      connect(linkConfiguration.connectionConfig);
+      Object sysDateTime =
+          OracleUtilities.recallOracleDateTime(context.getContext());
+      try {
+        updateMainExportTableFromUniqueMapperTable(jobConfiguration.toJobConfig,
+            context.getContext(), sysDateTime);
+      } catch(SQLException e) {
+        throw new RuntimeException(
+            String.format(
+                "Unable to update the table %s.",table.toString()), e);
+      }
+    }
+  }
+
+  /**
+   * Insert path: for each mapper, exchanges that mapper's staging table into
+   * the corresponding subpartition of the main export table (a metadata-only
+   * operation on the Oracle side), then drops the staging table.
+   */
+  private void exchangePartitionUniqueMapperTableDataIntoMainExportTable(
+      Object sysDateTime) {
+
+    for(int i=0; i<numMappers; i++) {
+      long start = System.nanoTime();
+
+      OracleTable mapperTable =
+          OracleUtilities.generateExportTableMapperTableName(
+              i, sysDateTime, null);
+
+      String subPartitionName =
+          OracleUtilities.generateExportTableSubPartitionName(
+              i, sysDateTime);
+
+      try {
+        OracleQueries.exchangeSubpartition(connection,
+            table, subPartitionName, mapperTable);
+
+        double timeInSec = (System.nanoTime() - start) / Math.pow(10, 9);
+        LOG.info(String
+            .format(
+                "Time spent performing an \"exchange subpartition with "
+                    + "table\": %f sec.",
+                timeInSec));
+
+        LOG.debug(String.format("Dropping temporary mapper table %s",
+            mapperTable.toString()));
+        OracleQueries.dropTable(connection, mapperTable);
+      } catch (SQLException ex) {
+        throw new RuntimeException(
+            String
+                .format(
+                    "Unable to perform an \"exchange subpartition\" operation "
+                        + "for the table %s, for the subpartition named "
+                        + "\"%s\" with the table named \"%s\".",
+                    table.toString(), subPartitionName,
+                    mapperTable.toString()), ex);
+      }
+    }
+  }
+
+  /**
+   * Update path: for each mapper, builds a "changes" table holding the rows
+   * from the mapper's staging table that differ from (and, for merge modes,
+   * are new to) the export table, then applies those changes using the
+   * technique selected from the update mode. Commits per mapper; rolls back
+   * and rethrows on failure; always drops the temporary tables.
+   *
+   * @throws SQLException if any of the SQL operations fail
+   */
+  private void updateMainExportTableFromUniqueMapperTable(ToJobConfig jobConfig,
+      ImmutableContext context, Object sysDateTime)
+          throws SQLException {
+
+    String[] updateColumnNames = OracleUtilities.
+        getExportUpdateKeyColumnNames(jobConfig);
+
+    OracleTableColumns tableColumns = OracleQueries.getToTableColumns(
+        connection, table, true, false);
+
+    UpdateMode updateMode = OracleUtilities.getExportUpdateMode(jobConfig);
+
+    ExportTableUpdateTechnique exportTableUpdateTechnique =
+        OracleUtilities.getExportTableUpdateTechnique(context, updateMode);
+
+    // Decide what the changes-table should contain: only changed rows (pure
+    // update techniques) or changed plus brand-new rows (merge techniques).
+    CreateExportChangesTableOptions changesTableOptions;
+    boolean parallelizationEnabled =
+        OracleUtilities.enableOracleParallelProcessingDuringExport(jobConfig);
+
+    switch (exportTableUpdateTechnique) {
+
+      case ReInsertUpdatedRows:
+      case UpdateSql:
+        changesTableOptions =
+            CreateExportChangesTableOptions.OnlyRowsThatDiffer;
+        break;
+
+      case ReInsertUpdatedRowsAndNewRows:
+      case MergeSql:
+        changesTableOptions =
+            CreateExportChangesTableOptions.RowsThatDifferPlusNewRows;
+        break;
+
+      default:
+        throw new RuntimeException(String.format(
+            "Update %s to cater for the ExportTableUpdateTechnique \"%s\".",
+            OracleUtilities.getCurrentMethodName(),
+            exportTableUpdateTechnique.toString()));
+    }
+
+    String temporaryTableStorageClause =
+        OracleUtilities.getTemporaryTableStorageClause(jobConfig);
+
+    for(int i=0; i<numMappers; i++) {
+
+      OracleTable mapperTable =
+          OracleUtilities.generateExportTableMapperTableName(
+              i, sysDateTime, null);
+
+      OracleTable changesTable =
+          OracleUtilities.generateExportTableMapperTableName(Integer
+              .toString(i) + "_CHG", sysDateTime, null);
+
+      try {
+        int changeTableRowCount =
+            OracleQueries.createExportChangesTable(connection,
+                changesTable, temporaryTableStorageClause, mapperTable,
+                table, updateColumnNames, changesTableOptions,
+                parallelizationEnabled);
+
+        if (changeTableRowCount == 0) {
+          LOG.debug(String.format(
+              "The changes-table does not contain any rows. %s is now exiting.",
+              OracleUtilities.getCurrentMethodName()));
+          // Nothing to apply for this mapper; the finally block still drops
+          // the temporary tables.
+          continue;
+        }
+
+        switch (exportTableUpdateTechnique) {
+
+          case ReInsertUpdatedRows:
+          case ReInsertUpdatedRowsAndNewRows:
+
+            // Delete-then-insert: remove the stale versions of the changed
+            // rows from the export table, then re-insert the new versions.
+            OracleQueries.deleteRowsFromTable(connection,
+                table, changesTable, updateColumnNames,
+                parallelizationEnabled);
+
+            OracleQueries.insertRowsIntoExportTable(connection,
+                table, changesTable, sysDateTime, i,
+                parallelizationEnabled);
+            break;
+
+          case UpdateSql:
+
+            long start = System.nanoTime();
+
+            OracleQueries.updateTable(connection, table,
+                changesTable, updateColumnNames, tableColumns, sysDateTime, i,
+                parallelizationEnabled);
+
+            double timeInSec = (System.nanoTime() - start) / Math.pow(10, 9);
+            LOG.info(String.format("Time spent performing an update: %f sec.",
+                timeInSec));
+            break;
+
+          case MergeSql:
+
+            long mergeStart = System.nanoTime();
+
+            OracleQueries.mergeTable(connection, table,
+                changesTable, updateColumnNames, tableColumns, sysDateTime,
+                i, parallelizationEnabled);
+
+            double mergeTimeInSec = (System.nanoTime() - mergeStart)
+                / Math.pow(10, 9);
+            LOG.info(String.format("Time spent performing a merge: %f sec.",
+                mergeTimeInSec));
+
+            break;
+
+          default:
+            throw new RuntimeException(
+                String.format(
+                    "Update %s to cater for the ExportTableUpdateTechnique \"%s\".",
+                    OracleUtilities.getCurrentMethodName(),
+                    exportTableUpdateTechnique.toString()));
+        }
+
+        connection.commit();
+      } catch (SQLException ex) {
+        // Undo this mapper's partial changes before propagating the failure.
+        connection.rollback();
+        throw ex;
+      } finally {
+        // NOTE(review): dropTable can itself throw SQLException here, which
+        // would mask an exception thrown from the try block — confirm whether
+        // that is acceptable.
+        OracleQueries.dropTable(connection, changesTable);
+        LOG.debug(String.format("Dropping temporary mapper table %s",
+            mapperTable.toString()));
+        OracleQueries.dropTable(connection, mapperTable);
+      }
+    }
+  }
+
+}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcToInitializer.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcToInitializer.java b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcToInitializer.java
new file mode 100644
index 0000000..f1d92f0
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/OracleJdbcToInitializer.java
@@ -0,0 +1,498 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sqoop.connector.jdbc.oracle;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.Date;
+import java.util.List;
+
+import org.apache.commons.lang.BooleanUtils;
+import org.apache.log4j.Logger;
+import org.apache.sqoop.common.MutableContext;
+import org.apache.sqoop.connector.jdbc.oracle.configuration.ConnectionConfig;
+import org.apache.sqoop.connector.jdbc.oracle.configuration.LinkConfiguration;
+import org.apache.sqoop.connector.jdbc.oracle.configuration.ToJobConfig;
+import org.apache.sqoop.connector.jdbc.oracle.configuration.ToJobConfiguration;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleQueries;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleTable;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleTablePartition;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleTablePartitions;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleUtilities;
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleUtilities.UpdateMode;
+import org.apache.sqoop.job.etl.InitializerContext;
+
+public class OracleJdbcToInitializer extends
+ OracleJdbcCommonInitializer<ToJobConfiguration> {
+
+ private static final Logger LOG =
+ Logger.getLogger(OracleJdbcToInitializer.class);
+
+  /**
+   * Opens the Oracle connection via the common initializer, then resolves the
+   * destination table from the link's username and the TO-job table name.
+   *
+   * @throws SQLException if the connection cannot be established
+   */
+  @Override
+  public void connect(InitializerContext context,
+      LinkConfiguration linkConfiguration, ToJobConfiguration jobConfiguration)
+          throws SQLException {
+    super.connect(context, linkConfiguration, jobConfiguration);
+    table = OracleUtilities.decodeOracleTableName(
+        linkConfiguration.connectionConfig.username,
+        jobConfiguration.toJobConfig.tableName);
+  }
+
+  /**
+   * TO-direction initialization: runs the common initialization, creates any
+   * Oracle objects the export needs (export table, partitions, per-mapper
+   * staging tables), and verifies the target is really an Oracle table.
+   *
+   * @throws RuntimeException wrapping any SQLException, or if the target is
+   *           not an Oracle table
+   */
+  @Override
+  public void initialize(InitializerContext context,
+      LinkConfiguration linkConfiguration,
+      ToJobConfiguration jobConfiguration) {
+    super.initialize(context, linkConfiguration, jobConfiguration);
+    LOG.debug("Running Oracle JDBC connector initializer");
+    try {
+      createAnyRequiredOracleObjects(context.getContext(),
+          jobConfiguration.toJobConfig, linkConfiguration.connectionConfig);
+
+      if (!isSqoopTableAnOracleTable(connection,
+          linkConfiguration.connectionConfig.username, table)) {
+        throw new RuntimeException("Can only load data into Oracle tables.");
+      }
+    } catch(SQLException ex) {
+      throw new RuntimeException(ex);
+    }
+  }
+
+  /**
+   * Creates whatever Oracle objects this export run needs, driven by the job
+   * configuration:
+   * <ul>
+   *   <li>records the database SYSDATE in the job context (it names this
+   *       run's partition and staging tables);</li>
+   *   <li>validates the template-table / update-key option combinations;</li>
+   *   <li>creates the export table from a template (partitioned or not), or
+   *       adds new partitions to an existing Sqoop-partitioned table;</li>
+   *   <li>creates one staging table per mapper when required.</li>
+   * </ul>
+   *
+   * @throws SQLException if any of the catalog queries or DDL fail
+   */
+  private void createAnyRequiredOracleObjects(MutableContext context,
+      ToJobConfig jobConfig, ConnectionConfig connectionConfig)
+          throws SQLException {
+
+    // The SYSDATE on the Oracle database will be used as the partition value
+    // for this export job...
+    Object sysDateTime = OracleQueries.getSysDate(connection);
+    String sysDateStr =
+        OracleQueries.oraDATEToString(sysDateTime, "yyyy-mm-dd hh24:mi:ss");
+    context.setString(OracleJdbcConnectorConstants.SQOOP_ORACLE_JOB_SYSDATE,
+        sysDateStr);
+
+    // Warn (via the log) about day-old staging tables left behind by
+    // previously failed jobs.
+    checkForOldOraOopTemporaryOracleTables(connection, sysDateTime,
+        OracleQueries.getCurrentSchema(connection));
+
+    // Store the actual partition value, so the N mappers know what value to
+    // insert...
+    String partitionValue =
+        OracleQueries.oraDATEToString(sysDateTime,
+            OracleJdbcConnectorConstants.ORAOOP_EXPORT_PARTITION_DATE_FORMAT);
+    context.setString(
+        OracleJdbcConnectorConstants.ORAOOP_EXPORT_PARTITION_DATE_VALUE,
+        partitionValue);
+
+    // Generate the (22 character) partition name...
+    String partitionName =
+        OracleUtilities
+            .createExportTablePartitionNameFromOracleTimestamp(sysDateTime);
+
+    //TODO: Number of mappers needs to be fixed
+    // NOTE(review): must stay in sync with OracleJdbcToDestroyer.numMappers,
+    // which is also hard-coded to 8.
+    int numMappers = 8;
+
+    String exportTableTemplate = jobConfig.templateTable;
+
+    if(exportTableTemplate==null) {
+      exportTableTemplate = "";
+    }
+
+    String user = connectionConfig.username;
+    //TODO: This is from the other Oracle Manager
+    //if (user == null) {
+    //  user = OracleManager.getSessionUser(connection);
+    //}
+
+    OracleTable templateTableContext =
+        OracleUtilities.decodeOracleTableName(user, exportTableTemplate);
+
+    boolean noLoggingOnNewTable = BooleanUtils.isTrue(jobConfig.nologging);
+
+    String updateKeyCol = jobConfig.updateKey;
+
+    /* =========================== */
+    /*    VALIDATION OF INPUTS     */
+    /* =========================== */
+
+    if (updateKeyCol == null || updateKeyCol.isEmpty()) {
+      // We're performing an "insert" export, not an "update" export.
+
+      // Check that the "oraoop.export.merge" property has not been specified,
+      // as this would be
+      // an invalid scenario...
+      if (OracleUtilities.getExportUpdateMode(jobConfig) == UpdateMode.Merge) {
+        throw new RuntimeException("The merge option can only be used if "
+            + "an update key is specified.");
+      }
+    }
+
+    if (OracleUtilities
+        .userWantsToCreatePartitionedExportTableFromTemplate(jobConfig)
+        || OracleUtilities
+            .userWantsToCreateNonPartitionedExportTableFromTemplate(jobConfig)) {
+
+      // OraOop will create the export table.
+
+      // Oracle identifiers are length-limited; reject names the CREATE TABLE
+      // would fail on anyway, with a clearer message.
+      if (table.getName().length()
+          > OracleJdbcConnectorConstants.Oracle.MAX_IDENTIFIER_LENGTH) {
+        String msg =
+            String.format(
+                "The Oracle table name \"%s\" is longer than %d characters.\n"
+                    + "Oracle will not allow a table with this name to be created.",
+                table.getName(),
+                OracleJdbcConnectorConstants.Oracle.MAX_IDENTIFIER_LENGTH);
+        throw new RuntimeException(msg);
+      }
+
+      if (updateKeyCol != null && !updateKeyCol.isEmpty()) {
+
+        // We're performing an "update" export, not an "insert" export.
+
+        // Check whether the user is attempting an "update" (i.e. a non-merge).
+        // If so, they're
+        // asking to only UPDATE rows in a (about to be created) (empty) table
+        // that contains no rows.
+        // This will be a waste of time, as we'd be attempting to perform UPDATE
+        // operations against a
+        // table with no rows in it...
+        UpdateMode updateMode = OracleUtilities.getExportUpdateMode(jobConfig);
+        if (updateMode == UpdateMode.Update) {
+          throw new RuntimeException(String.format(
+              "\n\nCombining the template table option with the merge "
+                  + "option is nonsensical, as this would create an "
+                  + "empty table and then perform "
+                  + "a lot of work that results in a table containing no rows.\n"));
+        }
+      }
+
+      // Check that the specified template table actually exists and is a
+      // table...
+      String templateTableObjectType =
+          OracleQueries.getOracleObjectType(connection,
+              templateTableContext);
+      if (templateTableObjectType == null) {
+        throw new RuntimeException(String.format(
+            "The specified Oracle template table \"%s\" does not exist.",
+            templateTableContext.toString()));
+      }
+
+      if (!templateTableObjectType.equalsIgnoreCase(
+          OracleJdbcConnectorConstants.Oracle.OBJECT_TYPE_TABLE)) {
+        throw new RuntimeException(
+            String.format(
+                "The specified Oracle template table \"%s\" is not an "
+                    + "Oracle table, it's a %s.",
+                templateTableContext.toString(), templateTableObjectType));
+      }
+
+      if (BooleanUtils.isTrue(jobConfig.dropTableIfExists)) {
+        OracleQueries.dropTable(connection, table);
+      }
+
+      // Check that there is no existing database object with the same name of
+      // the table to be created...
+      String newTableObjectType =
+          OracleQueries.getOracleObjectType(connection, table);
+      if (newTableObjectType != null) {
+        throw new RuntimeException(
+            String.format(
+                "%s cannot create a new Oracle table named %s as a \"%s\" "
+                    + "with this name already exists.",
+                OracleJdbcConnectorConstants.CONNECTOR_NAME, table.toString(),
+                newTableObjectType));
+      }
+    } else {
+      // The export table already exists.
+
+      if (updateKeyCol != null && !updateKeyCol.isEmpty()) {
+
+        // We're performing an "update" export, not an "insert" export.
+
+        // Check that there exists an index on the export table on the
+        // update-key column(s).
+        // Without such an index, this export may perform like a real dog...
+        String[] updateKeyColumns =
+            OracleUtilities.getExportUpdateKeyColumnNames(jobConfig);
+        if (!OracleQueries.doesIndexOnColumnsExist(connection,
+            table, updateKeyColumns)) {
+          String msg = String.format(
+              "\n**************************************************************"
+                  + "***************************************************************"
+                  + "\n\tThe table %1$s does not have a valid index on "
+                  + "the column(s) %2$s.\n"
+                  + "\tAs a consequence, this export may take a long time to "
+                  + "complete.\n"
+                  + "\tIf performance is unacceptable, consider reattempting this "
+                  + "job after creating an index "
+                  + "on this table via the SQL...\n"
+                  + "\t\tcreate index <index_name> on %1$s(%2$s);\n"
+                  + "****************************************************************"
+                  + "*************************************************************",
+              table.toString(),
+              OracleUtilities.stringArrayToCSV(updateKeyColumns));
+          LOG.warn(msg);
+        }
+      }
+    }
+
+    // Staging tables are needed for every "update" export, and for the
+    // partitioned create/extend paths below.
+    boolean createMapperTables = false;
+
+    if (updateKeyCol != null && !updateKeyCol.isEmpty()) {
+      createMapperTables = true;
+    }
+
+    if (OracleUtilities
+        .userWantsToCreatePartitionedExportTableFromTemplate(jobConfig)) {
+      /* ================================= */
+      /*    CREATE A PARTITIONED TABLE     */
+      /* ================================= */
+
+      // Create a new Oracle table using the specified template...
+
+      String[] subPartitionNames =
+          OracleUtilities.generateExportTableSubPartitionNames(numMappers,
+              sysDateTime);
+      // Create the export table from a template table...
+      String tableStorageClause =
+          OracleUtilities.getExportTableStorageClause(jobConfig);
+
+      OracleQueries.createExportTableFromTemplateWithPartitioning(
+          connection, table,
+          tableStorageClause, templateTableContext, noLoggingOnNewTable,
+          partitionName, sysDateTime, numMappers,
+          subPartitionNames);
+
+      createMapperTables = true;
+    } else if (OracleUtilities
+        .userWantsToCreateNonPartitionedExportTableFromTemplate(jobConfig)) {
+      /* ===================================== */
+      /*    CREATE A NON-PARTITIONED TABLE     */
+      /* ===================================== */
+      String tableStorageClause =
+          OracleUtilities.getExportTableStorageClause(jobConfig);
+
+      OracleQueries.createExportTableFromTemplate(connection,
+          table, tableStorageClause,
+          templateTableContext, noLoggingOnNewTable);
+    } else {
+      /* ===================================================== */
+      /*    ADD ADDITIONAL PARTITIONS TO AN EXISTING TABLE     */
+      /* ===================================================== */
+
+      // If the export table is partitioned, and the partitions were created by
+      // OraOop, then we need
+      // create additional partitions...
+
+      OracleTablePartitions tablePartitions =
+          OracleQueries.getPartitions(connection, table);
+      // Find any partition name starting with "ORAOOP_"...
+      OracleTablePartition oraOopPartition =
+          tablePartitions.findPartitionByRegEx("^"
+              + OracleJdbcConnectorConstants.
+                  EXPORT_TABLE_PARTITION_NAME_PREFIX);
+
+      if (tablePartitions.size() > 0 && oraOopPartition == null) {
+
+        for (int idx = 0; idx < tablePartitions.size(); idx++) {
+          LOG.info(String.format(
+              "The Oracle table %s has a partition named \"%s\".",
+              table.toString(),
+              tablePartitions.get(idx).getName()));
+        }
+
+        LOG.warn(String.format(
+            "The Oracle table %s is partitioned.\n"
+                + "These partitions were not created by %s.",
+            table.toString(),
+            OracleJdbcConnectorConstants.CONNECTOR_NAME));
+      }
+
+      if (oraOopPartition != null) {
+
+        // Indicate in the configuration what's happening...
+        context.setBoolean(OracleJdbcConnectorConstants.
+            EXPORT_TABLE_HAS_SQOOP_PARTITIONS, true);
+
+        LOG.info(String.format(
+            "The Oracle table %s is partitioned.\n"
+                + "These partitions were created by %s, so "
+                + "additional partitions will now be created.\n"
+                + "The name of the new partition will be \"%s\".",
+            table.toString(), OracleJdbcConnectorConstants.
+                CONNECTOR_NAME, partitionName));
+
+        String[] subPartitionNames =
+            OracleUtilities.generateExportTableSubPartitionNames(numMappers,
+                sysDateTime);
+
+        // Add another partition (and N subpartitions) to this existing,
+        // partitioned export table...
+        OracleQueries.createMoreExportTablePartitions(connection,
+            table, partitionName,
+            sysDateTime, subPartitionNames);
+
+        createMapperTables = true;
+      }
+    }
+
+    if(createMapperTables) {
+      createUniqueMapperTable(sysDateTime, numMappers, jobConfig);
+    }
+  }
+
+  /**
+   * Creates one empty staging table per mapper, shaped like the export table.
+   * Any table left over from a previous attempt of the same mapper is dropped
+   * first so re-runs start clean.
+   *
+   * @throws SQLException if the drop or create DDL fails
+   */
+  private void createUniqueMapperTable(Object sysDateTime,
+      int numMappers, ToJobConfig jobConfig)
+          throws SQLException {
+
+    // Mappers insert data into a unique table before either:
+    // - exchanging it into a subpartition of the 'real' export table; or
+    // - merging it into the 'real' export table.
+
+    for (int i=0; i<numMappers; i++) {
+      OracleTable mapperTable =
+          OracleUtilities.generateExportTableMapperTableName(i,
+              sysDateTime, null);
+
+      // If this mapper is being reattempted in response to a failure, we need
+      // to delete the
+      // temporary table created by the previous attempt...
+      OracleQueries.dropTable(connection, mapperTable);
+
+      String temporaryTableStorageClause =
+          OracleUtilities.getTemporaryTableStorageClause(jobConfig);
+
+      OracleQueries.createExportTableForMapper(connection,
+          mapperTable, temporaryTableStorageClause, table
+          , false); // <- addOraOopPartitionColumns
+
+      LOG.debug(String.format("Created temporary mapper table %s", mapperTable
+          .toString()));
+    }
+  }
+
+ private void checkForOldOraOopTemporaryOracleTables(Connection connection,
+ Object sysDateTime, String schema) {
+
+ try {
+
+ StringBuilder message = new StringBuilder();
+ message
+ .append(String.format(
+ "The following tables appear to be old temporary tables created by "
+ + "%s that have not been deleted.\n"
+ + "They are probably left over from jobs that encountered an error and "
+ + "could not clean up after themselves.\n"
+ + "You might want to drop these Oracle tables in order to reclaim "
+ + "Oracle storage space:\n",
+ OracleJdbcConnectorConstants.CONNECTOR_NAME));
+ boolean showMessage = false;
+
+ String generatedTableName =
+ OracleUtilities.generateExportTableMapperTableName(0, sysDateTime,
+ schema).getName();
+ generatedTableName = generatedTableName.replaceAll("[0-9]", "%");
+ generatedTableName =
+ OracleUtilities.replaceAll(generatedTableName, "%%", "%");
+ Date sysDate = OracleQueries.oraDATEToDate(sysDateTime);
+
+ List<OracleTable> tables =
+ OracleQueries.getTablesWithTableNameLike(connection, schema,
+ generatedTableName);
+
+ for (OracleTable oracleTable : tables) {
+ OracleUtilities.DecodedExportMapperTableName tableName =
+ OracleUtilities.decodeExportTableMapperTableName(oracleTable);
+ if (tableName != null) {
+ Date tableDate =
+ OracleQueries.oraDATEToDate(tableName.getTableDateTime());
+ double daysApart =
+ (sysDate.getTime() - tableDate.getTime()) / (1000 * 60 * 60 * 24);
+ if (daysApart > 1.0) {
+ showMessage = true;
+ message.append(String.format("\t%s\n", oracleTable.toString()));
+ }
+ }
+ }
+
+ if (showMessage) {
+ LOG.info(message.toString());
+ }
+ } catch (Exception ex) {
+ LOG.warn(String.format(
+ "%s was unable to check for the existance of old "
+ + "temporary Oracle tables.\n" + "Error:\n%s",
+ OracleJdbcConnectorConstants.CONNECTOR_NAME, ex.toString()));
+ }
+ }
+
+ private boolean isSqoopTableAnOracleTable(Connection connection,
+ String connectionUserName, OracleTable tableContext) {
+
+ String oracleObjectType;
+
+ try {
+
+ // Find the table via dba_tables...
+ OracleTable oracleTable =
+ OracleQueries.getTable(connection, tableContext.getSchema(),
+ tableContext.getName());
+ if (oracleTable != null) {
+ return true;
+ }
+
+ // If we could not find the table via dba_tables, then try and determine
+ // what type of database object the
+ // user was referring to. Perhaps they've specified the name of a view?...
+ oracleObjectType =
+ OracleQueries.getOracleObjectType(connection, tableContext);
+
+ if (oracleObjectType == null) {
+ LOG.info(String.format(
+ "%1$s will not process this Sqoop connection, "
+ + "as the Oracle user %2$s does not own a table named %3$s.\n"
+ + "\tPlease prefix the table name with the owner.\n "
+ + "\tNote: You may need to double-quote the owner and/or table name."
+ + "\n\tE.g. sqoop ... --username %4$s --table %2$s.%3$s\n",
+ OracleJdbcConnectorConstants.CONNECTOR_NAME, tableContext.getSchema(),
+ tableContext.getName(), connectionUserName));
+ return false;
+ }
+
+ } catch (SQLException ex) {
+ LOG.warn(String.format(
+ "Unable to determine the Oracle-type of the object named %s owned by "
+ + "%s.\nError:\n" + "%s", tableContext.getName(), tableContext
+ .getSchema(), ex.getMessage()));
+
+ // In the absence of conflicting information, let's assume the object is
+ // actually a table...
+ return true;
+ }
+
+ boolean result =
+ oracleObjectType.equalsIgnoreCase(
+ OracleJdbcConnectorConstants.Oracle.OBJECT_TYPE_TABLE);
+
+ if (!result) {
+ LOG.info(String.format("%s will not process this sqoop connection, "
+ + "as %s is not an Oracle table, it's a %s.",
+ OracleJdbcConnectorConstants.CONNECTOR_NAME, tableContext.toString(),
+ oracleObjectType));
+ }
+
+ return result;
+ }
+}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/configuration/ConnectionConfig.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/configuration/ConnectionConfig.java b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/configuration/ConnectionConfig.java
new file mode 100644
index 0000000..c355a77
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/configuration/ConnectionConfig.java
@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sqoop.connector.jdbc.oracle.configuration;
+
+import org.apache.sqoop.connector.jdbc.oracle.util.OracleConnectionFactory;
+import org.apache.sqoop.model.ConfigClass;
+import org.apache.sqoop.model.Input;
+import org.apache.sqoop.model.Validator;
+import org.apache.sqoop.validation.Status;
+import org.apache.sqoop.validation.validators.AbstractValidator;
+import org.apache.sqoop.validation.validators.StartsWith;
+
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.Map;
+
+/**
+ *
+ */
+@ConfigClass(validators = {@Validator(ConnectionConfig.ConfigValidator.class)})
+public class ConnectionConfig {
+ @Input(size = 128, validators = {@Validator(value = StartsWith.class, strArg = "jdbc:")} )
+ public String connectionString;
+
+ @Input(size = 40)
+ public String username;
+
+ @Input(size = 40, sensitive = true)
+ public String password;
+
+ @Input
+ public Map<String, String> jdbcProperties;
+
+ @Input
+ public String timeZone;
+
+ @Input
+ public String actionName;
+
+ @Input
+ public Integer fetchSize;
+
+ @Input
+ public String initializationStatements;
+
+ @Input
+ public Boolean jdbcUrlVerbatim;
+
+ @Input
+ public String racServiceName;
+
+ public static class ConfigValidator extends AbstractValidator<ConnectionConfig> {
+ @Override
+ public void validate(ConnectionConfig linkConfig) {
+ // See if we can connect to the database
+ try {
+ OracleConnectionFactory.makeConnection(linkConfig);
+ } catch (SQLException e) {
+ addMessage(Status.WARNING, "Can't connect to the database with given credentials: " + e.getMessage());
+ }
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/configuration/FromJobConfig.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/configuration/FromJobConfig.java b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/configuration/FromJobConfig.java
new file mode 100644
index 0000000..38c808f
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/configuration/FromJobConfig.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sqoop.connector.jdbc.oracle.configuration;
+
+import org.apache.sqoop.model.ConfigClass;
+import org.apache.sqoop.model.Input;
+import org.apache.sqoop.model.Validator;
+import org.apache.sqoop.validation.validators.NotEmpty;
+
/**
 * FROM-side job configuration for the Oracle JDBC connector: names the
 * source table and holds the options controlling how its rows are read
 * and split across mappers. Only tableName is validated (NotEmpty); every
 * other input is optional.
 */
@ConfigClass
public class FromJobConfig {

  // Name of the Oracle table to read from; must be non-empty.
  @Input(size = 2000, validators = { @Validator(NotEmpty.class)})
  public String tableName;

  // NOTE(review): presumably toggles a consistent (single point-in-time)
  // read across mappers - confirm against the extractor that consumes it.
  @Input
  public Boolean consistentRead;

  // SCN used for the consistent read - assumed to pair with
  // consistentRead; verify in the consumer.
  @Input
  public Long consistentReadScn;

  // Partitions to restrict the import to - exact format (e.g.
  // comma-separated names) is not visible here; TODO confirm.
  @Input(size = 2000)
  public String partitionList;

  // How the table is divided into data chunks; legal values are defined by
  // the consuming code, not here.
  @Input(size = 2000)
  public String dataChunkMethod;

  // How data chunks are assigned to mappers; legal values defined elsewhere.
  @Input(size = 2000)
  public String dataChunkAllocationMethod;

  // Where a user-supplied WHERE clause is applied; legal values defined
  // elsewhere.
  @Input(size = 2000)
  public String whereClauseLocation;

  // NOTE(review): presumably excludes LOB columns from the import when
  // true - confirm.
  @Input
  public Boolean omitLobColumns;

  // Optimizer hint text - assumed to be injected into the generated query.
  @Input
  public String queryHint;

  // Extra row-selection conditions - assumed appended to the import query.
  @Input(size = 2000)
  public String conditions;

}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/configuration/FromJobConfiguration.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/configuration/FromJobConfiguration.java b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/configuration/FromJobConfiguration.java
new file mode 100644
index 0000000..6a6c1aa
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/configuration/FromJobConfiguration.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sqoop.connector.jdbc.oracle.configuration;
+
+import org.apache.sqoop.model.ConfigurationClass;
+import org.apache.sqoop.model.Config;
+
+/**
+ *
+ */
+@ConfigurationClass
+public class FromJobConfiguration {
+ @Config public FromJobConfig fromJobConfig;
+
+ public FromJobConfiguration() {
+ fromJobConfig = new FromJobConfig();
+ }
+}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/configuration/LinkConfiguration.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/configuration/LinkConfiguration.java b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/configuration/LinkConfiguration.java
new file mode 100644
index 0000000..990343b
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/configuration/LinkConfiguration.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sqoop.connector.jdbc.oracle.configuration;
+
+import org.apache.sqoop.model.ConfigurationClass;
+import org.apache.sqoop.model.Config;
+
+/**
+ *
+ */
+@ConfigurationClass
+public class LinkConfiguration {
+
+ @Config public ConnectionConfig connectionConfig;
+
+ public LinkConfiguration() {
+ connectionConfig = new ConnectionConfig();
+ }
+}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/configuration/ToJobConfig.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/configuration/ToJobConfig.java b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/configuration/ToJobConfig.java
new file mode 100644
index 0000000..939a87a
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/configuration/ToJobConfig.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sqoop.connector.jdbc.oracle.configuration;
+
+import org.apache.sqoop.model.ConfigClass;
+import org.apache.sqoop.model.Input;
+import org.apache.sqoop.model.Validator;
+import org.apache.sqoop.validation.validators.NotEmpty;
+
/**
 * TO-side job configuration for the Oracle JDBC connector: names the
 * destination table and holds the options controlling how exported data
 * is written. Only tableName is validated (NotEmpty); every other input
 * is optional.
 */
@ConfigClass
public class ToJobConfig {

  // Name of the Oracle table to write to; must be non-empty.
  @Input(size = 2000, validators = { @Validator(NotEmpty.class)})
  public String tableName;

  // NOTE(review): presumably an existing table whose definition is used
  // to create the destination table - confirm in the loader code.
  @Input(size = 2000)
  public String templateTable;

  // Whether the destination table should be created partitioned - assumed;
  // verify against the table-creation path.
  @Input
  public Boolean partitioned;

  // NOTE(review): presumably creates/loads the table with NOLOGGING -
  // confirm.
  @Input
  public Boolean nologging;

  // Column(s) identifying rows for update-style exports - exact format not
  // visible here; TODO confirm.
  @Input(size = 2000)
  public String updateKey;

  // NOTE(review): presumably switches update exports to a MERGE
  // (upsert) - confirm.
  @Input
  public Boolean updateMerge;

  @Input
  public Boolean dropTableIfExists;

  // Oracle storage clause - assumed used when creating the destination
  // table; verify.
  @Input(size = 2000)
  public String storageClause;

  // Storage clause for the temporary mapper tables - assumed; verify.
  @Input(size = 2000)
  public String temporaryStorageClause;

  // Hint text - assumed injected alongside APPEND_VALUES in the generated
  // INSERT; verify.
  @Input(size = 2000)
  public String appendValuesHint;

  // NOTE(review): presumably enables parallel DML/DDL - confirm.
  @Input
  public Boolean parallel;

}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/configuration/ToJobConfiguration.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/configuration/ToJobConfiguration.java b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/configuration/ToJobConfiguration.java
new file mode 100644
index 0000000..b34df1a
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/configuration/ToJobConfiguration.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sqoop.connector.jdbc.oracle.configuration;
+
+import org.apache.sqoop.model.ConfigurationClass;
+import org.apache.sqoop.model.Config;
+
+/**
+ *
+ */
+@ConfigurationClass
+public class ToJobConfiguration {
+ @Config public ToJobConfig toJobConfig;
+
+ public ToJobConfiguration() {
+ toJobConfig = new ToJobConfig();
+ }
+}
http://git-wip-us.apache.org/repos/asf/sqoop/blob/fa3c77b6/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleActiveInstance.java
----------------------------------------------------------------------
diff --git a/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleActiveInstance.java b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleActiveInstance.java
new file mode 100644
index 0000000..b46bce5
--- /dev/null
+++ b/connector/connector-oracle-jdbc/src/main/java/org/apache/sqoop/connector/jdbc/oracle/util/OracleActiveInstance.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sqoop.connector.jdbc.oracle.util;
+
/**
 * Mutable value holder for one row of v$active_instances: an instance
 * name paired with the host it runs on. Both properties default to null
 * until set.
 */
public class OracleActiveInstance {

  private String instanceName;
  private String hostName;

  /** @return the Oracle instance name, or null if not yet set */
  public String getInstanceName() {
    return this.instanceName;
  }

  /** @param value the Oracle instance name to store */
  public void setInstanceName(String value) {
    this.instanceName = value;
  }

  /** @return the host name, or null if not yet set */
  public String getHostName() {
    return this.hostName;
  }

  /** @param value the host name to store */
  public void setHostName(String value) {
    this.hostName = value;
  }
}