Posted to commits@manifoldcf.apache.org by kw...@apache.org on 2017/02/28 06:58:06 UTC

svn commit: r1784692 - in /manifoldcf/trunk/connectors/kafka/connector/src/test/java/org/apache/manifoldcf/agents/output/kafka: APISanityHSQLDBIT.java BaseITHSQLDB.java

Author: kwright
Date: Tue Feb 28 06:58:06 2017
New Revision: 1784692

URL: http://svn.apache.org/viewvc?rev=1784692&view=rev
Log:
Get kafka APISanity test working

Modified:
    manifoldcf/trunk/connectors/kafka/connector/src/test/java/org/apache/manifoldcf/agents/output/kafka/APISanityHSQLDBIT.java
    manifoldcf/trunk/connectors/kafka/connector/src/test/java/org/apache/manifoldcf/agents/output/kafka/BaseITHSQLDB.java

Modified: manifoldcf/trunk/connectors/kafka/connector/src/test/java/org/apache/manifoldcf/agents/output/kafka/APISanityHSQLDBIT.java
URL: http://svn.apache.org/viewvc/manifoldcf/trunk/connectors/kafka/connector/src/test/java/org/apache/manifoldcf/agents/output/kafka/APISanityHSQLDBIT.java?rev=1784692&r1=1784691&r2=1784692&view=diff
==============================================================================
--- manifoldcf/trunk/connectors/kafka/connector/src/test/java/org/apache/manifoldcf/agents/output/kafka/APISanityHSQLDBIT.java (original)
+++ manifoldcf/trunk/connectors/kafka/connector/src/test/java/org/apache/manifoldcf/agents/output/kafka/APISanityHSQLDBIT.java Tue Feb 28 06:58:06 2017
@@ -31,7 +31,6 @@ public class APISanityHSQLDBIT extends B
   @Test
   public void sanityCheck()
           throws Exception {
-    try {
       int i;
 
       // Create a basic file system connection, and save it.
@@ -59,14 +58,6 @@ public class APISanityHSQLDBIT extends B
 
       child = new ConfigurationNode("configuration");
 
-      //Testing Repository Connector parameters
-      // MHL
-      /*
-       ConfigurationNode cmisBindingNode = new ConfigurationNode("_PARAMETER_");
-       cmisBindingNode.setAttribute("name", CmisConfig.BINDING_PARAM);
-       cmisBindingNode.setValue(CmisConfig.BINDING_DEFAULT_VALUE);
-       child.addChild(child.getChildCount(), cmisBindingNode);
-       */
       connectionObject.addChild(connectionObject.getChildCount(), child);
 
       requestObject = new Configuration();
@@ -199,10 +190,6 @@ public class APISanityHSQLDBIT extends B
       waitJobDeleted(jobIDString, 120000L);
 
       // Cleanup is automatic by the base class, so we can feel free to leave jobs and connections lying around.
-    } catch (Exception e) {
-      e.printStackTrace();
-      throw e;
-    }
   }
 
   protected void startJob(String jobIDString)

Modified: manifoldcf/trunk/connectors/kafka/connector/src/test/java/org/apache/manifoldcf/agents/output/kafka/BaseITHSQLDB.java
URL: http://svn.apache.org/viewvc/manifoldcf/trunk/connectors/kafka/connector/src/test/java/org/apache/manifoldcf/agents/output/kafka/BaseITHSQLDB.java?rev=1784692&r1=1784691&r2=1784692&view=diff
==============================================================================
--- manifoldcf/trunk/connectors/kafka/connector/src/test/java/org/apache/manifoldcf/agents/output/kafka/BaseITHSQLDB.java (original)
+++ manifoldcf/trunk/connectors/kafka/connector/src/test/java/org/apache/manifoldcf/agents/output/kafka/BaseITHSQLDB.java Tue Feb 28 06:58:06 2017
@@ -17,6 +17,8 @@
 
 package org.apache.manifoldcf.agents.output.kafka;
 
+import java.io.File;
+
 import java.util.Properties;
 import org.junit.After;
 import static org.junit.Assert.fail;
@@ -48,41 +50,41 @@ public class BaseITHSQLDB extends org.ap
     Properties kafkaProperties = new Properties();
     Properties zkProperties = new Properties();
 
-    try {
-      //load properties
-      kafkaProperties.put("broker.id", "0");
-      kafkaProperties.put("port", "9092");
-      kafkaProperties.put("num.network.threads", "3");
-      kafkaProperties.put("num.io.threads", "8");
-      kafkaProperties.put("socket.send.buffer.bytes", "102400");
-      kafkaProperties.put("socket.receive.buffer.bytes", "102400");
-      kafkaProperties.put("socket.request.max.bytes", "104857600");
-      kafkaProperties.put("log.dirs", "/tmp/kafka-logs");
-      kafkaProperties.put("num.partitions", "1");
-      kafkaProperties.put("num.recovery.threads.per.data.dir", "1");
-      kafkaProperties.put("log.retention.hours", "168");
-      kafkaProperties.put("log.segment.bytes", "1073741824");
-      kafkaProperties.put("log.retention.check.interval.ms", "300000");
-      kafkaProperties.put("log.cleaner.enable", "false");
-      kafkaProperties.put("zookeeper.connect", "localhost:2181");
-      kafkaProperties.put("zookeeper.connection.timeout.ms", "6000");
-
-      zkProperties.put("dataDir", "/tmp/zookeeper");
-      zkProperties.put("clientPort", "2181");
-      zkProperties.put("maxClientCnxns", "0");
-
-      //kafkaProperties.load(Class.class.getResourceAsStream("/kafkalocal.properties"));
-      //zkProperties.load(Class.class.getResourceAsStream("/zklocal.properties"));
-      System.out.println("Kafka is starting...");
-
-      //start kafka
-      kafka = new KafkaLocal(kafkaProperties, zkProperties);
-      Thread.sleep(5000);
-    } catch (Exception e) {
-      e.printStackTrace(System.out);
-      fail("Error running local Kafka broker");
-      e.printStackTrace(System.out);
-    }
+    String tmpDir = System.getProperty("java.io.tmpdir");
+    File logDir = new File(tmpDir, "kafka-logs");
+    logDir.mkdir();
+    File zookeeperDir = new File(tmpDir, "zookeeper");
+    zookeeperDir.mkdir();
+            
+    //load properties
+    kafkaProperties.put("broker.id", "0");
+    kafkaProperties.put("port", "9092");
+    kafkaProperties.put("num.network.threads", "3");
+    kafkaProperties.put("num.io.threads", "8");
+    kafkaProperties.put("socket.send.buffer.bytes", "102400");
+    kafkaProperties.put("socket.receive.buffer.bytes", "102400");
+    kafkaProperties.put("socket.request.max.bytes", "104857600");
+    kafkaProperties.put("log.dirs", logDir.getAbsolutePath());
+    kafkaProperties.put("num.partitions", "1");
+    kafkaProperties.put("num.recovery.threads.per.data.dir", "1");
+    kafkaProperties.put("log.retention.hours", "168");
+    kafkaProperties.put("log.segment.bytes", "1073741824");
+    kafkaProperties.put("log.retention.check.interval.ms", "300000");
+    kafkaProperties.put("log.cleaner.enable", "false");
+    kafkaProperties.put("zookeeper.connect", "localhost:2181");
+    kafkaProperties.put("zookeeper.connection.timeout.ms", "6000");
+
+    zkProperties.put("dataDir", zookeeperDir.getAbsolutePath());
+    zkProperties.put("clientPort", "2181");
+    zkProperties.put("maxClientCnxns", "0");
+
+    //kafkaProperties.load(Class.class.getResourceAsStream("/kafkalocal.properties"));
+    //zkProperties.load(Class.class.getResourceAsStream("/zklocal.properties"));
+    System.out.println("Kafka is starting...");
+
+    //start kafka
+    kafka = new KafkaLocal(kafkaProperties, zkProperties);
+    Thread.sleep(5000);
   }
 
   @After
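
For reference, the revised test setup above boils down to the following standalone sketch, which starts the embedded ZooKeeper/Kafka pair against directories under java.io.tmpdir rather than hard-coded /tmp paths. KafkaLocal and its two-Properties constructor come from this test package as shown in the diff; the trimmed property set, the class name, and the main() wrapper are illustrative assumptions, not part of the commit.

import java.io.File;
import java.util.Properties;

public class LocalKafkaSketch {
  public static void main(String[] args) throws Exception {
    // Keep broker and ZooKeeper state under the JVM temp directory instead of hard-coded /tmp paths.
    String tmpDir = System.getProperty("java.io.tmpdir");
    File logDir = new File(tmpDir, "kafka-logs");
    logDir.mkdir();
    File zookeeperDir = new File(tmpDir, "zookeeper");
    zookeeperDir.mkdir();

    // Broker-side settings (subset of what the test configures).
    Properties kafkaProperties = new Properties();
    kafkaProperties.put("broker.id", "0");
    kafkaProperties.put("port", "9092");
    kafkaProperties.put("log.dirs", logDir.getAbsolutePath());
    kafkaProperties.put("zookeeper.connect", "localhost:2181");
    kafkaProperties.put("zookeeper.connection.timeout.ms", "6000");

    // ZooKeeper-side settings.
    Properties zkProperties = new Properties();
    zkProperties.put("dataDir", zookeeperDir.getAbsolutePath());
    zkProperties.put("clientPort", "2181");
    zkProperties.put("maxClientCnxns", "0");

    // Start the embedded ZooKeeper/Kafka pair (KafkaLocal is the helper used by BaseITHSQLDB),
    // then give the broker a few seconds to come up, as the test does.
    KafkaLocal kafka = new KafkaLocal(kafkaProperties, zkProperties);
    Thread.sleep(5000);
  }
}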