Posted to commits@kafka.apache.org by ju...@apache.org on 2014/09/25 06:13:45 UTC

[1/2] kafka-1645; some more jars in our src release; patched by Joe Stein; reviewed by Jun Rao

Repository: kafka
Updated Branches:
  refs/heads/trunk 27bc37289 -> 084566b83


http://git-wip-us.apache.org/repos/asf/kafka/blob/084566b8/system_test/migration_tool_testsuite/testcase_9006/cluster_config.json
----------------------------------------------------------------------
diff --git a/system_test/migration_tool_testsuite/testcase_9006/cluster_config.json b/system_test/migration_tool_testsuite/testcase_9006/cluster_config.json
deleted file mode 100644
index 9fcb3b0..0000000
--- a/system_test/migration_tool_testsuite/testcase_9006/cluster_config.json
+++ /dev/null
@@ -1,141 +0,0 @@
-{
-    "cluster_config": [
-        {
-            "entity_id": "0",
-            "hostname": "localhost",
-            "role": "zookeeper",
-            "cluster_name":"source",
-            "kafka_home": "system_test/migration_tool_testsuite/0.7",
-            "java_home": "default",
-            "jmx_port": "9900"
-        },
-        {
-            "entity_id": "1",
-            "hostname": "localhost",
-            "role": "zookeeper",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9901"
-        },
-
-
-        {
-            "entity_id": "2",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"source",
-            "kafka_home": "system_test/migration_tool_testsuite/0.7",
-            "java_home": "default",
-            "jmx_port": "9902"
-        },
-        {
-            "entity_id": "3",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"source",
-            "kafka_home": "system_test/migration_tool_testsuite/0.7",
-            "java_home": "default",
-            "jmx_port": "9903"
-        },
-        {
-            "entity_id": "4",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"source",
-            "kafka_home": "system_test/migration_tool_testsuite/0.7",
-            "java_home": "default",
-            "jmx_port": "9904"
-        },
-
-
-        {
-            "entity_id": "5",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9905"
-        },
-        {
-            "entity_id": "6",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9906"
-        },
-        {
-            "entity_id": "7",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9907"
-        },
-
-
-        {
-            "entity_id": "8",
-            "hostname": "localhost",
-            "role": "producer_performance",
-            "cluster_name":"source",
-            "kafka_home": "system_test/migration_tool_testsuite/0.7",
-            "java_home": "/export/apps/jdk/JDK-1_6_0_27",
-            "jmx_port": "9908"
-        },
-        {
-            "entity_id": "9",
-            "hostname": "localhost",
-            "role": "producer_performance",
-            "cluster_name":"source",
-            "kafka_home": "system_test/migration_tool_testsuite/0.7",
-            "java_home": "/export/apps/jdk/JDK-1_6_0_27",
-            "jmx_port": "9909"
-        },
-
-
-        {
-            "entity_id": "10",
-            "hostname": "localhost",
-            "role": "console_consumer",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9910"
-        },
-        {
-            "entity_id": "11",
-            "hostname": "localhost",
-            "role": "console_consumer",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9911"
-        },
-
-
-        {
-            "entity_id": "12",
-            "hostname": "localhost",
-            "role": "migration_tool",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9912"
-        },
-        {
-            "entity_id": "13",
-            "hostname": "localhost",
-            "role": "migration_tool",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9913"
-        }
-
-    ]
-}
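
The deleted config runs every entity on localhost, so each one needs its own JMX port (9900-9913 above). A minimal pre-flight check under that assumption, using nc if it is available:

    # Flag any of the JMX ports assigned above that are already bound;
    # nc -z exits 0 only when something is listening on the port.
    for port in $(seq 9900 9913); do
      nc -z localhost "$port" && echo "port $port is already in use"
    done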

http://git-wip-us.apache.org/repos/asf/kafka/blob/084566b8/system_test/migration_tool_testsuite/testcase_9006/testcase_9006_properties.json
----------------------------------------------------------------------
diff --git a/system_test/migration_tool_testsuite/testcase_9006/testcase_9006_properties.json b/system_test/migration_tool_testsuite/testcase_9006/testcase_9006_properties.json
deleted file mode 100644
index 21b4c40..0000000
--- a/system_test/migration_tool_testsuite/testcase_9006/testcase_9006_properties.json
+++ /dev/null
@@ -1,168 +0,0 @@
-{
-  "description": {"01":"To Test : 'Replication with Migration Tool'",
-                  "02":"Set up 2 clusters such as : SOURCE => Migration Tool => TARGET",
-                  "03":"Produce and consume messages to 2 topics - 2 partitions.",
-                  "04":"This test sends messages to 3 replicas",
-                  "05":"At the end it verifies the log size and contents",
-                  "06":"Use a consumer to verify no message loss in TARGET cluster.",
-                  "07":"Producer dimensions : mode:async, acks:1, comp:1",
-                  "08":"Log segment size    : 51200"
-  },
-  "testcase_args": {
-    "bounce_migration_tool": "true",
-    "bounced_entity_downtime_sec": "30",
-    "replica_factor": "3",
-    "num_partition": "2",
-    "num_iteration": "1",
-    "sleep_seconds_between_producer_calls": "1",
-    "message_producing_free_time_sec": "30",
-    "num_messages_to_produce_per_producer_call": "50"
-  },
-  "entities": [
-    {
-      "entity_id": "0",
-      "clientPort": "2188",
-      "dataDir": "/tmp/zookeeper_0",
-      "log_filename": "zookeeper_0.log",
-      "config_filename": "zookeeper_0.properties"
-    },
-    {
-      "entity_id": "1",
-      "clientPort": "2191",
-      "dataDir": "/tmp/zookeeper_1",
-      "log_filename": "zookeeper_1.log",
-      "config_filename": "zookeeper_1.properties"
-    },
-
-
-    {
-      "entity_id": "2",
-      "port": "9091",
-      "brokerid": "1",
-      "version": "0.7",
-      "log.file.size": "51200",
-      "log.dir": "/tmp/kafka_server_2_logs",
-      "log_filename": "kafka_server_2.log",
-      "config_filename": "kafka_server_2.properties"
-    },
-    {
-      "entity_id": "3",
-      "port": "9092",
-      "brokerid": "2",
-      "version": "0.7",
-      "log.file.size": "51200",
-      "log.dir": "/tmp/kafka_server_3_logs",
-      "log_filename": "kafka_server_3.log",
-      "config_filename": "kafka_server_3.properties"
-    },
-    {
-      "entity_id": "4",
-      "port": "9093",
-      "brokerid": "3",
-      "version": "0.7",
-      "log.file.size": "51200",
-      "log.dir": "/tmp/kafka_server_4_logs",
-      "log_filename": "kafka_server_4.log",
-      "config_filename": "kafka_server_4.properties"
-    },
-
-
-    {
-      "entity_id": "5",
-      "port": "9094",
-      "broker.id": "4",
-      "log.segment.bytes": "51200",
-      "log.dir": "/tmp/kafka_server_5_logs",
-      "log_filename": "kafka_server_5.log",
-      "config_filename": "kafka_server_5.properties"
-    },
-    {
-      "entity_id": "6",
-      "port": "9095",
-      "broker.id": "5",
-      "log.segment.bytes": "51200",
-      "log.dir": "/tmp/kafka_server_6_logs",
-      "log_filename": "kafka_server_6.log",
-      "config_filename": "kafka_server_6.properties"
-    },
-    {
-      "entity_id": "7",
-      "port": "9096",
-      "broker.id": "6",
-      "log.segment.bytes": "51200",
-      "log.dir": "/tmp/kafka_server_7_logs",
-      "log_filename": "kafka_server_7.log",
-      "config_filename": "kafka_server_7.properties"
-    },
-
-
-    {
-      "entity_id": "8",
-      "topic": "test_1",
-      "threads": "5",
-      "compression-codec": "1",
-      "message-size": "500",
-      "message": "500",
-      "request-num-acks": "1",
-      "async": "true",
-      "log_filename": "producer_performance_8.log",
-      "config_filename": "producer_performance_8.properties"
-    },
-    {
-      "entity_id": "9",
-      "topic": "test_2",
-      "threads": "5",
-      "compression-codec": "1",
-      "message-size": "500",
-      "message": "500",
-      "request-num-acks": "1",
-      "async": "true",
-      "log_filename": "producer_performance_9.log",
-      "config_filename": "producer_performance_9.properties"
-    },
-
-
-    {
-      "entity_id": "10",
-      "topic": "test_1",
-      "group.id": "mytestgroup",
-      "consumer-timeout-ms": "10000",
-      "log_filename": "console_consumer_10.log",
-      "config_filename": "console_consumer_10.properties"
-    },
-    {
-      "entity_id": "11",
-      "topic": "test_2",
-      "group.id": "mytestgroup",
-      "consumer-timeout-ms": "10000",
-      "log_filename": "console_consumer_11.log",
-      "config_filename": "console_consumer_11.properties"
-    },
-
-
-    {
-      "entity_id": "12",
-      "whitelist": ".*",
-      "num.producers": "2",
-      "num.streams": "2",
-      "producer.config": "migration_tool_testsuite/config/migration_producer.properties",
-      "consumer.config": "migration_tool_testsuite/config/migration_consumer.properties",
-      "zkclient.01.jar": "migration_tool_testsuite/0.7/lib/zkclient-0.1.jar",
-      "kafka.07.jar"   : "migration_tool_testsuite/0.7/lib/kafka-0.7.0.jar",
-      "log_filename": "migration_tool_12.log",
-      "config_filename": "migration_tool_12.properties"
-    },
-    {
-      "entity_id": "13",
-      "whitelist": ".*",
-      "num.producers": "2",
-      "num.streams": "2",
-      "producer.config": "migration_tool_testsuite/config/migration_producer.properties",
-      "consumer.config": "migration_tool_testsuite/config/migration_consumer.properties",
-      "zkclient.01.jar": "migration_tool_testsuite/0.7/lib/zkclient-0.1.jar",
-      "kafka.07.jar"   : "migration_tool_testsuite/0.7/lib/kafka-0.7.0.jar",
-      "log_filename": "migration_tool_13.log",
-      "config_filename": "migration_tool_13.properties"
-    }
-   ]
-}

http://git-wip-us.apache.org/repos/asf/kafka/blob/084566b8/system_test/testcase_to_run_all.json
----------------------------------------------------------------------
diff --git a/system_test/testcase_to_run_all.json b/system_test/testcase_to_run_all.json
index 481f8e5..3e80a1f 100644
--- a/system_test/testcase_to_run_all.json
+++ b/system_test/testcase_to_run_all.json
@@ -121,14 +121,6 @@
         "testcase_9051"
     ],
 
-    "MigrationToolTest"  : [
-        "testcase_9001",
-        "testcase_9003",
-        "testcase_9004",
-        "testcase_9005",
-        "testcase_9006"
-    ],
-
     "MirrorMakerTest"  : [
         "testcase_5001",
         "testcase_5002",


[2/2] git commit: kafka-1645; some more jars in our src release; patched by Joe Stein; reviewed by Jun Rao

Posted by ju...@apache.org.
kafka-1645; some more jars in our src release; patched by Joe Stein; reviewed by Jun Rao


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/084566b8
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/084566b8
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/084566b8

Branch: refs/heads/trunk
Commit: 084566b837ee2204b6898b82156e811d0601085f
Parents: 27bc372
Author: Joe Stein <jo...@stealth.ly>
Authored: Wed Sep 24 21:13:21 2014 -0700
Committer: Jun Rao <ju...@gmail.com>
Committed: Wed Sep 24 21:13:21 2014 -0700

----------------------------------------------------------------------
 build.gradle                                    |   5 +-
 contrib/hadoop-consumer/lib/piggybank.jar       | Bin 308044 -> 0 bytes
 contrib/hadoop-producer/lib/piggybank.jar       | Bin 308044 -> 0 bytes
 lib/apache-rat-0.8.jar                          | Bin 1165578 -> 0 bytes
 .../0.7/bin/kafka-run-class.sh                  |  87 ------
 .../0.7/bin/zookeeper-server-start.sh           |  23 --
 .../0.7/config/log4j.properties                 |  78 -----
 .../0.7/config/test-log4j.properties            |  68 ----
 .../0.7/lib/kafka-0.7.0.jar                     | Bin 1306797 -> 0 bytes
 .../0.7/lib/kafka-perf-0.7.0.jar                | Bin 55427 -> 0 bytes
 .../0.7/lib/zkclient-0.1.jar                    | Bin 62913 -> 0 bytes
 .../migration_tool_testsuite/__init__.py        |   1 -
 .../cluster_config.json                         | 103 -------
 .../config/migration_consumer.properties        |  29 --
 .../config/migration_producer.properties        |  68 ----
 .../config/server.properties                    | 125 --------
 .../config/zookeeper.properties                 |  20 --
 .../migration_tool_test.py                      | 308 -------------------
 .../testcase_9001/testcase_9001_properties.json | 125 --------
 .../testcase_9003/cluster_config.json           | 112 -------
 .../testcase_9003/testcase_9003_properties.json | 138 ---------
 .../testcase_9004/cluster_config.json           | 112 -------
 .../testcase_9004/testcase_9004_properties.json | 138 ---------
 .../testcase_9005/cluster_config.json           | 141 ---------
 .../testcase_9005/testcase_9005_properties.json | 168 ----------
 .../testcase_9006/cluster_config.json           | 141 ---------
 .../testcase_9006/testcase_9006_properties.json | 168 ----------
 system_test/testcase_to_run_all.json            |   8 -
 28 files changed, 2 insertions(+), 2164 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/kafka/blob/084566b8/build.gradle
----------------------------------------------------------------------
diff --git a/build.gradle b/build.gradle
index 63e2065..2e488a1 100644
--- a/build.gradle
+++ b/build.gradle
@@ -290,7 +290,6 @@ project(':contrib:hadoop-consumer') {
     compile "org.codehaus.jackson:jackson-core-asl:1.5.5"
     compile "org.codehaus.jackson:jackson-mapper-asl:1.5.5"
     compile "org.apache.hadoop:hadoop-core:0.20.2"
-    compile files('lib/piggybank.jar')
   }
 
   configurations {
@@ -309,13 +308,13 @@ project(':contrib:hadoop-producer') {
 
   dependencies {
     compile project(':core')
-    compile "org.apache.avro:avro:1.4.0"
+    compile("org.apache.avro:avro:1.4.0") { force = true }
     compile "org.apache.pig:pig:0.8.0"
     compile "commons-logging:commons-logging:1.0.4"
     compile "org.codehaus.jackson:jackson-core-asl:1.5.5"
     compile "org.codehaus.jackson:jackson-mapper-asl:1.5.5"
     compile "org.apache.hadoop:hadoop-core:0.20.2"
-    compile files('lib/piggybank.jar')
+    compile "org.apache.pig:piggybank:0.12.0"
   }
 
   configurations {
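
The net effect of the change above is that piggybank now resolves from a public Maven repository instead of a jar checked into the tree, which is what allows the binaries to be dropped from the src release; the force = true on the avro line pins that version against transitive upgrades. One way to confirm the new coordinates resolve, assuming the Gradle wrapper has been bootstrapped as the project README describes:

    # List the resolved compile dependencies of hadoop-producer;
    # org.apache.pig:piggybank:0.12.0 should appear in the tree and the
    # old file dependency on lib/piggybank.jar should be gone.
    ./gradlew :contrib:hadoop-producer:dependencies --configuration compile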

http://git-wip-us.apache.org/repos/asf/kafka/blob/084566b8/contrib/hadoop-consumer/lib/piggybank.jar
----------------------------------------------------------------------
diff --git a/contrib/hadoop-consumer/lib/piggybank.jar b/contrib/hadoop-consumer/lib/piggybank.jar
deleted file mode 100644
index cbd46e0..0000000
Binary files a/contrib/hadoop-consumer/lib/piggybank.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/kafka/blob/084566b8/contrib/hadoop-producer/lib/piggybank.jar
----------------------------------------------------------------------
diff --git a/contrib/hadoop-producer/lib/piggybank.jar b/contrib/hadoop-producer/lib/piggybank.jar
deleted file mode 100644
index cbd46e0..0000000
Binary files a/contrib/hadoop-producer/lib/piggybank.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/kafka/blob/084566b8/lib/apache-rat-0.8.jar
----------------------------------------------------------------------
diff --git a/lib/apache-rat-0.8.jar b/lib/apache-rat-0.8.jar
deleted file mode 100644
index bdc4372..0000000
Binary files a/lib/apache-rat-0.8.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/kafka/blob/084566b8/system_test/migration_tool_testsuite/0.7/bin/kafka-run-class.sh
----------------------------------------------------------------------
diff --git a/system_test/migration_tool_testsuite/0.7/bin/kafka-run-class.sh b/system_test/migration_tool_testsuite/0.7/bin/kafka-run-class.sh
deleted file mode 100755
index ec92a34..0000000
--- a/system_test/migration_tool_testsuite/0.7/bin/kafka-run-class.sh
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/bin/bash
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-# 
-#    http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-if [ $# -lt 1 ];
-then
-  echo "USAGE: $0 classname [opts]"
-  exit 1
-fi
-
-snappy=~/.ivy2/cache/org.xerial.snappy/snappy-java/bundles/snappy-java-1.0.5.jar
-CLASSPATH=$CLASSPATH:$snappy
-library=~/.ivy2/cache/org.scala-lang/scala-library/jars/scala-library-2.8.0.jar
-CLASSPATH=$CLASSPATH:$library
-compiler=~/.ivy2/cache/org.scala-lang/scala-compiler/jars/scala-compiler-2.8.0.jar
-CLASSPATH=$CLASSPATH:$compiler
-log4j=~/.ivy2/cache/log4j/log4j/jars/log4j-1.2.15.jar
-CLASSPATH=$CLASSPATH:$log4j
-slf=~/.ivy2/cache/org.slf4j/slf4j-api/jars/slf4j-api-1.6.4.jar
-CLASSPATH=$CLASSPATH:$slf
-zookeeper=~/.ivy2/cache/org.apache.zookeeper/zookeeper/jars/zookeeper-3.3.4.jar
-CLASSPATH=$CLASSPATH:$zookeeper
-jopt=~/.ivy2/cache/net.sf.jopt-simple/jopt-simple/jars/jopt-simple-3.2.jar
-CLASSPATH=$CLASSPATH:$jopt
-
-base_dir=$(dirname $0)/../../../..
-kafka_07_lib_dir=$(dirname $0)/../lib
-
-# 0.8 - scala jars
-for file in $base_dir/project/boot/scala-2.8.0/lib/*.jar;
-do
-  CLASSPATH=$CLASSPATH:$file
-done
-
-# 0.7 - kafka-0.7.jar, zkclient-0.1.jar, kafka-perf-0.7.0.jar
-for file in ${kafka_07_lib_dir}/*.jar;
-do
-  CLASSPATH=$CLASSPATH:$file
-done
-
-# 0.8 - metrics jar
-for file in $base_dir/core/lib/metrics*.jar;
-do
-  CLASSPATH=$CLASSPATH:$file
-done
-
-# 0.8 - misc jars
-for file in $base_dir/core/lib_managed/scala_2.8.0/compile/*.jar;
-do
-  if [ ${file##*/} != "sbt-launch.jar" ]; then
-    CLASSPATH=$CLASSPATH:$file
-  fi
-done
-if [ -z "$KAFKA_JMX_OPTS" ]; then
-  KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false  -Dcom.sun.management.jmxremote.ssl=false "
-fi
-
-# Log4j settings
-if [ -z "$KAFKA_LOG4J_OPTS" ]; then
-  KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/config/log4j.properties"
-fi
-
-if [ -z "$KAFKA_OPTS" ]; then
-  KAFKA_OPTS="-Xmx512M -server $KAFKA_LOG4J_OPTS"
-fi
-if [  $JMX_PORT ]; then
-  KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT "
-fi
-if [ -z "$JAVA_HOME" ]; then
-  JAVA="java"
-else
-  JAVA="$JAVA_HOME/bin/java"
-fi
-
-$JAVA $KAFKA_OPTS $KAFKA_JMX_OPTS -cp $CLASSPATH $@
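
The wrapper above assembles a combined 0.7/0.8 classpath and runs java with whatever class name it is given, honoring JMX_PORT, KAFKA_OPTS and KAFKA_LOG4J_OPTS from the environment. A typical invocation, with kafka.Kafka as the 0.7 broker entry point (the config path is illustrative):

    # Start a 0.7 broker through the deleted wrapper; JMX_PORT is picked
    # up by the script and appended to KAFKA_JMX_OPTS.
    JMX_PORT=9902 \
      system_test/migration_tool_testsuite/0.7/bin/kafka-run-class.sh \
      kafka.Kafka config/server.properties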

http://git-wip-us.apache.org/repos/asf/kafka/blob/084566b8/system_test/migration_tool_testsuite/0.7/bin/zookeeper-server-start.sh
----------------------------------------------------------------------
diff --git a/system_test/migration_tool_testsuite/0.7/bin/zookeeper-server-start.sh b/system_test/migration_tool_testsuite/0.7/bin/zookeeper-server-start.sh
deleted file mode 100755
index 184a10b..0000000
--- a/system_test/migration_tool_testsuite/0.7/bin/zookeeper-server-start.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/bash
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-# 
-#    http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-if [ $# -ne 1 ];
-then
-	echo "USAGE: $0 zookeeper.properties"
-	exit 1
-fi
-
-$(dirname $0)/kafka-run-class.sh org.apache.zookeeper.server.quorum.QuorumPeerMain $@
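
Per its own usage line, the deleted start script took exactly one argument, the ZooKeeper properties file, and delegated to kafka-run-class.sh:

    # Start the 0.7-side ZooKeeper with the suite's config file.
    system_test/migration_tool_testsuite/0.7/bin/zookeeper-server-start.sh \
      system_test/migration_tool_testsuite/config/zookeeper.properties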

http://git-wip-us.apache.org/repos/asf/kafka/blob/084566b8/system_test/migration_tool_testsuite/0.7/config/log4j.properties
----------------------------------------------------------------------
diff --git a/system_test/migration_tool_testsuite/0.7/config/log4j.properties b/system_test/migration_tool_testsuite/0.7/config/log4j.properties
deleted file mode 100644
index baa698b..0000000
--- a/system_test/migration_tool_testsuite/0.7/config/log4j.properties
+++ /dev/null
@@ -1,78 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-kafka.logs.dir=logs
-
-log4j.rootLogger=INFO, stdout 
-
-log4j.appender.stdout=org.apache.log4j.ConsoleAppender
-log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
-log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
-
-log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH
-log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log
-log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout
-log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
-
-log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH
-log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log
-log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout
-log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
-
-log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH
-log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log
-log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout
-log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
-
-log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH
-log4j.appender.cleanerAppender.File=log-cleaner.log
-log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout
-log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
-
-log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH
-log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log
-log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout
-log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
-
-# Turn on all our debugging info
-#log4j.logger.kafka.producer.async.DefaultEventHandler=DEBUG, kafkaAppender
-#log4j.logger.kafka.client.ClientUtils=DEBUG, kafkaAppender
-#log4j.logger.kafka.perf=DEBUG, kafkaAppender
-#log4j.logger.kafka.perf.ProducerPerformance$ProducerThread=DEBUG, kafkaAppender
-#log4j.logger.org.I0Itec.zkclient.ZkClient=DEBUG
-log4j.logger.kafka=INFO, kafkaAppender
-
-log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender
-log4j.additivity.kafka.network.RequestChannel$=false
-
-#log4j.logger.kafka.network.Processor=TRACE, requestAppender
-#log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender
-#log4j.additivity.kafka.server.KafkaApis=false
-log4j.logger.kafka.request.logger=WARN, requestAppender
-log4j.additivity.kafka.request.logger=false
-
-log4j.logger.kafka.controller=TRACE, controllerAppender
-log4j.additivity.kafka.controller=false
-
-log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender
-log4j.additivity.kafka.log.LogCleaner=false
-
-log4j.logger.state.change.logger=TRACE, stateChangeAppender
-log4j.additivity.state.change.logger=false
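
Because kafka-run-class.sh only sets KAFKA_LOG4J_OPTS when it is unset, a different log4j config can be swapped in from the environment without editing this file (the path below is illustrative):

    # Point the deleted wrapper at a custom log4j configuration.
    KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:/path/to/custom-log4j.properties" \
      system_test/migration_tool_testsuite/0.7/bin/kafka-run-class.sh \
      kafka.Kafka config/server.properties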

http://git-wip-us.apache.org/repos/asf/kafka/blob/084566b8/system_test/migration_tool_testsuite/0.7/config/test-log4j.properties
----------------------------------------------------------------------
diff --git a/system_test/migration_tool_testsuite/0.7/config/test-log4j.properties b/system_test/migration_tool_testsuite/0.7/config/test-log4j.properties
deleted file mode 100644
index a3ae33f..0000000
--- a/system_test/migration_tool_testsuite/0.7/config/test-log4j.properties
+++ /dev/null
@@ -1,68 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-log4j.rootLogger=INFO, stdout 
-
-log4j.appender.stdout=org.apache.log4j.ConsoleAppender
-log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
-log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
-
-log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH
-log4j.appender.kafkaAppender.File=logs/server.log
-log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout
-log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
-
-log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH
-log4j.appender.stateChangeAppender.File=logs/state-change.log
-log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout
-log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
-
-log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH
-log4j.appender.requestAppender.File=logs/kafka-request.log
-log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout
-log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
-
-log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH
-log4j.appender.controllerAppender.File=logs/controller.log
-log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout
-log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
-
-# Turn on all our debugging info
-#log4j.logger.kafka.producer.async.DefaultEventHandler=DEBUG, kafkaAppender
-#log4j.logger.kafka.client.ClientUtils=DEBUG, kafkaAppender
-log4j.logger.kafka.perf=DEBUG, kafkaAppender
-log4j.logger.kafka.perf.ProducerPerformance$ProducerThread=DEBUG, kafkaAppender
-#log4j.logger.org.I0Itec.zkclient.ZkClient=DEBUG
-log4j.logger.kafka=INFO, kafkaAppender
-
-log4j.logger.kafka.network.RequestChannel$=TRACE, requestAppender
-log4j.additivity.kafka.network.RequestChannel$=false
-
-#log4j.logger.kafka.network.Processor=TRACE, requestAppender
-#log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender
-#log4j.additivity.kafka.server.KafkaApis=false
-log4j.logger.kafka.request.logger=TRACE, requestAppender
-log4j.additivity.kafka.request.logger=false
-
-log4j.logger.kafka.controller=TRACE, controllerAppender
-log4j.additivity.kafka.controller=false
-
-log4j.logger.state.change.logger=TRACE, stateChangeAppender
-log4j.additivity.state.change.logger=false
-
-

http://git-wip-us.apache.org/repos/asf/kafka/blob/084566b8/system_test/migration_tool_testsuite/0.7/lib/kafka-0.7.0.jar
----------------------------------------------------------------------
diff --git a/system_test/migration_tool_testsuite/0.7/lib/kafka-0.7.0.jar b/system_test/migration_tool_testsuite/0.7/lib/kafka-0.7.0.jar
deleted file mode 100644
index 982ddb9..0000000
Binary files a/system_test/migration_tool_testsuite/0.7/lib/kafka-0.7.0.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/kafka/blob/084566b8/system_test/migration_tool_testsuite/0.7/lib/kafka-perf-0.7.0.jar
----------------------------------------------------------------------
diff --git a/system_test/migration_tool_testsuite/0.7/lib/kafka-perf-0.7.0.jar b/system_test/migration_tool_testsuite/0.7/lib/kafka-perf-0.7.0.jar
deleted file mode 100644
index d4f89d5..0000000
Binary files a/system_test/migration_tool_testsuite/0.7/lib/kafka-perf-0.7.0.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/kafka/blob/084566b8/system_test/migration_tool_testsuite/0.7/lib/zkclient-0.1.jar
----------------------------------------------------------------------
diff --git a/system_test/migration_tool_testsuite/0.7/lib/zkclient-0.1.jar b/system_test/migration_tool_testsuite/0.7/lib/zkclient-0.1.jar
deleted file mode 100644
index aebcc34..0000000
Binary files a/system_test/migration_tool_testsuite/0.7/lib/zkclient-0.1.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/kafka/blob/084566b8/system_test/migration_tool_testsuite/__init__.py
----------------------------------------------------------------------
diff --git a/system_test/migration_tool_testsuite/__init__.py b/system_test/migration_tool_testsuite/__init__.py
deleted file mode 100644
index 8d1c8b6..0000000
--- a/system_test/migration_tool_testsuite/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
- 

http://git-wip-us.apache.org/repos/asf/kafka/blob/084566b8/system_test/migration_tool_testsuite/cluster_config.json
----------------------------------------------------------------------
diff --git a/system_test/migration_tool_testsuite/cluster_config.json b/system_test/migration_tool_testsuite/cluster_config.json
deleted file mode 100644
index cd7f079..0000000
--- a/system_test/migration_tool_testsuite/cluster_config.json
+++ /dev/null
@@ -1,103 +0,0 @@
-{
-    "cluster_config": [
-        {
-            "entity_id": "0",
-            "hostname": "localhost",
-            "role": "zookeeper",
-            "cluster_name":"source",
-            "kafka_home": "system_test/migration_tool_testsuite/0.7",
-            "java_home": "default",
-            "jmx_port": "9990"
-        },
-        {
-            "entity_id": "1",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"source",
-            "kafka_home": "system_test/migration_tool_testsuite/0.7",
-            "java_home": "default",
-            "jmx_port": "9991"
-        },
-        {
-            "entity_id": "2",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"source",
-            "kafka_home": "system_test/migration_tool_testsuite/0.7",
-            "java_home": "default",
-            "jmx_port": "9992"
-        },
-        {
-            "entity_id": "3",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"source",
-            "kafka_home": "system_test/migration_tool_testsuite/0.7",
-            "java_home": "default",
-            "jmx_port": "9993"
-        },
-        {
-            "entity_id": "4",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9994"
-        },
-        {
-            "entity_id": "5",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9995"
-        },
-        {
-            "entity_id": "6",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9996"
-        },
-        {
-            "entity_id": "7",
-            "hostname": "localhost",
-            "role": "producer_performance",
-            "cluster_name":"source",
-            "kafka_home": "system_test/migration_tool_testsuite/0.7",
-            "java_home": "default",
-            "jmx_port": "9997"
-        },
-        {
-            "entity_id": "8",
-            "hostname": "localhost",
-            "role": "console_consumer",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9998"
-        },
-        {
-            "entity_id": "9",
-            "hostname": "localhost",
-            "role": "zookeeper",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9999"
-        },
-        {
-            "entity_id": "10",
-            "hostname": "localhost",
-            "role": "migration_tool",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9890"
-        }
-    ]
-}
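
The suite-level topology above is small enough to eyeball, but a tabular view helps when editing it; one way, assuming jq is installed:

    # Print entity id, role, cluster and JMX port as tab-separated columns.
    jq -r '.cluster_config[] | [.entity_id, .role, .cluster_name, .jmx_port] | @tsv' \
      system_test/migration_tool_testsuite/cluster_config.json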

http://git-wip-us.apache.org/repos/asf/kafka/blob/084566b8/system_test/migration_tool_testsuite/config/migration_consumer.properties
----------------------------------------------------------------------
diff --git a/system_test/migration_tool_testsuite/config/migration_consumer.properties b/system_test/migration_tool_testsuite/config/migration_consumer.properties
deleted file mode 100644
index 184f1de..0000000
--- a/system_test/migration_tool_testsuite/config/migration_consumer.properties
+++ /dev/null
@@ -1,29 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-# 
-#    http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# see kafka.consumer.ConsumerConfig for more details
-
-# zk connection string
-# comma separated host:port pairs, each corresponding to a zk
-# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002"
-zk.connect=127.0.0.1:2188
-
-# timeout in ms for connecting to zookeeper
-zk.connectiontimeout.ms=1000000
-
-#consumer group id
-groupid=test-consumer-group
-
-#consumer timeout
-#consumer.timeout.ms=5000

http://git-wip-us.apache.org/repos/asf/kafka/blob/084566b8/system_test/migration_tool_testsuite/config/migration_producer.properties
----------------------------------------------------------------------
diff --git a/system_test/migration_tool_testsuite/config/migration_producer.properties b/system_test/migration_tool_testsuite/config/migration_producer.properties
deleted file mode 100644
index 7a2265a..0000000
--- a/system_test/migration_tool_testsuite/config/migration_producer.properties
+++ /dev/null
@@ -1,68 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# see kafka.producer.ProducerConfig for more details
-
-############################# Producer Basics #############################
-
-# need to set either broker.list or zk.connect
-
-# configure brokers statically
-# format: host1:port1,host2:port2 ...
-metadata.broker.list=localhost:9094,localhost:9095,localhost:9096
-
-# discover brokers from ZK
-#zk.connect=
-
-# zookeeper session timeout; default is 6000
-#zk.session.timeout.ms=
-
-# the max time that the client waits to establish a connection to zookeeper; default is 6000
-#zk.connection.timeout.ms
-
-# name of the partitioner class for partitioning events; the default partitioner spreads data randomly
-#partitioner.class=
-
-# specifies whether the messages are sent asynchronously (async) or synchronously (sync)
-producer.type=sync
-
-retry.backoff.ms=500
-
-# specify the compression codec for all data generated: 0: no compression, 1: gzip
-compression.codec=0
-
-# message encoder
-serializer.class=kafka.serializer.DefaultEncoder
-
-# allow topic level compression
-#compressed.topics=
-
-############################# Async Producer #############################
-# maximum time, in milliseconds, for buffering data on the producer queue 
-#queue.buffering.max.ms=
-
-# the maximum size of the blocking queue for buffering on the producer 
-#queue.buffering.max.messages=
-
-# Timeout for event enqueue:
-# 0: events will be enqueued immediately or dropped if the queue is full
-# -ve: enqueue will block indefinitely if the queue is full
-# +ve: enqueue will block up to this many milliseconds if the queue is full
-#queue.enqueue.timeout.ms=
-
-# the number of messages batched at the producer 
-#batch.num.messages=
-
-message.send.max.retries=3
-request.required.acks=1

http://git-wip-us.apache.org/repos/asf/kafka/blob/084566b8/system_test/migration_tool_testsuite/config/server.properties
----------------------------------------------------------------------
diff --git a/system_test/migration_tool_testsuite/config/server.properties b/system_test/migration_tool_testsuite/config/server.properties
deleted file mode 100644
index 54144a2..0000000
--- a/system_test/migration_tool_testsuite/config/server.properties
+++ /dev/null
@@ -1,125 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-# 
-#    http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# see kafka.server.KafkaConfig for additional details and defaults
-
-############################# Server Basics #############################
-
-# The id of the broker. This must be set to a unique integer for each broker.
-broker.id=0
-
-# Hostname the broker will advertise to consumers. If not set, kafka will use the value returned
-# from InetAddress.getLocalHost(). If there are multiple interfaces, getLocalHost
-# may not return the one you want.
-#host.name=
-
-
-############################# Socket Server Settings #############################
-
-# The port the socket server listens on
-port=9091
-
-# The number of threads handling network requests
-num.network.threads=2
- 
-# The number of threads doing disk I/O
-num.io.threads=2
-
-# The send buffer (SO_SNDBUF) used by the socket server
-socket.send.buffer.bytes=1048576
-
-# The receive buffer (SO_RCVBUF) used by the socket server
-socket.receive.buffer.bytes=1048576
-
-# The maximum size of a request that the socket server will accept (protection against OOM)
-socket.request.max.bytes=104857600
-
-
-############################# Log Basics #############################
-
-# The directory under which to store log files
-log.dir=/tmp/kafka_server_logs
-
-# The default number of log partitions per topic. More partitions allow greater
-# parallelism for consumption, but this will also result in more files across
-# the brokers.
-num.partitions=5
-
-# Overrides for the default given by num.partitions on a per-topic basis
-#topic.partition.count.map=topic1:3, topic2:4
-
-############################# Log Flush Policy #############################
-
-# The following configurations control the flush of data to disk. This is the most
-# important performance knob in kafka.
-# There are a few important trade-offs here:
-#    1. Durability: Unflushed data is at greater risk of loss in the event of a crash.
-#    2. Latency: Data is not made available to consumers until it is flushed (which adds latency).
-#    3. Throughput: The flush is generally the most expensive operation. 
-# The settings below allow one to configure the flush policy to flush data after a period of time or
-# every N messages (or both). This can be done globally and overridden on a per-topic basis.
-
-# The number of messages to accept before forcing a flush of data to disk
-log.flush.interval.messages=10000
-
-# The maximum amount of time a message can sit in a log before we force a flush
-log.flush.interval.ms=1000
-
-# Per-topic overrides for log.flush.interval.ms
-#log.flush.intervals.ms.per.topic=topic1:1000, topic2:3000
-
-# The interval (in ms) at which logs are checked to see if they need to be flushed to disk.
-log.flush.scheduler.interval.ms=1000
-
-############################# Log Retention Policy #############################
-
-# The following configurations control the disposal of log segments. The policy can
-# be set to delete segments after a period of time, or after a given size has accumulated.
-# A segment will be deleted whenever *either* of these criteria is met. Deletion always happens
-# from the end of the log.
-
-# The minimum age of a log file to be eligible for deletion
-log.retention.hours=168
-
-# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
-# segments don't drop below log.retention.bytes.
-#log.retention.bytes=1073741824
-
-# The maximum size of a log segment file. When this size is reached a new log segment will be created.
-#log.segment.bytes=536870912
-#log.segment.bytes=102400
-log.segment.bytes=128
-
-# The interval at which log segments are checked to see if they can be deleted according 
-# to the retention policies
-log.cleanup.interval.mins=1
-
-############################# Zookeeper #############################
-
-# Enable connecting to zookeeper
-enable.zookeeper=true
-
-# Zk connection string (see zk docs for details).
-# This is a comma separated host:port pairs, each corresponding to a zk
-# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
-# You can also append an optional chroot string to the urls to specify the
-# root directory for all kafka znodes.
-zk.connect=localhost:2181
-zookeeper.connect=localhost:2181
-
-# Timeout in ms for connecting to zookeeper
-zk.connection.timeout.ms=1000000
-zookeeper.connection.timeout.ms=1000000
-
-monitoring.period.secs=1
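
Note the deliberately tiny log.segment.bytes=128: with the 500-byte messages the producer entities send, virtually every message rolls a new segment, giving the suite many segment files to verify. The effect is easy to observe under the log.dir configured above:

    # Each <topic>-<partition> directory fills with small segment files.
    ls -l /tmp/kafka_server_logs/*/ | head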

http://git-wip-us.apache.org/repos/asf/kafka/blob/084566b8/system_test/migration_tool_testsuite/config/zookeeper.properties
----------------------------------------------------------------------
diff --git a/system_test/migration_tool_testsuite/config/zookeeper.properties b/system_test/migration_tool_testsuite/config/zookeeper.properties
deleted file mode 100644
index 74cbf90..0000000
--- a/system_test/migration_tool_testsuite/config/zookeeper.properties
+++ /dev/null
@@ -1,20 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-# 
-#    http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# the directory where the snapshot is stored.
-dataDir=/tmp/zookeeper
-# the port at which the clients will connect
-clientPort=2181
-# disable the per-ip limit on the number of connections since this is a non-production config
-maxClientCnxns=0
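
With this config running, ZooKeeper's four-letter-word commands give a quick health check on the configured clientPort:

    # A healthy server answers "imok".
    echo ruok | nc localhost 2181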

http://git-wip-us.apache.org/repos/asf/kafka/blob/084566b8/system_test/migration_tool_testsuite/migration_tool_test.py
----------------------------------------------------------------------
diff --git a/system_test/migration_tool_testsuite/migration_tool_test.py b/system_test/migration_tool_testsuite/migration_tool_test.py
deleted file mode 100644
index 9594835..0000000
--- a/system_test/migration_tool_testsuite/migration_tool_test.py
+++ /dev/null
@@ -1,308 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#!/usr/bin/env python
-
-# ===================================
-# migration_tool_test.py
-# ===================================
-
-import inspect
-import logging
-import os
-import signal
-import subprocess
-import sys
-import time
-import traceback
-
-from   system_test_env    import SystemTestEnv
-sys.path.append(SystemTestEnv.SYSTEM_TEST_UTIL_DIR)
-
-from   setup_utils        import SetupUtils
-from   replication_utils  import ReplicationUtils
-import system_test_utils
-from   testcase_env       import TestcaseEnv
-
-# product specific: Kafka
-import kafka_system_test_utils
-import metrics
-
-class MigrationToolTest(ReplicationUtils, SetupUtils):
-
-    testModuleAbsPathName = os.path.realpath(__file__)
-    testSuiteAbsPathName  = os.path.abspath(os.path.dirname(testModuleAbsPathName))
-
-    def __init__(self, systemTestEnv):
-
-        # SystemTestEnv - provides cluster level environment settings
-        #     such as entity_id, hostname, kafka_home, java_home which
-        #     are available in a list of dictionaries named
-        #     "clusterEntityConfigDictList"
-        self.systemTestEnv = systemTestEnv
-
-        super(MigrationToolTest, self).__init__(self)
-
-        # dict to pass user-defined attributes to logger argument: "extra"
-        d = {'name_of_class': self.__class__.__name__}
-
-    def signal_handler(self, signal, frame):
-        self.log_message("Interrupt detected - User pressed Ctrl+c")
-
-        # perform the necessary cleanup when the user presses Ctrl+c; this may be product specific
-        self.log_message("stopping all entities - please wait ...")
-        kafka_system_test_utils.stop_all_remote_running_processes(self.systemTestEnv, self.testcaseEnv)
-        sys.exit(1) 
-
-    def runTest(self):
-
-        # ======================================================================
-        # get all testcase directories under this testsuite
-        # ======================================================================
-        testCasePathNameList = system_test_utils.get_dir_paths_with_prefix(
-            self.testSuiteAbsPathName, SystemTestEnv.SYSTEM_TEST_CASE_PREFIX)
-        testCasePathNameList.sort()
-
-        replicationUtils = ReplicationUtils(self)
-
-        # =============================================================
-        # launch each testcase one by one: testcase_1, testcase_2, ...
-        # =============================================================
-        for testCasePathName in testCasePathNameList:
-   
-            skipThisTestCase = False
-
-            try: 
-                # ======================================================================
-                # A new instance of TestcaseEnv to keep track of this testcase's env vars
-                # and initialize some env vars as testCasePathName is available now
-                # ======================================================================
-                self.testcaseEnv = TestcaseEnv(self.systemTestEnv, self)
-                self.testcaseEnv.testSuiteBaseDir = self.testSuiteAbsPathName
-                self.testcaseEnv.initWithKnownTestCasePathName(testCasePathName)
-                self.testcaseEnv.testcaseArgumentsDict = self.testcaseEnv.testcaseNonEntityDataDict["testcase_args"]
-
-                # ======================================================================
-                # SKIP if this case is IN testcase_to_skip.json or NOT IN testcase_to_run.json
-                # ======================================================================
-                testcaseDirName = self.testcaseEnv.testcaseResultsDict["_test_case_name"]
-
-                if self.systemTestEnv.printTestDescriptionsOnly:
-                    self.testcaseEnv.printTestCaseDescription(testcaseDirName)
-                    continue
-                elif self.systemTestEnv.isTestCaseToSkip(self.__class__.__name__, testcaseDirName):
-                    self.log_message("Skipping : " + testcaseDirName)
-                    skipThisTestCase = True
-                    continue
-                else:
-                    self.testcaseEnv.printTestCaseDescription(testcaseDirName)
-                    system_test_utils.setup_remote_hosts_with_testcase_level_cluster_config(self.systemTestEnv, testCasePathName)
-
-                # ============================================================================== #
-                # ============================================================================== #
-                #                   Product Specific Testing Code Starts Here:                   #
-                # ============================================================================== #
-                # ============================================================================== #
-    
-                # initialize self.testcaseEnv with user-defined environment variables (product specific)
-                self.testcaseEnv.userDefinedEnvVarDict["zkConnectStr"] = ""
-                self.testcaseEnv.userDefinedEnvVarDict["stopBackgroundProducer"]    = False
-                self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"] = False
-
-                # initialize signal handler
-                signal.signal(signal.SIGINT, self.signal_handler)
-    
-                # TestcaseEnv.testcaseConfigsList initialized by reading testcase properties file:
-                #   system_test/<suite_name>_testsuite/testcase_<n>/testcase_<n>_properties.json
-                self.testcaseEnv.testcaseConfigsList = system_test_utils.get_json_list_data(
-                    self.testcaseEnv.testcasePropJsonPathName)
-    
-                # clean up data directories specified in zookeeper.properties and kafka_server_<n>.properties
-                kafka_system_test_utils.cleanup_data_at_remote_hosts(self.systemTestEnv, self.testcaseEnv)
-
-                # create "LOCAL" log directories for metrics, dashboards for each entity under this testcase
-                # for collecting logs from remote machines
-                kafka_system_test_utils.generate_testcase_log_dirs(self.systemTestEnv, self.testcaseEnv)
-    
-                # TestcaseEnv - initialize producer & consumer config / log file pathnames
-                kafka_system_test_utils.init_entity_props(self.systemTestEnv, self.testcaseEnv)
-
-                # generate remote hosts log/config dirs if not exist
-                kafka_system_test_utils.generate_testcase_log_dirs_in_remote_hosts(self.systemTestEnv, self.testcaseEnv)
-    
-                # generate properties files for zookeeper, kafka, producer, consumer and mirror-maker:
-                # 1. copy system_test/<suite_name>_testsuite/config/*.properties to 
-                #    system_test/<suite_name>_testsuite/testcase_<n>/config/
-                # 2. update all properties files in system_test/<suite_name>_testsuite/testcase_<n>/config
-                #    by overriding the settings specified in:
-                #    system_test/<suite_name>_testsuite/testcase_<n>/testcase_<n>_properties.json
-                kafka_system_test_utils.generate_overriden_props_files(self.testSuiteAbsPathName,
-                    self.testcaseEnv, self.systemTestEnv)
-
-                # =============================================
-                # preparing all entities to start the test
-                # =============================================
-                self.log_message("starting zookeepers")
-                kafka_system_test_utils.start_zookeepers(self.systemTestEnv, self.testcaseEnv)
-                self.anonLogger.info("sleeping for 2s")
-                time.sleep(2)
-        
-                self.log_message("starting brokers")
-                kafka_system_test_utils.start_brokers(self.systemTestEnv, self.testcaseEnv)
-                self.anonLogger.info("sleeping for 5s")
-                time.sleep(5)
-
-                self.log_message("starting migration tool")
-                kafka_system_test_utils.start_migration_tool(self.systemTestEnv, self.testcaseEnv)
-                self.anonLogger.info("sleeping for 5s")
-                time.sleep(5)
-
-                # =============================================
-                # starting producer 
-                # =============================================
-                self.log_message("starting producer in the background")
-                kafka_system_test_utils.start_producer_performance(self.systemTestEnv, self.testcaseEnv, True)
-                msgProducingFreeTimeSec = self.testcaseEnv.testcaseArgumentsDict["message_producing_free_time_sec"]
-                self.anonLogger.info("sleeping for " + msgProducingFreeTimeSec + " sec to produce some messages")
-                time.sleep(int(msgProducingFreeTimeSec))
-
-                # =============================================
-                # A while-loop to bounce the migration tool as specified
-                # by "num_iteration" in testcase_<n>_properties.json
-                # =============================================
-                i = 1
-                numIterations = int(self.testcaseEnv.testcaseArgumentsDict["num_iteration"])
-                bouncedEntityDownTimeSec = 1
-                try:
-                    bouncedEntityDownTimeSec = int(self.testcaseEnv.testcaseArgumentsDict["bounced_entity_downtime_sec"])
-                except (KeyError, ValueError):
-                    # fall back to the 1s default when the property is absent or malformed
-                    pass
-
-                while i <= numIterations:
-
-                    self.log_message("Iteration " + str(i) + " of " + str(numIterations))
-
-                    # =============================================
-                    # Bounce Migration Tool
-                    # =============================================
-                    bounceMigrationTool = self.testcaseEnv.testcaseArgumentsDict["bounce_migration_tool"]
-                    self.log_message("bounce_migration_tool flag : " + bounceMigrationTool)
-                    if bounceMigrationTool.lower() == "true":
-
-                        clusterConfigList         = self.systemTestEnv.clusterEntityConfigDictList
-                        migrationToolEntityIdList = system_test_utils.get_data_from_list_of_dicts(
-                                                    clusterConfigList, "role", "migration_tool", "entity_id")
-
-                        stoppedMigrationToolEntityId = migrationToolEntityIdList[0]
-                        migrationToolPPid = self.testcaseEnv.entityMigrationToolParentPidDict[stoppedMigrationToolEntityId]
-
-                        self.log_message("stopping migration tool : " + migrationToolPPid)
-                        kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, stoppedMigrationToolEntityId, migrationToolPPid)
-                        self.anonLogger.info("sleeping for " + str(bouncedEntityDownTimeSec) + " sec")
-                        time.sleep(bouncedEntityDownTimeSec)
-
-                        # restart the previously terminated migration tool
-                        self.log_message("starting the previously terminated migration tool")
-                        kafka_system_test_utils.start_migration_tool(self.systemTestEnv, self.testcaseEnv, stoppedMigrationToolEntityId)
-
-                    self.anonLogger.info("sleeping for 15s")
-                    time.sleep(15)
-                    i += 1
-                # while loop
-
-                # =============================================
-                # tell producer to stop
-                # =============================================
-                self.testcaseEnv.lock.acquire()
-                self.testcaseEnv.userDefinedEnvVarDict["stopBackgroundProducer"] = True
-                time.sleep(1)
-                self.testcaseEnv.lock.release()
-                time.sleep(1)
-
-                # =============================================
-                # wait for producer thread's update of
-                # "backgroundProducerStopped" to be "True"
-                # =============================================
-                while True:
-                    self.testcaseEnv.lock.acquire()
-                    producerStopped = self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"]
-                    self.testcaseEnv.lock.release()
-                    self.logger.info("status of backgroundProducerStopped : [" + str(producerStopped) + "]", extra=self.d)
-                    if producerStopped:
-                        self.logger.info("all producer threads completed", extra=self.d)
-                        break
-                    time.sleep(2)
-
-                # =============================================
-                # starting consumer
-                # =============================================
-                self.log_message("starting consumer in the background")
-                kafka_system_test_utils.start_console_consumer(self.systemTestEnv, self.testcaseEnv)
-                self.anonLogger.info("sleeping for 20s")
-                time.sleep(20)
-                    
-                # =============================================
-                # this testcase is completed - stop all entities
-                # =============================================
-                self.log_message("stopping all entities")
-                for entityId, parentPid in self.testcaseEnv.entityBrokerParentPidDict.items():
-                    kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, entityId, parentPid)
-
-                for entityId, parentPid in self.testcaseEnv.entityZkParentPidDict.items():
-                    kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, entityId, parentPid)
-
-                # make sure all entities are stopped
-                kafka_system_test_utils.ps_grep_terminate_running_entity(self.systemTestEnv)
-
-                # =============================================
-                # collect logs from remote hosts
-                # =============================================
-                kafka_system_test_utils.collect_logs_from_remote_hosts(self.systemTestEnv, self.testcaseEnv)
-    
-                # =============================================
-                # validate the data matched and checksum
-                # =============================================
-                self.log_message("validating data matched")
-                kafka_system_test_utils.validate_data_matched(self.systemTestEnv, self.testcaseEnv, replicationUtils)
-                kafka_system_test_utils.validate_broker_log_segment_checksum(self.systemTestEnv, self.testcaseEnv)
-
-                # =============================================
-                # draw graphs
-                # =============================================
-                metrics.draw_all_graphs(self.systemTestEnv.METRICS_PATHNAME, 
-                                        self.testcaseEnv, 
-                                        self.systemTestEnv.clusterEntityConfigDictList)
-                
-                # build dashboard, one for each role
-                metrics.build_all_dashboards(self.systemTestEnv.METRICS_PATHNAME,
-                                             self.testcaseEnv.testCaseDashboardsDir,
-                                             self.systemTestEnv.clusterEntityConfigDictList)
-                
-            except Exception as e:
-                self.log_message("Exception while running test {0}".format(e))
-                traceback.print_exc()
-
-            finally:
-                if not skipThisTestCase and not self.systemTestEnv.printTestDescriptionsOnly:
-                    self.log_message("stopping all entities - please wait ...")
-                    kafka_system_test_utils.stop_all_remote_running_processes(self.systemTestEnv, self.testcaseEnv)
-
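For context on the stop/wait handshake in the driver above: the test driver and the background producer coordinate through two lock-guarded flags in userDefinedEnvVarDict. Below is a minimal standalone sketch of that pattern; the names in it (background_producer, flags, lock) are illustrative stand-ins, not the harness's actual API.

    import threading
    import time

    lock = threading.Lock()
    flags = {"stopBackgroundProducer": False, "backgroundProducerStopped": False}

    def background_producer():
        while True:
            with lock:
                if flags["stopBackgroundProducer"]:
                    # acknowledge the stop request before exiting
                    flags["backgroundProducerStopped"] = True
                    return
            time.sleep(1)  # a real producer would send a batch of messages here

    producer = threading.Thread(target=background_producer)
    producer.start()

    time.sleep(3)                     # let the producer run for a while
    with lock:
        flags["stopBackgroundProducer"] = True

    while True:                       # poll until the producer acknowledges
        with lock:
            if flags["backgroundProducerStopped"]:
                break
        time.sleep(1)
    producer.join()

Releasing the lock before each sleep keeps either side from blocking the other while polling, which is also why the driver's wait loop above reads the flag and releases the lock before sleeping.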

http://git-wip-us.apache.org/repos/asf/kafka/blob/084566b8/system_test/migration_tool_testsuite/testcase_9001/testcase_9001_properties.json
----------------------------------------------------------------------
diff --git a/system_test/migration_tool_testsuite/testcase_9001/testcase_9001_properties.json b/system_test/migration_tool_testsuite/testcase_9001/testcase_9001_properties.json
deleted file mode 100644
index 608e3bd..0000000
--- a/system_test/migration_tool_testsuite/testcase_9001/testcase_9001_properties.json
+++ /dev/null
@@ -1,125 +0,0 @@
-{
-  "description": {"01":"To Test : 'Replication with Migration Tool'",
-                  "02":"Set up 2 clusters such as : SOURCE => Migration Tool => TARGET",
-                  "03":"Produce and consume messages to a single topic - single partition.",
-                  "04":"This test sends messages to 3 replicas",
-                  "05":"At the end it verifies the log size and contents",
-                  "06":"Use a consumer to verify no message loss in TARGET cluster.",
-                  "07":"Producer dimensions : mode:sync, acks:-1, comp:0",
-                  "08":"Log segment size    : 51200"
-  },
-  "testcase_args": {
-    "bounce_migration_tool": "false",
-    "replica_factor": "3",
-    "num_partition": "1",
-    "num_iteration": "1",
-    "sleep_seconds_between_producer_calls": "1",
-    "message_producing_free_time_sec": "30",
-    "num_messages_to_produce_per_producer_call": "50"
-  },
-  "entities": [
-    {
-      "entity_id": "0",
-      "clientPort": "2188",
-      "dataDir": "/tmp/zookeeper_0",
-      "log_filename": "zookeeper_0.log",
-      "config_filename": "zookeeper_0.properties"
-    },
-    {
-      "entity_id": "1",
-      "port": "9091",
-      "brokerid": "1",
-      "version": "0.7",
-      "log.file.size": "51200",
-      "log.dir": "/tmp/kafka_server_1_logs",
-      "log_filename": "kafka_server_1.log",
-      "config_filename": "kafka_server_1.properties"
-    },
-    {
-      "entity_id": "2",
-      "port": "9092",
-      "brokerid": "2",
-      "version": "0.7",
-      "log.file.size": "51200",
-      "log.dir": "/tmp/kafka_server_2_logs",
-      "log_filename": "kafka_server_2.log",
-      "config_filename": "kafka_server_2.properties"
-    },
-    {
-      "entity_id": "3",
-      "port": "9093",
-      "brokerid": "3",
-      "version": "0.7",
-      "log.file.size": "51200",
-      "log.dir": "/tmp/kafka_server_3_logs",
-      "log_filename": "kafka_server_3.log",
-      "config_filename": "kafka_server_3.properties"
-    },
-    {
-      "entity_id": "4",
-      "port": "9094",
-      "broker.id": "4",
-      "log.segment.bytes": "51200",
-      "log.dir": "/tmp/kafka_server_4_logs",
-      "log_filename": "kafka_server_4.log",
-      "config_filename": "kafka_server_4.properties"
-    },
-    {
-      "entity_id": "5",
-      "port": "9095",
-      "broker.id": "5",
-      "log.segment.bytes": "51200",
-      "log.dir": "/tmp/kafka_server_5_logs",
-      "log_filename": "kafka_server_5.log",
-      "config_filename": "kafka_server_5.properties"
-    },
-    {
-      "entity_id": "6",
-      "port": "9096",
-      "broker.id": "6",
-      "log.segment.bytes": "51200",
-      "log.dir": "/tmp/kafka_server_6_logs",
-      "log_filename": "kafka_server_6.log",
-      "config_filename": "kafka_server_6.properties"
-    },
-    {
-      "entity_id": "7",
-      "topic": "test_1",
-      "threads": "5",
-      "compression-codec": "0",
-      "message-size": "500",
-      "message": "500",
-      "request-num-acks": "-1",
-      "async": "false",
-      "log_filename": "producer_performance_7.log",
-      "config_filename": "producer_performance_7.properties"
-    },
-    {
-      "entity_id": "8",
-      "topic": "test_1",
-      "group.id": "mytestgroup",
-      "consumer-timeout-ms": "10000",
-      "log_filename": "console_consumer_8.log",
-      "config_filename": "console_consumer_8.properties"
-    },
-    {
-      "entity_id": "9",
-      "clientPort": "2191",
-      "dataDir": "/tmp/zookeeper_9",
-      "log_filename": "zookeeper_9.log",
-      "config_filename": "zookeeper_9.properties"
-    },
-    {
-      "entity_id": "10",
-      "whitelist": ".*",
-      "num.producers": "2",
-      "num.streams": "2",
-      "producer.config": "migration_tool_testsuite/config/migration_producer.properties",
-      "consumer.config": "migration_tool_testsuite/config/migration_consumer.properties",
-      "zkclient.01.jar": "migration_tool_testsuite/0.7/lib/zkclient-0.1.jar",
-      "kafka.07.jar"   : "migration_tool_testsuite/0.7/lib/kafka-0.7.0.jar",
-      "log_filename": "migration_tool.log",
-      "config_filename": "migration_tool.properties"
-    }
-   ]
-}
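The driver loads a testcase_<n>_properties.json like the one above via system_test_utils.get_json_list_data and then reads testcase_args at run time. As a rough sketch of that lookup using only the standard library (the file path and the defaults are assumptions for illustration, not the harness's actual behavior):

    import json

    with open("testcase_9001_properties.json") as f:   # assumed local path
        testcase = json.load(f)

    args = testcase["testcase_args"]
    num_iteration = int(args.get("num_iteration", "1"))
    free_time_sec = int(args.get("message_producing_free_time_sec", "30"))
    entities = testcase["entities"]    # per-entity property overrides
    print(num_iteration, free_time_sec, len(entities))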

http://git-wip-us.apache.org/repos/asf/kafka/blob/084566b8/system_test/migration_tool_testsuite/testcase_9003/cluster_config.json
----------------------------------------------------------------------
diff --git a/system_test/migration_tool_testsuite/testcase_9003/cluster_config.json b/system_test/migration_tool_testsuite/testcase_9003/cluster_config.json
deleted file mode 100644
index 766a001..0000000
--- a/system_test/migration_tool_testsuite/testcase_9003/cluster_config.json
+++ /dev/null
@@ -1,112 +0,0 @@
-{
-    "cluster_config": [
-        {
-            "entity_id": "0",
-            "hostname": "localhost",
-            "role": "zookeeper",
-            "cluster_name":"source",
-            "kafka_home": "system_test/migration_tool_testsuite/0.7",
-            "java_home": "default",
-            "jmx_port": "9990"
-        },
-        {
-            "entity_id": "1",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"source",
-            "kafka_home": "system_test/migration_tool_testsuite/0.7",
-            "java_home": "default",
-            "jmx_port": "9991"
-        },
-        {
-            "entity_id": "2",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"source",
-            "kafka_home": "system_test/migration_tool_testsuite/0.7",
-            "java_home": "default",
-            "jmx_port": "9992"
-        },
-        {
-            "entity_id": "3",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"source",
-            "kafka_home": "system_test/migration_tool_testsuite/0.7",
-            "java_home": "default",
-            "jmx_port": "9993"
-        },
-        {
-            "entity_id": "4",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9994"
-        },
-        {
-            "entity_id": "5",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9995"
-        },
-        {
-            "entity_id": "6",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9996"
-        },
-        {
-            "entity_id": "7",
-            "hostname": "localhost",
-            "role": "producer_performance",
-            "cluster_name":"source",
-            "kafka_home": "system_test/migration_tool_testsuite/0.7",
-            "java_home": "/export/apps/jdk/JDK-1_6_0_27",
-            "jmx_port": "9997"
-        },
-        {
-            "entity_id": "8",
-            "hostname": "localhost",
-            "role": "console_consumer",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9998"
-        },
-        {
-            "entity_id": "9",
-            "hostname": "localhost",
-            "role": "zookeeper",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9999"
-        },
-        {
-            "entity_id": "10",
-            "hostname": "localhost",
-            "role": "migration_tool",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9890"
-        },
-        {
-            "entity_id": "11",
-            "hostname": "localhost",
-            "role": "migration_tool",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9891"
-        }
-    ]
-}
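Cluster configs like this one are consumed by role: the driver's bounce logic above calls system_test_utils.get_data_from_list_of_dicts(clusterConfigList, "role", "migration_tool", "entity_id") to find the migration-tool entities. A plausible reimplementation of that helper, assuming it simply filters a list of dicts (a sketch, not the harness's actual code):

    import json

    def get_data_from_list_of_dicts(dicts, match_key, match_val, want_key):
        """Return want_key values from every dict whose match_key equals match_val."""
        return [d[want_key] for d in dicts if d.get(match_key) == match_val]

    with open("cluster_config.json") as f:             # assumed local path
        cluster = json.load(f)["cluster_config"]

    migration_tool_ids = get_data_from_list_of_dicts(
        cluster, "role", "migration_tool", "entity_id")
    print(migration_tool_ids)   # for the config above: ['10', '11']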

http://git-wip-us.apache.org/repos/asf/kafka/blob/084566b8/system_test/migration_tool_testsuite/testcase_9003/testcase_9003_properties.json
----------------------------------------------------------------------
diff --git a/system_test/migration_tool_testsuite/testcase_9003/testcase_9003_properties.json b/system_test/migration_tool_testsuite/testcase_9003/testcase_9003_properties.json
deleted file mode 100644
index 333256c..0000000
--- a/system_test/migration_tool_testsuite/testcase_9003/testcase_9003_properties.json
+++ /dev/null
@@ -1,138 +0,0 @@
-{
-  "description": {"01":"To Test : 'Replication with Migration Tool'",
-                  "02":"Set up 2 clusters such as : SOURCE => Migration Tool => TARGET",
-                  "03":"Produce and consume messages to a single topic - single partition.",
-                  "04":"This test sends messages to 3 replicas",
-                  "05":"At the end it verifies the log size and contents",
-                  "06":"Use a consumer to verify no message loss in TARGET cluster.",
-                  "07":"Producer dimensions : mode:async, acks:-1, comp:1",
-                  "08":"Log segment size    : 51200"
-  },
-  "testcase_args": {
-    "bounce_migration_tool": "true",
-    "bounced_entity_downtime_sec": "30",
-    "replica_factor": "3",
-    "num_partition": "1",
-    "num_iteration": "1",
-    "sleep_seconds_between_producer_calls": "1",
-    "message_producing_free_time_sec": "30",
-    "num_messages_to_produce_per_producer_call": "50"
-  },
-  "entities": [
-    {
-      "entity_id": "0",
-      "clientPort": "2188",
-      "dataDir": "/tmp/zookeeper_0",
-      "log_filename": "zookeeper_0.log",
-      "config_filename": "zookeeper_0.properties"
-    },
-    {
-      "entity_id": "1",
-      "port": "9091",
-      "brokerid": "1",
-      "version": "0.7",
-      "log.file.size": "51200",
-      "log.dir": "/tmp/kafka_server_1_logs",
-      "log_filename": "kafka_server_1.log",
-      "config_filename": "kafka_server_1.properties"
-    },
-    {
-      "entity_id": "2",
-      "port": "9092",
-      "brokerid": "2",
-      "version": "0.7",
-      "log.file.size": "51200",
-      "log.dir": "/tmp/kafka_server_2_logs",
-      "log_filename": "kafka_server_2.log",
-      "config_filename": "kafka_server_2.properties"
-    },
-    {
-      "entity_id": "3",
-      "port": "9093",
-      "brokerid": "3",
-      "version": "0.7",
-      "log.file.size": "51200",
-      "log.dir": "/tmp/kafka_server_3_logs",
-      "log_filename": "kafka_server_3.log",
-      "config_filename": "kafka_server_3.properties"
-    },
-    {
-      "entity_id": "4",
-      "port": "9094",
-      "broker.id": "4",
-      "log.segment.bytes": "51200",
-      "log.dir": "/tmp/kafka_server_4_logs",
-      "log_filename": "kafka_server_4.log",
-      "config_filename": "kafka_server_4.properties"
-    },
-    {
-      "entity_id": "5",
-      "port": "9095",
-      "broker.id": "5",
-      "log.segment.bytes": "51200",
-      "log.dir": "/tmp/kafka_server_5_logs",
-      "log_filename": "kafka_server_5.log",
-      "config_filename": "kafka_server_5.properties"
-    },
-    {
-      "entity_id": "6",
-      "port": "9096",
-      "broker.id": "6",
-      "log.segment.bytes": "51200",
-      "log.dir": "/tmp/kafka_server_6_logs",
-      "log_filename": "kafka_server_6.log",
-      "config_filename": "kafka_server_6.properties"
-    },
-    {
-      "entity_id": "7",
-      "topic": "test_1",
-      "threads": "5",
-      "compression-codec": "1",
-      "message-size": "500",
-      "message": "500",
-      "request-num-acks": "-1",
-      "async": "true",
-      "log_filename": "producer_performance_7.log",
-      "config_filename": "producer_performance_7.properties"
-    },
-    {
-      "entity_id": "8",
-      "topic": "test_1",
-      "group.id": "mytestgroup",
-      "consumer-timeout-ms": "10000",
-      "log_filename": "console_consumer_8.log",
-      "config_filename": "console_consumer_8.properties"
-    },
-    {
-      "entity_id": "9",
-      "clientPort": "2191",
-      "dataDir": "/tmp/zookeeper_9",
-      "log_filename": "zookeeper_9.log",
-      "config_filename": "zookeeper_9.properties"
-    },
-    {
-      "entity_id": "10",
-      "whitelist": ".*",
-      "num.producers": "2",
-      "num.streams": "2",
-      "producer.config": "migration_tool_testsuite/config/migration_producer.properties",
-      "consumer.config": "migration_tool_testsuite/config/migration_consumer.properties",
-      "zkclient.01.jar": "migration_tool_testsuite/0.7/lib/zkclient-0.1.jar",
-      "kafka.07.jar"   : "migration_tool_testsuite/0.7/lib/kafka-0.7.0.jar",
-      "log_filename": "migration_tool_10.log",
-      "config_filename": "migration_tool_10.properties"
-    },
-    {
-      "entity_id": "11",
-      "whitelist": ".*",
-      "num.producers": "2",
-      "num.streams": "2",
-      "producer.config": "migration_tool_testsuite/config/migration_producer.properties",
-      "consumer.config": "migration_tool_testsuite/config/migration_consumer.properties",
-      "zkclient.01.jar": "migration_tool_testsuite/0.7/lib/zkclient-0.1.jar",
-      "kafka.07.jar"   : "migration_tool_testsuite/0.7/lib/kafka-0.7.0.jar",
-      "log_filename": "migration_tool_11.log",
-      "config_filename": "migration_tool_11.properties"
-    }
-   ]
-}
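The bounce-related testcase_args above are what feed the driver's while-loop: bounce_migration_tool gates the bounce, bounced_entity_downtime_sec sets the sleep between stop and restart, and num_iteration bounds the loop. A small sketch of that mapping, using the same 1-second downtime fallback as the driver (the dict literal simply mirrors the JSON above):

    args = {
        "bounce_migration_tool": "true",
        "bounced_entity_downtime_sec": "30",
        "num_iteration": "1",
    }

    bounce = args.get("bounce_migration_tool", "false").lower() == "true"
    try:
        downtime_sec = int(args["bounced_entity_downtime_sec"])
    except (KeyError, ValueError):
        downtime_sec = 1              # same fallback as the driver above

    for i in range(1, int(args.get("num_iteration", "1")) + 1):
        if bounce:
            pass  # stop the migration tool, sleep downtime_sec, restart it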

http://git-wip-us.apache.org/repos/asf/kafka/blob/084566b8/system_test/migration_tool_testsuite/testcase_9004/cluster_config.json
----------------------------------------------------------------------
diff --git a/system_test/migration_tool_testsuite/testcase_9004/cluster_config.json b/system_test/migration_tool_testsuite/testcase_9004/cluster_config.json
deleted file mode 100644
index 766a001..0000000
--- a/system_test/migration_tool_testsuite/testcase_9004/cluster_config.json
+++ /dev/null
@@ -1,112 +0,0 @@
-{
-    "cluster_config": [
-        {
-            "entity_id": "0",
-            "hostname": "localhost",
-            "role": "zookeeper",
-            "cluster_name":"source",
-            "kafka_home": "system_test/migration_tool_testsuite/0.7",
-            "java_home": "default",
-            "jmx_port": "9990"
-        },
-        {
-            "entity_id": "1",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"source",
-            "kafka_home": "system_test/migration_tool_testsuite/0.7",
-            "java_home": "default",
-            "jmx_port": "9991"
-        },
-        {
-            "entity_id": "2",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"source",
-            "kafka_home": "system_test/migration_tool_testsuite/0.7",
-            "java_home": "default",
-            "jmx_port": "9992"
-        },
-        {
-            "entity_id": "3",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"source",
-            "kafka_home": "system_test/migration_tool_testsuite/0.7",
-            "java_home": "default",
-            "jmx_port": "9993"
-        },
-        {
-            "entity_id": "4",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9994"
-        },
-        {
-            "entity_id": "5",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9995"
-        },
-        {
-            "entity_id": "6",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9996"
-        },
-        {
-            "entity_id": "7",
-            "hostname": "localhost",
-            "role": "producer_performance",
-            "cluster_name":"source",
-            "kafka_home": "system_test/migration_tool_testsuite/0.7",
-            "java_home": "/export/apps/jdk/JDK-1_6_0_27",
-            "jmx_port": "9997"
-        },
-        {
-            "entity_id": "8",
-            "hostname": "localhost",
-            "role": "console_consumer",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9998"
-        },
-        {
-            "entity_id": "9",
-            "hostname": "localhost",
-            "role": "zookeeper",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9999"
-        },
-        {
-            "entity_id": "10",
-            "hostname": "localhost",
-            "role": "migration_tool",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9890"
-        },
-        {
-            "entity_id": "11",
-            "hostname": "localhost",
-            "role": "migration_tool",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9891"
-        }
-    ]
-}

http://git-wip-us.apache.org/repos/asf/kafka/blob/084566b8/system_test/migration_tool_testsuite/testcase_9004/testcase_9004_properties.json
----------------------------------------------------------------------
diff --git a/system_test/migration_tool_testsuite/testcase_9004/testcase_9004_properties.json b/system_test/migration_tool_testsuite/testcase_9004/testcase_9004_properties.json
deleted file mode 100644
index b2a6e85..0000000
--- a/system_test/migration_tool_testsuite/testcase_9004/testcase_9004_properties.json
+++ /dev/null
@@ -1,138 +0,0 @@
-{
-  "description": {"01":"To Test : 'Replication with Migration Tool'",
-                  "02":"Set up 2 clusters such as : SOURCE => Migration Tool => TARGET",
-                  "03":"Produce and consume messages to a single topic - single partition.",
-                  "04":"This test sends messages to 3 replicas",
-                  "05":"At the end it verifies the log size and contents",
-                  "06":"Use a consumer to verify no message loss in TARGET cluster.",
-                  "07":"Producer dimensions : mode:async, acks:1, comp:1",
-                  "08":"Log segment size    : 51200"
-  },
-  "testcase_args": {
-    "bounce_migration_tool": "true",
-    "bounced_entity_downtime_sec": "30",
-    "replica_factor": "3",
-    "num_partition": "1",
-    "num_iteration": "1",
-    "sleep_seconds_between_producer_calls": "1",
-    "message_producing_free_time_sec": "30",
-    "num_messages_to_produce_per_producer_call": "50"
-  },
-  "entities": [
-    {
-      "entity_id": "0",
-      "clientPort": "2188",
-      "dataDir": "/tmp/zookeeper_0",
-      "log_filename": "zookeeper_0.log",
-      "config_filename": "zookeeper_0.properties"
-    },
-    {
-      "entity_id": "1",
-      "port": "9091",
-      "brokerid": "1",
-      "version": "0.7",
-      "log.file.size": "51200",
-      "log.dir": "/tmp/kafka_server_1_logs",
-      "log_filename": "kafka_server_1.log",
-      "config_filename": "kafka_server_1.properties"
-    },
-    {
-      "entity_id": "2",
-      "port": "9092",
-      "brokerid": "2",
-      "version": "0.7",
-      "log.file.size": "51200",
-      "log.dir": "/tmp/kafka_server_2_logs",
-      "log_filename": "kafka_server_2.log",
-      "config_filename": "kafka_server_2.properties"
-    },
-    {
-      "entity_id": "3",
-      "port": "9093",
-      "brokerid": "3",
-      "version": "0.7",
-      "log.file.size": "51200",
-      "log.dir": "/tmp/kafka_server_3_logs",
-      "log_filename": "kafka_server_3.log",
-      "config_filename": "kafka_server_3.properties"
-    },
-    {
-      "entity_id": "4",
-      "port": "9094",
-      "broker.id": "4",
-      "log.segment.bytes": "51200",
-      "log.dir": "/tmp/kafka_server_4_logs",
-      "log_filename": "kafka_server_4.log",
-      "config_filename": "kafka_server_4.properties"
-    },
-    {
-      "entity_id": "5",
-      "port": "9095",
-      "broker.id": "5",
-      "log.segment.bytes": "51200",
-      "log.dir": "/tmp/kafka_server_5_logs",
-      "log_filename": "kafka_server_5.log",
-      "config_filename": "kafka_server_5.properties"
-    },
-    {
-      "entity_id": "6",
-      "port": "9096",
-      "broker.id": "6",
-      "log.segment.bytes": "51200",
-      "log.dir": "/tmp/kafka_server_6_logs",
-      "log_filename": "kafka_server_6.log",
-      "config_filename": "kafka_server_6.properties"
-    },
-    {
-      "entity_id": "7",
-      "topic": "test_1",
-      "threads": "5",
-      "compression-codec": "1",
-      "message-size": "500",
-      "message": "500",
-      "request-num-acks": "1",
-      "async": "true",
-      "log_filename": "producer_performance_7.log",
-      "config_filename": "producer_performance_7.properties"
-    },
-    {
-      "entity_id": "8",
-      "topic": "test_1",
-      "group.id": "mytestgroup",
-      "consumer-timeout-ms": "10000",
-      "log_filename": "console_consumer_8.log",
-      "config_filename": "console_consumer_8.properties"
-    },
-    {
-      "entity_id": "9",
-      "clientPort": "2191",
-      "dataDir": "/tmp/zookeeper_9",
-      "log_filename": "zookeeper_9.log",
-      "config_filename": "zookeeper_9.properties"
-    },
-    {
-      "entity_id": "10",
-      "whitelist": ".*",
-      "num.producers": "2",
-      "num.streams": "2",
-      "producer.config": "migration_tool_testsuite/config/migration_producer.properties",
-      "consumer.config": "migration_tool_testsuite/config/migration_consumer.properties",
-      "zkclient.01.jar": "migration_tool_testsuite/0.7/lib/zkclient-0.1.jar",
-      "kafka.07.jar"   : "migration_tool_testsuite/0.7/lib/kafka-0.7.0.jar",
-      "log_filename": "migration_tool_10.log",
-      "config_filename": "migration_tool_10.properties"
-    },
-    {
-      "entity_id": "11",
-      "whitelist": ".*",
-      "num.producers": "2",
-      "num.streams": "2",
-      "producer.config": "migration_tool_testsuite/config/migration_producer.properties",
-      "consumer.config": "migration_tool_testsuite/config/migration_consumer.properties",
-      "zkclient.01.jar": "migration_tool_testsuite/0.7/lib/zkclient-0.1.jar",
-      "kafka.07.jar"   : "migration_tool_testsuite/0.7/lib/kafka-0.7.0.jar",
-      "log_filename": "migration_tool_11.log",
-      "config_filename": "migration_tool_11.properties"
-    }
-   ]
-}

http://git-wip-us.apache.org/repos/asf/kafka/blob/084566b8/system_test/migration_tool_testsuite/testcase_9005/cluster_config.json
----------------------------------------------------------------------
diff --git a/system_test/migration_tool_testsuite/testcase_9005/cluster_config.json b/system_test/migration_tool_testsuite/testcase_9005/cluster_config.json
deleted file mode 100644
index 9fcb3b0..0000000
--- a/system_test/migration_tool_testsuite/testcase_9005/cluster_config.json
+++ /dev/null
@@ -1,141 +0,0 @@
-{
-    "cluster_config": [
-        {
-            "entity_id": "0",
-            "hostname": "localhost",
-            "role": "zookeeper",
-            "cluster_name":"source",
-            "kafka_home": "system_test/migration_tool_testsuite/0.7",
-            "java_home": "default",
-            "jmx_port": "9900"
-        },
-        {
-            "entity_id": "1",
-            "hostname": "localhost",
-            "role": "zookeeper",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9901"
-        },
-
-
-        {
-            "entity_id": "2",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"source",
-            "kafka_home": "system_test/migration_tool_testsuite/0.7",
-            "java_home": "default",
-            "jmx_port": "9902"
-        },
-        {
-            "entity_id": "3",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"source",
-            "kafka_home": "system_test/migration_tool_testsuite/0.7",
-            "java_home": "default",
-            "jmx_port": "9903"
-        },
-        {
-            "entity_id": "4",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"source",
-            "kafka_home": "system_test/migration_tool_testsuite/0.7",
-            "java_home": "default",
-            "jmx_port": "9904"
-        },
-
-
-        {
-            "entity_id": "5",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9905"
-        },
-        {
-            "entity_id": "6",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9906"
-        },
-        {
-            "entity_id": "7",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9907"
-        },
-
-
-        {
-            "entity_id": "8",
-            "hostname": "localhost",
-            "role": "producer_performance",
-            "cluster_name":"source",
-            "kafka_home": "system_test/migration_tool_testsuite/0.7",
-            "java_home": "/export/apps/jdk/JDK-1_6_0_27",
-            "jmx_port": "9908"
-        },
-        {
-            "entity_id": "9",
-            "hostname": "localhost",
-            "role": "producer_performance",
-            "cluster_name":"source",
-            "kafka_home": "system_test/migration_tool_testsuite/0.7",
-            "java_home": "/export/apps/jdk/JDK-1_6_0_27",
-            "jmx_port": "9909"
-        },
-
-
-        {
-            "entity_id": "10",
-            "hostname": "localhost",
-            "role": "console_consumer",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9910"
-        },
-        {
-            "entity_id": "11",
-            "hostname": "localhost",
-            "role": "console_consumer",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9911"
-        },
-
-
-        {
-            "entity_id": "12",
-            "hostname": "localhost",
-            "role": "migration_tool",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9912"
-        },
-        {
-            "entity_id": "13",
-            "hostname": "localhost",
-            "role": "migration_tool",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9913"
-        }
-
-    ]
-}

http://git-wip-us.apache.org/repos/asf/kafka/blob/084566b8/system_test/migration_tool_testsuite/testcase_9005/testcase_9005_properties.json
----------------------------------------------------------------------
diff --git a/system_test/migration_tool_testsuite/testcase_9005/testcase_9005_properties.json b/system_test/migration_tool_testsuite/testcase_9005/testcase_9005_properties.json
deleted file mode 100644
index ddbc905..0000000
--- a/system_test/migration_tool_testsuite/testcase_9005/testcase_9005_properties.json
+++ /dev/null
@@ -1,168 +0,0 @@
-{
-  "description": {"01":"To Test : 'Replication with Migration Tool'",
-                  "02":"Set up 2 clusters such as : SOURCE => Migration Tool => TARGET",
-                  "03":"Produce and consume messages to 2 topics - 2 partitions.",
-                  "04":"This test sends messages to 3 replicas",
-                  "05":"At the end it verifies the log size and contents",
-                  "06":"Use a consumer to verify no message loss in TARGET cluster.",
-                  "07":"Producer dimensions : mode:async, acks:-1, comp:1",
-                  "08":"Log segment size    : 51200"
-  },
-  "testcase_args": {
-    "bounce_migration_tool": "true",
-    "bounced_entity_downtime_sec": "30",
-    "replica_factor": "3",
-    "num_partition": "2",
-    "num_iteration": "1",
-    "sleep_seconds_between_producer_calls": "1",
-    "message_producing_free_time_sec": "30",
-    "num_messages_to_produce_per_producer_call": "50"
-  },
-  "entities": [
-    {
-      "entity_id": "0",
-      "clientPort": "2188",
-      "dataDir": "/tmp/zookeeper_0",
-      "log_filename": "zookeeper_0.log",
-      "config_filename": "zookeeper_0.properties"
-    },
-    {
-      "entity_id": "1",
-      "clientPort": "2191",
-      "dataDir": "/tmp/zookeeper_1",
-      "log_filename": "zookeeper_1.log",
-      "config_filename": "zookeeper_1.properties"
-    },
-
-
-    {
-      "entity_id": "2",
-      "port": "9091",
-      "brokerid": "1",
-      "version": "0.7",
-      "log.file.size": "51200",
-      "log.dir": "/tmp/kafka_server_2_logs",
-      "log_filename": "kafka_server_2.log",
-      "config_filename": "kafka_server_2.properties"
-    },
-    {
-      "entity_id": "3",
-      "port": "9092",
-      "brokerid": "2",
-      "version": "0.7",
-      "log.file.size": "51200",
-      "log.dir": "/tmp/kafka_server_3_logs",
-      "log_filename": "kafka_server_3.log",
-      "config_filename": "kafka_server_3.properties"
-    },
-    {
-      "entity_id": "4",
-      "port": "9093",
-      "brokerid": "3",
-      "version": "0.7",
-      "log.file.size": "51200",
-      "log.dir": "/tmp/kafka_server_4_logs",
-      "log_filename": "kafka_server_4.log",
-      "config_filename": "kafka_server_4.properties"
-    },
-
-
-    {
-      "entity_id": "5",
-      "port": "9094",
-      "broker.id": "4",
-      "log.segment.bytes": "51200",
-      "log.dir": "/tmp/kafka_server_5_logs",
-      "log_filename": "kafka_server_5.log",
-      "config_filename": "kafka_server_5.properties"
-    },
-    {
-      "entity_id": "6",
-      "port": "9095",
-      "broker.id": "5",
-      "log.segment.bytes": "51200",
-      "log.dir": "/tmp/kafka_server_6_logs",
-      "log_filename": "kafka_server_6.log",
-      "config_filename": "kafka_server_6.properties"
-    },
-    {
-      "entity_id": "7",
-      "port": "9096",
-      "broker.id": "6",
-      "log.segment.bytes": "51200",
-      "log.dir": "/tmp/kafka_server_7_logs",
-      "log_filename": "kafka_server_7.log",
-      "config_filename": "kafka_server_7.properties"
-    },
-
-
-    {
-      "entity_id": "8",
-      "topic": "test_1",
-      "threads": "5",
-      "compression-codec": "1",
-      "message-size": "500",
-      "message": "500",
-      "request-num-acks": "-1",
-      "async": "true",
-      "log_filename": "producer_performance_8.log",
-      "config_filename": "producer_performance_8.properties"
-    },
-    {
-      "entity_id": "9",
-      "topic": "test_2",
-      "threads": "5",
-      "compression-codec": "1",
-      "message-size": "500",
-      "message": "500",
-      "request-num-acks": "-1",
-      "async": "true",
-      "log_filename": "producer_performance_9.log",
-      "config_filename": "producer_performance_9.properties"
-    },
-
-
-    {
-      "entity_id": "10",
-      "topic": "test_1",
-      "group.id": "mytestgroup",
-      "consumer-timeout-ms": "10000",
-      "log_filename": "console_consumer_10.log",
-      "config_filename": "console_consumer_10.properties"
-    },
-    {
-      "entity_id": "11",
-      "topic": "test_2",
-      "group.id": "mytestgroup",
-      "consumer-timeout-ms": "10000",
-      "log_filename": "console_consumer_11.log",
-      "config_filename": "console_consumer_11.properties"
-    },
-
-
-    {
-      "entity_id": "12",
-      "whitelist": ".*",
-      "num.producers": "2",
-      "num.streams": "2",
-      "producer.config": "migration_tool_testsuite/config/migration_producer.properties",
-      "consumer.config": "migration_tool_testsuite/config/migration_consumer.properties",
-      "zkclient.01.jar": "migration_tool_testsuite/0.7/lib/zkclient-0.1.jar",
-      "kafka.07.jar"   : "migration_tool_testsuite/0.7/lib/kafka-0.7.0.jar",
-      "log_filename": "migration_tool_12.log",
-      "config_filename": "migration_tool_12.properties"
-    },
-    {
-      "entity_id": "13",
-      "whitelist": ".*",
-      "num.producers": "2",
-      "num.streams": "2",
-      "producer.config": "migration_tool_testsuite/config/migration_producer.properties",
-      "consumer.config": "migration_tool_testsuite/config/migration_consumer.properties",
-      "zkclient.01.jar": "migration_tool_testsuite/0.7/lib/zkclient-0.1.jar",
-      "kafka.07.jar"   : "migration_tool_testsuite/0.7/lib/kafka-0.7.0.jar",
-      "log_filename": "migration_tool_13.log",
-      "config_filename": "migration_tool_13.properties"
-    }
-   ]
-}